filename | text
---|---|
the-stack_0_2532 | import os
import subprocess
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from joblib import Parallel, delayed
def main():
file_sra, in_dir, out_dir, n_j = getArgs()
sra_list = loadAccessions(file_sra)
Parallel(n_jobs=n_j, prefer="threads")(
delayed(runAriba)(sra, in_dir, out_dir) for sra in sra_list
)
def loadAccessions(file_sra):
"""
    Loads the list of SRA accession numbers from file_sra.
    There is no json module in the ariba docker image; otherwise we could
    simply use: sra_list = json.load(open(file_sra, "r"))
"""
sra_list = []
text = open(file_sra).read()
tmp = text.split("\n")
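    # Format assumption (inferred from the slicing below, not stated in the
    # original): every line except the first and the last holds one
    # double-quoted accession, e.g. a JSON-style listing such as '  "SRR1234567",'.
    # The slice extracts the text between the first quote and the next quote
    # found at or after position 8.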
sra_list = [k[(k.index('"') + 1) : k.index('"', 8)] for k in tmp[1:-1]]
return sra_list
def getArgs():
parser = ArgumentParser(
formatter_class=RawTextHelpFormatter,
prog="runAribaInLoop_withBam.py",
description="Run Ariba for isolates to output variant report files and intermediate results",
)
parser.add_argument("-f", "--fSRAs", dest="fileSRAs")
parser.add_argument("-i", "--iDir", dest="inDir")
parser.add_argument("-o", "--oDir", dest="outDir")
    parser.add_argument("-n", "--n_jobs", dest="nJobs", type=int)  # parse as int so it can be passed to joblib's n_jobs
args = parser.parse_args()
f_sra = args.fileSRAs
i_dir = args.inDir
o_dir = args.outDir
n_job = args.nJobs
return f_sra, i_dir, o_dir, n_job
def runAriba(sra, in_dir, out_dir):
# print (sra)
fastq_dir = in_dir + "/"
reads1 = fastq_dir + sra + "_1.fastq"
reads2 = fastq_dir + sra + "_2.fastq"
if os.path.isfile(reads1) and os.path.isfile(reads2):
out_dir = out_dir + "/outRun_" + sra
if not (os.path.isfile(out_dir + "/report.tsv")):
if os.path.isdir(out_dir):
subprocess.run(["rm", "-r", out_dir])
cmd = [
"ariba",
"run",
"--noclean",
"out.card.prepareref",
reads1,
reads2,
out_dir,
]
with open("./aribaRunLog.txt", "a+") as f:
subprocess.call(cmd, stdout=f)
else:
print("UGH! invalid path " + reads1 + " or " + reads2)
with open("./sra_paired_read_notFound.txt", "a+") as l:
l.write(sra + "\n")
if __name__ == "__main__":
main()
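# Example invocation (hypothetical file and directory names):
#   python runAribaInLoop_withBam.py -f sra_accessions.txt -i fastq_dir -o ariba_out -n 4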
|
the-stack_0_2533 | import math
import torch.nn as nn
from mmcv.runner import ModuleList
from mmocr.models.builder import ENCODERS
from mmocr.models.textrecog.layers import (Adaptive2DPositionalEncoding,
SatrnEncoderLayer)
from .base_encoder import BaseEncoder
@ENCODERS.register_module()
class SatrnEncoder(BaseEncoder):
"""Implement encoder for SATRN, see `SATRN.
<https://arxiv.org/abs/1910.04396>`_.
"""
def __init__(self,
n_layers=12,
n_head=8,
d_k=64,
d_v=64,
d_model=512,
n_position=100,
d_inner=256,
dropout=0.1,
init_cfg=None,
**kwargs):
super().__init__(init_cfg=init_cfg)
self.d_model = d_model
self.position_enc = Adaptive2DPositionalEncoding(
d_hid=d_model,
n_height=n_position,
n_width=n_position,
dropout=dropout)
self.layer_stack = ModuleList([
SatrnEncoderLayer(
d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)
])
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, feat, img_metas=None):
valid_ratios = [1.0 for _ in range(feat.size(0))]
if img_metas is not None:
valid_ratios = [
img_meta.get('valid_ratio', 1.0) for img_meta in img_metas
]
feat += self.position_enc(feat)
n, c, h, w = feat.size()
mask = feat.new_zeros((n, h, w))
for i, valid_ratio in enumerate(valid_ratios):
valid_width = min(w, math.ceil(w * valid_ratio))
mask[i, :, :valid_width] = 1
mask = mask.view(n, h * w)
feat = feat.view(n, c, h * w)
output = feat.permute(0, 2, 1).contiguous()
for enc_layer in self.layer_stack:
output = enc_layer(output, h, w, mask)
output = self.layer_norm(output)
output = output.permute(0, 2, 1).contiguous()
output = output.view(n, self.d_model, h, w)
return output
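# A minimal usage sketch (comments only; assumes mmocr/torch are installed and
# that the input feature map has d_model channels):
#   import torch
#   encoder = SatrnEncoder(n_layers=12, n_head=8, d_model=512)
#   out = encoder(torch.randn(1, 512, 8, 32))  # -> tensor of shape (1, 512, 8, 32)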
|
the-stack_0_2536 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SqueezeNet implementation with TPU support.
Training loop and input pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
from hyperparameters import common_hparams_flags
from hyperparameters import common_tpu_flags
from hyperparameters import flags_to_params
from hyperparameters import params_dict
import data_pipeline
import squeezenet_model
from configs import squeezenet_config
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
common_tpu_flags.define_common_tpu_flags()
common_hparams_flags.define_common_hparams_flags()
flags.DEFINE_integer("num_examples_per_epoch", None,
"Number of examples to train per epoch.")
flags.DEFINE_integer("num_eval_examples", None,
"Number of examples to evaluate per run.")
flags.DEFINE_float("init_learning_rate", None, "Learning rate.")
flags.DEFINE_float("end_learning_rate", None, "The minimal end learning rate.")
flags.DEFINE_integer("num_epochs", None,
"Number of epochs of the training set to process.")
flags.DEFINE_integer("num_evals", None,
"How many times to run an evaluation during training.")
flags.DEFINE_integer(
"num_cores_per_replica", default=None,
help=("Number of TPU cores in total. For a single TPU device, this is 8"
" because each TPU has 4 chips each with 2 cores."))
flags.DEFINE_bool(
"use_async_checkpointing", default=None, help=("Enable async checkpoint"))
flags.DEFINE_integer(
"num_classes", default=None, help="Number of classes, at least 2")
FLAGS = flags.FLAGS
def main(unused_argv):
params = params_dict.ParamsDict(
squeezenet_config.SQUEEZENET_CFG,
squeezenet_config.SQUEEZENET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=True)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params = flags_to_params.override_params_from_input_flags(params, FLAGS)
total_steps = ((params.train.num_epochs * params.train.num_examples_per_epoch)
// params.train.train_batch_size)
params.override({
"train": {
"total_steps": total_steps
},
"eval": {
"num_steps_per_eval": (total_steps // params.eval.num_evals)
},
}, is_strict=False)
params.validate()
params.lock()
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  save_checkpoints_steps = None  # avoid an unbound local when async checkpointing is enabled
  if not params.use_async_checkpointing:
    save_checkpoints_steps = max(5000, params.train.iterations_per_loop)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=params.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=params.train.iterations_per_loop,
num_shards=params.train.num_cores_per_replica,
),
)
estimator = contrib_tpu.TPUEstimator(
model_fn=squeezenet_model.model_fn,
use_tpu=params.use_tpu,
config=run_config,
train_batch_size=params.train.train_batch_size,
eval_batch_size=params.eval.eval_batch_size,
params=params.as_dict(),
)
for eval_cycle in range(params.eval.num_evals):
current_cycle_last_train_step = ((eval_cycle + 1) *
params.eval.num_steps_per_eval)
estimator.train(
input_fn=data_pipeline.InputReader(FLAGS.data_dir, is_training=True),
steps=current_cycle_last_train_step)
tf.logging.info("Running evaluation")
tf.logging.info("%s",
estimator.evaluate(
input_fn=data_pipeline.InputReader(
FLAGS.data_dir, is_training=False),
steps=(params.eval.num_eval_examples //
params.eval.eval_batch_size)
))
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
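# Example invocation (hypothetical resource names; --tpu, --data_dir and
# --model_dir are assumed to come from the shared hyperparameters/TPU flag
# definitions imported above):
#   python squeezenet_main.py --tpu=my-tpu --data_dir=gs://bucket/imagenet \
#       --model_dir=gs://bucket/squeezenet --num_epochs=10 --num_evals=5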
|
the-stack_0_2538 | import os
from .base import NullBrowser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor # noqa: F401
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {
"product": "servo",
"check_args": "check_args",
"browser": "ServoBrowser",
"executor": {
"testharness": "ServoTestharnessExecutor",
"reftest": "ServoRefTestExecutor",
"wdspec": "ServoWdspecExecutor",
},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"update_properties": "update_properties",
}
def check_args(**kwargs):
require_arg(kwargs, "binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {
"binary": kwargs["binary"],
"debug_info": kwargs["debug_info"],
"binary_args": kwargs["binary_args"],
"user_stylesheets": kwargs.get("user_stylesheets"),
"ca_certificate_path": config.ssl_config["ca_cert_path"],
}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
rv = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data, **kwargs)
rv["pause_after_test"] = kwargs["pause_after_test"]
if test_type == "wdspec":
rv["capabilities"] = {}
return rv
def env_extras(**kwargs):
return []
def env_options():
return {"server_host": "127.0.0.1",
"bind_address": False,
"testharnessreport": "testharnessreport-servo.js",
"supports_debugger": True}
def update_properties():
return ["debug", "os", "version", "processor", "bits"], None
class ServoBrowser(NullBrowser):
def __init__(self, logger, binary, debug_info=None, binary_args=None,
user_stylesheets=None, ca_certificate_path=None):
NullBrowser.__init__(self, logger)
self.binary = binary
self.debug_info = debug_info
self.binary_args = binary_args or []
self.user_stylesheets = user_stylesheets or []
self.ca_certificate_path = ca_certificate_path
def executor_browser(self):
return ExecutorBrowser, {
"binary": self.binary,
"debug_info": self.debug_info,
"binary_args": self.binary_args,
"user_stylesheets": self.user_stylesheets,
"ca_certificate_path": self.ca_certificate_path,
}
|
the-stack_0_2540 | import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="yanchor", parent_name="layout.legend", **kwargs):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "top", "middle", "bottom"]),
**kwargs
)
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="y", parent_name="layout.legend", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xanchor", parent_name="layout.legend", **kwargs):
super(XanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "left", "center", "right"]),
**kwargs
)
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="x", parent_name="layout.legend", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ValignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="valign", parent_name="layout.legend", **kwargs):
super(ValignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["top", "middle", "bottom"]),
**kwargs
)
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="layout.legend", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TraceorderValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="traceorder", parent_name="layout.legend", **kwargs):
super(TraceorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
extras=kwargs.pop("extras", ["normal"]),
flags=kwargs.pop("flags", ["reversed", "grouped"]),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TracegroupgapValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="tracegroupgap", parent_name="layout.legend", **kwargs
):
super(TracegroupgapValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="layout.legend", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend's title font.
side
Determines the location of legend's title with
respect to the legend items. Defaulted to "top"
with `orientation` is "h". Defaulted to "left"
with `orientation` is "v". The *top left*
options could be used to expand legend area in
both x and y sides.
text
Sets the title of the legend.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="orientation", parent_name="layout.legend", **kwargs
):
super(OrientationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["v", "h"]),
**kwargs
)
import _plotly_utils.basevalidators
class ItemsizingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="itemsizing", parent_name="layout.legend", **kwargs):
super(ItemsizingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["trace", "constant"]),
**kwargs
)
import _plotly_utils.basevalidators
class ItemdoubleclickValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="itemdoubleclick", parent_name="layout.legend", **kwargs
):
super(ItemdoubleclickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["toggle", "toggleothers", False]),
**kwargs
)
import _plotly_utils.basevalidators
class ItemclickValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="itemclick", parent_name="layout.legend", **kwargs):
super(ItemclickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["toggle", "toggleothers", False]),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="layout.legend", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="borderwidth", parent_name="layout.legend", **kwargs
):
super(BorderwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="layout.legend", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="layout.legend", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
**kwargs
)
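# These validators are exercised indirectly whenever layout.legend properties
# are set on a figure.  An illustrative sketch (assumes plotly is installed;
# the property names mirror the validators defined above):
#   import plotly.graph_objects as go
#   fig = go.Figure()
#   fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01,
#                                 orientation="v", bgcolor="white"))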
|
the-stack_0_2541 | # -*- coding: utf-8 -*-
"""Implementation of a trie data structure.
`Trie data structure <http://en.wikipedia.org/wiki/Trie>`_, also known as radix
or prefix tree, is a tree associating keys to values where all the descendants
of a node have a common prefix (associated with that node).
The trie module contains :class:`pygtrie.Trie`, :class:`pygtrie.CharTrie` and
:class:`pygtrie.StringTrie` classes each implementing a mutable mapping
interface, i.e. :class:`dict` interface. As such, in most circumstances,
:class:`pygtrie.Trie` could be used as a drop-in replacement for
a :class:`dict`, but the prefix nature of the data structure is trie’s real
strength.
The module also contains :class:`pygtrie.PrefixSet` class which uses a trie to
store a set of prefixes such that a key is contained in the set if it or its
prefix is stored in the set.
Features
--------
- A full mutable mapping implementation.
- Supports iterating over as well as deleting a subtrie.
- Supports prefix checking as well as shortest and longest prefix
look-up.
- Extensible for any kind of user-defined keys.
- A PrefixSet supports “all keys starting with given prefix” logic.
- Can store any value including None.
For some simple examples see ``example.py`` file.
"""
__author__ = 'Michal Nazarewicz <[email protected]>'
__copyright__ = 'Copyright 2014 Google Inc.'
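# A minimal usage sketch (doctest-style comments; sibling order of keys() is
# unspecified by default, hence the sorted() call):
#   >>> t = CharTrie()
#   >>> t['cat'] = 1
#   >>> t['car'] = 2
#   >>> t.has_subtrie('ca')
#   True
#   >>> t.longest_prefix('caterpillar')
#   ('cat', 1)
#   >>> sorted(t.keys(prefix='ca'))
#   ['car', 'cat']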
try:
import collections.abc as _collections
except ImportError:
import collections as _collections
# Python 2.x and 3.x compatibility stuff
if hasattr(dict, 'iteritems'):
# pylint: disable=invalid-name
_iteritems = lambda d: d.iteritems()
_iterkeys = lambda d: d.iterkeys()
def _sorted_iteritems(d):
"""Returns d's items in sorted order."""
items = d.items()
items.sort()
return iter(items)
else:
_sorted_iteritems = lambda d: sorted(d.items()) # pylint: disable=invalid-name
_iteritems = lambda d: iter(d.items()) # pylint: disable=invalid-name
_iterkeys = lambda d: iter(d.keys()) # pylint: disable=invalid-name
try:
_basestring = basestring
except NameError:
_basestring = str
class ShortKeyError(KeyError):
"""Raised when given key is a prefix of a longer key."""
pass
_SENTINEL = object()
class _Node(object):
"""A single node of a trie.
Stores value associated with the node and dictionary of children.
"""
__slots__ = ('children', 'value')
def __init__(self):
self.children = {}
self.value = _SENTINEL
def iterate(self, path, shallow, iteritems):
"""Yields all the nodes with values associated to them in the trie.
Args:
path: Path leading to this node. Used to construct the key when
returning value of this node and as a prefix for children.
shallow: Perform a shallow traversal, i.e. do not yield nodes if
their prefix has been yielded.
iteritems: A function taking dictionary as argument and returning
iterator over its items. Something other than dict.iteritems
may be given to enable sorting.
Yields:
``(path, value)`` tuples.
"""
# Use iterative function with stack on the heap so we don't hit Python's
# recursion depth limits.
node = self
stack = []
while True:
if node.value is not _SENTINEL:
yield path, node.value
if (not shallow or node.value is _SENTINEL) and node.children:
stack.append(iter(iteritems(node.children)))
path.append(None)
while True:
try:
step, node = next(stack[-1])
path[-1] = step
break
except StopIteration:
stack.pop()
path.pop()
except IndexError:
return
def traverse(self, node_factory, path_conv, path, iteritems):
"""Traverses the node and returns another type of node from factory.
Args:
node_factory: Callable function to construct new nodes.
path_conv: Callable function to convert node path to a key.
path: Current path for this node.
iteritems: A function taking dictionary as argument and returning
iterator over its items. Something other than dict.iteritems
may be given to enable sorting.
Returns:
An object constructed by calling node_factory(path_conv, path,
children, value=...), where children are constructed by node_factory
from the children of this node. There doesn't need to be 1:1
correspondence between original nodes in the trie and constructed
nodes (see make_test_node_and_compress in test.py).
"""
def children():
"""Recursively traverses all of node's children."""
for step, node in iteritems(self.children):
yield node.traverse(node_factory, path_conv, path + [step],
iteritems)
args = [path_conv, tuple(path), children()]
if self.value is not _SENTINEL:
args.append(self.value)
return node_factory(*args)
def __eq__(self, other):
# Like iterate, we don't recurse so this works on deep tries.
a, b = self, other
stack = []
while True:
if a.value != b.value or len(a.children) != len(b.children):
return False
if a.children:
stack.append((_iteritems(a.children), b.children))
while True:
try:
key, a = next(stack[-1][0])
b = stack[-1][1].get(key)
if b is None:
return False
break
except StopIteration:
stack.pop()
except IndexError:
return True
return self.value == other.value and self.children == other.children
def __ne__(self, other):
return not self.__eq__(other)
def __bool__(self):
return bool(self.value is not _SENTINEL or self.children)
__nonzero__ = __bool__
__hash__ = None
def __getstate__(self):
"""Get state used for pickling.
The state is encoded as a list of simple commands which consist of an
integer and some command-dependent number of arguments. The commands
modify what the current node is by navigating the trie up and down and
setting node values. Possible commands are:
* [n, step0, step1, ..., stepn-1, value], for n >= 0, specifies step
needed to reach the next current node as well as its new value. There
is no way to create a child node without setting its (or its
descendant's) value.
* [-n], for -n < 0, specifies to go up n steps in the trie.
When encoded as a state, the commands are flattened into a single list.
For example::
[ 0, 'Root',
2, 'Foo', 'Bar', 'Root/Foo/Bar Node',
-1,
1, 'Baz', 'Root/Foo/Baz Node',
-2,
1, 'Qux', 'Root/Qux Node' ]
Creates the following hierarchy::
-* value: Root
+-- Foo --* no value
| +-- Bar -- * value: Root/Foo/Bar Node
| +-- Baz -- * value: Root/Foo/Baz Node
+-- Qux -- * value: Root/Qux Node
Returns:
A pickable state which can be passed to :func:`_Node.__setstate__`
to reconstruct the node and its full hierarchy.
"""
# Like iterate, we don't recurse so pickling works on deep tries.
state = [] if self.value is _SENTINEL else [0]
last_cmd = 0
node = self
stack = []
while True:
if node.value is not _SENTINEL:
last_cmd = 0
state.append(node.value)
stack.append(_iteritems(node.children))
while True:
try:
step, node = next(stack[-1])
except StopIteration:
if last_cmd < 0:
state[-1] -= 1
else:
last_cmd = -1
state.append(-1)
stack.pop()
continue
except IndexError:
if last_cmd < 0:
state.pop()
return state
if last_cmd > 0:
last_cmd += 1
state[-last_cmd] += 1
else:
last_cmd = 1
state.append(1)
state.append(step)
break
def __setstate__(self, state):
"""Unpickles node. See :func:`_Node.__getstate__`."""
self.__init__()
state = iter(state)
stack = [self]
for cmd in state:
if cmd < 0:
del stack[cmd:]
else:
while cmd > 0:
stack.append(type(self)())
stack[-2].children[next(state)] = stack[-1]
cmd -= 1
stack[-1].value = next(state)
_NONE_PAIR = type('NonePair', (tuple,), {
'__nonzero__': lambda _: False,
'__bool__': lambda _: False,
'__slots__': (),
})((None, None))
class Trie(_collections.MutableMapping):
"""A trie implementation with dict interface plus some extensions.
Keys used with the :class:`pygtrie.Trie` must be iterable, yielding hashable
objects. In other words, for a given key, ``dict.fromkeys(key)`` must be
valid.
In particular, strings work fine as trie keys, however when getting keys
back from iterkeys() method for example, instead of strings, tuples of
characters are produced. For that reason, :class:`pygtrie.CharTrie` or
:class:`pygtrie.StringTrie` may be preferred when using
:class:`pygtrie.Trie` with string keys.
"""
def __init__(self, *args, **kwargs):
"""Initialises the trie.
Arguments are interpreted the same way :func:`Trie.update` interprets
them.
"""
self._root = _Node()
self._sorted = False
self.update(*args, **kwargs)
@property
def _iteritems(self):
"""Returns function yielding over dict's items possibly in sorted order.
Returns:
A function iterating over items of a dictionary given as an
argument. If child nodes sorting has been enabled (via
:func:`Trie.enable_sorting` method), returned function will go
through the items in sorted order..
"""
return _sorted_iteritems if self._sorted else _iteritems
def enable_sorting(self, enable=True):
"""Enables sorting of child nodes when iterating and traversing.
Normally, child nodes are not sorted when iterating or traversing over
the trie (just like dict elements are not sorted). This method allows
sorting to be enabled (which was the behaviour prior to pygtrie 2.0
release).
For Trie class, enabling sorting of children is identical to simply
sorting the list of items since Trie returns keys as tuples. However,
        for other implementations such as StringTrie the two may behave subtly
        differently.  For example, sorting items might produce::
root/foo-bar
root/foo/baz
even though foo comes before foo-bar.
Args:
enable: Whether to enable sorting of child nodes.
"""
self._sorted = enable
def clear(self):
"""Removes all the values from the trie."""
self._root = _Node()
def update(self, *args, **kwargs):
"""Updates stored values. Works like :func:`dict.update`."""
if len(args) > 1:
raise ValueError('update() takes at most one positional argument, '
'%d given.' % len(args))
# We have this here instead of just letting MutableMapping.update()
# handle things because it will iterate over keys and for each key
# retrieve the value. With Trie, this may be expensive since the path
# to the node would have to be walked twice. Instead, we have our own
# implementation where iteritems() is used avoiding the unnecessary
# value look-up.
if args and isinstance(args[0], Trie):
for key, value in _iteritems(args[0]):
self[key] = value
args = ()
super(Trie, self).update(*args, **kwargs)
def copy(self):
"""Returns a shallow copy of the trie."""
return self.__class__(self)
@classmethod
def fromkeys(cls, keys, value=None):
"""Creates a new trie with given keys set.
This is roughly equivalent to calling the constructor with a ``(key,
value) for key in keys`` generator.
Args:
keys: An iterable of keys that should be set in the new trie.
value: Value to associate with given keys.
Returns:
A new trie where each key from ``keys`` has been set to the given
value.
"""
trie = cls()
for key in keys:
trie[key] = value
return trie
def _get_node(self, key, create=False):
"""Returns node for given key. Creates it if requested.
Args:
key: A key to look for.
create: Whether to create the node if it does not exist.
Returns:
``(node, trace)`` tuple where ``node`` is the node for given key and
``trace`` is a list specifying path to reach the node including all
the encountered nodes. Each element of trace is a ``(step, node)``
tuple where ``step`` is a step from parent node to given node and
``node`` is node on the path. The first element of the path is
always ``(None, self._root)``.
Raises:
KeyError: If there is no node for the key and ``create`` is
``False``.
"""
node = self._root
trace = [(None, node)]
for step in self.__path_from_key(key):
if create:
node = node.children.setdefault(step, _Node())
else:
node = node.children.get(step)
if not node:
raise KeyError(key)
trace.append((step, node))
return node, trace
def __iter__(self):
return self.iterkeys()
# pylint: disable=arguments-differ
def iteritems(self, prefix=_SENTINEL, shallow=False):
"""Yields all nodes with associated values with given prefix.
Only nodes with values are output. For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> t['qux'] = 'Qux'
>>> t.items()
[('qux', 'Qux'), ('foo', 'Foo'), ('foo/bar/baz', 'Baz')]
Items are generated in topological order but the order of siblings is
unspecified by default. In other words, in the above example, the
``('qux', 'Qux')`` pair might have been at the end of the list. At an
expense of efficiency, this can be changed via
:func:`Trie.enable_sorting`.
With ``prefix`` argument, only items with specified prefix are generated
(i.e. only given subtrie is traversed) as demonstrated by::
>>> t.items(prefix='foo/bar')
[('foo/bar/baz', 'Baz')]
With ``shallow`` argument, if a node has value associated with it, it's
children are not traversed even if they exist which can be seen in::
>>> t.items(shallow=True)
[('qux', 'Qux'), ('foo', 'Foo')]
Args:
prefix: Prefix to limit iteration to.
shallow: Perform a shallow traversal, i.e. do not yield items if
their prefix has been yielded.
Yields:
``(key, value)`` tuples.
Raises:
KeyError: If ``prefix`` does not match any node.
"""
node, _ = self._get_node(prefix)
for path, value in node.iterate(list(self.__path_from_key(prefix)),
shallow, self._iteritems):
yield (self._key_from_path(path), value)
def iterkeys(self, prefix=_SENTINEL, shallow=False):
"""Yields all keys having associated values with given prefix.
This is equivalent to taking first element of tuples generated by
:func:`Trie.iteritems` which see for more detailed documentation.
Args:
prefix: Prefix to limit iteration to.
shallow: Perform a shallow traversal, i.e. do not yield keys if
their prefix has been yielded.
Yields:
All the keys (with given prefix) with associated values in the trie.
Raises:
KeyError: If ``prefix`` does not match any node.
"""
for key, _ in self.iteritems(prefix=prefix, shallow=shallow):
yield key
def itervalues(self, prefix=_SENTINEL, shallow=False):
"""Yields all values associated with keys with given prefix.
This is equivalent to taking second element of tuples generated by
:func:`Trie.iteritems` which see for more detailed documentation.
Args:
prefix: Prefix to limit iteration to.
shallow: Perform a shallow traversal, i.e. do not yield values if
their prefix has been yielded.
Yields:
All the values associated with keys (with given prefix) in the trie.
Raises:
KeyError: If ``prefix`` does not match any node.
"""
node, _ = self._get_node(prefix)
for _, value in node.iterate(list(self.__path_from_key(prefix)),
shallow, self._iteritems):
yield value
def items(self, prefix=_SENTINEL, shallow=False):
"""Returns a list of ``(key, value)`` pairs in given subtrie.
This is equivalent to constructing a list from generator returned by
:func:`Trie.iteritems` which see for more detailed documentation.
"""
return list(self.iteritems(prefix=prefix, shallow=shallow))
def keys(self, prefix=_SENTINEL, shallow=False):
"""Returns a list of all the keys, with given prefix, in the trie.
This is equivalent to constructing a list from generator returned by
:func:`Trie.iterkeys` which see for more detailed documentation.
"""
return list(self.iterkeys(prefix=prefix, shallow=shallow))
def values(self, prefix=_SENTINEL, shallow=False):
"""Returns a list of values in given subtrie.
This is equivalent to constructing a list from generator returned by
        :func:`Trie.itervalues` which see for more detailed documentation.
"""
return list(self.itervalues(prefix=prefix, shallow=shallow))
# pylint: enable=arguments-differ
def __len__(self):
"""Returns number of values in a trie.
Note that this method is expensive as it iterates over the whole trie.
"""
return sum(1 for _ in self.itervalues())
def __nonzero__(self):
return bool(self._root)
HAS_VALUE = 1
HAS_SUBTRIE = 2
def has_node(self, key):
"""Returns whether given node is in the trie.
Return value is a bitwise or of ``HAS_VALUE`` and ``HAS_SUBTRIE``
constants indicating node has a value associated with it and that it is
a prefix of another existing key respectively. Both of those are
independent of each other and all of the four combinations are possible.
For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/bar/baz'] = 'Baz'
>>> t.has_node('qux') == 0
True
>>> t.has_node('foo/bar/baz') == pygtrie.Trie.HAS_VALUE
True
>>> t.has_node('foo') == pygtrie.Trie.HAS_SUBTRIE
True
>>> t.has_node('foo/bar') == (pygtrie.Trie.HAS_VALUE |
... pygtrie.Trie.HAS_SUBTRIE)
True
There are two higher level methods built on top of this one which give
        easier interface for the information.  :func:`Trie.has_key` returns
whether node has a value associated with it and :func:`Trie.has_subtrie`
checks whether node is a prefix. Continuing previous example::
>>> t.has_key('qux'), t.has_subtrie('qux')
False, False
>>> t.has_key('foo/bar/baz'), t.has_subtrie('foo/bar/baz')
True, False
>>> t.has_key('foo'), t.has_subtrie('foo')
False, True
>>> t.has_key('foo/bar'), t.has_subtrie('foo/bar')
True, True
Args:
key: A key to look for.
Returns:
Non-zero if node exists and if it does a bit-field denoting whether
it has a value associated with it and whether it has a subtrie.
"""
try:
node, _ = self._get_node(key)
except KeyError:
return 0
return ((self.HAS_VALUE * int(node.value is not _SENTINEL)) |
(self.HAS_SUBTRIE * int(bool(node.children))))
def has_key(self, key):
"""Indicates whether given key has value associated with it.
See :func:`Trie.has_node` for more detailed documentation.
"""
return bool(self.has_node(key) & self.HAS_VALUE)
def has_subtrie(self, key):
"""Returns whether given key is a prefix of another key in the trie.
See :func:`Trie.has_node` for more detailed documentation.
"""
return bool(self.has_node(key) & self.HAS_SUBTRIE)
@staticmethod
def _slice_maybe(key_or_slice):
"""Checks whether argument is a slice or a plain key.
Args:
key_or_slice: A key or a slice to test.
Returns:
``(key, is_slice)`` tuple. ``is_slice`` indicates whether
``key_or_slice`` is a slice and ``key`` is either ``key_or_slice``
itself (if it's not a slice) or slice's start position.
Raises:
TypeError: If ``key_or_slice`` is a slice whose stop or step are not
                ``None``.  In other words, only ``[key:]`` slices are valid.
"""
if isinstance(key_or_slice, slice):
if key_or_slice.stop is not None or key_or_slice.step is not None:
raise TypeError(key_or_slice)
return key_or_slice.start, True
return key_or_slice, False
def __getitem__(self, key_or_slice):
"""Returns value associated with given key or raises KeyError.
When argument is a single key, value for that key is returned (or
:class:`KeyError` exception is thrown if the node does not exist or has
no value associated with it).
When argument is a slice, it must be one with only `start` set in which
case the access is identical to :func:`Trie.itervalues` invocation with
prefix argument.
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/baz'] = 'Baz'
>>> t['qux'] = 'Qux'
>>> t['foo/bar']
'Bar'
>>> list(t['foo':])
['Baz', 'Bar']
>>> t['foo']
Traceback (most recent call last):
...
pygtrie.ShortKeyError: 'foo'
Args:
key_or_slice: A key or a slice to look for.
Returns:
If a single key is passed, a value associated with given key. If
a slice is passed, a generator of values in specified subtrie.
Raises:
ShortKeyError: If the key has no value associated with it but is
a prefix of some key with a value. Note that
:class:`ShortKeyError` is subclass of :class:`KeyError`.
KeyError: If key has no value associated with it nor is a prefix of
an existing key.
            TypeError: If ``key_or_slice`` is a slice but its stop or step are
not ``None``.
"""
if self._slice_maybe(key_or_slice)[1]:
return self.itervalues(key_or_slice.start)
node, _ = self._get_node(key_or_slice)
if node.value is _SENTINEL:
raise ShortKeyError(key_or_slice)
return node.value
def _set(self, key, value, only_if_missing=False, clear_children=False):
"""Sets value for a given key.
Args:
key: Key to set value of.
value: Value to set to.
only_if_missing: If ``True``, value won't be changed if the key is
already associated with a value.
clear_children: If ``True``, all children of the node, if any, will
be removed.
Returns:
Value of the node.
"""
node, _ = self._get_node(key, create=True)
if not only_if_missing or node.value is _SENTINEL:
node.value = value
if clear_children:
node.children.clear()
return node.value
def __setitem__(self, key_or_slice, value):
"""Sets value associated with given key.
If `key_or_slice` is a key, simply associate it with given value. If it
is a slice (which must have `start` set only), it in addition clears any
subtrie that might have been attached to particular key. For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/baz'] = 'Baz'
>>> t.keys()
['foo/baz', 'foo/bar']
>>> t['foo':] = 'Foo'
>>> t.keys()
['foo']
Args:
key_or_slice: A key to look for or a slice. If it is a slice, the
whole subtrie (if present) will be replaced by a single node
with given value set.
value: Value to set.
Raises:
TypeError: If key is a slice whose stop or step are not None.
"""
key, is_slice = self._slice_maybe(key_or_slice)
self._set(key, value, clear_children=is_slice)
def setdefault(self, key, value=None):
"""Sets value of a given node if not set already. Also returns it.
In contrast to :func:`Trie.__setitem__`, this method does not accept
slice as a key.
"""
return self._set(key, value, only_if_missing=True)
@staticmethod
def _cleanup_trace(trace):
"""Removes empty nodes present on specified trace.
Args:
trace: Trace to the node to cleanup as returned by
:func:`Trie._get_node`.
"""
        i = len(trace) - 1  # len(trace) >= 1 since root is always there
step, node = trace[i]
while i and not node:
i -= 1
parent_step, parent = trace[i]
del parent.children[step]
step, node = parent_step, parent
def _pop_from_node(self, node, trace, default=_SENTINEL):
"""Removes a value from given node.
Args:
node: Node to get value of.
trace: Trace to that node as returned by :func:`Trie._get_node`.
default: A default value to return if node has no value set.
Returns:
Value of the node or ``default``.
Raises:
ShortKeyError: If the node has no value associated with it and
``default`` has not been given.
"""
if node.value is not _SENTINEL:
value = node.value
node.value = _SENTINEL
self._cleanup_trace(trace)
return value
elif default is _SENTINEL:
raise ShortKeyError()
else:
return default
def pop(self, key, default=_SENTINEL):
"""Deletes value associated with given key and returns it.
Args:
key: A key to look for.
default: If specified, value that will be returned if given key has
no value associated with it. If not specified, method will
throw KeyError in such cases.
Returns:
Removed value, if key had value associated with it, or ``default``
(if given).
Raises:
ShortKeyError: If ``default`` has not been specified and the key has
no value associated with it but is a prefix of some key with
a value. Note that :class:`ShortKeyError` is subclass of
:class:`KeyError`.
KeyError: If default has not been specified and key has no value
associated with it nor is a prefix of an existing key.
"""
try:
return self._pop_from_node(*self._get_node(key))
except KeyError:
if default is not _SENTINEL:
return default
raise
def popitem(self):
"""Deletes an arbitrary value from the trie and returns it.
There is no guarantee as to which item is deleted and returned. Neither
in respect to its lexicographical nor topological order.
Returns:
``(key, value)`` tuple indicating deleted key.
Raises:
KeyError: If the trie is empty.
"""
if not self:
raise KeyError()
node = self._root
trace = [(None, node)]
while node.value is _SENTINEL:
step = next(_iterkeys(node.children))
node = node.children[step]
trace.append((step, node))
return (self._key_from_path((step for step, _ in trace[1:])),
self._pop_from_node(node, trace))
def __delitem__(self, key_or_slice):
"""Deletes value associated with given key or raises KeyError.
If argument is a key, value associated with it is deleted. If the key
is also a prefix, its descendents are not affected. On the other hand,
if the argument is a slice (in which case it must have only start set),
the whole subtrie is removed. For example::
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar'] = 'Bar'
>>> t['foo/bar/baz'] = 'Baz'
>>> del t['foo/bar']
>>> t.keys()
['foo', 'foo/bar/baz']
>>> del t['foo':]
>>> t.keys()
[]
Args:
key_or_slice: A key to look for or a slice. If key is a slice, the
whole subtrie will be removed.
Raises:
ShortKeyError: If the key has no value associated with it but is
a prefix of some key with a value. This is not thrown is
key_or_slice is a slice -- in such cases, the whole subtrie is
removed. Note that :class:`ShortKeyError` is subclass of
:class:`KeyError`.
KeyError: If key has no value associated with it nor is a prefix of
an existing key.
TypeError: If key is a slice whose stop or step are not ``None``.
"""
key, is_slice = self._slice_maybe(key_or_slice)
node, trace = self._get_node(key)
if is_slice:
node.children.clear()
elif node.value is _SENTINEL:
raise ShortKeyError(key)
node.value = _SENTINEL
self._cleanup_trace(trace)
def prefixes(self, key):
"""Walks towards the node specified by key and yields all found items.
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> list(t.prefixes('foo/bar/baz/qux'))
[('foo', 'Foo'), ('foo/bar/baz', 'Baz')]
>>> list(t.prefixes('does/not/exist'))
[]
Args:
key: Key to look for.
Yields:
``(k, value)`` pairs denoting keys with associated values
encountered on the way towards the specified key.
"""
node = self._root
path = self.__path_from_key(key)
pos = 0
while True:
if node.value is not _SENTINEL:
yield self._key_from_path(path[:pos]), node.value
if pos == len(path):
break
node = node.children.get(path[pos])
if not node:
break
pos += 1
def shortest_prefix(self, key):
"""Finds the shortest prefix of a key with a value.
This is equivalent to taking the first object yielded by
:func:`Trie.prefixes` with a default of `(None, None)` if said method
yields no items. As an added bonus, the pair in that case will be
a falsy value (as opposed to regular two-element tuple of ``None``
values).
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> t.shortest_prefix('foo/bar/baz/qux')
('foo', 'Foo')
>>> t.shortest_prefix('does/not/exist')
(None, None)
>>> bool(t.shortest_prefix('does/not/exist'))
False
Args:
key: Key to look for.
Returns:
``(k, value)`` where ``k`` is the shortest prefix of ``key`` (it may
equal ``key``) and ``value`` is a value associated with that key.
If no node is found, ``(None, None)`` is returned.
"""
return next(self.prefixes(key), _NONE_PAIR)
def longest_prefix(self, key):
"""Finds the longest prefix of a key with a value.
This is equivalent to taking the last object yielded by
:func:`Trie.prefixes` with a default of `(None, None)` if said method
yields no items. As an added bonus, the pair in that case will be
a falsy value (as opposed to regular two-element tuple of ``None``
values).
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = 'Foo'
>>> t['foo/bar/baz'] = 'Baz'
>>> t.longest_prefix('foo/bar/baz/qux')
('foo/bar/baz', 'Baz')
>>> t.longest_prefix('does/not/exist')
(None, None)
>>> bool(t.longest_prefix('does/not/exist'))
False
Args:
key: Key to look for.
Returns:
``(k, value)`` where ``k`` is the longest prefix of ``key`` (it may
equal ``key``) and ``value`` is a value associated with that key.
If no node is found, ``(None, None)`` is returned.
"""
ret = _NONE_PAIR
for ret in self.prefixes(key):
pass
return ret
def __eq__(self, other):
return self._root == other._root # pylint: disable=protected-access
def __ne__(self, other):
return self._root != other._root # pylint: disable=protected-access
def __str__(self):
return 'Trie(%s)' % (
', '.join('%s: %s' % item for item in self.iteritems()))
def __repr__(self):
if self:
return 'Trie((%s,))' % (
', '.join('(%r, %r)' % item for item in self.iteritems()))
else:
return 'Trie()'
def __path_from_key(self, key):
"""Converts a user visible key object to internal path representation.
Args:
key: User supplied key or ``_SENTINEL``.
Returns:
An empty tuple if ``key`` was ``_SENTINEL``, otherwise whatever
:func:`Trie._path_from_key` returns.
Raises:
TypeError: If ``key`` is of invalid type.
"""
return () if key is _SENTINEL else self._path_from_key(key)
def _path_from_key(self, key): # pylint: disable=no-self-use
"""Converts a user visible key object to internal path representation.
The default implementation simply returns key.
Args:
key: User supplied key.
Returns:
A path, which is an iterable of steps. Each step must be hashable.
Raises:
TypeError: If key is of invalid type.
"""
return key
def _key_from_path(self, path): # pylint: disable=no-self-use
"""Converts an internal path into a user visible key object.
The default implementation creates a tuple from the path.
Args:
path: Internal path representation.
Returns:
A user visible key object.
"""
return tuple(path)
def traverse(self, node_factory, prefix=_SENTINEL):
"""Traverses the tree using node_factory object.
node_factory is a callable function which accepts (path_conv, path,
children, value=...) arguments, where path_conv is a lambda converting
path representation to key, path is the path to this node, children is
an iterable of children nodes constructed by node_factory, optional
value is the value associated with the path.
node_factory's children argument is a generator which has a few
consequences:
* To traverse into node's children, the generator must be iterated over.
This can by accomplished by a simple "children = list(children)"
statement.
* Ignoring the argument allows node_factory to stop the traversal from
going into the children of the node. In other words, whole subtrie
can be removed from traversal if node_factory chooses so.
* If children is stored as is (i.e. as a generator) when it is iterated
over later on it will see state of the trie as it is during the
iteration and not when traverse method was called.
:func:`Trie.traverse` has two advantages over :func:`Trie.iteritems` and
similar methods:
1. it allows subtries to be skipped completely when going through the
list of nodes based on the property of the parent node; and
2. it represents structure of the trie directly making it easy to
convert structure into a different representation.
For example, the below snippet prints all files in current directory
counting how many HTML files were found but ignores hidden files and
directories (i.e. those whose names start with a dot)::
import os
import pygtrie
t = pygtrie.StringTrie(separator=os.sep)
# Construct a trie with all files in current directory and all
# of its sub-directories. Files get set a True value.
# Directories are represented implicitly by being prefixes of
# files.
for root, _, files in os.walk('.'):
for name in files: t[os.path.join(root, name)] = True
def traverse_callback(path_conv, path, children, is_file=False):
if path and path[-1] != '.' and path[-1][0] == '.':
# Ignore hidden directory (but accept root node and '.')
return 0
elif is_file:
                    print(path_conv(path))
return int(path[-1].endswith('.html'))
else:
# Otherwise, it's a directory. Traverse into children.
return sum(int(is_html) for is_html in children)
            print(t.traverse(traverse_callback))
As documented, ignoring the children argument causes subtrie to be
omitted and not walked into.
In the next example, the trie is converted to a tree representation
where child nodes include a pointer to their parent. As before, hidden
files and directories are ignored::
import os
import pygtrie
t = pygtrie.StringTrie(separator=os.sep)
for root, _, files in os.walk('.'):
for name in files: t[os.path.join(root, name)] = True
class File(object):
def __init__(self, name):
self.name = name
self.parent = None
class Directory(File):
def __init__(self, name, children):
super(Directory, self).__init__(name)
self._children = children
for child in children:
child.parent = self
def traverse_callback(path_conv, path, children, is_file=False):
if not path or path[-1] == '.' or path[-1][0] != '.':
if is_file:
return File(path[-1])
children = filter(None, children)
return Directory(path[-1] if path else '', children)
root = t.traverse(traverse_callback)
Note: Unlike iterators, traverse method uses stack recursion which means
that using it on deep tries may lead to a RuntimeError exception thrown
once Python's maximum recursion depth is reached.
Args:
node_factory: Makes opaque objects from the keys and values of the
trie.
prefix: Prefix for node to start traversal, by default starts at
root.
Returns:
Node object constructed by node_factory corresponding to the root
node.
"""
node, _ = self._get_node(prefix)
return node.traverse(node_factory, self._key_from_path,
list(self.__path_from_key(prefix)),
self._iteritems)
class CharTrie(Trie):
"""A variant of a :class:`pygtrie.Trie` which accepts strings as keys.
The only difference between :class:`pygtrie.CharTrie` and
:class:`pygtrie.Trie` is that when :class:`pygtrie.CharTrie` returns keys
back to the client (for instance in keys() method is called), those keys are
returned as strings.
Canonical example where this class can be used is a dictionary of words in
a natural language. For example::
>>> import pygtrie
>>> t = pygtrie.CharTrie()
>>> t['wombat'] = True
>>> t['woman'] = True
>>> t['man'] = True
>>> t['manhole'] = True
>>> t.has_subtrie('wo')
True
>>> t.has_key('man')
True
>>> t.has_subtrie('man')
True
>>> t.has_subtrie('manhole')
False
"""
def _key_from_path(self, path):
return ''.join(path)
class StringTrie(Trie):
""":class:`pygtrie.Trie` variant accepting strings with a separator as keys.
The trie accepts strings as keys which are split into components using
a separator specified during initialisation ("/" by default).
Canonical example where this class can be used is when keys are paths. For
example, it could map from a path to a request handler::
import pygtrie
def handle_root(): pass
def handle_admin(): pass
def handle_admin_images(): pass
handlers = pygtrie.StringTrie()
handlers[''] = handle_root
handlers['/admin'] = handle_admin
handlers['/admin/images'] = handle_admin_images
request_path = '/admin/images/foo'
handler = handlers.longest_prefix(request_path)
"""
def __init__(self, *args, **kwargs):
"""Initialises the trie.
Except for a ``separator`` named argument, all other arguments are
interpreted the same way :func:`Trie.update` interprets them.
Args:
*args: Passed to super class initialiser.
**kwargs: Passed to super class initialiser.
separator: A separator to use when splitting keys into paths used by
the trie. "/" is used if this argument is not specified. This
named argument is not specified on the function's prototype
because of Python's limitations.
"""
separator = kwargs.pop('separator', '/')
if not isinstance(separator, _basestring):
raise TypeError('separator must be a string')
if not separator:
raise ValueError('separator can not be empty')
self._separator = separator
super(StringTrie, self).__init__(*args, **kwargs)
@classmethod
def fromkeys(cls, keys, value=None, separator='/'): # pylint: disable=arguments-differ
trie = cls(separator=separator)
for key in keys:
trie[key] = value
return trie
def _path_from_key(self, key):
return key.split(self._separator)
def _key_from_path(self, path):
return self._separator.join(path)
class PrefixSet(_collections.MutableSet): # pylint: disable=abstract-class-not-used
"""A set of prefixes.
:class:`pygtrie.PrefixSet` works similar to a normal set except it is said
    to contain a key if the key or its prefix is stored in the set.  For
instance, if "foo" is added to the set, the set contains "foo" as well as
"foobar".
The set supports addition of elements but does *not* support removal of
elements. This is because there's no obvious consistent and intuitive
behaviour for element deletion.
"""
def __init__(self, iterable=None, factory=Trie, **kwargs):
"""Initialises the prefix set.
Args:
iterable: A sequence of keys to add to the set.
factory: A function used to create a trie used by the
:class:`pygtrie.PrefixSet`.
kwargs: Additional keyword arguments passed to the factory function.
"""
super(PrefixSet, self).__init__()
trie = factory(**kwargs)
if iterable:
trie.update((key, True) for key in iterable)
self._trie = trie
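    # Containment illustration (comments only; a CharTrie factory is used here
    # so that keys round-trip as strings):
    #   >>> ps = PrefixSet(['foo'], factory=CharTrie)
    #   >>> 'foobar' in ps, 'fo' in ps
    #   (True, False)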
def copy(self):
"""Returns a copy of the prefix set."""
return self.__class__(self._trie)
def clear(self):
"""Removes all keys from the set."""
self._trie.clear()
def __contains__(self, key):
"""Checks whether set contains key or its prefix."""
return bool(self._trie.shortest_prefix(key)[1])
def __iter__(self):
"""Return iterator over all prefixes in the set.
See :func:`PrefixSet.iter` method for more info.
"""
return self._trie.iterkeys()
def iter(self, prefix=_SENTINEL):
"""Iterates over all keys in the set optionally starting with a prefix.
Since a key does not have to be explicitly added to the set to be an
element of the set, this method does not iterate over all possible keys
that the set contains, but only over the shortest set of prefixes of all
the keys the set contains.
For example, if "foo" has been added to the set, the set contains also
"foobar", but this method will *not* iterate over "foobar".
If ``prefix`` argument is given, method will iterate over keys with
given prefix only. The keys yielded from the function if prefix is
        given do not have to be a subset (in the mathematical sense) of the keys
        yielded when there is no prefix.  This happens if the set contains
a prefix of the given prefix.
For example, if only "foo" has been added to the set, iter method called
with no arguments will yield "foo" only. However, when called with
"foobar" argument, it will yield "foobar" only.
"""
if prefix is _SENTINEL:
return iter(self)
elif self._trie.has_node(prefix):
return self._trie.iterkeys(prefix=prefix)
elif prefix in self:
# Make sure the type of returned keys is consistent.
# pylint: disable=protected-access
return self._trie._key_from_path(self._trie._path_from_key(prefix)),
else:
return ()
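    # For example, with a CharTrie-backed set containing only 'foo':
    # list(ps.iter()) yields ['foo'], while list(ps.iter('foobar')) yields
    # ['foobar'] because 'foobar' is covered by the stored prefix 'foo'.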
def __len__(self):
"""Returns number of keys stored in the set.
Since a key does not have to be explicitly added to the set to be an
element of the set, this method does not count over all possible keys
that the set contains (since that would be infinity), but only over the
shortest set of prefixes of all the keys the set contains.
For example, if "foo" has been added to the set, the set contains also
"foobar", but this method will *not* count "foobar".
"""
return len(self._trie)
def add(self, key):
"""Adds given key to the set.
If the set already contains prefix of the key being added, this
operation has no effect. If the key being added is a prefix of some
existing keys in the set, those keys are deleted and replaced by
a single entry for the key being added.
For example, if the set contains key "foo" adding a key "foobar" does
not change anything. On the other hand, if the set contains keys
"foobar" and "foobaz", adding a key "foo" will replace those two keys
with a single key "foo".
        This makes a difference when iterating over the keys or counting the
        number of keys.  Counterintuitively, adding a key can *decrease* the
        size of the set.
Args:
key: Key to add.
"""
if key not in self:
self._trie[key:] = True
def discard(self, key):
raise NotImplementedError(
'Removing keys from PrefixSet is not implemented.')
def remove(self, key):
raise NotImplementedError(
'Removing keys from PrefixSet is not implemented.')
def pop(self):
raise NotImplementedError(
'Removing keys from PrefixSet is not implemented.')
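# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  It relies only
# on the PrefixSet behaviour documented in the docstrings above and on the
# default Trie factory.
if __name__ == '__main__':
    ps = PrefixSet()
    ps.add('foo')
    # Membership covers the stored key and everything it is a prefix of,
    # while len() only counts the stored prefix itself.
    assert 'foo' in ps
    assert 'foobar' in ps
    assert 'bar' not in ps
    assert len(ps) == 1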
|
the-stack_0_2544 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A convenience script engine to read Gaussian output in a directory tree.
"""
import argparse
import logging
import multiprocessing
import os
import re
from tabulate import tabulate
from pymatgen.apps.borg.hive import GaussianToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
save_file = "gau_data.gz"
def get_energies(rootdir, reanalyze, verbose):
"""
:param rootdir:
:param reanalyze:
:param verbose:
:return:
"""
if verbose:
FORMAT = "%(relativeCreated)d msecs : %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
drone = GaussianToComputedEntryDrone(inc_structure=True, parameters=["filename"])
ncpus = multiprocessing.cpu_count()
logging.info("Detected {} cpus".format(ncpus))
queen = BorgQueen(drone, number_of_drones=ncpus)
if os.path.exists(save_file) and not reanalyze:
msg = (
"Using previously assimilated data from {}.".format(save_file)
+ " Use -f to force re-analysis."
)
queen.load_data(save_file)
else:
queen.parallel_assimilate(rootdir)
msg = "Results saved to {} for faster reloading.".format(save_file)
queen.save_data(save_file)
entries = queen.get_data()
entries = sorted(entries, key=lambda x: x.parameters["filename"])
all_data = [
(
e.parameters["filename"].replace("./", ""),
re.sub(r"\s+", "", e.composition.formula),
"{}".format(e.parameters["charge"]),
"{}".format(e.parameters["spin_mult"]),
"{:.5f}".format(e.energy),
"{:.5f}".format(e.energy_per_atom),
)
for e in entries
]
headers = ("Directory", "Formula", "Charge", "Spin Mult.", "Energy", "E/Atom")
print(tabulate(all_data, headers=headers))
print("")
print(msg)
def main():
"""
Main function
"""
desc = """
Convenient Gaussian run analyzer which can recursively go into a directory
to search results.
Author: Shyue Ping Ong
Version: 1.0
Last updated: Jul 6 2012"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"directories",
metavar="dir",
default=".",
type=str,
nargs="*",
help="directory to process",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_const",
const=True,
help="Verbose mode. Provides detailed output on progress.",
)
parser.add_argument(
"-f",
"--force",
dest="reanalyze",
action="store_const",
const=True,
help="Force reanalysis, instead of reusing gaussian_analyzer_data.gz.",
)
args = parser.parse_args()
for d in args.directories:
get_energies(d, args.reanalyze, args.verbose)
if __name__ == "__main__":
main()
|
the-stack_0_2546 | from __future__ import print_function
from tornado import ioloop, gen
from tornado_mysql import pools
pools.DEBUG = True
POOL = pools.Pool(
dict(host='127.0.0.1', port=3306, user='test', passwd='', db='mysql'),
max_idle_connections=1,
max_recycle_sec=3)
@gen.coroutine
def worker(n):
for _ in range(10):
t = 1
print(n, "sleeping", t, "seconds")
cur = yield POOL.execute("SELECT SLEEP(%s)", (t,))
print(n, cur.fetchall())
@gen.coroutine
def main():
workers = [worker(i) for i in range(10)]
yield workers
ioloop.IOLoop.current().run_sync(main)
print(POOL._opened_conns) |
the-stack_0_2547 | """
@brief test log(time=1s)
You should indicate a time in seconds. The program ``run_unittests.py``
will sort all test files by increasing time and run them.
"""
import unittest
import itertools
from teachpyx.examples.construction_classique import enumerate_permutations_recursive, enumerate_permutations
class TestClassiquesPermutation (unittest.TestCase):
def test_permutation(self):
self.maxDiff = None
ens = list(range(5))
lt = list(tuple(p) for p in enumerate_permutations_recursive(ens))
self.assertEqual(len(lt), 120)
res = list(tuple(p) for p in itertools.permutations(ens))
self.assertEqual(len(res), 120)
self.assertEqual(set(res), set(lt))
res = list(tuple(p) for p in enumerate_permutations(ens))
self.assertEqual(len(res), 120)
self.assertEqual(set(res), set(lt))
res = list(tuple(p) for p in enumerate_permutations([1]))
self.assertEqual(res, [(1,)])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_2548 | import tensorflow as tf
class Load_Data:
def __init__(self,MAX_LENGTH,tokenizer_en,tokenizer_pt):
self.MAX_LENGTH = MAX_LENGTH
self.tokenizer_pt = tokenizer_pt
self.tokenizer_en = tokenizer_en
def encode(self,lang1, lang2):
lang1 = [self.tokenizer_pt.vocab_size] + self.tokenizer_pt.encode(
lang1.numpy()) + [self.tokenizer_pt.vocab_size+1]
lang2 = [self.tokenizer_en.vocab_size] + self.tokenizer_en.encode(
lang2.numpy()) + [self.tokenizer_en.vocab_size+1]
return lang1, lang2
def tf_encode(self,pt, en):
result_pt, result_en = tf.py_function(self.encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
return result_pt, result_en
def filter_max_length(self,x, y):
return tf.logical_and(tf.size(x) <= self.MAX_LENGTH,
tf.size(y) <= self.MAX_LENGTH)
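# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of how Load_Data is
# typically wired into a tf.data pipeline.  `train_examples`, the two subword
# tokenizers and MAX_LENGTH are assumptions supplied by the surrounding
# project, so this is left as a commented-out example:
#
#   loader = Load_Data(MAX_LENGTH, tokenizer_en, tokenizer_pt)
#   train_dataset = (train_examples
#                    .map(loader.tf_encode)
#                    .filter(loader.filter_max_length)
#                    .cache()
#                    .shuffle(20000)
#                    .padded_batch(64))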
|
the-stack_0_2550 | import redis
import urllib.parse as parse
local_redis = redis.Redis(host='127.0.0.1', port=6379, db=0)
all_keys = local_redis.keys()
for bt_key in all_keys:
bt_key = bt_key.decode('utf-8')
bt_str = '\n{}\n'.format(parse.unquote(local_redis.get(bt_key).decode('utf-8')))
try:
with open('./User_Method/user_magnet_bt.txt', 'a') as sf:
sf.write(bt_str)
except Exception as error:
print(error)
continue
else:
print(bt_str)
    # No explicit close is needed: the ``with`` block above already closes the
    # file, and ``sf`` could be unbound here if open() had failed.
|
the-stack_0_2551 | import os
import logging
from threading import Thread, Event, Lock
from time import sleep, time
import serial
# for python 2/3 compatibility
try:
reduce
except NameError:
# In python 3, reduce is no longer imported by default.
from functools import reduce
try:
isinstance("", basestring)
def is_str(s):
return isinstance(s, basestring)
def encode2To3(s):
return s
def decode2To3(s):
return s
except NameError:
def is_str(s):
return isinstance(s, str)
def encode2To3(s):
return bytes(s, 'UTF-8')
def decode2To3(s):
return s.decode('UTF-8')
HERE = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.addHandler(logging.StreamHandler())
#fh = logging.FileHandler(os.path.join(HERE, 'voxelface.log'))
#fh.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s'))
#logger.addHandler(fh)
class Printer(object):
""" The Printer object is responsible for serial communications with a
printer. The printer is expected to be running Marlin firmware.
"""
def __init__(self, port='/dev/tty.usbmodem1421', baudrate=250000):
# USB port and baudrate for communication with the printer.
self.port = port
self.baudrate = baudrate
# The Serial object that the printer is communicating on.
self.s = None
# List of the responses from the printer.
self.responses = []
# List of lines that were sent to the printer.
self.sentlines = []
# True if the print thread is alive and sending lines.
self.printing = False
# Set to True to pause the print.
self.paused = False
# If set to True, the read_thread will be closed as soon as possible.
self.stop_reading = False
# If set to True, the print_thread will be closed as soon as possible.
self.stop_printing = False
# List of all temperature string responses from the printer.
self.temp_readings = []
### Private Attributes ################################################
# List of all lines to be sent to the printer.
self._buffer = []
# Index into the _buffer of the next line to send to the printer.
self._current_line_idx = 0
# This thread continuously sends lines as they appear in self._buffer.
self._print_thread = None
# This thread continuously reads lines as they appear from the printer.
self._read_thread = None
# Flag used to synchronize the print_thread and the read_thread. An 'ok'
# needs to be returned for every line sent. When the print_thread sends
# a line this flag is cleared, and when an 'ok' is received it is set.
self._ok_received = Event()
self._ok_received.set()
# Lock used to ensure serial send/receive events are atomic with the
# setting/clearing of the `_ok_received` flag.
self._communication_lock = Lock()
# Lock used to ensure connecting and disconnecting is atomic.
self._connection_lock = Lock()
        # If False the Printer instance does not own the serial object passed
        # in and it should not be closed when finished with.
self._owns_serial = True
# This is set to true when a disconnect was requested. If a sendline is
# called while this is true an error is raised.
self._disconnect_pending = False
# When we reset the line number Marlin's internal number will differ
# from our own _current_line_idx. This offset is used to keep those two
# in sync.
self._reset_offset = 0
### Printer Interface ###################################################
def connect(self, s=None):
""" Instantiate a Serial object using the stored port and baudrate.
Parameters
----------
s : serial.Serial
If a serial object is passed in then it will be used instead of
creating a new one.
"""
with self._connection_lock:
if s is None:
self.s = serial.Serial(self.port, self.baudrate, timeout=3)
else:
self.s = s
self._owns_serial = False
self._ok_received.set()
self._current_line_idx = 0
self._buffer = []
self.responses = []
self.sentlines = []
self._disconnect_pending = False
self._start_read_thread()
if s is None:
while len(self.responses) == 0:
                    sleep(0.01)  # wait until the start message is received.
self.responses = []
logger.debug('Connected to {}'.format(self.s))
def disconnect(self, wait=False):
""" Disconnect from the printer by stopping threads and closing the port
Parameters
----------
wait : Bool (default: False)
If true, this method waits until all lines in the buffer have been
sent and acknowledged before disconnecting. Clearing the buffer
isn't guaranteed. If the read thread isn't running for some reason,
this function may return without waiting even when wait is set to
True.
"""
with self._connection_lock:
self._disconnect_pending = True
if wait:
buf_len = len(self._buffer)
while buf_len > len(self.responses) and \
self._is_read_thread_running():
sleep(0.01) # wait until all lines in the buffer are sent
if self._print_thread is not None:
self.stop_printing = True
if self.s is not None and self.s.writeTimeout is not None:
timeout = self.s.writeTimeout + 1
else:
timeout = 10
self._print_thread.join(timeout)
if self._read_thread is not None:
self.stop_reading = True
if self.s is not None and self.s.timeout is not None:
timeout = self.s.timeout + 1
else:
timeout = 10
self._read_thread.join(timeout)
if self.s is not None and self._owns_serial is True:
self.s.close()
self.s = None
self.printing = False
self._current_line_idx = 0
self._buffer = []
self.responses = []
self.sentlines = []
logger.debug('Disconnected from printer')
def load_file(self, filepath):
""" Load the given file into an internal _buffer. The lines will not be
send until `self._start_print_thread()` is called.
Parameters
----------
filepath : str
The path to a text file containing lines of GCode to be printed.
"""
lines = []
with open(filepath) as f:
for line in f:
line = line.strip()
if ';' in line: # clear out the comments
line = line.split(';')[0]
if line:
lines.append(line)
self._buffer.extend(lines)
def start(self):
""" Starts the read_thread and the _print_thread.
"""
self._start_read_thread()
self._start_print_thread()
def sendline(self, line):
""" Send the given line over serial by appending it to the send buffer
Parameters
----------
line : str
A line of GCode to send to the printer.
"""
if self._disconnect_pending:
msg = 'Attempted to send line after a disconnect was requested: {}'
raise RuntimeError(msg.format(line))
if line:
line = str(line).strip()
if ';' in line: # clear out the comments
line = line.split(';')[0]
if line:
self._buffer.append(line)
def get_response(self, line, timeout=0):
""" Send the given line and return the response from the printer.
Parameters
----------
line : str
The line to send to the printer
Returns
-------
r : str
The response from the printer.
"""
buf_len = len(self._buffer) + 1
self.sendline(line)
start_time = time()
while len(self.responses) != buf_len:
if len(self.responses) > buf_len:
msg = "Received more responses than lines sent"
raise RuntimeError(msg)
if timeout > 0 and (time() - start_time) > timeout:
return '' # return blank string on timeout.
if not self._is_read_thread_running():
raise RuntimeError("can't get response from serial since read thread isn't running")
sleep(0.01)
return self.responses[-1]
def current_position(self):
""" Get the current postion of the printer.
Returns
-------
pos : dict
Dict with keys of 'X', 'Y', 'Z', and 'E' and values of their
positions
"""
# example r: X:0.00 Y:0.00 Z:0.00 E:0.00 Count X: 0.00 Y:0.00 Z:0.00
r = self.get_response("M114")
r = r.split(' Count')[0].strip().split()
r = [x.split(':') for x in r]
pos = dict([(k, float(v)) for k, v in r])
return pos
def reset_linenumber(self, number = 0):
line = "M110 N{}".format(number)
self.sendline(line)
### Private Methods ######################################################
def _start_print_thread(self):
""" Spawns a new thread that will send all lines in the _buffer over
serial to the printer. This thread can be stopped by setting
`stop_printing` to True. If a print_thread already exists and is alive,
this method does nothing.
"""
if self._is_print_thread_running():
return
self.printing = True
self.stop_printing = False
self._print_thread = Thread(target=self._print_worker_entrypoint, name='Print')
self._print_thread.setDaemon(True)
self._print_thread.start()
logger.debug('print_thread started')
def _start_read_thread(self):
""" Spawns a new thread that will continuously read lines from the
printer. This thread can be stopped by setting `stop_reading` to True.
If a print_thread already exists and is alive, this method does
nothing.
"""
if self._is_read_thread_running():
return
self.stop_reading = False
self._read_thread = Thread(target=self._read_worker_entrypoint, name='Read')
self._read_thread.setDaemon(True)
self._read_thread.start()
logger.debug('read_thread started')
def _print_worker_entrypoint(self):
try:
self._print_worker()
except Exception as e:
logger.exception("Exception running print worker: " + str(e))
def _read_worker_entrypoint(self):
try:
self._read_worker()
except Exception as e:
logger.exception("Exception running read worker: " + str(e))
def _is_print_thread_running(self):
return self._print_thread is not None and self._print_thread.is_alive()
def _is_read_thread_running(self):
return self._read_thread is not None and self._read_thread.is_alive()
def _print_worker(self):
""" This method is spawned in the print thread. It loops over every line
in the _buffer and sends it over serial to the printer.
"""
while not self.stop_printing:
_paused = False
while self.paused is True and not self.stop_printing:
if _paused is False:
logger.debug('Printer.paused is True, waiting...')
_paused = True
sleep(0.01)
if _paused is True:
logger.debug('Printer.paused is now False, resuming.')
if self._current_line_idx < len(self._buffer):
self.printing = True
while not self._ok_received.is_set() and not self.stop_printing:
self._ok_received.wait(1)
line = self._next_line()
with self._communication_lock:
self.s.write(encode2To3(line))
self._ok_received.clear()
self._current_line_idx += 1
# Grab the just sent line without line numbers or checksum
plain_line = self._buffer[self._current_line_idx - 1].strip()
self.sentlines.append(plain_line)
else: # if there aren't new lines wait 10ms and check again
sleep(0.01)
self.printing = False
def _read_worker(self):
""" This method is spawned in the read thread. It continuously reads
from the printer over serial and checks for 'ok's.
"""
full_resp = ''
while not self.stop_reading:
if self.s is not None:
line = decode2To3(self.s.readline())
if line.startswith('Resend: '): # example line: "Resend: 143"
self._current_line_idx = int(line.split()[1]) - 1 + self._reset_offset
logger.debug('Resend Requested - {}'.format(line.strip()))
with self._communication_lock:
self._ok_received.set()
continue
if line.startswith('T:'):
self.temp_readings.append(line)
if line:
full_resp += line
# If there is no newline char in the response that means
# serial.readline() hit the timeout before a full line. This
# means communication has broken down so both threads need
# to be closed down.
if '\n' not in line:
self.printing = False
self.stop_printing = True
self.stop_reading = True
with self._communication_lock:
self._ok_received.set()
msg = """readline timed out mid-line.
last sentline: {}
response: {}
"""
raise RuntimeError(msg.format(self.sentlines[-1:],
full_resp))
if 'ok' in line:
with self._communication_lock:
self._ok_received.set()
self.responses.append(full_resp)
full_resp = ''
else: # if no printer is attached, wait 10ms to check again.
sleep(0.01)
def _next_line(self):
""" Prepares the next line to be sent to the printer by prepending the
line number and appending a checksum and newline character.
"""
line = self._buffer[self._current_line_idx].strip()
if line.startswith('M110 N'):
new_number = int(line[6:])
self._reset_offset = self._current_line_idx + 1 - new_number
elif line.startswith('M110'):
self._reset_offset = self._current_line_idx + 1
idx = self._current_line_idx + 1 - self._reset_offset
line = 'N{} {}'.format(idx, line)
checksum = self._checksum(line)
return '{}*{}\n'.format(line, checksum)
def _checksum(self, line):
""" Calclate the checksum by xor'ing all characters together.
"""
if not line:
raise RuntimeError("cannot compute checksum of an empty string")
return reduce(lambda a, b: a ^ b, [ord(char) for char in line])
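# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  The serial
# port, baudrate and G-code commands are assumptions; running this requires a
# printer speaking Marlin firmware attached to that port.
if __name__ == '__main__':
    printer = Printer(port='/dev/tty.usbmodem1421', baudrate=250000)
    printer.connect()
    try:
        printer.start()                                   # start the print and read threads
        printer.sendline('G28')                           # queue a homing move
        print(printer.get_response('M114', timeout=10))   # query the current position
    finally:
        printer.disconnect(wait=True)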
|
the-stack_0_2553 | from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# terminal urls
url(r'^$', views.terminals, name='terminals'),
url(r'^add/$', views.terminal_add, name='terminal-add'),
url(r'^terminal_process/$', views.terminal_process, name='terminal_process'),
url(r'^edit/(?P<pk>[0-9]+)/$', views.terminal_edit, name='terminal-edit'),
url(r'^terminal/history/(?P<pk>[0-9]+)/$', views.terminal_history, name='terminal-history'),
url(r'^terminal_update(?P<pk>[0-9]+)/$', views.terminal_update, name='terminal-update'),
url(r'^detail/(?P<pk>[0-9]+)/$', views.terminal_detail, name='terminal-detail'),
url(r'^delete/(?P<pk>[0-9]+)/$', views.terminal_delete, name='terminal-delete'),
# cashmovement urls
url(r'^transations/$', views.transactions, name='transactions'),
]
if settings.DEBUG:
# urlpatterns += [ url(r'^static/(?P<path>.*)$', serve)] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
the-stack_0_2556 | import config
import io
import tensorflow as tf
import joblib
def save_weights(weights, reverse_word_index):
out_v = io.open(f'{config.MODEL_PATH}/vecs.tsv', 'w', encoding='utf-8')
out_m = io.open(f'{config.MODEL_PATH}/meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, config.VOCAB_SIZE):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + '\n')
out_v.write('\t'.join([str(x) for x in embeddings]) + '\n')
out_v.close()
out_m.close()
if __name__ == '__main__':
load_model = tf.keras.models.load_model(f"{config.MODEL_PATH}my_model.h5")
reverse_word_index = joblib.load(f"{config.MODEL_PATH}rev_word_ind.pkl")
e = load_model.layers[0]
weights = e.get_weights()[0]
save_weights(weights, reverse_word_index)
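    # The generated vecs.tsv / meta.tsv pair can be loaded into the TensorFlow
    # Embedding Projector (https://projector.tensorflow.org) to inspect the
    # learned embeddings.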
|
the-stack_0_2557 | """The DeepFool attack
"""
import copy
import logging
import warnings
import numpy as np
import tensorflow as tf
from cleverhans.attacks.attack import Attack
from cleverhans.model import Model, wrapper_warning_logits, CallableModelWrapper
from cleverhans import utils
from cleverhans import utils_tf
np_dtype = np.dtype('float32')
_logger = utils.create_logger("cleverhans.attacks.deep_fool")
_logger.setLevel(logging.INFO)
class DeepFool(Attack):
"""
DeepFool is an untargeted & iterative attack which is based on an
iterative linearization of the classifier. The implementation here
is w.r.t. the L2 norm.
Paper link: "https://arxiv.org/pdf/1511.04599.pdf"
:param model: cleverhans.model.Model
:param sess: tf.Session
:param dtypestr: dtype of the data
:param kwargs: passed through to super constructor
"""
def __init__(self, model, sess, dtypestr='float32', **kwargs):
"""
Create a DeepFool instance.
"""
if not isinstance(model, Model):
wrapper_warning_logits()
model = CallableModelWrapper(model, 'logits')
super(DeepFool, self).__init__(model, sess, dtypestr, **kwargs)
self.structural_kwargs = [
'overshoot', 'max_iter', 'clip_max', 'clip_min', 'nb_candidate'
]
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
from cleverhans.utils_tf import jacobian_graph
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
# Define graph wrt to this input placeholder
logits = self.model.get_logits(x)
self.nb_classes = logits.get_shape().as_list()[-1]
assert self.nb_candidate <= self.nb_classes, \
'nb_candidate should not be greater than nb_classes'
preds = tf.reshape(
tf.nn.top_k(logits, k=self.nb_candidate)[0],
[-1, self.nb_candidate])
# grads will be the shape [batch_size, nb_candidate, image_size]
grads = tf.stack(jacobian_graph(preds, x, self.nb_candidate), axis=1)
# Define graph
def deepfool_wrap(x_val):
"""deepfool function for py_func"""
return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
self.nb_candidate, self.overshoot,
self.max_iter, self.clip_min, self.clip_max,
self.nb_classes)
wrap = tf.compat.v1.py_func(deepfool_wrap, [x], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap
def parse_params(self,
nb_candidate=10,
overshoot=0.02,
max_iter=50,
clip_min=0.,
clip_max=1.,
**kwargs):
"""
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for deepfool
:param clip_min: Minimum component value for clipping
:param clip_max: Maximum component value for clipping
"""
self.nb_candidate = nb_candidate
self.overshoot = overshoot
self.max_iter = max_iter
self.clip_min = clip_min
self.clip_max = clip_max
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True
def deepfool_batch(sess,
x,
pred,
logits,
grads,
X,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
nb_classes,
feed=None):
"""
Applies DeepFool to a batch of inputs
:param sess: TF session
:param x: The input placeholder
:param pred: The model's sorted symbolic output of logits, only the top
nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
  :param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param X: Numpy array with sample inputs
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:param nb_classes: Number of model output classes
:return: Adversarial examples
"""
X_adv = deepfool_attack(
sess,
x,
pred,
logits,
grads,
X,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=feed)
return np.asarray(X_adv, dtype=np_dtype)
def deepfool_attack(sess,
x,
predictions,
logits,
grads,
sample,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=None):
"""
TensorFlow implementation of DeepFool.
Paper link: see https://arxiv.org/pdf/1511.04599.pdf
:param sess: TF session
:param x: The input placeholder
:param predictions: The model's sorted symbolic output of logits, only the
top nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
  :param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param sample: Numpy array with sample input
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:return: Adversarial examples
"""
adv_x = copy.copy(sample)
# Initialize the loop variables
iteration = 0
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
r_tot = np.zeros(sample.shape)
original = current # use original label as the reference
_logger.debug(
"Starting DeepFool attack up to %s iterations", max_iter)
# Repeat this main loop until we have achieved misclassification
while (np.any(current == original) and iteration < max_iter):
if iteration % 5 == 0 and iteration > 0:
_logger.info("Attack result at iteration %s is %s", iteration, current)
gradients = sess.run(grads, feed_dict={x: adv_x})
predictions_val = sess.run(predictions, feed_dict={x: adv_x})
for idx in range(sample.shape[0]):
pert = np.inf
if current[idx] != original[idx]:
continue
for k in range(1, nb_candidate):
w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
f_k = predictions_val[idx, k] - predictions_val[idx, 0]
# adding value 0.00001 to prevent f_k = 0
pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
if pert_k < pert:
pert = pert_k
w = w_k
r_i = pert * w / np.linalg.norm(w)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
adv_x = np.clip(r_tot + sample, clip_min, clip_max)
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
# Update loop variables
iteration = iteration + 1
# need more revision, including info like how many succeed
_logger.info("Attack result at iteration %s is %s", iteration, current)
_logger.info("%s out of %s become adversarial examples at iteration %s",
sum(current != original),
sample.shape[0],
iteration)
# need to clip this image into the given range
adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
return adv_x
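# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of typical use of the
# DeepFool attack.  `model` (a cleverhans.model.Model), the TF session `sess`,
# the input placeholder `x` and `x_batch` are assumptions supplied by the
# surrounding experiment code, so this is left as a commented-out example:
#
#   attack = DeepFool(model, sess=sess)
#   adv_x = attack.generate(x, nb_candidate=10, overshoot=0.02, max_iter=50,
#                           clip_min=0., clip_max=1.)
#   x_adv = sess.run(adv_x, feed_dict={x: x_batch})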
|
the-stack_0_2561 | from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import warnings
import time
from collections import deque
from .utils.generic_utils import Progbar
class CallbackList(object):
def __init__(self, callbacks=[], queue_length=10):
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def _set_params(self, params):
for callback in self.callbacks:
callback._set_params(params)
def _set_model(self, model):
for callback in self.callbacks:
callback._set_model(model)
def on_epoch_begin(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs={}):
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \
and delta_t_median > 0.1:
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs={}):
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \
and delta_t_median > 0.1:
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs={}):
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs={}):
for callback in self.callbacks:
callback.on_train_end(logs)
class Callback(object):
def __init__(self):
pass
def _set_params(self, params):
self.params = params
def _set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs={}):
pass
def on_epoch_end(self, epoch, logs={}):
pass
def on_batch_begin(self, batch, logs={}):
pass
def on_batch_end(self, batch, logs={}):
pass
def on_train_begin(self, logs={}):
pass
def on_train_end(self, logs={}):
pass
class BaseLogger(Callback):
def on_train_begin(self, logs={}):
self.verbose = self.params['verbose']
def on_epoch_begin(self, epoch, logs={}):
if self.verbose:
print('Epoch %d' % epoch)
self.progbar = Progbar(target=self.params['nb_sample'], \
verbose=self.verbose)
self.current = 0
self.tot_loss = 0.
self.tot_acc = 0.
def on_batch_begin(self, batch, logs={}):
if self.current < self.params['nb_sample']:
self.log_values = []
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.current += batch_size
loss = logs.get('loss')
self.log_values.append(('loss', loss))
self.tot_loss += loss * batch_size
if self.params['show_accuracy']:
accuracy = logs.get('accuracy')
self.log_values.append(('acc.', accuracy))
self.tot_acc += accuracy * batch_size
# skip progbar update for the last batch; will be handled by on_epoch_end
if self.verbose and self.current < self.params['nb_sample']:
self.progbar.update(self.current, self.log_values)
def on_epoch_end(self, epoch, logs={}):
self.log_values.append(('loss', self.tot_loss / self.current))
if self.params['show_accuracy']:
self.log_values.append(('acc.', self.tot_acc / self.current))
if self.params['do_validation']:
val_loss = logs.get('val_loss')
self.log_values.append(('val. loss', val_loss))
if self.params['show_accuracy']:
val_acc = logs.get('val_accuracy')
self.log_values.append(('val. acc.', val_acc))
self.progbar.update(self.current, self.log_values)
class History(Callback):
def on_train_begin(self, logs={}):
self.epoch = []
self.loss = []
if self.params['show_accuracy']:
self.accuracy = []
if self.params['do_validation']:
self.validation_loss = []
if self.params['show_accuracy']:
self.validation_accuracy = []
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.tot_loss = 0.
self.tot_accuracy = 0.
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
self.tot_loss += logs.get('loss', 0.) * batch_size
if self.params['show_accuracy']:
self.tot_accuracy += logs.get('accuracy', 0.) * batch_size
def on_epoch_end(self, epoch, logs={}):
val_loss = logs.get('val_loss')
val_acc = logs.get('val_accuracy')
self.epoch.append(epoch)
self.loss.append(self.tot_loss / self.seen)
if self.params['show_accuracy']:
self.accuracy.append(self.tot_accuracy / self.seen)
if self.params['do_validation']:
self.validation_loss.append(val_loss)
if self.params['show_accuracy']:
self.validation_accuracy.append(val_acc)
class ModelCheckpoint(Callback):
def __init__(self, filepath, verbose=0, save_best_only=False):
super(Callback, self).__init__()
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.loss = []
self.best_loss = np.Inf
self.val_loss = []
self.best_val_loss = np.Inf
def on_epoch_end(self, epoch, logs={}):
        '''currently, on_epoch_end receives epoch_logs from keras.models.Sequential.fit
        which only contains, if at all, the validation loss and validation accuracy'''
if self.save_best_only and self.params['do_validation']:
cur_val_loss = logs.get('val_loss')
self.val_loss.append(cur_val_loss)
if cur_val_loss < self.best_val_loss:
if self.verbose > 0:
print("Epoch %05d: valdidation loss improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.best_val_loss, cur_val_loss, self.filepath))
self.best_val_loss = cur_val_loss
self.model.save_weights(self.filepath, overwrite=True)
else:
if self.verbose > 0:
print("Epoch %05d: validation loss did not improve" % (epoch))
elif self.save_best_only and not self.params['do_validation']:
import warnings
warnings.warn("Can save best model only with validation data, skipping", RuntimeWarning)
elif not self.save_best_only:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
self.model.save_weights(self.filepath, overwrite=True)
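# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of attaching these
# callbacks to a model.  `model`, the training arrays and the weights path are
# assumptions, and the fit() keywords shown follow the old-style Keras API
# this module targets, so this is left as a commented-out example:
#
#   history = History()
#   checkpointer = ModelCheckpoint('/tmp/weights.hdf5', verbose=1,
#                                  save_best_only=True)
#   model.fit(X_train, y_train, validation_split=0.1, show_accuracy=True,
#             callbacks=[history, checkpointer])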
|
the-stack_0_2562 | from __future__ import unicode_literals
"""
To try running Django tests using green you can run:
./manage.py test --testrunner=green.djangorunner.DjangoRunner
To make the change permanent for your project, in settings.py add:
TEST_RUNNER="green.djangorunner.DjangoRunner"
"""
from argparse import Namespace
import os
import sys
from green.config import mergeConfig
from green.loader import GreenTestLoader
from green.output import GreenStream
from green.runner import run
from green.suite import GreenTestSuite
# If we're not being run from an actual django project, set up django config
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'green.djangorunner')
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = ')9^_e(=cisybdt4m4+fs+_wb%d$!9mpcoy0um^alvx%gexj#jv'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'myproj.urls'
WSGI_APPLICATION = 'myproj.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# End of django fake config stuff
def django_missing():
raise ImportError("No django module installed")
try:
import django
if django.VERSION[:2] < (1, 6): # pragma: no cover
raise ImportError("Green integration supports Django 1.6+")
from django.test.runner import DiscoverRunner
class DjangoRunner(DiscoverRunner):
def __init__(self, verbose=-1, **kwargs):
super(DjangoRunner, self).__init__(**kwargs)
self.verbose = verbose
self.loader = GreenTestLoader()
@classmethod
def add_arguments(cls, parser):
parser.add_argument (
'--green-verbosity',
action='store', dest='verbose', default=-1, type=int,
help="""
Green 'verbose' level for tests. Value should be an integer
that green supports. For example: --green-verbosity 3""")
super(DjangoRunner, cls).add_arguments(parser)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
# Django setup
self.setup_test_environment()
django_db = self.setup_databases()
# Green
if type(test_labels) == tuple:
test_labels = list(test_labels)
else:
raise ValueError("test_labels should be a tuple of strings")
if not test_labels:
test_labels = ['.']
args = mergeConfig(Namespace())
if self.verbose != -1:
args.verbose = self.verbose
args.targets = test_labels
stream = GreenStream(sys.stdout)
suite = self.loader.loadTargets(args.targets)
if not suite:
suite = GreenTestSuite()
result = run(suite, stream, args)
# Django teardown
self.teardown_databases(django_db)
self.teardown_test_environment()
return self.suite_result(suite, result)
except ImportError: # pragma: no cover
DjangoRunner = django_missing
|
the-stack_0_2564 | import obspy
from mth5.utils.pathing import DATA_DIR
def load_sample_network_inventory(xml_file_handle, verbose=False):
""" """
iris_dir = DATA_DIR.joinpath("iris")
xml_file_path = iris_dir.joinpath(xml_file_handle)
xml_file_path_str = xml_file_path.__str__()
if verbose:
print(f"Loading {xml_file_path_str}")
inventory = obspy.read_inventory(xml_file_path_str)
# inventory = obspy.read_inventory(xml_file_path.__str__())
return inventory
def decimation_info_is_degenerate(obspy_stage):
"""
Check a few condtions that may apply to an obspy stage which if true
imply that the decimation information can be stripped out as it bears
no information about aany data transformation;
Case 1: All these attrs are None decimation has no information:
{'decimation_input_sample_rate', 'decimation_factor',
'decimation_offset', 'decimation_delay', 'decimation_correction'}
Case 2:
"""
cond1 = stage.stage_gain in [1.0, None]
cond2 = stage.decimation_factor in [1, None]
if cond1 & cond2:
return True
else:
return False
def decimation_info_is_pure_delay(stage):
cond1 = stage.stage_gain == 1.0
cond2 = stage.decimation_factor == 1
cond3 = stage.decimation_delay != 0.0
cond4 = stage.decimation_correction == 0.0
if cond1 & cond2 & cond3 & cond4:
return True
else:
return False
def stage_gain_is_degenerate():
# if gain is 1.0 ignore it
pass
|
the-stack_0_2565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import xarray as xr
__author__ = "Louis Richard"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def trace(inp):
r"""Computes trace of the time series of 2nd order tensors.
Parameters
----------
inp : xarray.DataArray
Time series of the input 2nd order tensor.
Returns
-------
out : xarray.DataArray
Time series of the trace of the input tensor.
Examples
--------
>>> from pyrfu import mms, pyrf
Time interval
>>> tint = ["2015-10-30T05:15:20.000", "2015-10-30T05:16:20.000"]
Spacecraft index
>>> mms_id = 1
Load magnetic field and ion temperature
>>> b_xyz = mms.get_data("B_gse_fgm_srvy_l2", tint, mms_id)
>>> t_xyz_i = mms.get_data("Ti_gse_fpi_fast_l2", tint, mms_id)
Rotate to ion temperature tensor to field aligned coordinates
>>> t_xyzfac_i = mms.rotate_tensor(t_xyz_i, "fac", b_xyz, "pp")
Compute scalar temperature
>>> t_i = pyrf.trace(t_xyzfac_i)
"""
inp_data = inp.data
out_data = inp_data[:, 0, 0] + inp_data[:, 1, 1] + inp_data[:, 2, 2]
# Attributes
attrs = inp.attrs
# Change tensor order from 2 (matrix) to 0 (scalar)
attrs["TENSOR_ORDER"] = 0
out = xr.DataArray(out_data, coords=[inp.time.data], dims=["time"],
attrs=attrs)
return out
|
the-stack_0_2566 | from validator.rules_src.max import Max
from validator.rules_src.min import Min
class Between(Max, Min):
"""
>>> Between(2, 15).check(23)
False
>>> Between(2, 15).check(12)
True
"""
def __init__(self, min_value, max_value):
Min.__init__(self, min_value)
Max.__init__(self, max_value)
def check(self, arg):
if Min.check(self, arg) and Max.check(self, arg):
return True
self.set_errror_message(
f"Expected Between: {self.min_value} and {self.max_value}, Got: {arg}"
)
return False
def __from_str__(self):
Min.__from_str__(self)
Max.__from_str__(self)
|
the-stack_0_2568 | # coding: utf-8
import os
import copy
import collections
import collections.abc # 2022.02.28 - Python 3.3 or greater
import types
from collections import namedtuple
# 2022.02.28 - Python 3.3 or greater; import from __init__.py
from . import PY3K, PY3K3
from jinja2 import nodes
from jinja2 import Environment, TemplateNotFound, FileSystemLoader
from jinja2.ext import Extension
from jinja2.loaders import split_template_path
from jinja2.utils import open_if_exists
from schema import Schema
from snaql.convertors import (
guard_bool,
guard_case,
guard_date,
guard_datetime,
guard_float,
guard_integer,
guard_regexp,
guard_string,
guard_time,
guard_timedelta,
)
class RawFileSystemLoader(FileSystemLoader):
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = os.path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = os.path.getmtime(filename)
# Need to save original raw template before compilation
environment.sql_params.setdefault('raws', {}).update({
template: [c.strip() for c in contents.splitlines()]
})
def uptodate():
try:
return os.path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
class JinjaSQLExtension(Extension):
tags = set(['sql', 'query'])
def parse(self, parser):
lineno = next(parser.stream).lineno
expr = parser.parse_expression()
args = [expr]
kwargs = [nodes.Keyword('func', expr)]
if parser.stream.skip_if('comma'):
# Optional 'note' for function docstring
if (
parser.stream.current.type == 'name' and
parser.stream.current.value in (
'note', 'cond_for', 'depends_on'
)
):
stream_type = parser.stream.current.value
next(parser.stream)
parser.stream.expect('assign')
# Depends meta is always a list
if stream_type == 'depends_on':
c_expr = parser.parse_list()
else:
c_expr = parser.parse_expression()
args.append(c_expr)
kwargs.append(nodes.Keyword(stream_type, c_expr))
body = parser.parse_statements(
['name:endsql', 'name:endquery'], drop_needle=True
)
raw_template = self.environment.sql_params['raws'][parser.name]
# Lines range of original raw template
raw_lines = slice(lineno, parser.stream.current.lineno-1)
self.environment.sql_params.setdefault('funcs', {}).update({
expr.value: {'raw_sql': '\n '.join(raw_template[raw_lines])}
})
call_node = nodes.Call(
self.attr('_sql_process', lineno=lineno),
args, kwargs, None, None
)
return nodes.CallBlock(call_node, [], [], body)
def _sql_process(self, *args, **kwargs):
caller = kwargs['caller']
raw_sql = '\n '.join(x.strip() for x in caller().split('\n') if x)
if 'cond_for' in kwargs:
origin = (
self.environment.sql_params['funcs'].get(kwargs['cond_for'])
)
if origin:
origin.setdefault('conds', []).append(kwargs['cond_for'])
origin = self.environment.sql_params['funcs'].get(kwargs['func'])
origin.update({
'sql': raw_sql,
'note': kwargs.get('note'),
'is_cond': 'cond_for' in kwargs,
'depends_on': kwargs.get('depends_on', []),
'node': None,
})
if origin['is_cond']:
origin['cond_for'] = kwargs['cond_for']
return raw_sql
class SnaqlDepNode(object):
def __init__(self, name):
self.name = name
self.edges = []
def add_edge(self, node):
self.edges.append(node)
def __str__(self):
return '<SnaqlDepNode %s>' % self.name
def __repr__(self):
return '<SnaqlDepNode %s>' % self.name
class SnaqlException(Exception):
pass
class Snaql(object):
def __init__(self, sql_root, sql_ns):
self.sql_root = sql_root
self.jinja_env = Environment(
trim_blocks=True,
extensions=[JinjaSQLExtension],
loader=RawFileSystemLoader(os.path.join(self.sql_root, sql_ns)),
)
self.jinja_env.filters.update({
'guards.string': guard_string,
'guards.integer': guard_integer,
'guards.datetime': guard_datetime,
'guards.date': guard_date,
'guards.float': guard_float,
'guards.timedelta': guard_timedelta,
'guards.time': guard_time,
'guards.case': guard_case,
'guards.regexp': guard_regexp,
'guards.bool': guard_bool,
})
self.jinja_env.extend(sql_params={})
def gen_func(self, name, meta_struct, env):
def subrender_cond(owner_name, cond_func, context):
if (
# 2022.02.28 - Handle deprecated collections class since Python 3.3
isinstance(cond_func, collections.abc.Callable if PY3K3 else collections.Callable) and
cond_func.is_cond
):
cond_struct = meta_struct['funcs'][cond_func.func_name]
if cond_struct['cond_for'] != owner_name:
raise SnaqlException(
'"%s" is not proper condition for "%s"' % (
cond_func.func_name,
owner_name
)
)
cond_tmpl = env.from_string(
meta_struct['funcs'][cond_func.func_name]['raw_sql']
)
return cond_tmpl.render(**context).strip()
return cond_func
def fn(**kwargs):
if meta_struct['funcs'][name]['is_cond']:
raise SnaqlException((
'"%s" is condition for "%s" and can not '
'be rendered outside of it\'s scope'
) % (name, meta_struct['funcs'][name]['cond_for']))
if kwargs:
for point, val in kwargs.items():
maybe_cond_sql = subrender_cond(name, val, kwargs)
if maybe_cond_sql:
kwargs[point] = maybe_cond_sql
if (
# 2022.02.28 - Handle deprecated collections class since Python 3.3
isinstance(val, collections.abc.Iterable if PY3K3 else collections.Iterable) and
not isinstance(
val, (str if PY3K else types.StringTypes, dict)
)
):
val = [subrender_cond(name, v, kwargs) for v in val]
kwargs[point] = [v for v in val if v]
if 'schema' in kwargs and isinstance(kwargs['schema'], Schema):
validation_schema = kwargs.pop('schema')
kwargs = validation_schema.validate(kwargs)
sql_tmpl = (
env.from_string(meta_struct['funcs'][name]['raw_sql'])
)
return sql_tmpl.render(**kwargs).strip()
return meta_struct['funcs'][name]['sql']
fn.__doc__ = meta_struct['funcs'][name]['note']
fn.is_cond = meta_struct['funcs'][name]['is_cond']
fn.func_name = str(name)
return fn
def gen_dep_graph(self, node, accum):
for edge in node.edges:
if edge not in accum:
self.gen_dep_graph(edge, accum)
accum.append(node)
return accum
def load_queries(self, sql_path):
template = self.jinja_env.get_template(sql_path)
template.render()
factory_methods = {}
meta_struct = copy.deepcopy(self.jinja_env.sql_params)
blocks = set(meta_struct['funcs'])
node = SnaqlDepNode('root')
for name, block in meta_struct['funcs'].items():
# Dependency graph building
block['node'] = block['node'] or SnaqlDepNode(name)
for dep in block['depends_on']:
if dep not in blocks:
raise SnaqlException(
'"%s" block not found in "%s"' % (dep, sql_path)
)
if meta_struct['funcs'][dep]['node'] is None:
meta_struct['funcs'][dep]['node'] = SnaqlDepNode(dep)
block['node'].add_edge(meta_struct['funcs'][dep]['node'])
node.add_edge(block['node'])
fn = self.gen_func(name, meta_struct, self.jinja_env)
factory_methods[name] = fn
edges_accum = []
graph = self.gen_dep_graph(node, edges_accum)
graph.pop() # root node
factory_methods['ordered_blocks'] = [
factory_methods[n.name]
for n in graph
]
factory = namedtuple('SQLFactory', factory_methods.keys())
struct = factory(*factory_methods.values())
self.jinja_env.sql_params.clear()
return struct
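# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  The directory
# layout and the 'users.sql' template with its block names are assumptions for
# demonstration only, so this is left as a commented-out example:
#
#   snaql_factory = Snaql(os.path.dirname(__file__), 'queries')
#   users_queries = snaql_factory.load_queries('users.sql')
#   sql = users_queries.select_all()        # renders the {% sql 'select_all' %} block
#   for block in users_queries.ordered_blocks:
#       block()                             # blocks come in dependency order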
|
the-stack_0_2569 | from unittest import mock
import pytest
from directory_api_client.base import AbstractAPIClient
class APIClient(AbstractAPIClient):
version = 123
@pytest.fixture
def client():
return APIClient(
base_url='https://example.com',
api_key='test',
sender_id='test',
timeout=5,
)
@mock.patch.object(AbstractAPIClient, 'fallback_cache_get')
def test_fallback_cache_used(mock_fallback_cache_get, client):
client.get('http://www.thing.com', use_fallback_cache=True)
assert mock_fallback_cache_get.call_count == 1
assert mock_fallback_cache_get.call_args == mock.call('http://www.thing.com')
@mock.patch.object(AbstractAPIClient, 'get')
@mock.patch.object(AbstractAPIClient, 'fallback_cache_get')
def test_fallback_cache_not_used(mock_fallback_cache_get, mock_get, client):
client.get('http://www.thing.com')
assert mock_fallback_cache_get.call_count == 0
assert mock_get.call_count == 1
|
the-stack_0_2570 |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import tempfile
import tarfile
import radical.utils as ru
import radical.saga as rs
rs.fs = rs.filesystem
from ... import states as rps
from ... import constants as rpc
from ... import utils as rpu
from .base import TMGRStagingInputComponent
from ...staging_directives import complete_url
# if we receive more than a certain number of tasks in a bulk, we create the
# task sandboxes in a remote bulk op. That limit is defined here, along with
# the definition of the bulk mechanism used to create the sandboxes:
# saga: use SAGA bulk ops
# tar : unpack a locally created tar which contains all sandboxes
TASK_BULK_MKDIR_THRESHOLD = 16
TASK_BULK_MKDIR_MECHANISM = 'tar'
# ------------------------------------------------------------------------------
#
class Default(TMGRStagingInputComponent):
"""
This component performs all tmgr side input staging directives for compute
tasks. It gets tasks from the tmgr_staging_input_queue, in
TMGR_STAGING_INPUT_PENDING state, will advance them to TMGR_STAGING_INPUT
    state while performing the staging, and then moves them to the
AGENT_SCHEDULING_PENDING state, passing control to the agent.
"""
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
TMGRStagingInputComponent.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def initialize(self):
# we keep a cache of SAGA dir handles
self._fs_cache = dict()
self._js_cache = dict()
self._pilots = dict()
self._pilots_lock = ru.RLock()
self.register_input(rps.TMGR_STAGING_INPUT_PENDING,
rpc.TMGR_STAGING_INPUT_QUEUE, self.work)
# FIXME: this queue is inaccessible, needs routing via mongodb
self.register_output(rps.AGENT_STAGING_INPUT_PENDING, None)
# we subscribe to the command channel to learn about pilots being added
# to this task manager.
self.register_subscriber(rpc.CONTROL_PUBSUB, self._base_command_cb)
# --------------------------------------------------------------------------
#
def finalize(self):
for fs in list(self._fs_cache.values()): fs.close()
for js in list(self._js_cache.values()): js.close()
# --------------------------------------------------------------------------
#
def _base_command_cb(self, topic, msg):
# keep track of `add_pilots` commands and updates self._pilots
# accordingly.
cmd = msg.get('cmd')
arg = msg.get('arg')
        if cmd not in ['add_pilots']:
            self._log.debug('skip cmd %s', cmd)
            return True
pilots = arg.get('pilots', [])
if not isinstance(pilots, list):
pilots = [pilots]
with self._pilots_lock:
for pilot in pilots:
pid = pilot['uid']
self._log.debug('add pilot %s', pid)
if pid not in self._pilots:
self._pilots[pid] = pilot
return True
# --------------------------------------------------------------------------
#
def work(self, tasks):
if not isinstance(tasks, list):
tasks = [tasks]
self.advance(tasks, rps.TMGR_STAGING_INPUT, publish=True, push=False)
# we first filter out any tasks which don't need any input staging, and
# advance them again as a bulk. We work over the others one by one, and
# advance them individually, to avoid stalling from slow staging ops.
no_staging_tasks = list()
staging_tasks = list()
for task in tasks:
# no matter if we perform any staging or not, we will push the full
# task info to the DB on the next advance, and will pass control to
# the agent.
task['$all'] = True
task['control'] = 'agent_pending'
# check if we have any staging directives to be enacted in this
# component
actionables = list()
for sd in task['description'].get('input_staging', []):
if sd['action'] in [rpc.TRANSFER, rpc.TARBALL]:
actionables.append(sd)
if actionables:
staging_tasks.append([task, actionables])
else:
no_staging_tasks.append(task)
# Optimization: if we obtained a large bulk of tasks, we at this point
# attempt a bulk mkdir for the task sandboxes, to free the agent of
# performing that operation. That implies that the agent needs to check
# sandbox existence before attempting to create them now.
#
# Note that this relies on the tmgr scheduler to assigning the sandbox
# to the task.
#
# Note further that we need to make sure that all tasks are actually
# pointing into the same target file system, so we need to cluster by
# filesystem before checking the bulk size. For simplicity we actually
# cluster by pilot ID, which is sub-optimal for task bulks which go to
# different pilots on the same resource (think OSG).
#
# Note further that we skip the bulk-op for all tasks for which we
# actually need to stage data, since the mkdir will then implicitly be
# done anyways.
#
# Caveat: we can actually only (reasonably) do this if we know some
# details about the pilot, because otherwise we'd have too much guessing
# to do about the pilot configuration (sandbox, access schema, etc), so
# we only attempt this optimization for tasks scheduled to pilots for
# which we learned those details.
task_sboxes_by_pid = dict()
for task in no_staging_tasks:
sbox = task['task_sandbox']
pid = task['pilot']
if pid not in task_sboxes_by_pid:
task_sboxes_by_pid[pid] = list()
task_sboxes_by_pid[pid].append(sbox)
# now trigger the bulk mkdir for all filesystems which have more than
        # a certain number of tasks to handle in this bulk:
for pid in task_sboxes_by_pid:
with self._pilots_lock:
pilot = self._pilots.get(pid)
if not pilot:
# we don't feel inclined to optimize for unknown pilots
                self._log.debug('pid %s unknown - skip optimization', pid)
continue
session_sbox = self._session._get_session_sandbox(pilot)
task_sboxes = task_sboxes_by_pid[pid]
if len(task_sboxes) >= TASK_BULK_MKDIR_THRESHOLD:
self._log.debug('tar %d sboxes', len(task_sboxes))
# no matter the bulk mechanism, we need a SAGA handle to the
# remote FS
sbox_fs = ru.Url(session_sbox) # deep copy
sbox_fs.path = '/'
sbox_fs_str = str(sbox_fs)
if sbox_fs_str not in self._fs_cache:
self._fs_cache[sbox_fs_str] = \
rs.fs.Directory(sbox_fs, session=self._session)
saga_dir = self._fs_cache[sbox_fs_str]
# we have two options for a bulk mkdir:
# 1) ask SAGA to create the sandboxes in a bulk op
# 2) create a tarball with all task sandboxes, push
# it over, and untar it (one untar op then creates all dirs).
# We implement both
if TASK_BULK_MKDIR_MECHANISM == 'saga':
tc = rs.task.Container()
for sbox in task_sboxes:
tc.add(saga_dir.make_dir(sbox, ttype=rs.TASK))
tc.run()
tc.wait()
elif TASK_BULK_MKDIR_MECHANISM == 'tar':
tmp_path = tempfile.mkdtemp(prefix='rp_agent_tar_dir')
tmp_dir = os.path.abspath(tmp_path)
tar_name = '%s.%s.tar' % (self._session.uid, self.uid)
tar_tgt = '%s/%s' % (tmp_dir, tar_name)
tar_url = ru.Url('file://localhost/%s' % tar_tgt)
# we want pathnames which are relative to the session
# sandbox. Ignore all other sandboxes - the agent will have
# to create those.
root = str(session_sbox)
rlen = len(root)
rels = list()
for path in task_sboxes:
if path.startswith(root):
rels.append(path[rlen + 1:])
rpu.create_tar(tar_tgt, rels)
tar_rem_path = "%s/%s" % (str(session_sbox), tar_name)
self._log.debug('sbox: %s [%s]', session_sbox,
type(session_sbox))
self._log.debug('copy: %s -> %s', tar_url, tar_rem_path)
saga_dir.copy(tar_url, tar_rem_path,
flags=rs.fs.CREATE_PARENTS)
# get a job service handle to the target resource and run
# the untar command. Use the hop to skip the batch system
js_url = pilot['js_hop']
self._log.debug('js : %s', js_url)
if js_url in self._js_cache:
js_tmp = self._js_cache[js_url]
else:
js_tmp = rs.job.Service(js_url, session=self._session)
self._js_cache[js_url] = js_tmp
cmd = "tar xvf %s/%s -C %s" % (session_sbox.path, tar_name,
session_sbox.path)
j = js_tmp.run_job(cmd)
j.wait()
self._log.debug('untar : %s', cmd)
self._log.debug('untar : %s\n---\n%s\n---\n%s',
j.get_stdout_string(), j.get_stderr_string(),
j.exit_code)
if no_staging_tasks:
# nothing to stage, push to the agent
self.advance(no_staging_tasks, rps.AGENT_STAGING_INPUT_PENDING,
publish=True, push=True)
for task,actionables in staging_tasks:
self._handle_task(task, actionables)
# --------------------------------------------------------------------------
#
def _handle_task(self, task, actionables):
        # FIXME: we should create task sandboxes in bulk
uid = task['uid']
self._prof.prof("create_sandbox_start", uid=uid)
src_context = {'pwd' : os.getcwd(), # !!!
'task' : task['task_sandbox'],
'pilot' : task['pilot_sandbox'],
'resource' : task['resource_sandbox']}
tgt_context = {'pwd' : task['task_sandbox'], # !!!
'task' : task['task_sandbox'],
'pilot' : task['pilot_sandbox'],
'resource' : task['resource_sandbox']}
# we have actionable staging directives, and thus we need a task
# sandbox.
sandbox = rs.Url(task["task_sandbox"])
tmp = rs.Url(task["task_sandbox"])
# url used for cache (sandbox url w/o path)
tmp.path = '/'
key = str(tmp)
self._log.debug('key %s / %s', key, tmp)
if key not in self._fs_cache:
self._fs_cache[key] = rs.fs.Directory(tmp, session=self._session)
saga_dir = self._fs_cache[key]
saga_dir.make_dir(sandbox, flags=rs.fs.CREATE_PARENTS)
self._prof.prof("create_sandbox_stop", uid=uid)
# Loop over all transfer directives and filter out tarball staging
# directives. Those files are added into a tarball, and a single
# actionable to stage that tarball replaces the original actionables.
# create a new actionable list during the filtering
new_actionables = list()
tar_file = None
for sd in actionables:
# don't touch non-tar SDs
if sd['action'] != rpc.TARBALL:
new_actionables.append(sd)
else:
action = sd['action']
flags = sd['flags'] # NOTE: we don't use those
did = sd['uid']
src = sd['source']
tgt = sd['target']
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
self._prof.prof('staging_in_tar_start', uid=uid, msg=did)
# create a tarfile on the first match, and register for transfer
if not tar_file:
tmp_file = tempfile.NamedTemporaryFile(
prefix='rp_usi_%s.' % uid,
suffix='.tar',
delete=False)
tar_path = tmp_file.name
tar_file = tarfile.open(fileobj=tmp_file, mode='w')
tar_src = ru.Url('file://localhost/%s' % tar_path)
tar_tgt = ru.Url('task:////%s.tar' % uid)
tar_did = ru.generate_id('sd')
tar_sd = {'action' : rpc.TRANSFER,
'flags' : rpc.DEFAULT_FLAGS,
'uid' : tar_did,
'source' : str(tar_src),
'target' : str(tar_tgt),
}
new_actionables.append(tar_sd)
# add the src file
tar_file.add(src.path, arcname=tgt.path)
self._prof.prof('staging_in_tar_stop', uid=uid, msg=did)
# make sure tarball is flushed to disk
if tar_file:
tar_file.close()
# work on the filtered TRANSFER actionables
for sd in new_actionables:
action = sd['action']
flags = sd['flags']
did = sd['uid']
src = sd['source']
tgt = sd['target']
if action == rpc.TRANSFER:
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
# Check if the src is a folder, if true
# add recursive flag if not already specified
if os.path.isdir(src.path):
flags |= rs.fs.RECURSIVE
# Always set CREATE_PARENTS
flags |= rs.fs.CREATE_PARENTS
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
self._prof.prof('staging_in_start', uid=uid, msg=did)
saga_dir.copy(src, tgt, flags=flags)
self._prof.prof('staging_in_stop', uid=uid, msg=did)
if tar_file:
# some tarball staging was done. Add a staging directive for the
# agent to untar the tarball, and clean up.
tar_sd['action'] = rpc.TARBALL
task['description']['input_staging'].append(tar_sd)
os.remove(tar_path)
# staging is done, we can advance the task at last
self.advance(task, rps.AGENT_STAGING_INPUT_PENDING,
publish=True, push=True)
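    # Hedged aside (not part of this component): `rpu.create_tar` is assumed
    # above to pack the *relative* sandbox names as directory entries, so that
    # a single remote `tar xf` creates all sandboxes at once.  A standalone
    # sketch of that idea, with made-up names:
    #
    #   import tarfile
    #   with tarfile.open('sboxes.tar', 'w') as tf:
    #       for rel in ['task.0000', 'task.0001']:
    #           info      = tarfile.TarInfo(rel)
    #           info.type = tarfile.DIRTYPE
    #           info.mode = 0o755
    #           tf.addfile(info)
    #   # remote side: tar xf sboxes.tar -C <session sandbox>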
# ------------------------------------------------------------------------------
|
the-stack_0_2571 | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
from sg_jira.handlers import EntityIssueHandler
from sg_jira.constants import (
SHOTGUN_JIRA_ID_FIELD,
SHOTGUN_SYNC_IN_JIRA_FIELD,
SHOTGUN_JIRA_URL_FIELD,
)
from sg_jira.errors import InvalidShotgunValue
class AssetIssueHandler(EntityIssueHandler):
"""
A handler which syncs a ShotGrid Asset as a Jira Issue
"""
# Define the mapping between Shotgun Asset fields and Jira Issue fields
__ASSET_FIELDS_MAPPING = {
"code": "summary",
"description": "description",
"tags": "labels",
"created_by": "reporter",
"tasks": None,
"sg_status_list": None,
}
# The type of Issue link to use when linking a Task Issue to the Issue
# representing the Asset.
__JIRA_PARENT_LINK_TYPE = "relates to"
# Define the mapping between Jira Issue fields and Shotgun Asset fields
# if the Shotgun target is None, it means the target field is not settable
# directly.
__ISSUE_FIELDS_MAPPING = {
"summary": "code",
"description": "description",
"status": "sg_status_list",
"labels": "tags",
}
@property
def _shotgun_asset_fields(self):
"""
Return the list of fields to ask for when retrieving an Asset from
ShotGrid.
"""
return [
"project.Project.%s" % SHOTGUN_JIRA_ID_FIELD,
"project.Project.name",
SHOTGUN_JIRA_ID_FIELD,
] + self._supported_shotgun_fields_for_shotgun_event()
@property
def _sg_jira_status_mapping(self):
"""
Return a dictionary where keys are ShotGrid status short codes and values
are Jira Issue status names.
"""
return {
"wtg": "To Do",
"rdy": "Open",
"ip": "In Progress",
"fin": "Done",
"hld": "Backlog",
"omt": "Closed",
}
@property
def _supported_shotgun_fields_for_jira_event(self):
""""
Return the list of fields this handler can process for a Jira event.
:returns: A list of strings.
"""
# By convention we might have `None` as values in our mapping dictionary
# meaning that we handle a specific Jira field but there is not a direct
# mapping to a Shotgun field and a special logic must be implemented
# and called to perform the update to Shotgun.
return [field for field in self.__ISSUE_FIELDS_MAPPING.values() if field]
def _supported_shotgun_fields_for_shotgun_event(self):
"""
Return the list of ShotGrid fields that this handler can process for a
ShotGrid to Jira event.
"""
return list(self.__ASSET_FIELDS_MAPPING.keys())
def _get_jira_issue_field_for_shotgun_field(
self, shotgun_entity_type, shotgun_field
):
"""
Returns the Jira Issue field id to use to sync the given ShotGrid Entity
type field.
:param str shotgun_entity_type: A ShotGrid Entity type, e.g. 'Task'.
:param str shotgun_field: A ShotGrid Entity field name, e.g. 'sg_status_list'.
:returns: A string or ``None``.
"""
if shotgun_entity_type != "Asset":
return None
return self.__ASSET_FIELDS_MAPPING.get(shotgun_field)
def _get_shotgun_entity_field_for_issue_field(self, jira_field_id):
"""
Returns the ShotGrid field name to use to sync the given Jira Issue field.
:param str jira_field_id: A Jira Issue field id, e.g. 'summary'.
:returns: A string or ``None``.
"""
return self.__ISSUE_FIELDS_MAPPING.get(jira_field_id)
def _sync_asset_to_jira(self, shotgun_asset, event_meta=None):
"""
Update an existing Jira Issue from the ShotGrid Asset fields.
:param shotgun_asset: A ShotGrid Asset dictionary.
:param event_meta: A ShotGrid Event meta data dictionary or ``None``.
:returns: ``True`` if a Jira Issue was updated, ``False`` otherwise.
"""
jira_issue_key = shotgun_asset[SHOTGUN_JIRA_ID_FIELD]
if not jira_issue_key:
return False
jira_issue = self._get_jira_issue_and_validate(jira_issue_key, shotgun_asset)
if not jira_issue:
return False
# Process all supported fields if no event meta data was provided.
if not event_meta:
return self._sync_shotgun_fields_to_jira(shotgun_asset, jira_issue)
sg_field = event_meta["attribute_name"]
try:
jira_field, jira_value = self._get_jira_issue_field_sync_value(
jira_issue.fields.project,
jira_issue,
shotgun_asset["type"],
sg_field,
event_meta.get("added"),
event_meta.get("removed"),
event_meta.get("new_value"),
)
except InvalidShotgunValue as e:
self._logger.warning(
"Unable to update Jira %s %s for event %s: %s"
% (jira_issue.fields.issuetype.name, jira_issue.key, event_meta, e,)
)
self._logger.debug("%s" % e, exc_info=True)
return False
if jira_field:
self._logger.debug(
"Updating Jira %s %s field with %s"
% (jira_issue, jira_field, jira_value)
)
jira_issue.update(fields={jira_field: jira_value})
return True
# Special cases not handled by a direct update
if sg_field == "sg_status_list":
shotgun_status = event_meta["new_value"]
return self._sync_shotgun_status_to_jira(
jira_issue,
shotgun_status,
"Updated from Shotgun %s(%d) moving to %s"
% (shotgun_asset["type"], shotgun_asset["id"], shotgun_status),
)
return False
def _get_jira_issue_link(self, from_issue, to_issue_key):
"""
Retrieve an existing link between the given Jira Issue and another Issue
with the given key.
:param from_issue: A :class:`jira.Issue` instance.
:param str to_issue_key: An Issue key.
:returns: An Issue link or ``None``.
"""
for issue_link in from_issue.fields.issuelinks:
            # Depending on the link direction we either get "inwardIssue" or "outwardIssue"
# populated.
if issue_link.raw.get("inwardIssue"):
if issue_link.inwardIssue.key == to_issue_key:
# Note: we don't check the Issue Link type and return any link
                # which is in the right direction.
return issue_link
return None
def _sync_asset_tasks_change_to_jira(self, shotgun_asset, added, removed):
"""
Update Jira with tasks changes for the given ShotGrid Asset.
:param shotgun_asset: A ShotGrid Asset dictionary.
:param added: A list of ShotGrid Task dictionaries which were added to
the given Asset.
:param removed: A list of ShotGrid Task dictionaries which were removed from
the given Asset.
        :returns: ``True`` if the given changes could be processed successfully,
``False`` otherwise.
"""
jira_issue_key = shotgun_asset[SHOTGUN_JIRA_ID_FIELD]
jira_issue = None
if jira_issue_key:
# Retrieve the Issue if we should have one
jira_issue = self.get_jira_issue(jira_issue_key)
if not jira_issue:
self._logger.warning(
"Unable to find Jira Issue %s for Shotgun Asset %s"
% (jira_issue_key, shotgun_asset)
)
# Better to stop processing.
return False
updated = False
if jira_issue and removed:
# Check if we should update dependencies because it was attached to
# a synced Task which has been removed.
sg_tasks = self._shotgun.find(
"Task",
[
["id", "in", [x["id"] for x in removed]],
[SHOTGUN_JIRA_ID_FIELD, "is_not", None],
[SHOTGUN_SYNC_IN_JIRA_FIELD, "is", True],
],
["content", SHOTGUN_JIRA_ID_FIELD],
)
to_delete = []
for sg_task in sg_tasks:
issue_link = self._get_jira_issue_link(
jira_issue, sg_task[SHOTGUN_JIRA_ID_FIELD]
)
if issue_link:
self._logger.debug(
"Found a Jira link between %s and %s to delete"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
to_delete.append(issue_link)
else:
self._logger.debug(
"Didn't find a Jira link between %s and %s to delete"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
# Delete the links, if any
for issue_link in to_delete:
self._logger.info("Deleting Jira link %s" % (issue_link))
self._jira.delete_issue_link(issue_link.id)
updated = True
if added:
# Collect the list of Tasks which are linked to Jira Issues
sg_tasks = self._shotgun.find(
"Task",
[
["id", "in", [x["id"] for x in added]],
[SHOTGUN_JIRA_ID_FIELD, "is_not", None],
[SHOTGUN_SYNC_IN_JIRA_FIELD, "is", True],
],
["content", SHOTGUN_JIRA_ID_FIELD, SHOTGUN_SYNC_IN_JIRA_FIELD],
)
if not sg_tasks:
# Nothing to do
return False
if not jira_issue:
# Check if the Project is linked to a Jira Project
jira_project_key = shotgun_asset[
"project.Project.%s" % SHOTGUN_JIRA_ID_FIELD
]
if not jira_project_key:
self._logger.debug(
"Skipping tasks change event for %s (%d) for Project %s "
"not linked to a Jira Project"
% (
shotgun_asset["type"],
shotgun_asset["id"],
shotgun_asset["project"],
)
)
return False
jira_project = self.get_jira_project(jira_project_key)
if not jira_project:
self._logger.warning(
"Unable to find Jira Project %s for Shotgun Project %s."
% (jira_project_key, shotgun_asset["project"],)
)
return False
# Time to create the Issue
jira_issue = self._create_jira_issue_for_entity(
shotgun_asset,
jira_project,
self._issue_type,
summary=shotgun_asset["code"],
timetracking={
"originalEstimate": "0 m" # Null estimate in the case it is mandatory
},
)
self._shotgun.update(
shotgun_asset["type"],
shotgun_asset["id"],
{
SHOTGUN_JIRA_ID_FIELD: jira_issue.key,
SHOTGUN_JIRA_URL_FIELD: {
"url": jira_issue.permalink(),
"name": "View in Jira",
},
},
)
updated = True
for sg_task in sg_tasks:
issue_link = self._get_jira_issue_link(
jira_issue, sg_task[SHOTGUN_JIRA_ID_FIELD]
)
if not issue_link:
self._logger.info(
"Linking Jira Issue %s to %s"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
self._jira.create_issue_link(
type=self.__JIRA_PARENT_LINK_TYPE,
# Note: depending on the link type, e.g. "blocks" or
# "is blocked", the inward and outward values might need
# to be swapped
inwardIssue=sg_task[SHOTGUN_JIRA_ID_FIELD],
outwardIssue=jira_issue.key,
comment={
"body": "Linking %s to %s"
% (shotgun_asset["code"], sg_task["content"],),
},
)
updated = True
else:
self._logger.debug(
"Jira Issue %s is already linked to %s"
% (jira_issue.key, sg_task[SHOTGUN_JIRA_ID_FIELD])
)
return updated
def _sync_shotgun_fields_to_jira(
self, sg_entity, jira_issue, exclude_shotgun_fields=None
):
"""
Update the given Jira Issue with values from the given ShotGrid Entity.
An optional list of ShotGrid fields can be provided to exclude them from
the sync.
:param sg_entity: A ShotGrid Entity dictionary.
:param jira_issue: A :class:`jira.Issue` instance.
:param exclude_shotgun_fields: An optional list of ShotGrid field names which
shouldn't be synced.
"""
if exclude_shotgun_fields is None:
exclude_shotgun_fields = []
issue_data = {}
for sg_field, jira_field in self.__ASSET_FIELDS_MAPPING.items():
if sg_field in exclude_shotgun_fields:
continue
if jira_field is None:
# Special cases where a direct update is not possible.
continue
shotgun_value = sg_entity[sg_field]
if isinstance(shotgun_value, list):
removed = []
added = shotgun_value
new_value = None
else:
removed = None
added = None
new_value = shotgun_value
try:
jira_field, jira_value = self._get_jira_issue_field_sync_value(
jira_issue.fields.project,
jira_issue,
sg_entity["type"],
sg_field,
added,
removed,
new_value,
)
if jira_field:
issue_data[jira_field] = jira_value
except InvalidShotgunValue as e:
self._logger.warning(
"Unable to update Jira %s %s %s field from Shotgun value %s: %s"
% (
jira_issue.fields.issuetype.name,
jira_issue.key,
jira_field,
shotgun_value,
e,
)
)
self._logger.debug("%s" % e, exc_info=True)
if issue_data:
self._logger.debug(
"Updating Jira %s %s with %s. Currently: %s"
% (
jira_issue.fields.issuetype.name,
jira_issue.key,
issue_data,
jira_issue,
)
)
jira_issue.update(fields=issue_data)
# Sync status
if "sg_status_list" not in exclude_shotgun_fields:
self._sync_shotgun_status_to_jira(
jira_issue,
sg_entity["sg_status_list"],
"Updated from Shotgun %s(%d) moving to %s"
% (sg_entity["type"], sg_entity["id"], sg_entity["sg_status_list"]),
)
def _sync_shotgun_task_asset_to_jira(self, shotgun_task):
"""
Sync the Asset attached to the given ShotGrid Task to Jira.
:param shotgun_task: A ShotGrid Task dictionary.
:returns: ``True`` if any update happened, ``False`` otherwise.
"""
# Retrieve the Asset linked to the Task, if any
shotgun_asset = self._shotgun.find_one(
"Asset", [["tasks", "is", shotgun_task]], self._shotgun_asset_fields
)
        # make sure we have the full entity we need, with the injected "name" key, etc.
shotgun_asset = self._shotgun.consolidate_entity(
shotgun_asset, fields=self._shotgun_asset_fields
)
self._logger.debug(
"Retrieved Asset %s linked to Task %s" % (shotgun_asset, shotgun_task)
)
if not shotgun_asset:
return False
updated = False
res = self._sync_asset_tasks_change_to_jira(
shotgun_asset, added=[shotgun_task], removed=[]
)
if res:
updated = True
if self._sync_asset_to_jira(shotgun_asset):
updated = True
return updated
def setup(self):
"""
Check the Jira and ShotGrid site, ensure that the sync can safely happen.
This can be used as well to cache any value which is slow to retrieve.
"""
self._shotgun.assert_field(
"Asset", SHOTGUN_JIRA_ID_FIELD, "text", check_unique=True
)
self._shotgun.assert_field("Asset", SHOTGUN_JIRA_URL_FIELD, "url")
def accept_shotgun_event(self, entity_type, entity_id, event):
"""
Accept or reject the given event for the given ShotGrid Entity.
:returns: ``True`` if the event is accepted for processing, ``False`` otherwise.
"""
# We only accept Assets
if entity_type != "Asset":
return False
meta = event["meta"]
field = meta["attribute_name"]
if field not in self._supported_shotgun_fields_for_shotgun_event():
self._logger.debug(
"Rejecting Shotgun event with unsupported Shotgun field %s: %s"
% (field, event)
)
return False
return True
def process_shotgun_event(self, entity_type, entity_id, event):
"""
Process the given ShotGrid event for the given ShotGrid Entity
:param str entity_type: The ShotGrid Entity type to sync.
:param int entity_id: The id of the ShotGrid Entity to sync.
:param event: A dictionary with the event meta data for the change.
:returns: True if the event was successfully processed, False if the
sync didn't happen for any reason.
"""
meta = event["meta"]
shotgun_field = meta["attribute_name"]
if shotgun_field == SHOTGUN_SYNC_IN_JIRA_FIELD:
# Note: in this case the Entity is a Task.
return self._sync_shotgun_task_asset_to_jira(
{"type": entity_type, "id": entity_id}
)
asset_fields = [
"project",
"project.Project.%s" % SHOTGUN_JIRA_ID_FIELD,
"project.Project.name",
SHOTGUN_JIRA_ID_FIELD,
] + self._supported_shotgun_fields_for_shotgun_event()
sg_entity = self._shotgun.consolidate_entity(
{"type": entity_type, "id": entity_id}, fields=asset_fields
)
if not sg_entity:
self._logger.warning(
"Unable to find Shotgun %s (%s)." % (entity_type, entity_id)
)
return False
# When an Entity is created in Shotgun, a unique event is generated for
# each field value set in the creation of the Entity. These events
# have an additional "in_create" key in the metadata, identifying them
# as events from the initial create event.
#
# When the bridge processes the first event, it loads all of the Entity
# field values from Shotgun and creates the Jira Issue with those
# values. So the remaining Shotgun events with the "in_create"
# metadata key can be ignored since we've already handled all of
# those field updates.
# We use the Jira id field value to check if we're processing the first
        # event. If it exists with in_create, we know the Issue has already
# been created.
if sg_entity[SHOTGUN_JIRA_ID_FIELD] and meta.get("in_create"):
self._logger.debug(
"Rejecting Shotgun event for %s.%s field update during "
"create. Issue was already created in Jira: %s"
% (sg_entity["type"], shotgun_field, event)
)
return False
# Update existing synced Issue (if any) Issue dependencies
# Note: deleting a Task does not seem to trigger an Asset.tasks change?
if shotgun_field == "tasks":
return self._sync_asset_tasks_change_to_jira(
sg_entity, meta["added"], meta["removed"],
)
# Update the Jira Issue itself
return self._sync_asset_to_jira(sg_entity, meta)
|
the-stack_0_2573 | import json
import tornado.web
class HomersHandler(tornado.web.RequestHandler):
async def get(self, name):
homer = await self.settings["mongo_db"].homers.find_one(
{"name": name})
if homer is None:
raise tornado.web.HTTPError(
404, f"Missing homer: {name}")
self.finish(homer["content"])
async def post(self, name):
await self.settings["mongo_db"].homers.replace_one(
{"name": name},
{
"name": name,
"content": json.loads(self.request.body)
},
upsert=True)
self.set_status(204)
self.finish()
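# Hedged usage sketch (not part of the original handler): minimal wiring into a
# Tornado application.  The Motor client, database name and port are
# assumptions for illustration; the handler only requires a "mongo_db" entry in
# the application settings.
if __name__ == "__main__":
    import tornado.ioloop
    import motor.motor_tornado
    db = motor.motor_tornado.MotorClient()["example_db"]
    app = tornado.web.Application(
        [(r"/homers/(\w+)", HomersHandler)],
        mongo_db=db,
    )
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()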
|
the-stack_0_2575 | import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data import sampler
from torch import cuda
def to_var(x, device, requires_grad=False, volatile=False):
"""
    Variable type that automatically chooses cpu or cuda
"""
#@if torch.cuda.is_available():
# x = x.to(device)
return Variable(x, requires_grad=requires_grad, volatile=volatile).to(device)
def prune_rate(model, verbose=True):
"""
Print out prune rate for each layer and the whole network
"""
total_nb_param = 0
nb_zero_param = 0
layer_id = 0
for parameter in model.parameters():
param_this_layer = 1
for dim in parameter.data.size():
param_this_layer *= dim
total_nb_param += param_this_layer
# only pruning linear and conv layers
if len(parameter.data.size()) != 1:
layer_id += 1
zero_param_this_layer = \
np.count_nonzero(parameter.cpu().data.numpy()==0)
nb_zero_param += zero_param_this_layer
if verbose:
print("Layer {} | {} layer | {:.2f}% parameters pruned" \
.format(
layer_id,
'Conv' if len(parameter.data.size()) == 4 \
else 'Linear',
100.*zero_param_this_layer/param_this_layer,
))
pruning_perc = 100.*nb_zero_param/total_nb_param
if verbose:
print("Final pruning rate: {:.2f}%".format(pruning_perc))
return pruning_perc
def arg_nonzero_min(a):
"""
nonzero argmin of a non-negative array
"""
if not a:
return
min_ix, min_v = None, None
# find the starting value (should be nonzero)
for i, e in enumerate(a):
if e != 0:
min_ix = i
min_v = e
    # explicit None check: index 0 is a valid (but falsy) position
    if min_ix is None:
print('Warning: all zero')
return np.inf, np.inf
# search for the smallest nonzero
for i, e in enumerate(a):
if e < min_v and e != 0:
min_v = e
min_ix = i
return min_v, min_ix
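# Hedged usage sketch (not part of the original module): a small self-check of
# prune_rate() on a toy model; the layer sizes are arbitrary assumptions.
if __name__ == "__main__":
    toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Linear(8, 2))
    with torch.no_grad():
        toy[0].weight[:4].zero_()   # zero out half of the conv filters
    prune_rate(toy)                 # prints per-layer and overall prune rate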
|
the-stack_0_2576 | import os
from scipy.io import loadmat
class DATA:
def __init__(self, image_name, bboxes):
self.image_name = image_name
self.bboxes = bboxes
class WIDER(object):
def __init__(self, file_to_label, path_to_image=None):
self.file_to_label = file_to_label
self.path_to_image = path_to_image
self.f = loadmat(file_to_label)
self.event_list = self.f['event_list']
self.file_list = self.f['file_list']
self.face_bbx_list = self.f['face_bbx_list']
def next(self):
for event_idx, event in enumerate(self.event_list):
# fix error of "can't not .. bytes and strings"
e = str(event[0][0].encode('utf-8'))[2:-1]
for file, bbx in zip(self.file_list[event_idx][0],
self.face_bbx_list[event_idx][0]):
f = file[0][0].encode('utf-8')
# print(type(e), type(f)) # bytes, bytes
# fix error of "can't not .. bytes and strings"
f = str(f)[2:-1]
# path_of_image = os.path.join(self.path_to_image, str(e), str(f)) + ".jpg"
path_of_image = self.path_to_image + '/' + e + '/' + f + ".jpg"
# print(path_of_image)
bboxes = []
bbx0 = bbx[0]
for i in range(bbx0.shape[0]):
xmin, ymin, xmax, ymax = bbx0[i]
bboxes.append((int(xmin), int(ymin), int(xmax), int(ymax)))
yield DATA(path_of_image, bboxes)
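# Hedged usage sketch (not part of the original module): iterate over the
# annotations.  The .mat file and image root are assumptions - point them at a
# local copy of the WIDER FACE training set.
if __name__ == "__main__":
    wider = WIDER('wider_face_train.mat', '/path/to/WIDER_train/images')
    for sample in wider.next():
        print(sample.image_name, len(sample.bboxes), 'boxes')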
|
the-stack_0_2577 | #coding: utf8
import sublime, sublime_plugin
import sys, os
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
PACKAGES_PATH = sublime.packages_path() or os.path.dirname(BASE_PATH)
if(BASE_PATH not in sys.path):
sys.path += [BASE_PATH] + [os.path.join(BASE_PATH, 'lib')] + [os.path.join(BASE_PATH, 'SublimeJS/core')]
import SublimeJS.loader.pyv8loader as pyv8loader
from SublimeJS.core.context import Context, import_pyv8
from SublimeJS.core.context import js_file_reader as _js_file_reader
from SublimeJS.loader.pyv8loader import LoaderDelegate
is_python3 = sys.version_info[0] > 2
# JS context
ctx = None
# sublime-v8 Settings
# settings = None
# Default ST settings
# user_settings = None
def is_st3():
return sublime.version()[0] == '3'
#######################
# Reload Lib
# The original idea is borrowed from
# https://github.com/wbond/sublime_package_control/blob/master/package_control/reloader.py
import imp
reload_mods = []
for mod in sys.modules:
if mod.startswith('SublimeJS') and sys.modules[mod] != None:
reload_mods.append(mod)
mods_load_order = [
'SublimeJS.core.semver',
'SublimeJS.loader.pyv8loader',
'SublimeJS.core.file',
'SublimeJS.core.http',
'SublimeJS.core.process',
'SublimeJS.core.child_process',
'SublimeJS.core.fs',
'SublimeJS.core.context',
]
for mod in mods_load_order:
if mod in reload_mods:
m = sys.modules[mod]
if 'on_module_reload' in m.__dict__:
m.on_module_reload()
imp.reload(sys.modules[mod])
def convert(obj):
from PyV8 import JSObject, JSArray, JSFunction
if type(obj) == JSArray:
return [convert(v) for v in obj]
if type(obj) == JSObject:
return dict([[str(k), convert(obj.__getattr__(str(k)))] for k in
obj.__members__])
return obj
##################################
# Global Events
class EventDispatcher(sublime_plugin.EventListener):
def on_new(self, view):
if(ctx):
return ctx.call('global.E.emit',['new', view])
return True
def on_clone(self, view):
if(ctx):
return ctx.call('global.E.emit',['clone', view])
return True
def on_load(self, view):
if(ctx):
return ctx.call('global.E.emit',['load', view])
return True
def on_pre_close(self, view):
if(ctx):
return ctx.call('global.E.emit',['pre_close', view])
return True
def on_close(self, view):
if(ctx):
return ctx.call('global.E.emit',['close', view])
return True
def on_pre_save(self, view):
if(ctx):
return ctx.call('global.E.emit',['pre_save', view])
return True
def on_post_save(self, view):
if(ctx):
return ctx.call('global.E.emit',['post_save', view])
return True
def on_modified(self, view):
if(ctx):
return ctx.call('global.E.emit',['modified', view])
return True
def on_selection_modified(self, view):
if(ctx):
return ctx.call('global.E.emit',['selection_modified', view])
return True
def on_activated(self, view):
if(ctx):
ctx.call('global.E.emit',['activated', view])
ctx.call('global.E.emit',['focus', view])
return True
    def on_deactivated(self, view):
if(ctx):
ctx.call('global.E.emit',['deactived', view])
ctx.call('global.E.emit',['blur', view])
return True
def on_text_command(self, view, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['text_command', view, command_name, args])
return (command_name, args)
def on_window_command(self, window, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['window_command', window, command_name, args])
return (command_name, args)
    def on_post_text_command(self, view, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['post_text_command', view, command_name, args])
return True
    def on_post_window_command(self, window, command_name, args):
if(ctx):
return ctx.call('global.E.emit',['post_window_command', window, command_name, args])
return True
def on_query_context(self, view, key, operator, operand, match_all):
if(ctx):
return ctx.call('global.E.emit',['query_context', view, key, operator, operand, match_all])
return True
def on_query_completions(self, view, prefix, locations):
if(ctx):
ctx._js_ctx.enter()
ret = convert(ctx.call('global.E.on_query_completions',[view, prefix, locations]))
ctx._js_ctx.leave()
return ret
return None
##########################
# Base Class of JSCommand
class JSTextCommand(sublime_plugin.TextCommand):
def __init__(self, view):
self.view = view
def run(self, edit, args=None):
command = self.__class__.__name__[0:-7].lower()
ctx.call('global.runCommand', [command, self.view, edit, args]);
# ctx.load_js_file(os.path.join(BASE_PATH, mod), {'view':self.view, 'edit': edit})
class JSWindowCommand(sublime_plugin.WindowCommand):
def __init__(self, window):
self.window = window
def run(self, args=None):
command = self.__class__.__name__[0:-7].lower()
ctx.call('global.runCommand', [command, self.window, args]);
class JSApplicationCommand(sublime_plugin.ApplicationCommand):
def run(self, args=None):
command = self.__class__.__name__[0:-7].lower()
ctx.call('global.runCommand', [command, args]);
class HelloWorldCommand(JSTextCommand):
'''demo'''
pass
############################
def init():
"Init sublime-v8 engine"
# setup environment for PyV8 loading
pyv8_paths = [
os.path.join(PACKAGES_PATH, 'PyV8'),
os.path.join(PACKAGES_PATH, 'PyV8', pyv8loader.get_arch()),
os.path.join(PACKAGES_PATH, 'PyV8', 'pyv8-%s' % pyv8loader.get_arch())
]
sys.path += pyv8_paths
# unpack recently loaded binary, is exists
for p in pyv8_paths:
pyv8loader.unpack_pyv8(p)
###################################
# if you need update PyV8, comment this
try:
import PyV8
except:
pass
###################################
# create JS environment
delegate = SublimeLoaderDelegate()
pyv8loader.load(pyv8_paths[1], delegate)
class SublimeLoaderDelegate(LoaderDelegate):
load_cache = []
def __init__(self, settings=None):
if settings is None:
settings = {}
#for k in ['http_proxy', 'https_proxy', 'timeout']:
# if user_settings.has(k):
# settings[k] = user_settings.get(k, None)
LoaderDelegate.__init__(self, settings)
self.state = None
self.message = 'Loading PyV8 binary, please wait'
self.i = 0
self.addend = 1
self.size = 8
def on_start(self, *args, **kwargs):
self.state = 'loading'
def on_progress(self, *args, **kwargs):
if kwargs['progress'].is_background:
return
before = self.i % self.size
after = (self.size - 1) - before
msg = '%s [%s=%s]' % (self.message, ' ' * before, ' ' * after)
if not after:
self.addend = -1
if not before:
self.addend = 1
self.i += self.addend
sublime.set_timeout(lambda: sublime.status_message(msg), 0)
def on_complete(self, *args, **kwargs):
self.state = 'complete'
if kwargs['progress'].is_background:
return
sublime.set_timeout(self.on_ready, 0)
def on_ready(self):
sublime.status_message('PyV8 binary successfully loaded')
if(not ctx):
globals()['ctx'] = JSCore(self.log)
from PyV8 import JSClass, JSObject, JSArray, JSFunction
ctx.JSClass = lambda obj: JSClass(convert(obj))
ctx.JSObject = lambda obj: JSObject(convert(obj))
ctx.JSArray = lambda obj: JSArray(convert(obj))
ctx.JSFunction = lambda obj: JSFunction(convert(obj))
ctx.load_js_file(os.path.join(BASE_PATH, "SublimeJS/js/core.js"))
if('js_loading_queue' in globals()):
for i in globals()['js_loading_queue']:
ctx.load_js_file(i)
def on_error(self, exit_code=-1, thread=None):
self.state = 'error'
sublime.set_timeout(lambda: show_pyv8_error(exit_code), 0)
def setting(self, name, default=None):
"Returns specified setting name"
return self.settings.get(name, default)
def log(self, message):
print('JS: %s' % message)
def plugin_loaded():
sublime.set_timeout(init, 200)
##################
# Init plugin
if not is_python3:
init()
class Console:
def __init__(self, logger):
self.logger = logger;
def log(self, msg):
self.logger(msg);
import hashlib, urllib
class JSCore(Context):
_reload = False
def __init__(self, logger):
self.console = Console(logger);
Context.__init__(self, logger)
if('firstload__' not in globals()):
globals()['firstload__'] = True
else:
globals()['firstload__'] = False
def registerCommand(self, name, commandType):
name = name + 'Command'
fullName = '<' + commandType + '>SublimeJS.v8.' + name;
if(fullName not in globals()):
if(commandType == 'TextCommand'):
globals()[fullName] = type(name, (JSTextCommand,), {})
if(commandType == 'WindowCommand'):
globals()[fullName] = type(name, (JSWindowCommand,), {})
if(commandType == 'ApplicationCommand'):
globals()[fullName] = type(name, (JSApplicationCommand,), {})
if(not self._reload):
self._reload = True
if(self._reload):
self.reload(not globals()['firstload__'])
@property
def sublime(self):
return sublime
def reload(self, force=False):
if(force):
def _reload():
sublime.active_window().active_view().run_command('save')
sublime.active_window().run_command('close')
sublime.active_window().open_file(__file__)
sublime.set_timeout(_reload, 200)
else:
sublime.set_timeout(lambda:sublime_plugin.reload_plugin('SublimeJS.v8'),200)
def md5(self, str):
return hashlib.md5(str).hexdigest()
class JS:
_base_dir = None
def __init__(self, base):
self._base_dir = base;
def boot(self, file = 'index.js'):
if(globals()['ctx']):
globals()['ctx'].load_js_file(os.path.join(self._base_dir, file))
else:
if(not 'js_loading_queue' in globals()):
globals()['js_loading_queue'] = []
globals()['js_loading_queue'].append(os.path.join(self._base_dir, file))
def getContext():
return globals()['ctx']
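# Hedged usage sketch (not part of the original module): a plugin built on top
# of this module would typically hand JS its own package directory and boot a
# JS entry point; the package name and file below are assumptions:
#
#   js = JS(os.path.join(PACKAGES_PATH, 'MyJSPlugin'))
#   js.boot('index.js')   # queued until the PyV8 context is ready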
|
the-stack_0_2578 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_text
from django.db.models import Sum
__all__ = ['CounterManager', ]
class CounterManager(models.Manager):
def for_model(self, model, total=False):
"""
QuerySet for all counter records for a particular model (either an instance or
a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_queryset().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_text(model._get_pk_val()))
if total:
qs = qs.aggregate(Sum('hits'))['hits__sum']
return qs
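# Hedged usage sketch (not part of the original module): this manager assumes a
# model exposing `content_type`, `object_pk` and `hits` fields; the model and
# query below are illustrative assumptions only:
#
#   class Counter(models.Model):
#       content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#       object_pk = models.TextField()
#       hits = models.PositiveIntegerField(default=0)
#       objects = CounterManager()
#
#   total_hits = Counter.objects.for_model(some_article, total=True)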
|
the-stack_0_2579 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
from nnformer.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
from nnformer.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnformer.utilities.to_torch import maybe_to_torch, to_cuda
from nnformer.network_architecture.generic_UNet import Generic_UNet
from nnformer.network_architecture.Swin_Unet_l_gelunorm import swintransformer
from nnformer.network_architecture.initialization import InitWeights_He
from nnformer.network_architecture.neural_network import SegmentationNetwork
from nnformer.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \
get_patch_size, default_3D_augmentation_params
from nnformer.training.dataloading.dataset_loading import unpack_dataset
from nnformer.training.network_training.nnFormerTrainer_synapse import nnFormerTrainer_synapse
from nnformer.utilities.nd_softmax import softmax_helper
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import autocast
from nnformer.training.learning_rate.poly_lr import poly_lr
from batchgenerators.utilities.file_and_folder_operations import *
class nnFormerTrainerV2_Synapse(nnFormerTrainer_synapse):
"""
Info for Fabian: same as internal nnFormerTrainerV2_2
"""
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.max_num_epochs = 1000
self.initial_lr = 1e-2
self.deep_supervision_scales = None
self.ds_loss_weights = None
self.pin_memory = True
def initialize(self, training=True, force_load_plans=False):
"""
- replaced get_default_augmentation with get_moreDA_augmentation
- enforce to only run this code once
- loss function wrapper for deep supervision
:param training:
:param force_load_plans:
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = len(self.net_num_pool_op_kernel_sizes)
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest-resolution output. Normalize weights so that they sum to 1
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
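            # e.g. for net_numpool = 5: raw weights [1, .5, .25, .125, .0625],
            # mask the last one, normalize -> [0.533, 0.267, 0.133, 0.067, 0.0]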
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(
self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory,
use_nondetMultiThreadedAugmenter=False
)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
"""
- momentum 0.99
- SGD instead of Adam
- self.lr_scheduler = None because we do poly_lr
- deep supervision = True
- i am sure I forgot something here
Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
:return:
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
#self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
# len(self.net_num_pool_op_kernel_sizes),
# self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
# dropout_op_kwargs,
# net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
# self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
self.network = swintransformer(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
checkpoint = torch.load("../Pretrained_weight/pretrain_Synapse.model", map_location='cuda')
self.network.load_state_dict(checkpoint['state_dict'])
print('I am using the pre_train weight!!')
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = None
def run_online_evaluation(self, output, target):
"""
due to deep supervision the return value and the reference are now lists of tensors. We only need the full
resolution output because this is what we are interested in in the end. The others are ignored
:param output:
:param target:
:return:
"""
target = target[0]
output = output[0]
return super().run_online_evaluation(output, target)
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds)
self.network.do_ds = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().predict_preprocessed_data_return_seg_and_softmax(data,
do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size, use_gaussian=use_gaussian,
pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
verbose=verbose,
mixed_precision=mixed_precision)
self.network.do_ds = ds
return ret
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
"""
gradient clipping improves training stability
:param data_generator:
:param do_backprop:
:param run_online_evaluation:
:return:
"""
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
self.optimizer.zero_grad()
if self.fp16:
with autocast():
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
self.amp_grad_scaler.scale(l).backward()
self.amp_grad_scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.amp_grad_scaler.step(self.optimizer)
self.amp_grad_scaler.update()
else:
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
l.backward()
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.optimizer.step()
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
return l.detach().cpu().numpy()
def do_split(self):
"""
The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,
so always the same) and save it as splits_final.pkl file in the preprocessed data directory.
Sometimes you may want to create your own split for various reasons. For this you will need to create your own
splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in
it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)
and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to
use a random 80:20 data split.
:return:
"""
if self.fold == "all":
# if fold==all then we use all images for training and validation
tr_keys = val_keys = list(self.dataset.keys())
else:
splits_file = join(self.dataset_directory, "splits_final.pkl")
# if the split file does not exist we need to create it
if not isfile(splits_file):
self.print_to_log_file("Creating new 5-fold cross-validation split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
else:
self.print_to_log_file("Using splits from existing split file:", splits_file)
splits = load_pickle(splits_file)
self.print_to_log_file("The split file contains %d splits." % len(splits))
splits[self.fold]['train']=np.array(['img0006','img0007' ,'img0009', 'img0010', 'img0021' ,'img0023' ,'img0024','img0026' ,'img0027' ,'img0031', 'img0033' ,'img0034' \
,'img0039', 'img0040','img0005', 'img0028', 'img0030', 'img0037'])
splits[self.fold]['val']=np.array(['img0001', 'img0002', 'img0003', 'img0004', 'img0008', 'img0022','img0025', 'img0029', 'img0032', 'img0035', 'img0036', 'img0038'])
self.print_to_log_file("Desired fold for training: %d" % self.fold)
if self.fold < len(splits):
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
self.print_to_log_file("This split has %d training and %d validation cases."
% (len(tr_keys), len(val_keys)))
else:
self.print_to_log_file("INFO: You requested fold %d for training but splits "
"contain only %d folds. I am now creating a "
"random (but seeded) 80:20 split!" % (self.fold, len(splits)))
# if we request a fold that is not in the split file, create a random 80:20 split
rnd = np.random.RandomState(seed=12345 + self.fold)
keys = np.sort(list(self.dataset.keys()))
idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)
idx_val = [i for i in range(len(keys)) if i not in idx_tr]
tr_keys = [keys[i] for i in idx_tr]
val_keys = [keys[i] for i in idx_val]
self.print_to_log_file("This random 80:20 split has %d training and %d validation cases."
% (len(tr_keys), len(val_keys)))
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
def setup_DA_params(self):
"""
- we increase roation angle from [-15, 15] to [-30, 30]
- scale range is now (0.7, 1.4), was (0.85, 1.25)
- we don't do elastic deformation anymore
:return:
"""
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
if self.threeD:
self.data_aug_params = default_3D_augmentation_params
self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
if self.do_dummy_2D_aug:
self.data_aug_params["dummy_2D"] = True
self.print_to_log_file("Using dummy2d data augmentation")
self.data_aug_params["elastic_deform_alpha"] = \
default_2D_augmentation_params["elastic_deform_alpha"]
self.data_aug_params["elastic_deform_sigma"] = \
default_2D_augmentation_params["elastic_deform_sigma"]
self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
else:
self.do_dummy_2D_aug = False
if max(self.patch_size) / min(self.patch_size) > 1.5:
default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
self.data_aug_params = default_2D_augmentation_params
self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
if self.do_dummy_2D_aug:
self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
patch_size_for_spatialtransform = self.patch_size[1:]
else:
self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
patch_size_for_spatialtransform = self.patch_size
self.data_aug_params["scale_range"] = (0.7, 1.4)
self.data_aug_params["do_elastic"] = False
self.data_aug_params['selected_seg_channels'] = [0]
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
self.data_aug_params["num_cached_per_thread"] = 2
def maybe_update_lr(self, epoch=None):
"""
if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1
(maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.
        Therefore we need to do +1 here)
:param epoch:
:return:
"""
if epoch is None:
ep = self.epoch + 1
else:
ep = epoch
self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))
def on_epoch_end(self):
"""
overwrite patient-based early stopping. Always run to 1000 epochs
:return:
"""
super().on_epoch_end()
continue_training = self.epoch < self.max_num_epochs
# it can rarely happen that the momentum of nnFormerTrainerV2 is too high for some dataset. If at epoch 100 the
# estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95
if self.epoch == 100:
if self.all_val_eval_metrics[-1] == 0:
self.optimizer.param_groups[0]["momentum"] = 0.95
self.network.apply(InitWeights_He(1e-2))
self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too "
"high momentum. High momentum (0.99) is good for datasets where it works, but "
"sometimes causes issues such as this one. Momentum has now been reduced to "
"0.95 and network weights have been reinitialized")
return continue_training
def run_training(self):
"""
if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first
continued epoch with self.initial_lr
we also need to make sure deep supervision in the network is enabled for training, thus the wrapper
:return:
"""
self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we
# want at the start of the training
ds = self.network.do_ds
self.network.do_ds = True
ret = super().run_training()
self.network.do_ds = ds
return ret
|
the-stack_0_2580 | # -*- coding: utf-8 -*-
# This file is part of the OpenSYMORO project. Please see
# https://github.com/symoro/symoro/blob/master/LICENCE for the licence.
"""
This module of the SYMORO package provides symbolic
solutions for the inverse geometric problem.
"""
from heapq import heapify, heappop
from sympy import var, sin, cos, eye, atan2, sqrt, pi
from sympy import Matrix, Symbol, Expr, trigsimp
from pysymoro.geometry import transform_list, to_matrix
from symoroutils import symbolmgr
from symoroutils import tools
EMPTY = var("EMPTY")
T_GENERAL = Matrix([var("s1,n1,a1,p1"), var("s2,n2,a2,p2"),
var("s3,n3,a3,p3"), [0, 0, 0, 1]])
# Dictionary for equation type classification.
eq_dict = {(1, 0, 0): 0, (0, 1, 0): 1, (1, 1, 0): 2,
(0, 2, 0): 3, (0, 2, 1): 4}
def _paul_solve(robo, symo, nTm, n, m, known_vars=None):
if known_vars is None:
knowns = set()
else:
knowns = set(known_vars)
chain = robo.loop_chain(m, n)
th_all = set()
r_all = set()
# Create the set of all knowns symbols
for i in chain:
if i >= 0:
if robo.sigma[i] == 0 and isinstance(robo.theta[i], Expr):
th_all.add(robo.theta[i])
if isinstance(robo.r[i], Expr):
knowns |= robo.r[i].atoms(Symbol)
if robo.sigma[i] == 1 and isinstance(robo.r[i], Expr):
r_all.add(robo.r[i])
if isinstance(robo.theta[i], Expr):
knowns |= robo.theta[i].atoms(Symbol)
if isinstance(robo.gamma[i], Expr):
knowns |= robo.gamma[i].atoms(Symbol)
if isinstance(robo.alpha[i], Expr):
knowns |= robo.alpha[i].atoms(Symbol)
if isinstance(robo.d[i], Expr):
knowns |= robo.d[i].atoms(Symbol)
if isinstance(robo.b[i], Expr):
knowns |= robo.b[i].atoms(Symbol)
while True:
repeat = False
iTm = nTm.copy()
tr_list = transform_list(robo, n, m)
_replace_EMPTY(iTm, tr_list)
tr_list.reverse()
tr_const, tr_list = _extract_const_transforms(tr_list, knowns)
for trc in tr_const:
iTm = iTm * trc.matrix_inv()
tr_list.reverse()
while tr_list:
tr_const, tr_list = _extract_const_transforms(tr_list, knowns)
for trc in tr_const:
iTm = trc.matrix_inv() * iTm
tr = tr_list.pop(0)
if tr.val.atoms(Symbol) - knowns:
M_eq = tr.matrix() * to_matrix(tr_list, simplify=False)
while True:
found = _look_for_eq(symo, M_eq - iTm,
knowns, th_all, r_all)
repeat |= found
if not found or th_all | r_all <= knowns:
break
iTm = tr.matrix_inv() * iTm
if th_all | r_all <= knowns:
break
if not repeat or th_all | r_all <= knowns:
break
return knowns
def _replace_EMPTY(T, tr_list):
T_sym = to_matrix(tr_list, simplify=True)
for e1 in xrange(4):
for e2 in xrange(4):
if T[e1, e2].has(EMPTY):
T[e1, e2] = T_sym[e1, e2]
def _extract_const_transforms(tr_list, knowns):
var_idx = len(tr_list)
var_found = False
for i, tr in enumerate(tr_list):
if not var_found:
if tr.val.atoms(Symbol) - knowns:
var_found = True
var_idx = i
elif tr.axis == tr_list[var_idx].axis:
if not tr.val.atoms(Symbol) - knowns:
tr_list[i] = tr_list[var_idx]
tr_list[var_idx] = tr
var_idx = i
else:
break
return tr_list[:var_idx], tr_list[var_idx:]
def _look_for_eq(symo, M_eq, knowns, th_all, r_all):
cont_search = False
eq_candidates = [list() for list_index in xrange(5)]
for e1 in xrange(3):
for e2 in xrange(4):
eq = M_eq[e1, e2]
if not isinstance(eq, Expr) or eq.is_Atom:
continue
th_vars = (eq.atoms(Symbol) & th_all) - knowns
arg_ops = [at.count_ops()-1 for at in eq.atoms(sin, cos)
if not at.atoms(Symbol) & knowns]
if th_vars and arg_ops:
arg_sum = max(arg_ops)
else:
arg_sum = 0
rs_s = (eq.atoms(Symbol) & r_all) - knowns
eq_features = (len(rs_s), len(th_vars), arg_sum)
if eq_features in eq_dict:
eq_key = eq_dict[eq_features]
eq_pack = (eq, list(rs_s), list(th_vars))
eq_candidates[eq_key].append(eq_pack)
cont_search |= _try_solve_0(symo, eq_candidates[0], knowns)
cont_search |= _try_solve_1(symo, eq_candidates[1], knowns)
cont_search |= _try_solve_2(symo, eq_candidates[2] +
eq_candidates[1], knowns)
cont_search |= _try_solve_3(symo, eq_candidates[3], knowns)
cont_search |= _try_solve_4(symo, eq_candidates[4], knowns)
return cont_search
def loop_solve(robo, symo, know=None):
# TODO: rewrite; Add parallelogram detection
q_vec = [robo.get_q(i) for i in xrange(robo.NF)]
loops = []
if know is None:
know = robo.q_active
# set(q for i, q in enumerate(q_vec) if robo.mu[i] == 1)
for i, j in robo.loop_terminals:
chain = robo.loop_chain(i, j)
know_ij = set(q_vec[i] for i in chain if q_vec[i] in know)
unknow_ij = set(q_vec[i] for i in chain if q_vec[i] not in know)
loops.append([len(unknow_ij), i, j, know_ij, unknow_ij])
while loops:
heapify(loops)
loop = heappop(loops)
res_know = _paul_solve(robo, symo, eye(4), *loop[1:4])
for l in loops:
found = l[4] & res_know
l[3] |= found
l[4] -= found
l[0] = len(l[4])
def igm_paul(robo, T_ref, n):
if isinstance(T_ref, list):
T_ref = Matrix(4, 4, T_ref)
symo = symbolmgr.SymbolManager()
symo.file_open(robo, 'igm')
symo.write_params_table(robo, 'Inverse Geometric Model for frame %s' % n)
_paul_solve(robo, symo, T_ref, 0, n)
symo.file_close()
return symo
# TODO: think about smarter way of matching
def _try_solve_0(symo, eq_sys, knowns):
res = False
for eq, [r], th_names in eq_sys:
X = tools.get_max_coef(eq, r)
if X != 0:
Y = X*r - eq
symo.write_line("# Solving type 1")
X = symo.replace(trigsimp(X), 'X', r)
Y = symo.replace(trigsimp(Y), 'Y', r)
symo.add_to_dict(r, Y/X)
knowns.add(r)
res = True
return res
def _try_solve_1(symo, eq_sys, knowns):
res = False
for i in xrange(len(eq_sys)):
eqi, rs_i, [th_i] = eq_sys[i]
if th_i in knowns:
continue
Xi, Yi, Zi, i_ok = _get_coefs(eqi, sin(th_i), cos(th_i), th_i)
zero = tools.ZERO
i_ok &= sum([Xi == zero, Yi == zero, Zi == zero]) <= 1
if not i_ok:
continue
j_ok = False
for j in xrange(i+1, len(eq_sys)):
eqj, rs_j, [th_j] = eq_sys[j]
if th_i == th_j:
Xj, Yj, Zj, j_ok = _get_coefs(eqj, sin(th_j), cos(th_j), th_i)
j_ok &= (Xi*Yj != Xj*Yi)
if j_ok:
break
if j_ok:
symo.write_line("# Solving type 3")
_solve_type_3(symo, Xi, Yi, -Zi, Xj, Yj, -Zj, th_i)
else:
symo.write_line("# Solving type 2")
_solve_type_2(symo, Xi, Yi, -Zi, th_i)
knowns.add(th_i)
res = True
return res
def _try_solve_2(symo, eq_sys, knowns):
if all(len(rs) == 0 for eq, rs, ths in eq_sys):
return False
for i in xrange(len(eq_sys)):
all_ok = False
for j in xrange(len(eq_sys)):
eqj, rs_j, ths_j = eq_sys[j]
eqi, rs_i, ths_i = eq_sys[i]
if i == j or set(ths_i) != set(ths_j) or set(rs_j) != set(rs_i):
continue
th = ths_i[0]
C, S = cos(th), sin(th)
r = rs_i[0]
X1, Y1, Z1, i_ok = _get_coefs(eqi, S, r, th, r)
X2, Y2, Z2, j_ok = _get_coefs(eqj, C, r, th, r)
all_ok = j_ok and i_ok and not eqi.has(C) and not eqj.has(S)
if all_ok:
eq_type = 5
break
X1, Y1, Z1, i_ok = _get_coefs(eqi, S, C, th, r)
X2, Y2, Z2, j_ok = _get_coefs(eqj, C, S, th, r)
i_ok &= X1.has(r) and not Z1.has(r) and Y1 == tools.ZERO
j_ok &= X2.has(r) and not Z2.has(r) and Y2 == tools.ZERO
all_ok = j_ok and i_ok
if all_ok:
eq_type = 4
X1 /= r
X2 /= r
break
else:
eq_type = -1
if not all_ok or eq_type == -1:
continue
symo.write_line("# Solving type %s" % eq_type)
if eq_type == 4:
_solve_type_4(symo, X1, -Y1, X2, -Y2, th, r)
else:
_solve_type_5(symo, X1, -Y1, -Z1, X2, -Y2, -Z2, th, r)
knowns |= {th, r}
return True
return False
def _match_coef(A1, A2, B1, B2):
return A1 == A2 and B1 == B2 or A1 == -A2 and B1 == -B2
def _try_solve_3(symo, eq_sys, knowns):
for i in xrange(len(eq_sys)):
all_ok = False
for j in xrange(len(eq_sys)):
            eqj, rs_j, ths_j = eq_sys[j]
            eqi, rs_i, ths_i = eq_sys[i]
if i == j or set(ths_i) != set(ths_j):
continue
th1 = ths_i[0]
th2 = ths_i[1]
C1, S1 = cos(th1), sin(th1)
C2, S2 = cos(th2), sin(th2)
X1, Y1, ZW1, i_ok = _get_coefs(eqi, C1, S1, th1)
X2, Y2, ZW2, j_ok = _get_coefs(eqj, S1, C1, th1)
Y2 = -Y2
V1, W1, Z1, iw_ok = _get_coefs(ZW1, C2, S2, th1, th2)
V2, W2, Z2, jw_ok = _get_coefs(ZW2, S2, C2, th1, th2)
W2 = -W2
all_ok = j_ok and i_ok and jw_ok and iw_ok
all_ok &= _check_const((X1, Y1), th2)
if X1 == 0 or Y1 == 0:
X1, Y1, V1, W1 = V1, W1, X1, Y1
X2, Y2, V2, W2 = V2, W2, X2, Y2
th1, th2 = th2, th1
all_ok &= _match_coef(X1, X2, Y1, Y2)
all_ok &= _match_coef(V1, V2, W1, W2)
eps = 1
if X1 == X2 and Y1 == Y2:
if W1 == -W2 and V1 == -V2:
eps = -1
else:
if W1 == W2 and V1 == V2:
eps = -1
Z2 = -Z2
for a in (X1, X2, Y1, Y2):
all_ok &= not a.has(C2)
all_ok &= not a.has(S2)
if all_ok:
break
if not all_ok:
continue
symo.write_line("# Solving type 6, 7")
_solve_type_7(symo, V1, W1, -X1, -Y1, -Z1, -Z2, eps, th1, th2)
knowns |= {th1, th2}
return True
return False
# TODO: make it with itertools
def _try_solve_4(symo, eq_sys, knowns):
res = False
for i in xrange(len(eq_sys)):
all_ok = False
for j in xrange(len(eq_sys)):
eqj, rs_j, ths_j = eq_sys[j]
eqi, rs_i, ths_i = eq_sys[i]
if i == j or set(ths_i) != set(ths_j):
continue
th12 = ths_i[0] + ths_i[1]
if eqi.has(sin(ths_i[0])) or eqi.has(cos(ths_i[0])):
th1 = ths_i[0]
th2 = ths_i[1]
else:
th1 = ths_i[1]
th2 = ths_i[0]
C1, S1 = cos(th1), sin(th1)
C12, S12 = cos(th12), sin(th12)
X1, Y1, Z1, i_ok = _get_coefs(eqi, C1, C12, th1, th2)
X2, Y2, Z2, j_ok = _get_coefs(eqj, S1, S12, th1, th2)
all_ok = (X1*Y2 == Y1*X2 and i_ok and j_ok)
all_ok &= X1 != 0 and Y1 != 0
all_ok &= not eqi.has(S1) and not eqi.has(S12)
all_ok &= not eqj.has(C1) and not eqj.has(C12)
if all_ok:
break
if not all_ok:
continue
symo.write_line("# Solving type 8")
_solve_type_8(symo, X1, Y1, -Z1, -Z2, th1, th2)
knowns |= {th1, th2}
res = True
return res
def _solve_type_2(symo, X, Y, Z, th):
"""Solution for the equation:
X*S + Y*C = Z
"""
symo.write_line("# X*sin({0}) + Y*cos({0}) = Z".format(th))
X = symo.replace(trigsimp(X), 'X', th)
Y = symo.replace(trigsimp(Y), 'Y', th)
Z = symo.replace(trigsimp(Z), 'Z', th)
YPS = var('YPS'+str(th))
if X == tools.ZERO and Y != tools.ZERO:
C = symo.replace(Z/Y, 'C', th)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(th, atan2(YPS*sqrt(1-C**2), C))
elif X != tools.ZERO and Y == tools.ZERO:
S = symo.replace(Z/X, 'S', th)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(th, atan2(S, YPS*sqrt(1-S**2)))
elif Z == tools.ZERO:
symo.add_to_dict(YPS, (tools.ONE, tools.ZERO))
symo.add_to_dict(th, atan2(-Y, X) + YPS*pi)
else:
B = symo.replace(X**2 + Y**2, 'B', th)
D = symo.replace(B - Z**2, 'D', th)
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
S = symo.replace((X*Z + YPS * Y * sqrt(D))/B, 'S', th)
C = symo.replace((Y*Z - YPS * X * sqrt(D))/B, 'C', th)
symo.add_to_dict(th, atan2(S, C))
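# Hedged numeric check (illustration only, not part of the solver): for the generic
# branch above with the assumed values X = 3, Y = 4, Z = 2 one gets
#     B = X**2 + Y**2 = 25,   D = B - Z**2 = 21,
#     S = (X*Z + YPS*Y*sqrt(D))/B,   C = (Y*Z - YPS*X*sqrt(D))/B,   th = atan2(S, C)
# and both branches YPS = +1 and YPS = -1 satisfy X*sin(th) + Y*cos(th) = Z
# as well as S**2 + C**2 = 1, which is what makes atan2(S, C) a valid angle.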
def _solve_type_3(symo, X1, Y1, Z1, X2, Y2, Z2, th):
"""Solution for the system:
X1*S + Y1*C = Z1
X2*S + Y2*C = Z2
"""
symo.write_line("# X1*sin({0}) + Y1*cos({0}) = Z1".format(th))
symo.write_line("# X2*sin({0}) + Y2*cos({0}) = Z2".format(th))
X1 = symo.replace(trigsimp(X1), 'X1', th)
Y1 = symo.replace(trigsimp(Y1), 'Y1', th)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th)
X2 = symo.replace(trigsimp(X2), 'X2', th)
Y2 = symo.replace(trigsimp(Y2), 'Y2', th)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th)
if X1 == tools.ZERO and Y2 == tools.ZERO:
symo.add_to_dict(th, atan2(Z2/X2, Z1/Y1))
elif X2 == tools.ZERO and Y1 == tools.ZERO:
symo.add_to_dict(th, atan2(Z1/X1, Z2/Y2))
else:
D = symo.replace(X1*Y2-X2*Y1, 'D', th)
C = symo.replace((Z2*X1 - Z1*X2)/D, 'C', th)
S = symo.replace((Z1*Y2 - Z2*Y1)/D, 'S', th)
symo.add_to_dict(th, atan2(S, C))
def _solve_type_4(symo, X1, Y1, X2, Y2, th, r):
"""Solution for the system:
X1*S*r = Y1
X2*C*r = Y2
"""
symo.write_line("# X1*sin({0})*{1} = Y1".format(th, r))
symo.write_line("# X2*cos({0})*{1} = Y2".format(th, r))
X1 = symo.replace(trigsimp(X1), 'X1', th)
Y1 = symo.replace(trigsimp(Y1), 'Y1', th)
X2 = symo.replace(trigsimp(X2), 'X2', th)
Y2 = symo.replace(trigsimp(Y2), 'Y2', th)
    YPS = var('YPS' + str(r))
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(r, YPS*sqrt((Y1/X1)**2 + (Y2/X2)**2))
symo.add_to_dict(th, atan2(Y1/(X1*r), Y2/(X2*r)))
def _solve_type_5(symo, X1, Y1, Z1, X2, Y2, Z2, th, r):
"""Solution for the system:
X1*S = Y1 + Z1*r
X2*C = Y2 + Z2*r
"""
symo.write_line("# X1*sin({0}) = Y1 + Z1*{1}".format(th, r))
symo.write_line("# X2*cos({0}) = Y2 + Z2*{1}".format(th, r))
X1 = symo.replace(trigsimp(X1), 'X1', th)
Y1 = symo.replace(trigsimp(Y1), 'Y1', th)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th)
X2 = symo.replace(trigsimp(X2), 'X2', th)
Y2 = symo.replace(trigsimp(Y2), 'Y2', th)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th)
V1 = symo.replace(Y1/X1, 'V1', r)
W1 = symo.replace(Z1/X1, 'W1', r)
V2 = symo.replace(Y2/X2, 'V2', r)
W2 = symo.replace(Z2/X2, 'W2', r)
    # r satisfies the quadratic obtained from sin(th)**2 + cos(th)**2 = 1
    _solve_square(symo, W1**2 + W2**2, 2*(V1*W1 + V2*W2), V1**2 + V2**2 - 1, r)
    _solve_type_3(symo, X1, tools.ZERO, Y1 + Z1*r, tools.ZERO, X2, Y2 + Z2*r, th)
def _solve_type_7(symo, V, W, X, Y, Z1, Z2, eps, th_i, th_j):
"""Solution for the system:
    V*Cj + W*Sj = X*Ci + Y*Si + Z1
    eps*(V*Sj - W*Cj) = X*Si - Y*Ci + Z2
"""
s = "# V*cos({0}) + W*sin({0}) = X*cos({1}) + Y*sin({1}) + Z1"
symo.write_line(s.format(th_j, th_i))
s = "# eps*(V*sin({0}) - W*cos({0})) = X*sin({1}) - Y*cos({1}) + Z2"
symo.write_line(s.format(th_j, th_i))
V = symo.replace(trigsimp(V), 'V', th_i)
W = symo.replace(trigsimp(W), 'W', th_i)
X = symo.replace(trigsimp(X), 'X', th_i)
Y = symo.replace(trigsimp(Y), 'Y', th_i)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th_i)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th_i)
B1 = symo.replace(2*(Z1*Y + Z2*X), 'B1', th_i)
B2 = symo.replace(2*(Z1*X - Z2*Y), 'B2', th_i)
B3 = symo.replace(V**2 + W**2 - X**2 - Y**2 - Z1**2 - Z2**2, 'B3', th_i)
_solve_type_2(symo, B1, B2, B3, th_i)
Zi1 = symo.replace(X*cos(th_i) + Y*sin(th_i) + Z1, 'Zi1', th_j)
Zi2 = symo.replace(X*sin(th_i) - Y*cos(th_i) + Z2, 'Zi2', th_j)
_solve_type_3(symo, W, V, Zi1, eps*V, -eps*W, Zi2, th_j)
def _solve_type_8(symo, X, Y, Z1, Z2, th_i, th_j):
"""Solution for the system:
X*Ci + Y*Cij = Z1
X*Si + Y*Sij = Z2
"""
symo.write_line("# X*cos({0}) + Y*cos({0} + {1}) = Z1".format(th_i, th_j))
symo.write_line("# X*sin({0}) + Y*sin({0} + {1}) = Z2".format(th_i, th_j))
X = symo.replace(trigsimp(X), 'X', th_j)
Y = symo.replace(trigsimp(Y), 'Y', th_j)
Z1 = symo.replace(trigsimp(Z1), 'Z1', th_j)
Z2 = symo.replace(trigsimp(Z2), 'Z2', th_j)
Cj = symo.replace((Z1**2 + Z2**2 - X**2 - Y**2) / (2*X*Y), 'C', th_j)
YPS = var('YPS%s' % th_j)
symo.add_to_dict(YPS, (tools.ONE, -tools.ONE))
symo.add_to_dict(th_j, atan2(YPS*sqrt(1 - Cj**2), Cj))
Q1 = symo.replace(X + Y*cos(th_j), 'Q1', th_i)
Q2 = symo.replace(Y*sin(th_j), 'Q2', th_i)
Den = symo.replace(Q1**2 + Q2**2, 'Den', th_i)
Si = symo.replace((Q1*Z2 - Q2*Z1)/Den, 'S', th_i)
Ci = symo.replace((Q1*Z1 + Q2*Z2)/Den, 'C', th_i)
symo.add_to_dict(th_i, atan2(Si, Ci))
def _solve_square(symo, A, B, C, x):
""" solution for the equation:
A*x**2 + B*x + C = 0
"""
A = symo.replace(A, 'A', x)
B = symo.replace(B, 'B', x)
C = symo.replace(C, 'C', x)
    Delta = symo.replace(B**2 - 4*A*C, 'Delta', x)
    YPS = var('YPS' + str(x))
symo.add_to_dict(YPS, (tools.ONE, - tools.ONE))
symo.add_to_dict(x, (-B + YPS*sqrt(Delta))/(2*A))
def _check_const(consts, *xs):
is_ok = True
for coef in consts:
for x in xs:
is_ok &= not coef.has(x)
return is_ok
def _get_coefs(eq, A1, A2, *xs):
eqe = eq.expand()
X = tools.get_max_coef(eqe, A1)
eqe = eqe.xreplace({A1: tools.ZERO})
Y = tools.get_max_coef(eqe, A2)
Z = eqe.xreplace({A2: tools.ZERO})
# is_ok = not X.has(A2) and not X.has(A1) and not Y.has(A2)
is_ok = True
is_ok &= _check_const((X, Y, Z), *xs)
return X, Y, Z, is_ok
|
the-stack_0_2581 | #appModules/totalcmd.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2012 NVDA Contributors
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
from NVDAObjects.IAccessible import IAccessible
import speech
import controlTypes
oldActivePannel=0
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if obj.windowClassName in ("TMyListBox", "TMyListBox.UnicodeClass"):
clsList.insert(0, TCList)
class TCList(IAccessible):
def event_gainFocus(self):
global oldActivePannel
if oldActivePannel !=self.windowControlID:
oldActivePannel=self.windowControlID
obj=self
while obj and obj.parent and obj.parent.windowClassName!="TTOTAL_CMD":
obj=obj.parent
counter=0
while obj and obj.previous and obj.windowClassName!="TPanel":
obj=obj.previous
if obj.windowClassName!="TDrivePanel":
counter+=1
if counter==2:
speech.speakMessage(_("left"))
else:
speech.speakMessage(_("right"))
super(TCList,self).event_gainFocus()
def reportFocus(self):
if self.name:
speakList=[]
if controlTypes.STATE_SELECTED in self.states:
speakList.append(controlTypes.stateLabels[controlTypes.STATE_SELECTED])
speakList.append(self.name.split("\\")[-1])
speech.speakMessage(" ".join(speakList))
else:
super(TCList,self).reportFocus()
|
the-stack_0_2584 | #!/usr/bin/env python3
import requests
import sys
# Old URL = "https://brasil.io/api/dataset/covid19/caso/data/?city=Manaus"
URL_UF = "https://api.brasil.io/v1/dataset/covid19/caso_full/data/?state=CE&is_last=True&page=1"
URL_MUN = "https://api.brasil.io/v1/dataset/covid19/caso/data/?city=Fortaleza"
h=dict()
h['Authorization'] = 'Token ' + ( str(sys.argv[1]) if (len(sys.argv)>1) else '')
data = requests.get(URL_MUN, headers=h)
if data.status_code == 200:
with open('./data/data.json', 'w') as f:
f.write(data.text)
|
the-stack_0_2585 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
from wsme import types as wtypes
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers import v1
from ironic.api.controllers.v1 import versions
from ironic.api import expose
ID_VERSION1 = 'v1'
class Version(base.APIBase):
"""An API version representation.
This class represents an API version, including the minimum and
maximum minor versions that are supported within the major version.
"""
id = wtypes.text
"""The ID of the (major) version, also acts as the release number"""
links = [link.Link]
"""A Link that point to a specific version of the API"""
status = wtypes.text
"""Status of the version.
One of:
* CURRENT - the latest version of API,
* SUPPORTED - supported, but not latest, version of API,
* DEPRECATED - supported, but deprecated, version of API.
"""
version = wtypes.text
"""The current, maximum supported (major.minor) version of API."""
min_version = wtypes.text
"""Minimum supported (major.minor) version of API."""
def __init__(self, id, min_version, version, status='CURRENT'):
self.id = id
self.links = [link.Link.make_link('self', pecan.request.public_url,
self.id, '', bookmark=True)]
self.status = status
self.version = version
self.min_version = min_version
class Root(base.APIBase):
name = wtypes.text
"""The name of the API"""
description = wtypes.text
"""Some information about this API"""
versions = [Version]
"""Links to all the versions available in this API"""
default_version = Version
"""A link to the default version of the API"""
@staticmethod
def convert():
root = Root()
root.name = "OpenStack Ironic API"
root.description = ("Ironic is an OpenStack project which aims to "
"provision baremetal machines.")
root.default_version = Version(ID_VERSION1,
versions.MIN_VERSION_STRING,
versions.MAX_VERSION_STRING)
root.versions = [root.default_version]
return root
class RootController(rest.RestController):
_versions = [ID_VERSION1]
"""All supported API versions"""
_default_version = ID_VERSION1
"""The default API version"""
v1 = v1.Controller()
@expose.expose(Root)
def get(self):
        # NOTE: convert() is called on every request because we need the
        # host url from the request object to build the links.
return Root.convert()
@pecan.expose()
def _route(self, args, request=None):
"""Overrides the default routing behavior.
It redirects the request to the default version of the ironic API
if the version number is not specified in the url.
"""
if args[0] and args[0] not in self._versions:
args = [self._default_version] + args
return super(RootController, self)._route(args, request)
|
the-stack_0_2589 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class PurchaseConfigSettings(models.TransientModel):
_name = 'purchase.config.settings'
_inherit = 'res.config.settings'
company_id = fields.Many2one('res.company', string='Company', required=True,
default=lambda self: self.env.user.company_id)
po_lead = fields.Float(related='company_id.po_lead', string="Purchase Lead Time *")
po_lock = fields.Selection(related='company_id.po_lock', string="Purchase Order Modification *")
po_double_validation = fields.Selection(related='company_id.po_double_validation', string="Levels of Approvals *")
po_double_validation_amount = fields.Monetary(related='company_id.po_double_validation_amount', string="Double validation amount *", currency_field='company_currency_id')
company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id', readonly=True,
help='Utility field to express amount currency')
group_product_variant = fields.Selection([
(0, "No variants on products"),
(1, 'Products can have several attributes, defining variants (Example: size, color,...)')
], "Product Variants",
        help='Working with product variants allows you to define several variants of the same product and eases product management, for example in the ecommerce',
implied_group='product.group_product_variant')
group_uom = fields.Selection([
(0, 'Products have only one unit of measure (easier)'),
        (1, 'Some products may be sold/purchased in different units of measure (advanced)')
], "Units of Measure",
implied_group='product.group_uom',
help="""Allows you to select and maintain different units of measure for products.""")
group_costing_method = fields.Selection([
(0, 'Set a fixed cost price on each product'),
(1, "Use a 'Fixed', 'Real' or 'Average' price costing method")
], "Costing Methods",
implied_group='stock_account.group_inventory_valuation',
help="""Allows you to compute product cost price based on average cost.""")
module_purchase_requisition = fields.Selection([
(0, 'Purchase propositions trigger draft purchase orders to a single supplier'),
(1, 'Allow using call for tenders to get quotes from multiple suppliers (advanced)')
], "Calls for Tenders",
help="Calls for tenders are used when you want to generate requests for quotations to several vendors for a given set of products.\n"
"You can configure per product if you directly do a Request for Quotation "
"to one vendor or if you want a Call for Tenders to compare offers from several vendors.")
group_warning_purchase = fields.Selection([
(0, 'All the products and the customers can be used in purchase orders'),
(1, 'An informative or blocking warning can be set on a product or a customer')
], "Warning", implied_group='purchase.group_warning_purchase')
module_stock_dropshipping = fields.Selection([
(0, 'Suppliers always deliver to your warehouse(s)'),
(1, "Allow suppliers to deliver directly to your customers")
], "Dropshipping",
help='\nCreates the dropship Route and add more complex tests\n'
'-This installs the module stock_dropshipping.')
group_manage_vendor_price = fields.Selection([
(0, 'Manage vendor price on the product form'),
(1, 'Allow using and importing vendor pricelists')
], "Vendor Price",
implied_group="purchase.group_manage_vendor_price")
class AccountConfigSettings(models.TransientModel):
_inherit = 'account.config.settings'
group_analytic_account_for_purchases = fields.Boolean('Analytic accounting for purchases',
implied_group='purchase.group_analytic_accounting',
help="Allows you to specify an analytic account on purchase order lines.")
|
the-stack_0_2590 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
from Alphas.HistoricalReturnsAlphaModel import HistoricalReturnsAlphaModel
from Portfolio.BlackLittermanOptimizationPortfolioConstructionModel import *
from Portfolio.UnconstrainedMeanVariancePortfolioOptimizer import UnconstrainedMeanVariancePortfolioOptimizer
from Risk.NullRiskManagementModel import NullRiskManagementModel
### <summary>
### Black-Litterman framework algorithm
### Uses the HistoricalReturnsAlphaModel and the BlackLittermanPortfolioConstructionModel
### to create an algorithm that rebalances the portfolio according to Black-Litterman portfolio optimization
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class BlackLittermanPortfolioOptimizationFrameworkAlgorithm(QCAlgorithm):
'''Black-Litterman Optimization algorithm.'''
def Initialize(self):
# Set requested data resolution
self.UniverseSettings.Resolution = Resolution.Minute
# Order margin value has to have a minimum of 0.5% of Portfolio value, allows filtering out small trades and reduce fees.
# Commented so regression algorithm is more sensitive
#self.Settings.MinimumOrderMarginPortfolioPercentage = 0.005
self.SetStartDate(2013,10,7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.symbols = [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in [ 'AIG', 'BAC', 'IBM', 'SPY' ] ]
optimizer = UnconstrainedMeanVariancePortfolioOptimizer()
# set algorithm framework models
self.SetUniverseSelection(CoarseFundamentalUniverseSelectionModel(self.coarseSelector))
self.SetAlpha(HistoricalReturnsAlphaModel(resolution = Resolution.Daily))
self.SetPortfolioConstruction(BlackLittermanOptimizationPortfolioConstructionModel(optimizer = optimizer))
self.SetExecution(ImmediateExecutionModel())
self.SetRiskManagement(NullRiskManagementModel())
def coarseSelector(self, coarse):
# Drops SPY after the 8th
last = 3 if self.Time.day > 8 else len(self.symbols)
return self.symbols[0:last]
def OnOrderEvent(self, orderEvent):
if orderEvent.Status == OrderStatus.Filled:
self.Debug(orderEvent)
|
the-stack_0_2591 | #!/usr/bin/env python
#
# This is a module that gathers a list of serial ports including details on
# GNU/Linux systems.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2011-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import
import glob
import os
from serial.tools import list_ports_common
class SysFS(list_ports_common.ListPortInfo):
"""Wrapper for easy sysfs access and device info"""
def __init__(self, device):
super(SysFS, self).__init__(device)
# special handling for links
if device is not None and os.path.islink(device):
device = os.path.realpath(device)
is_link = True
else:
is_link = False
self.usb_device_path = None
if os.path.exists('/sys/class/tty/{}/device'.format(self.name)):
self.device_path = os.path.realpath('/sys/class/tty/{}/device'.format(self.name))
self.subsystem = os.path.basename(os.path.realpath(os.path.join(self.device_path, 'subsystem')))
else:
self.device_path = None
self.subsystem = None
# check device type
if self.subsystem == 'usb-serial':
self.usb_interface_path = os.path.dirname(self.device_path)
elif self.subsystem == 'usb':
self.usb_interface_path = self.device_path
else:
self.usb_interface_path = None
# fill-in info for USB devices
if self.usb_interface_path is not None:
self.usb_device_path = os.path.dirname(self.usb_interface_path)
try:
num_if = int(self.read_line(self.usb_device_path, 'bNumInterfaces'))
except ValueError:
num_if = 1
self.vid = int(self.read_line(self.usb_device_path, 'idVendor'), 16)
self.pid = int(self.read_line(self.usb_device_path, 'idProduct'), 16)
self.serial_number = self.read_line(self.usb_device_path, 'serial')
if num_if > 1: # multi interface devices like FT4232
self.location = os.path.basename(self.usb_interface_path)
else:
self.location = os.path.basename(self.usb_device_path)
self.manufacturer = self.read_line(self.usb_device_path, 'manufacturer')
self.product = self.read_line(self.usb_device_path, 'product')
self.interface = self.read_line(self.usb_interface_path, 'interface')
if self.subsystem in ('usb', 'usb-serial'):
self.apply_usb_info()
#~ elif self.subsystem in ('pnp', 'amba'): # PCI based devices, raspi
elif self.subsystem == 'pnp': # PCI based devices
self.description = self.name
self.hwid = self.read_line(self.device_path, 'id')
elif self.subsystem == 'amba': # raspi
self.description = self.name
self.hwid = os.path.basename(self.device_path)
if is_link:
self.hwid += ' LINK={}'.format(device)
def read_line(self, *args):
"""\
Helper function to read a single line from a file.
One or more parameters are allowed, they are joined with os.path.join.
        Returns None on errors.
"""
try:
with open(os.path.join(*args)) as f:
line = f.readline().strip()
return line
except IOError:
return None
def comports(include_links=False):
devices = glob.glob('/dev/ttyS*') # built-in serial ports
devices.extend(glob.glob('/dev/ttyUSB*')) # usb-serial with own driver
devices.extend(glob.glob('/dev/ttyXRUSB*')) # xr-usb-serial port exar (DELL Edge 3001)
devices.extend(glob.glob('/dev/ttyACM*')) # usb-serial with CDC-ACM profile
devices.extend(glob.glob('/dev/ttyAMA*')) # ARM internal port (raspi)
devices.extend(glob.glob('/dev/rfcomm*')) # BT serial devices
devices.extend(glob.glob('/dev/ttyAP*')) # Advantech multi-port serial controllers
if include_links:
devices.extend(list_ports_common.list_links(devices))
return [info
for info in [SysFS(d) for d in devices]
if info.subsystem != "platform"] # hide non-present internal serial ports
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
if __name__ == '__main__':
for port, desc, hwid in sorted(comports()):
print("{}: {} [{}]".format(port, desc, hwid))
|
the-stack_0_2592 | import tensorflow as tf
import tensorflow.contrib as tf_contrib
import numpy as np
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
weight_regularizer = None
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
if sn :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding='VALID')
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
#x = tf.layers.conv2d(inputs=x, filters=channels,
# kernel_size=kernel, kernel_initializer=weight_init,
# kernel_regularizer=weight_regularizer,
# strides=stride, use_bias=use_bias)
x = tf.contrib.layers.conv2d(inputs=x, num_outputs=channels, kernel_size=kernel,
stride=stride, padding='VALID',
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer())
return x
def atrous_conv2d(x, channels, kernel=3, rate=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
if sn :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=spectral_norm(w), rate=2, padding='SAME')
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=w, rate=2, padding='SAME')
return x
def atrous_pool2d(x, channels, kernel=3, rate=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect' :
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
if sn :
            w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
                                regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=spectral_norm(w), rate=2, padding='SAME')
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.atrous_conv2d(value=x, filters=w, rate=2, padding='SAME')
return x
def deconv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, sn=False, scope='deconv_0'):
with tf.variable_scope(scope):
x_shape = x.get_shape().as_list()
if padding == 'SAME':
output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, channels]
else:
output_shape =[x_shape[0], x_shape[1] * stride + max(kernel - stride, 0), x_shape[2] * stride + max(kernel - stride, 0), channels]
if sn :
w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]], initializer=weight_init, regularizer=weight_regularizer)
x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape, strides=[1, stride, stride, 1], padding=padding)
if use_bias :
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else :
x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,
strides=stride, padding=padding, use_bias=use_bias)
return x
def fully_conneted(x, units, use_bias=True, sn=False, scope='fully_0'):
with tf.variable_scope(scope):
x = flatten(x)
shape = x.get_shape().as_list()
channels = shape[-1]
if sn :
w = tf.get_variable("kernel", [channels, units], tf.float32,
initializer=weight_init, regularizer=weight_regularizer)
if use_bias :
bias = tf.get_variable("bias", [units],
initializer=tf.constant_initializer(0.0))
x = tf.matmul(x, spectral_norm(w)) + bias
else :
x = tf.matmul(x, spectral_norm(w))
else :
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, use_bias=use_bias)
return x
def flatten(x) :
return tf.layers.flatten(x)
def hw_flatten(x) :
return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
#########################
#deformable conv
#########################
# Definition of the regular 2D convolutional
def deform_conv(x, kernel_size, stride, output_channals, mode):
if mode == 'offset':
layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=stride, padding='SAME', kernel_initializer = tf.zeros_initializer(), bias_initializer = tf.zeros_initializer())
layer_output = tf.clip_by_value(layer_output, -0.25*int(x.shape[1]), 0.25*int(x.shape[1]))
if mode == 'weight':
layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=stride, padding='SAME', bias_initializer = tf.zeros_initializer())
if mode == 'feature':
#layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=kernel_size, padding='SAME', kernel_initializer = tf.constant_initializer(0.5), bias_initializer = tf.zeros_initializer())
#layer_output = tf.layers.conv2d(x, filters=output_channals, kernel_size=kernel_size, strides=kernel_size, padding='SAME', initializer=weight_init,regularizer=weight_regularizer)
layer_output = conv(x, output_channals, kernel=kernel_size, stride=kernel_size, sn=True, scope='feature')
return layer_output
# Create the pn [1, 1, 1, 2N]
def get_pn(kernel_size, dtype):
pn_x, pn_y = np.meshgrid(range(-(kernel_size-1)//2, (kernel_size-1)//2+1), range(-(kernel_size-1)//2, (kernel_size-1)//2+1), indexing="ij")
# The order is [x1, x2, ..., y1, y2, ...]
pn = np.concatenate((pn_x.flatten(), pn_y.flatten()))
pn = np.reshape(pn, [1, 1, 1, 2 * kernel_size ** 2])
# Change the dtype of pn
pn = tf.constant(pn, dtype)
return pn
# Create the p0 [1, h, w, 2N]
def get_p0(kernel_size, x_size, dtype):
bs, h, w, C = x_size
p0_x, p0_y = np.meshgrid(range(0, h), range(0, w), indexing="ij")
p0_x = p0_x.flatten().reshape(1, h, w, 1).repeat(kernel_size ** 2, axis=3)
p0_y = p0_y.flatten().reshape(1, h, w, 1).repeat(kernel_size ** 2, axis=3)
p0 = np.concatenate((p0_x, p0_y), axis=3)
# Change the dtype of p0
p0 = tf.constant(p0, dtype)
return p0
def get_q(x_size, dtype):
bs, h, w, c = x_size
q_x, q_y = np.meshgrid(range(0, h), range(0, w), indexing="ij")
q_x = q_x.flatten().reshape(h, w, 1)
q_y = q_y.flatten().reshape(h, w, 1)
q = np.concatenate((q_x, q_y), axis=2)
# Change the dtype of q
q = tf.constant(q, dtype)
return q
def reshape_x_offset(x_offset, kernel_size):
bs, h, w, N, C = x_offset.get_shape().as_list()
# Get the new_shape
new_shape = [bs, h, w * kernel_size, C]
x_offset = [tf.reshape(x_offset[:, :, :, s:s+kernel_size, :], new_shape) for s in range(0, N, kernel_size)]
x_offset = tf.concat(x_offset, axis=2)
# Reshape to final shape [batch_size, h*kernel_size, w*kernel_size, C]
x_offset = tf.reshape(x_offset, [bs, h * kernel_size, w * kernel_size, C])
return x_offset
def deform_con2v(input, num_outputs, kernel_size, stride, trainable, name, reuse):
N = kernel_size ** 2
with tf.variable_scope(name, reuse=reuse):
bs, h, w, C = input.get_shape().as_list()
# offset with shape [batch_size, h, w, 2N]
offset = deform_conv(input, kernel_size, stride, 2 * N, "offset")
#offset = tf.constant(0.0,shape=[bs, h, w, 2*N])
# delte_weight with shape [batch_size, h, w, N * C]
#delte_weight = deform_conv(input, kernel_size, stride, N * C, "weight")
#delte_weight = tf.sigmoid(delte_weight)
# pn with shape [1, 1, 1, 2N]
pn = get_pn(kernel_size, offset.dtype)
# p0 with shape [1, h, w, 2N]
p0 = get_p0(kernel_size, [bs, h, w, C], offset.dtype)
# p with shape [batch_size, h, w, 2N]
p = pn + p0 + offset
# Reshape p to [batch_size, h, w, 2N, 1, 1]
p = tf.reshape(p, [bs, h, w, 2 * N, 1, 1])
# q with shape [h, w, 2]
q = get_q([bs, h, w, C], offset.dtype)
# Bilinear interpolation kernel G ([batch_size, h, w, N, h, w])
gx = tf.maximum(1 - tf.abs(p[:, :, :, :N, :, :] - q[:, :, 0]), 0)
gy = tf.maximum(1 - tf.abs(p[:, :, :, N:, :, :] - q[:, :, 1]), 0)
G = gx * gy
# Reshape G to [batch_size, h*w*N, h*w]
G = tf.reshape(G, [bs, h * w * N, h * w])
# Reshape x to [batch_size, h*w, C]
x = tf.reshape(input, [bs, h*w, C])
# x_offset with shape [batch_size, h, w, N, C]
x = tf.reshape(tf.matmul(G, x), [bs, h, w, N, C])
# Reshape x_offset to [batch_size, h*kernel_size, w*kernel_size, C]
x = reshape_x_offset(x, kernel_size)
# Reshape delte_weight to [batch_size, h*kernel_size, w*kernel_size, C]
#delte_weight = tf.reshape(delte_weight, [batch_size, h*kernel_size, w*kernel_size, C])
#y = x_offset * delte_weight
# Get the output of the deformable convolutional layer
x = deform_conv(x, kernel_size, stride, num_outputs, "feature")
return x, offset
##################################################################################
# Sampling
##################################################################################
def make_png(att, scale):
att_current = up_sample_bilinear(att, scale_factor=scale)
att_current = tf.nn.relu(att_current)
att_current = tf.reduce_mean(att_current,axis=-1)
att_current = tf.stack([att_current, att_current, att_current])
att_current = tf.transpose(att_current, perm=[1, 2, 3, 0])
return att_current
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2])
return gap
def up_sample(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [np.int32(h * scale_factor), np.int32(w * scale_factor)]
return tf.image.resize_nearest_neighbor(x, size=new_size)
def up_sample_bilinear(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [np.int32(h * scale_factor), np.int32(w * scale_factor)]
return tf.image.resize_bilinear(x, size=new_size)
def up_sample_bicubic(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [np.int32(h * scale_factor), np.int32(w * scale_factor)]
return tf.image.resize_bicubic(x, size=new_size)
##################################################################################
# Activation function
##################################################################################
def lrelu(x, alpha=0.2):
return tf.nn.leaky_relu(x, alpha)
def relu(x):
return tf.nn.relu(x)
def tanh(x):
return tf.tanh(x)
##################################################################################
# Normalization function
##################################################################################
def batch_norm(x, is_training=True, scope='batch_norm'):
#return tf.layers.batch_normalization(x, training=is_training)
return tf_contrib.layers.batch_norm(x,decay=0.9, epsilon=1e-05,
center=True, scale=True, updates_collections=tf.GraphKeys.UPDATE_OPS,
is_training=is_training, scope=scope)
def spectral_norm(w, iteration=1):
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = l2_norm(v_)
u_ = tf.matmul(v_hat, w)
u_hat = l2_norm(u_)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
w_norm = w / sigma
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
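# Hedged usage sketch (illustration only; the scope name and shape below are assumptions):
# spectral_norm() above rescales a weight tensor by its largest singular value,
# estimated with a single power-iteration step, e.g.
#     with tf.variable_scope('sn_demo'):
#         w = tf.get_variable('kernel', shape=[3, 3, 64, 128], initializer=weight_init)
#         w_sn = spectral_norm(w)   # same shape as w, spectral norm close to 1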
def l2_norm(v, eps=1e-12):
return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)
##################################################################################
# Loss function
##################################################################################
def class_loss(class_logits, label, num_class):
loss = 0
loss = tf.losses.softmax_cross_entropy(tf.one_hot(label, num_class), class_logits, weights=1.0)
return loss |
the-stack_0_2593 | '''production script for planetary nebula
this script is a streamlined version of the code in planetary_nebula.ipynb.
The notebook was used for testing and peeking into some results, while
this script is used to produce the final plots/tables.
'''
import sys
from pathlib import Path
import logging
import json
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii, fits
from astropy.table import Table
from astropy.coordinates import SkyCoord
from photutils import DAOStarFinder
from extinction import ccm89
from pnlf.auxiliary import search_table
from pnlf.io import ReadLineMaps
from pnlf.detection import detect_unresolved_sources, completeness_limit
from pnlf.photometry import measure_flux
from pnlf.analyse import emission_line_diagnostics, MaximumLikelihood, pnlf, Distance
from pnlf.plot.pnlf import plot_emission_line_ratio, plot_pnlf
logging.basicConfig(#filename='log.txt',
#filemode='w',
#format='(levelname)s %(name)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
basedir = Path('..')
# read in the per-galaxy parameters from the json file
with open(basedir / 'data' / 'interim' / 'parameters.json') as json_file:
parameters = json.load(json_file)
with fits.open(basedir / 'data' / 'raw' / 'phangs_sample_table_v1p4.fits') as hdul:
sample_table = Table(hdul[1].data)
for name in parameters.keys():
tmp = search_table(sample_table,name)
if tmp:
d = Distance(tmp['DIST'][0]*1e6,'pc').to_distance_modulus()
parameters[name]["mu"] = d
print('using mu from sample table')
'''
IC5332 NGC1087 NGC1365 NGC1512 NGC1566 NGC1672 NGC2835
NGC3351 NGC3627 NGC4254 NGC4535 NGC5068 NGC628
'''
data_raw = Path(r'd:\downloads\MUSEDAP')
basedir = Path('..')
for name in parameters.keys():
'''
Step 1: Read in the data
'''
galaxy = ReadLineMaps(data_raw / name)
setattr(galaxy,'mu',parameters[galaxy.name]['mu'])
setattr(galaxy,'alpha',parameters[galaxy.name]['power_index'])
setattr(galaxy,'completeness_limit',parameters[galaxy.name]['completeness_limit'])
'''
Step 2: Detect sources
'''
sources = detect_unresolved_sources(galaxy,
'OIII5006',
StarFinder=DAOStarFinder,
threshold=8,
save=False)
'''
Step 3: Measure fluxes
'''
flux = measure_flux(galaxy,sources, galaxy.alpha,aperture_size=2.,background='local')
for col in ['HA6562','NII6583','SII6716']:
flux[col][flux[col]<0] = flux[f'{col}_err'][flux[col]<0]
flux[col][flux[col]/flux[f'{col}_err']<3] = flux[f'{col}_err'][flux[col]/flux[f'{col}_err']<3]
# calculate astronomical coordinates for comparison
flux['SkyCoord'] = SkyCoord.from_pixel(flux['x'],flux['y'],galaxy.wcs)
# calculate magnitudes from measured fluxes
flux['mOIII'] = -2.5*np.log10(flux['OIII5006']*1e-20) - 13.74
flux['dmOIII'] = np.abs( 2.5/np.log(10) * flux['OIII5006_err'] / flux['OIII5006'] )
# correct for milky way extinction
extinction = ccm89(wave=np.array([5007.]),a_v=0.2,r_v=3.1,unit='aa')[0]
flux['mOIII'] -= extinction
'''
Step 4: Emission line diagnostics
'''
tbl = emission_line_diagnostics(flux,galaxy.mu,galaxy.completeness_limit)
filename = basedir / 'reports' / 'catalogues' / f'pn_candidates_{galaxy.name}.txt'
with open(filename,'w',newline='\n') as f:
tbl['RaDec'] = tbl['SkyCoord'].to_string(style='hmsdms',precision=2)
for col in tbl.colnames:
if col not in ['id','RaDec','type']:
tbl[col].info.format = '%.3f'
ascii.write(tbl[['id','type','x','y','RaDec','OIII5006','OIII5006_err','mOIII','dmOIII','HA6562','HA6562_err',
'NII6583','NII6583_err','SII6716','SII6716_err']][tbl['type']!='NaN'],
f,format='fixed_width',delimiter='\t',overwrite=True)
filename = basedir / 'reports' / 'figures' / f'{galaxy.name}_emission_line'
plot_emission_line_ratio(tbl,galaxy.mu,filename=filename)
'''
Step 5: Fit with maximum likelihood
'''
data = tbl[(tbl['type']=='PN') & (tbl['mOIII']<galaxy.completeness_limit)]['mOIII']
err = tbl[(tbl['type']=='PN') & (tbl['mOIII']<galaxy.completeness_limit)]['dmOIII']
#data = data[data>26]
fitter = MaximumLikelihood(pnlf,
data,
mhigh=galaxy.completeness_limit)
# a good guess would be mu_guess = min(data)-Mmax
mu = fitter([24])[0]
filename = basedir / 'reports' / 'figures' / f'{galaxy.name}_PNLF'
plot_pnlf(tbl[tbl['type']=='PN']['mOIII'],mu,galaxy.completeness_limit,binsize=0.25,mhigh=32,filename=filename)
print(f'{galaxy.name}: {mu:.2f} vs {parameters[galaxy.name]["mu"]:.2f}') |
the-stack_0_2594 | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import os
import numpy as np
dir = "./jtr/data/emoji2vec/"
emojis = []
vecs = []
with open(dir + "metadata.tsv", "w") as f_out:
# f_out.write("emoji\n")
with open(dir + "emoji2vec.txt", "r") as f_in:
for ix, line in enumerate(f_in.readlines()[1:]):
splits = line.strip().split(" ")
emoji = splits[0]
vec = [float(x) for x in splits[1:]]
assert len(vec) == 300
# print(emoji, vec)
emojis.append(emoji)
vecs.append(vec)
f_out.write(emoji+"\n")
f_in.close()
f_out.close()
emoji2vec = tf.constant(np.array(vecs))
tf_emoji2vec = tf.get_variable("emoji2vec", [len(vecs), 300], tf.float64)
# save embeddings to file
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf_emoji2vec.assign(emoji2vec))
saver = tf.train.Saver()
saver.save(sess, os.path.join(dir, "model.ckpt"), 0)
# Use the same LOG_DIR where you stored your checkpoint.
summary_writer = tf.summary.FileWriter(dir)
# Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
config = projector.ProjectorConfig()
# You can add multiple embeddings. Here we add only one.
embedding = config.embeddings.add()
embedding.tensor_name = tf_emoji2vec.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = os.path.join(dir, 'metadata.tsv')
# Saves a configuration file that TensorBoard will read during startup.
projector.visualize_embeddings(summary_writer, config)
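# Hedged usage note (not part of the original script): the checkpoint and projector
# config written above can then be browsed with
#     tensorboard --logdir ./jtr/data/emoji2vec/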
|
the-stack_0_2595 | # -*- coding: utf-8 -*-
"""
The :class:`SwaggerClient` provides an interface for making API calls based on
a swagger spec, and returns responses of python objects which build from the
API response.
Structure Diagram::
+---------------------+
| |
| SwaggerClient |
| |
+------+--------------+
|
| has many
|
+------v--------------+
| |
| Resource +------------------+
| | |
+------+--------------+ has many |
| |
| has many |
| |
+------v--------------+ +------v--------------+
| | | |
| Operation | | SwaggerModel |
| | | |
+------+--------------+ +---------------------+
|
| uses
|
+------v--------------+
| |
| HttpClient |
| |
+---------------------+
To get a client
.. code-block:: python
client = bravado.client.SwaggerClient.from_url(swagger_spec_url)
"""
import logging
from bravado_core.docstring import create_operation_docstring
from bravado_core.exception import SwaggerMappingError
from bravado_core.formatter import SwaggerFormat # noqa
from bravado_core.param import marshal_param
from bravado_core.spec import Spec
from six import iteritems
from six import itervalues
from bravado.config import bravado_config_from_config_dict
from bravado.config import RequestConfig
from bravado.docstring_property import docstring_property
from bravado.requests_client import RequestsClient
from bravado.swagger_model import Loader
from bravado.warning import warn_for_deprecated_op
log = logging.getLogger(__name__)
class SwaggerClient(object):
"""A client for accessing a Swagger-documented RESTful service.
:type swagger_spec: :class:`bravado_core.spec.Spec`
"""
def __init__(self, swagger_spec, also_return_response=False):
self.__also_return_response = also_return_response
self.swagger_spec = swagger_spec
@classmethod
def from_url(cls, spec_url, http_client=None, request_headers=None, config=None):
"""Build a :class:`SwaggerClient` from a url to the Swagger
specification for a RESTful API.
:param spec_url: url pointing at the swagger API specification
:type spec_url: str
:param http_client: an HTTP client used to perform requests
:type http_client: :class:`bravado.http_client.HttpClient`
:param request_headers: Headers to pass with http requests
:type request_headers: dict
:param config: Config dict for bravado and bravado_core.
See CONFIG_DEFAULTS in :module:`bravado_core.spec`.
See CONFIG_DEFAULTS in :module:`bravado.client`.
:rtype: :class:`bravado_core.spec.Spec`
"""
log.debug(u"Loading from %s", spec_url)
http_client = http_client or RequestsClient()
loader = Loader(http_client, request_headers=request_headers)
spec_dict = loader.load_spec(spec_url)
# RefResolver may have to download additional json files (remote refs)
# via http. Wrap http_client's request() so that request headers are
# passed along with the request transparently. Yeah, this is not ideal,
# but since RefResolver has new found responsibilities, it is
# functional.
if request_headers is not None:
http_client.request = inject_headers_for_remote_refs(
http_client.request, request_headers)
return cls.from_spec(spec_dict, spec_url, http_client, config)
@classmethod
def from_spec(cls, spec_dict, origin_url=None, http_client=None,
config=None):
"""
Build a :class:`SwaggerClient` from a Swagger spec in dict form.
:param spec_dict: a dict with a Swagger spec in json-like form
:param origin_url: the url used to retrieve the spec_dict
:type origin_url: str
:param config: Configuration dict - see spec.CONFIG_DEFAULTS
:rtype: :class:`bravado_core.spec.Spec`
"""
http_client = http_client or RequestsClient()
config = config or {}
# Apply bravado config defaults
bravado_config = bravado_config_from_config_dict(config)
# remove bravado configs from config dict
for key in set(bravado_config._fields).intersection(set(config)):
del config[key]
# set bravado config object
config['bravado'] = bravado_config
swagger_spec = Spec.from_dict(
spec_dict, origin_url, http_client, config,
)
return cls(swagger_spec, also_return_response=bravado_config.also_return_response)
def get_model(self, model_name):
return self.swagger_spec.definitions[model_name]
def _get_resource(self, item):
"""
:param item: name of the resource to return
:return: :class:`Resource`
"""
resource = self.swagger_spec.resources.get(item)
if not resource:
raise AttributeError(
'Resource {0} not found. Available resources: {1}'
.format(item, ', '.join(dir(self))))
# Wrap bravado-core's Resource and Operation objects in order to
# execute a service call via the http_client.
return ResourceDecorator(resource, self.__also_return_response)
def __repr__(self):
return u"%s(%s)" % (self.__class__.__name__, self.swagger_spec.api_url)
def __getattr__(self, item):
return self._get_resource(item)
def __dir__(self):
return self.swagger_spec.resources.keys()
def inject_headers_for_remote_refs(request_callable, request_headers):
"""Inject request_headers only when the request is to retrieve the
remote refs in the swagger spec (vs being a request for a service call).
:param request_callable: method on http_client to make a http request
:param request_headers: headers to inject when retrieving remote refs
"""
def request_wrapper(request_params, *args, **kwargs):
def is_remote_ref_request(request_kwargs):
# operation is only present for service calls
return request_kwargs.get('operation') is None
if is_remote_ref_request(kwargs):
request_params['headers'] = request_headers
return request_callable(request_params, *args, **kwargs)
return request_wrapper
class ResourceDecorator(object):
"""
Wraps :class:`bravado_core.resource.Resource` so that accesses to contained
operations can be instrumented.
"""
def __init__(self, resource, also_return_response=False):
"""
:type resource: :class:`bravado_core.resource.Resource`
"""
self.also_return_response = also_return_response
self.resource = resource
def __getattr__(self, name):
"""
:rtype: :class:`CallableOperation`
"""
return CallableOperation(getattr(self.resource, name), self.also_return_response)
def __dir__(self):
"""
Exposes correct attrs on resource when tab completing in a REPL
"""
return self.resource.__dir__()
class CallableOperation(object):
"""Wraps an operation to make it callable and provides a docstring. Calling
the operation uses the configured http_client.
:type operation: :class:`bravado_core.operation.Operation`
"""
def __init__(self, operation, also_return_response=False):
self.also_return_response = also_return_response
self.operation = operation
@docstring_property(__doc__)
def __doc__(self):
return create_operation_docstring(self.operation)
def __getattr__(self, name):
"""Forward requests for attrs not found on this decorator to the
delegate.
"""
return getattr(self.operation, name)
def __call__(self, **op_kwargs):
"""Invoke the actual HTTP request and return a future.
:rtype: :class:`bravado.http_future.HTTPFuture`
"""
log.debug(u'%s(%s)', self.operation.operation_id, op_kwargs)
warn_for_deprecated_op(self.operation)
# Get per-request config
request_options = op_kwargs.pop('_request_options', {})
request_config = RequestConfig(request_options, self.also_return_response)
request_params = construct_request(
self.operation, request_options, **op_kwargs)
http_client = self.operation.swagger_spec.http_client
return http_client.request(
request_params,
operation=self.operation,
request_config=request_config,
)
def construct_request(operation, request_options, **op_kwargs):
"""Construct the outgoing request dict.
:type operation: :class:`bravado_core.operation.Operation`
:param request_options: _request_options passed into the operation
invocation.
:param op_kwargs: parameter name/value pairs to passed to the
invocation of the operation.
:return: request in dict form
"""
url = operation.swagger_spec.api_url.rstrip('/') + operation.path_name
request = {
'method': str(operation.http_method.upper()),
'url': url,
'params': {}, # filled in downstream
'headers': request_options.get('headers', {}),
}
# Adds Accept header to request for msgpack response if specified
if request_options.get('use_msgpack', False):
request['headers']['Accept'] = 'application/msgpack'
# Copy over optional request options
for request_option in ('connect_timeout', 'timeout'):
if request_option in request_options:
request[request_option] = request_options[request_option]
construct_params(operation, request, op_kwargs)
return request
def construct_params(operation, request, op_kwargs):
"""Given the parameters passed to the operation invocation, validates and
marshals the parameters into the provided request dict.
:type operation: :class:`bravado_core.operation.Operation`
:type request: dict
:param op_kwargs: the kwargs passed to the operation invocation
:raises: SwaggerMappingError on extra parameters or when a required
parameter is not supplied.
"""
current_params = operation.params.copy()
for param_name, param_value in iteritems(op_kwargs):
param = current_params.pop(param_name, None)
if param is None:
raise SwaggerMappingError(
"{0} does not have parameter {1}"
.format(operation.operation_id, param_name))
marshal_param(param, param_value, request)
# Check required params and non-required params with a 'default' value
for remaining_param in itervalues(current_params):
if remaining_param.location == 'header' and remaining_param.name in request['headers']:
marshal_param(remaining_param, request['headers'][remaining_param.name], request)
else:
if remaining_param.required:
raise SwaggerMappingError(
'{0} is a required parameter'.format(remaining_param.name))
if not remaining_param.required and remaining_param.has_default():
marshal_param(remaining_param, None, request)
|
the-stack_0_2596 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# copied from https://github.com/kaidic/LDAM-DRW/blob/master/losses.py
class LDAMLoss(nn.Module):
def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30, reduce_=False):
super(LDAMLoss, self).__init__()
m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
m_list = m_list * (max_m / np.max(m_list))
m_list = torch.cuda.FloatTensor(m_list)
self.m_list = m_list
assert s > 0
self.s = s
self.weight = weight
self.reduce = reduce_
def forward(self, x, target):
index = torch.zeros_like(x, dtype=torch.uint8)
index.scatter_(1, target.data.view(-1, 1), 1)
index_float = index.type(torch.cuda.FloatTensor)
batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0,1))
batch_m = batch_m.view((-1, 1))
x_m = x - batch_m
output = torch.where(index, x_m, x)
return F.cross_entropy(self.s*output, target, reduce=self.reduce)
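# Hedged usage sketch (illustration only): the class counts below are hypothetical and
# the loss expects CUDA tensors, since the margins are built with torch.cuda.FloatTensor.
# Rarer classes get larger margins (m_j ~ n_j**(-1/4), rescaled so the largest equals max_m).
#
#   cls_num_list = [5000, 500, 50]                      # samples per class
#   criterion = LDAMLoss(cls_num_list, max_m=0.5, s=30)
#   logits = model(images)                              # [batch, num_classes] on GPU
#   per_sample_loss = criterion(logits, targets)        # reduce_=False -> one loss per sample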
|
the-stack_0_2600 | import torch
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.nn.inits import glorot, zeros
class GCNConv(MessagePassing):
r"""The graph convolutional operator from the `"Semi-supervised
Classification with Graph Convolutional Networks"
<https://arxiv.org/abs/1609.02907>`_ paper
.. math::
\mathbf{X}^{\prime} = \mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2} \mathbf{X} \mathbf{\Theta},
where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
adjacency matrix with inserted self-loops and
:math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
improved (bool, optional): If set to :obj:`True`, the layer computes
:math:`\mathbf{\hat{A}}` as :math:`\mathbf{A} + 2\mathbf{I}`.
(default: :obj:`False`)
cached (bool, optional): If set to :obj:`True`, the layer will cache
the computation of :math:`\mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2}` on first execution, and will use the
cached version for further executions.
This parameter should only be set to :obj:`True` in transductive
learning scenarios. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels, out_channels, improved=False, cached=False,
bias=True, **kwargs):
super(GCNConv, self).__init__(aggr='add', **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.cached = cached
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
self.cached_result = None
self.cached_num_edges = None
@staticmethod
def norm(edge_index, num_nodes, edge_weight=None, improved=False,
dtype=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1 if not improved else 2
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def forward(self, x, edge_index, edge_weight=None):
""""""
x = torch.matmul(x, self.weight)
if self.cached and self.cached_result is not None:
if edge_index.size(1) != self.cached_num_edges:
raise RuntimeError(
'Cached {} number of edges, but found {}. Please '
'disable the caching behavior of this layer by removing '
'the `cached=True` argument in its constructor.'.format(
self.cached_num_edges, edge_index.size(1)))
if not self.cached or self.cached_result is None:
self.cached_num_edges = edge_index.size(1)
edge_index, norm = self.norm(edge_index, x.size(0), edge_weight,
self.improved, x.dtype)
self.cached_result = edge_index, norm
edge_index, norm = self.cached_result
return self.propagate(edge_index, x=x, norm=norm)
def message(self, x_j, norm, edge_index_j):
return norm.view(-1, 1) * x_j
def update(self, aggr_out):
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
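# Illustrative usage sketch (added for clarity, not part of the original module):
# applies the layer defined above to a tiny two-node graph. Feature sizes and
# values are invented for demonstration and assume the imports above resolve.
if __name__ == "__main__":
    x = torch.randn(2, 16)                        # 2 nodes, 16 features each
    edge_index = torch.tensor([[0, 1], [1, 0]])   # one undirected edge (both directions)
    conv = GCNConv(in_channels=16, out_channels=4)
    out = conv(x, edge_index)                     # normalized propagation + linear transform
    print(out.shape)                              # -> torch.Size([2, 4])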
|
the-stack_0_2601 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.contrib.operators.gcs_to_s3 import GoogleCloudStorageToS3Operator
from airflow.hooks.S3_hook import S3Hook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
try:
from moto import mock_s3
except ImportError:
mock_s3 = None
TASK_ID = 'test-gcs-list-operator'
GCS_BUCKET = 'test-bucket'
DELIMITER = '.csv'
PREFIX = 'TEST'
S3_BUCKET = 's3://bucket/'
MOCK_FILES = ["TEST1.csv", "TEST2.csv", "TEST3.csv"]
class GoogleCloudStorageToS3OperatorTest(unittest.TestCase):
@mock_s3
@mock.patch('airflow.contrib.operators.gcs_list_operator.GoogleCloudStorageHook')
@mock.patch('airflow.contrib.operators.gcs_to_s3.GoogleCloudStorageHook')
def test_execute(self, mock_hook, mock_hook2):
mock_hook.return_value.list.return_value = MOCK_FILES
mock_hook.return_value.download.return_value = b"testing"
mock_hook2.return_value.list.return_value = MOCK_FILES
operator = GoogleCloudStorageToS3Operator(task_id=TASK_ID,
bucket=GCS_BUCKET,
prefix=PREFIX,
delimiter=DELIMITER,
dest_aws_conn_id=None,
dest_s3_key=S3_BUCKET)
# create dest bucket
hook = S3Hook(aws_conn_id=None)
b = hook.get_bucket('bucket')
b.create()
b.put_object(Key=MOCK_FILES[0], Body=b'testing')
# we expect MOCK_FILES[1:] to be uploaded
# and all MOCK_FILES to be present at the S3 bucket
uploaded_files = operator.execute(None)
self.assertEqual(sorted(MOCK_FILES[1:]),
sorted(uploaded_files))
self.assertEqual(sorted(MOCK_FILES),
sorted(hook.list_keys('bucket', delimiter='/')))
|
the-stack_0_2602 | import pandas as pd
from data.dataset import Metric
wrap_cpu = Metric.CPU_TIME.value
wrap_wc = Metric.WALL_TIME.value
core_count = Metric.USED_CORES.value
cpu_time_per_core = Metric.CPU_TIME_PER_CORE
def cpu_efficiency(df, include_zero_cpu=False):
"""Compute the CPU efficiency from a data frame containing job monitoring information."""
df_filtered = filter_cpu_efficiency(df, include_zero=include_zero_cpu)
df_filtered['max_cpu_time'] = df_filtered[wrap_wc] * df_filtered[core_count]
# Do not count NaN values here
total_walltime = df_filtered['max_cpu_time'].sum()
total_cpu_time = df_filtered[wrap_cpu].sum()
return total_cpu_time / total_walltime
def filter_cpu_efficiency(df, cols=None, include_zero=False):
if not cols:
cols = [Metric.WALL_TIME.value, Metric.CPU_TIME.value]
df_filtered = df.copy()
for col in cols:
if include_zero:
mask = df_filtered[col] >= 0
else:
mask = df_filtered[col] > 0
df_filtered = df_filtered[mask]
return df_filtered
def calculate_efficiencies(jobs: pd.DataFrame, freq='D'):
df = jobs[[Metric.STOP_TIME.value, Metric.WALL_TIME.value, Metric.CPU_TIME.value, Metric.USED_CORES.value]].copy()
df = filter_cpu_efficiency(df, include_zero=False)
df['MaxCPUTime'] = df[Metric.WALL_TIME.value] * df[Metric.USED_CORES.value]
df['day'] = df[Metric.STOP_TIME.value].dt.round(freq)
timeseries = df.groupby('day').apply(lambda x: x[Metric.CPU_TIME.value].sum() / x['MaxCPUTime'].sum())
overall_efficiency = cpu_efficiency(jobs, include_zero_cpu=False)
return timeseries, overall_efficiency
def cpu_efficiency_scaled_by_jobslots(df, include_zero_cpu=False, physical=False):
"""Compute the CPU efficiency from a data frame containing job monitoring information,
but scale the result with the number of jobslots available in the node, either with physical or logical cores.
"""
df_filtered = filter_cpu_efficiency(df, include_zero=include_zero_cpu)
if physical:
core_col = 'cores'
else:
core_col = 'coresLogical'
total_walltime = \
(df_filtered[wrap_wc] * df_filtered[core_count] * df_filtered[core_col] / df_filtered['jobslots']).sum()
total_cpu_time = df_filtered[wrap_cpu].sum()
return total_cpu_time / total_walltime
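# Illustrative usage sketch (added for clarity, not part of the original module): builds a
# two-job frame keyed by the Metric enum values and computes the overall CPU efficiency.
# The numbers are invented; only the column names come from the Metric enum imported above.
def _example_cpu_efficiency():
    jobs = pd.DataFrame({
        Metric.CPU_TIME.value: [3600.0, 1800.0],
        Metric.WALL_TIME.value: [1000.0, 2000.0],
        Metric.USED_CORES.value: [4, 1],
    })
    # 5400 CPU-seconds used out of 4 * 1000 + 1 * 2000 = 6000 core-seconds available -> 0.9
    return cpu_efficiency(jobs)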
|
the-stack_0_2606 | from dataclasses import dataclass, field
from typing import Optional
from .t_base_element import TBaseElement
from .t_formal_expression import TFormalExpression
from .t_implicit_throw_event import TImplicitThrowEvent
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TComplexBehaviorDefinition(TBaseElement):
class Meta:
name = "tComplexBehaviorDefinition"
condition: Optional[TFormalExpression] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.omg.org/spec/BPMN/20100524/MODEL",
"required": True,
}
)
event: Optional[TImplicitThrowEvent] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.omg.org/spec/BPMN/20100524/MODEL",
}
)
|
the-stack_0_2611 | from __future__ import absolute_import, division, unicode_literals
from collections import OrderedDict
import re
from pip._vendor.six import string_types
from . import base
from .._utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, _, _, flag = node
if flag in ("text", "tail"):
return base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
return (base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTreeCommentType:
return base.COMMENT, node.text
else:
assert isinstance(node.tag, string_types), type(node.tag)
# This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key + 1], key + 1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
assert list(parents[-1]).count(parent) == 1
return parent, list(parents[-1]).index(parent), parents, None
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
the-stack_0_2612 | #!/usr/bin/env python3
import argparse
import json
import urllib.parse
from collections import defaultdict
from oic.oic import Client, RegistrationResponse
from oic.oic.message import AuthorizationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic import rndstr
from http.server import HTTPServer, BaseHTTPRequestHandler
from http import HTTPStatus
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
url = urllib.parse.urlparse(self.path)
if url.path == "/":
self._handle_initial()
elif url.path.startswith("/callback/"):
self._handle_callback()
else:
self.return_redirect("/")
def _handle_initial(self):
global session
# setup oic code flow
session["state"] = rndstr()
session["nonce"] = rndstr()
auth_req = client.construct_AuthorizationRequest(request_args={
"response_type": "code",
"scope": cmd_args.scope,
"state": session["state"],
"nonce": session["nonce"],
"redirect_uri": f"http://{self.server.server_address[0]}:{self.server.server_address[1]}/callback/"
})
login_url = auth_req.request(client.authorization_endpoint)
# send response
self.return_redirect(login_url)
def _handle_callback(self):
global session
# parse callback
auth_response = client.parse_response(AuthorizationResponse, info=self.path, sformat="urlencoded")
if auth_response["state"] != session["state"]:
self.send_error(HTTPStatus.BAD_REQUEST, "invalid state", explain="The state of the callback does not match in-memory state")
return
# exchange received code for proper access and refresh tokens
token_response = client.do_access_token_request(scope=cmd_args.scope, state=session["state"],
request_args={"code": auth_response["code"]})
# retrieve user information with newly received access token
userinfo = client.do_user_info_request(state=session["state"], scope=cmd_args.scope)
# output data
self.return_json_response({
"token_response": token_response.to_dict(),
"userinfo": userinfo.to_dict(),
})
print("===============================================================")
print(f"token_type: {token_response.get('token_type')}")
print("access_token:")
print(token_response.get("access_token"))
print("===============================================================")
def return_redirect(self, to: str, code: int = HTTPStatus.FOUND):
self.send_response(code)
self.send_header("location", to)
self.end_headers()
def return_json_response(self, content: dict):
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(content).encode("UTF-8"))
if __name__ == "__main__":
parser = argparse.ArgumentParser("get_tokens", description="Retrieves access and id tokens from Mafiasi Identity")
parser.add_argument("--issuer", help="OpenId Connect issuer. Defaults to Mafiasi",
default="https://identity.mafiasi.de/auth/realms/mafiasi")
parser.add_argument("--client-id", help="OpenId Connect client id. Defaults to dev-client",
default="dev-client")
parser.add_argument("--client-secret", help="OpenId Connect client secret. Defaults to dev-client's secret",
default="bb0c83bc-1dd9-4946-a074-d452bc1fb830")
parser.add_argument("--scope", help="OpenID scopes to request",
action="append", default=["openid"])
cmd_args = parser.parse_args()
# initialize openid client
client = Client(client_id=cmd_args.client_id, client_authn_method=CLIENT_AUTHN_METHOD)
client.provider_config(cmd_args.issuer)
client.store_registration_info(RegistrationResponse(client_id=cmd_args.client_id, client_secret=cmd_args.client_secret))
# initialize a session object (which is very primitive but works)
session = defaultdict(lambda: "")
# serve a basic http server so that authorization code flow can be used
with HTTPServer(("127.0.0.1", 8080), RequestHandler) as server:
print(f"Open http://{server.server_name}:{server.server_port}")
try:
server.serve_forever()
except KeyboardInterrupt:
pass
|
the-stack_0_2613 | import uuid
from datetime import datetime, timedelta
from app import db, encryption
from app.models import ApiKey
from app.dao.dao_utils import (
transactional,
version_class
)
from sqlalchemy import or_, func
from sqlalchemy.orm import joinedload
@transactional
@version_class(ApiKey)
def save_model_api_key(api_key):
if not api_key.id:
api_key.id = uuid.uuid4() # must be set now so version history model can use same id
api_key.secret = uuid.uuid4()
db.session.add(api_key)
@transactional
@version_class(ApiKey)
def expire_api_key(service_id, api_key_id):
api_key = ApiKey.query.filter_by(id=api_key_id, service_id=service_id).one()
api_key.expiry_date = datetime.utcnow()
db.session.add(api_key)
def get_api_key_by_secret(secret):
return db.on_reader().query(ApiKey).filter_by(
_secret=encryption.encrypt(str(secret))
).options(joinedload('service')).one()
def get_model_api_keys(service_id, id=None):
if id:
return ApiKey.query.filter_by(id=id, service_id=service_id, expiry_date=None).one()
seven_days_ago = datetime.utcnow() - timedelta(days=7)
return ApiKey.query.filter(
or_(ApiKey.expiry_date == None, func.date(ApiKey.expiry_date) > seven_days_ago), # noqa
ApiKey.service_id == service_id
).all()
def get_unsigned_secrets(service_id):
"""
    This method should only be exposed to the authentication of the API calls.
"""
api_keys = ApiKey.query.filter_by(service_id=service_id, expiry_date=None).all()
keys = [x.secret for x in api_keys]
return keys
def get_unsigned_secret(key_id):
"""
    This method should only be exposed to the authentication of the API calls.
"""
api_key = ApiKey.query.filter_by(id=key_id, expiry_date=None).one()
return api_key.secret
|
the-stack_0_2616 | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
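# Explanatory note (added comment, not part of the original benchmark): the transition
# system above appears to encode the integer program
#     if y >= 1 and z >= 1:
#         while x >= 0:
#             x = y * z - 1
#             y = y + 1
# with pc = -1 marking termination. The fairness constraint (pc != -1) asks for a
# non-terminating run, which exists e.g. whenever initially x >= 0, y >= 1 and z >= 1.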
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
stutterT=stutter)
loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.GT(y, i_0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Times(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_pc, i_2))
loc1 = Location(env, mgr.Equals(pc, i_2))
loc1.set_progress(2, mgr.Equals(x_pc, i_3))
loc2 = Location(env, mgr.Equals(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Minus(y, i_1)))
h_y = Hint("h_y0", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
return frozenset(res)
|
the-stack_0_2617 | import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
@pluginmatcher(re.compile(
r"https?://live\.line\.me/channels/(?P<channel>\d+)/broadcast/(?P<broadcast>\d+)"
))
class LineLive(Plugin):
_api_url = "https://live-api.line-apps.com/app/v3.2/channel/{0}/broadcast/{1}/player_status"
_player_status_schema = validate.Schema(
{
"liveStatus": validate.text,
"liveHLSURLs": validate.any(None, {
"720": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"480": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"360": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"240": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"144": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
}),
"archivedHLSURLs": validate.any(None, {
"720": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"480": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"360": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"240": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"144": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
}),
})
def _get_live_streams(self, json):
for stream in json["liveHLSURLs"]:
url = json["liveHLSURLs"][stream]
if url is not None:
yield "{0}p.".format(stream), HLSStream(self.session, url)
def _get_vod_streams(self, json):
for stream in json["archivedHLSURLs"]:
url = json["archivedHLSURLs"][stream]
if url is not None:
yield "{0}p.".format(stream), HLSStream(self.session, url)
def _get_streams(self):
channel = self.match.group("channel")
broadcast = self.match.group("broadcast")
res = self.session.http.get(self._api_url.format(channel, broadcast))
json = self.session.http.json(res, schema=self._player_status_schema)
if json["liveStatus"] == "LIVE":
return self._get_live_streams(json)
elif json["liveStatus"] == "FINISHED":
return self._get_vod_streams(json)
return
__plugin__ = LineLive
|
the-stack_0_2618 | import os
import re
from bentoml.service import BentoServiceArtifact
JSON_ARTIFACT_EXTENSION = ".json"
class JSONArtifact(BentoServiceArtifact):
"""Abstraction for saving/loading objects to/from JSON files.
Args:
name (str): Name of the artifact
        encoding (:obj:`str`, optional): The encoding used for saving/loading
            text. Defaults to "utf-8".
json_module (module|object, optional): Namespace/object implementing `loads()`
and `dumps()` methods for serializing/deserializing to/from JSON string.
Defaults to stdlib's json module.
"""
def __init__(self, name, encoding="utf-8", json_module=None):
super().__init__(name)
self._content = None
self._json_dumps_kwargs = None
self._encoding = encoding
if json_module:
self.json_module = json_module
else:
import json
self.json_module = json
def _file_path(self, base_path):
return os.path.join(
base_path,
re.sub("[^-a-zA-Z0-9_.() ]+", "", self.name) + JSON_ARTIFACT_EXTENSION,
)
def load(self, path):
with open(self._file_path(path), "rt", encoding=self._encoding) as fp:
content = self.json_module.loads(fp.read())
return self.pack(content)
def pack(
self, content, metadata=None, **json_dumps_kwargs
): # pylint:disable=arguments-renamed
self._content = content
self._json_dumps_kwargs = json_dumps_kwargs
return self
def get(self):
return self._content
def save(self, dst):
with open(self._file_path(dst), "wt", encoding=self._encoding) as fp:
fp.write(self.json_module.dumps(self._content, **self._json_dumps_kwargs))
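# Illustrative usage sketch (added for clarity, not part of the original module); the
# directory path and payload below are invented for demonstration:
#
#     artifact = JSONArtifact("model_metadata")
#     artifact.pack({"accuracy": 0.93, "labels": ["cat", "dog"]}, indent=2)
#     artifact.save("/tmp")                                 # writes /tmp/model_metadata.json
#     restored = JSONArtifact("model_metadata").load("/tmp").get()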
|
the-stack_0_2619 | import pytest
from skiski.ski import S, K, I
from skiski.lib import B, R
def test_composite_function():
a = lambda x: x * 5
b = lambda x: x - 3
assert B(a).dot(b).dot(5).w() == 10
def test_sksk_is_b():
a = lambda x: x * 5
b = lambda x: x - 3
b_comb = B(a).dot(b).dot(5).w()
sksk = B.to_ski().dot(a).w().dot(b).dot(5).w()
assert b_comb == sksk
def test_reverse_composite_function():
a = lambda x: x * 5
assert R(5).dot(a).w() == 25
def test_sksik_is_r():
a = lambda x: x * 5
r_comb = R(5).dot(a).w()
sksik = S(K(S(I))).dot(K).dot(5).w().dot(a).w()
assert r_comb == sksik
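# Explanatory note (added comment, not part of the original tests): the two *_is_* tests
# check the classic SKI encodings of these combinators -- composition B f g x = f (g x)
# reducing like S(KS)K, and reverse application R x f = f x reducing like S(K(SI))K -- by
# comparing the results of both reductions on concrete functions and arguments.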
|
the-stack_0_2620 | #!/usr/bin/env python3
# Day 15: Maximum Sum Circular Subarray
#
# Given a circular array C of integers represented by A, find the maximum
# possible sum of a non-empty subarray of C.
# Here, a circular array means the end of the array connects to the beginning
# of the array. (Formally, C[i] = A[i] when 0 <= i < A.length, and
# C[i+A.length] = C[i] when i >= 0.)
# Also, a subarray may only include each element of the fixed buffer A at most
# once. (Formally, for a subarray C[i], C[i+1], ..., C[j], there does not
# exist i <= k1 < k2 <= j with k1 % A.length == k2 % A.length.)
#
# Notes:
# - -30000 <= A[i] <= 30000
# - 1 <= A.length <= 30000
class Solution:
def maxSubarraySum(self, numbers: [int]) -> int:
best = float("-inf")
current = 0
for number in numbers:
current += number
if best < current:
best = current
if current < 0:
current = 0
return best
def maxSubarraySumCircular(self, A: [int]) -> int:
total = sum(A)
inverted = [-number for number in A]
best_contiguous = self.maxSubarraySum(A)
best_inverted = self.maxSubarraySum(inverted)
if best_inverted == -total:
return best_contiguous
else:
return max(best_contiguous, total + best_inverted)
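# Why this works (explanatory note added for clarity): the best circular subarray either
# stays contiguous (handled by the Kadane scan in maxSubarraySum) or wraps around the ends,
# in which case it equals the total sum minus the minimum contiguous subarray. Negating
# every element turns that minimum into a maximum, so total + best_inverted is exactly
# total - min_subarray. The guard best_inverted == -total catches the degenerate case where
# the minimum subarray absorbs the entire total, so the wrap-around complement would be
# empty; there we fall back to the plain contiguous answer.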
# Tests
assert Solution().maxSubarraySumCircular([1,-2,3,-2]) == 3
assert Solution().maxSubarraySumCircular([5,-3,5]) == 10
assert Solution().maxSubarraySumCircular([3,-1,2,-1]) == 4
assert Solution().maxSubarraySumCircular([3,-2,2,-3]) == 3
assert Solution().maxSubarraySumCircular([-2,-3,-1]) == -1
|
the-stack_0_2622 | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
#
# Complete the 'missingNumbers' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts following parameters:
# 1. INTEGER_ARRAY arr
# 2. INTEGER_ARRAY brr
#
def missingNumbers(arr, brr):
# Write your code here
acount = Counter(arr)
bcount = Counter(brr)
for el in acount.items():
get = bcount.get(el[0])
if get:
bcount[el[0]] -= el[1]
bcount = list(map(lambda x: x[0], (filter(lambda x: x[1] > 0, bcount.items()))))
bcount = sorted(bcount)
return bcount
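# Worked example (added for illustration; the values are arbitrary): with
#     arr = [203, 204, 205, 206, 207, 208, 203, 204, 205, 206]
#     brr = [203, 204, 204, 205, 206, 207, 205, 208, 203, 206, 205, 206, 204]
# brr contains one extra 204, 205 and 206 compared to arr, so
# missingNumbers(arr, brr) returns [204, 205, 206].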
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
m = int(input().strip())
brr = list(map(int, input().rstrip().split()))
result = missingNumbers(arr, brr)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
the-stack_0_2623 | # -*- coding: utf-8 -*-
from captcha.conf import settings
from captcha.fields import CaptchaField, CaptchaTextInput
from captcha.models import CaptchaStore, get_safe_now
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext_lazy
import datetime
import json
import re
import six
import os
class CaptchaCase(TestCase):
urls = 'captcha.tests.urls'
def setUp(self):
self.stores = {}
self.__current_settings_output_format = settings.CAPTCHA_OUTPUT_FORMAT
self.__current_settings_dictionary = settings.CAPTCHA_WORDS_DICTIONARY
self.__current_settings_punctuation = settings.CAPTCHA_PUNCTUATION
tested_helpers = ['captcha.helpers.math_challenge', 'captcha.helpers.random_char_challenge', 'captcha.helpers.unicode_challenge']
if os.path.exists('/usr/share/dict/words'):
settings.CAPTCHA_WORDS_DICTIONARY = '/usr/share/dict/words'
settings.CAPTCHA_PUNCTUATION = ';-,.'
tested_helpers.append('captcha.helpers.word_challenge')
tested_helpers.append('captcha.helpers.huge_words_and_punctuation_challenge')
for helper in tested_helpers:
challenge, response = settings._callable_from_string(helper)()
self.stores[helper.rsplit('.', 1)[-1].replace('_challenge', '_store')], _ = CaptchaStore.objects.get_or_create(challenge=challenge, response=response)
challenge, response = settings.get_challenge()()
self.stores['default_store'], _ = CaptchaStore.objects.get_or_create(challenge=challenge, response=response)
self.default_store = self.stores['default_store']
def tearDown(self):
settings.CAPTCHA_OUTPUT_FORMAT = self.__current_settings_output_format
settings.CAPTCHA_WORDS_DICTIONARY = self.__current_settings_dictionary
settings.CAPTCHA_PUNCTUATION = self.__current_settings_punctuation
def __extract_hash_and_response(self, r):
hash_ = re.findall(r'value="([0-9a-f]+)"', str(r.content))[0]
response = CaptchaStore.objects.get(hashkey=hash_).response
return hash_, response
def testImages(self):
for key in [store.hashkey for store in six.itervalues(self.stores)]:
response = self.client.get(reverse('captcha-image', kwargs=dict(key=key)))
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('content-type'))
self.assertEqual(response._headers.get('content-type'), ('Content-Type', 'image/png'))
def testAudio(self):
if not settings.CAPTCHA_FLITE_PATH:
return
for key in (self.stores.get('math_store').hashkey, self.stores.get('math_store').hashkey, self.default_store.hashkey):
response = self.client.get(reverse('captcha-audio', kwargs=dict(key=key)))
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.content) > 1024)
self.assertTrue(response.has_header('content-type'))
self.assertEqual(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav'))
def testFormSubmit(self):
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertFalse(str(r.content).find('Form validated') > 0)
def testFormModelForm(self):
r = self.client.get(reverse('captcha-test-model-form'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test-model-form'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
r = self.client.post(reverse('captcha-test-model-form'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertFalse(str(r.content).find('Form validated') > 0)
def testWrongSubmit(self):
for urlname in ('captcha-test', 'captcha-test-model-form'):
r = self.client.get(reverse(urlname))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse(urlname), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='[email protected]'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
def testDeleteExpired(self):
self.default_store.expiration = get_safe_now() - datetime.timedelta(minutes=5)
self.default_store.save()
hash_ = self.default_store.hashkey
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=self.default_store.response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertFalse('Form validated' in str(r.content))
# expired -> deleted
try:
CaptchaStore.objects.get(hashkey=hash_)
self.fail()
except:
pass
def testCustomErrorMessage(self):
r = self.client.get(reverse('captcha-test-custom-error-message'))
self.assertEqual(r.status_code, 200)
# Wrong answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc', captcha_1='wrong response'))
self.assertFormError(r, 'form', 'captcha', 'TEST CUSTOM ERROR MESSAGE')
# empty answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc', captcha_1=''))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('This field is required.'))
def testRepeatedChallenge(self):
CaptchaStore.objects.create(challenge='xxx', response='xxx')
try:
CaptchaStore.objects.create(challenge='xxx', response='xxx')
except Exception:
self.fail()
def testRepeatedChallengeFormSubmit(self):
__current_challange_function = settings.CAPTCHA_CHALLENGE_FUNCT
for urlname in ('captcha-test', 'captcha-test-model-form'):
settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge'
r1 = self.client.get(reverse(urlname))
r2 = self.client.get(reverse(urlname))
self.assertEqual(r1.status_code, 200)
self.assertEqual(r2.status_code, 200)
if re.findall(r'value="([0-9a-f]+)"', str(r1.content)):
hash_1 = re.findall(r'value="([0-9a-f]+)"', str(r1.content))[0]
else:
self.fail()
if re.findall(r'value="([0-9a-f]+)"', str(r2.content)):
hash_2 = re.findall(r'value="([0-9a-f]+)"', str(r2.content))[0]
else:
self.fail()
try:
store_1 = CaptchaStore.objects.get(hashkey=hash_1)
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
self.assertTrue(store_1.pk != store_2.pk)
self.assertTrue(store_1.response == store_2.response)
self.assertTrue(hash_1 != hash_2)
r1 = self.client.post(reverse(urlname), dict(captcha_0=hash_1, captcha_1=store_1.response, subject='xxx', sender='[email protected]'))
self.assertEqual(r1.status_code, 200)
self.assertTrue(str(r1.content).find('Form validated') > 0)
try:
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
r2 = self.client.post(reverse(urlname), dict(captcha_0=hash_2, captcha_1=store_2.response, subject='xxx', sender='[email protected]'))
self.assertEqual(r2.status_code, 200)
self.assertTrue(str(r2.content).find('Form validated') > 0)
settings.CAPTCHA_CHALLENGE_FUNCT = __current_challange_function
def testOutputFormat(self):
for urlname in ('captcha-test', 'captcha-test-model-form'):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s<p>Hello, captcha world</p>%(hidden_field)s%(text_field)s'
r = self.client.get(reverse(urlname))
self.assertEqual(r.status_code, 200)
self.assertTrue('<p>Hello, captcha world</p>' in str(r.content))
def testInvalidOutputFormat(self):
__current_settings_debug = django_settings.DEBUG
for urlname in ('captcha-test', 'captcha-test-model-form'):
            # we turn on DEBUG because CAPTCHA_OUTPUT_FORMAT is only checked in debug mode
django_settings.DEBUG = True
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s'
try:
self.client.get(reverse(urlname))
self.fail()
except ImproperlyConfigured as e:
self.assertTrue('CAPTCHA_OUTPUT_FORMAT' in str(e))
django_settings.DEBUG = __current_settings_debug
def testPerFormFormat(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s testCustomFormatString %(hidden_field)s %(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.assertTrue('testCustomFormatString' in str(r.content))
r = self.client.get(reverse('test_per_form_format'))
self.assertTrue('testPerFieldCustomFormatString' in str(r.content))
def testIssue31ProperLabel(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s %(hidden_field)s %(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.assertTrue('<label for="id_captcha_1"' in str(r.content))
def testRefreshView(self):
r = self.client.get(reverse('captcha-refresh'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
try:
new_data = json.loads(six.text_type(r.content, encoding='ascii'))
self.assertTrue('image_url' in new_data)
except:
self.fail()
def testContentLength(self):
for key in [store.hashkey for store in six.itervalues(self.stores)]:
response = self.client.get(reverse('captcha-image', kwargs=dict(key=key)))
self.assertTrue(response.has_header('content-length'))
self.assertTrue(response['content-length'].isdigit())
self.assertTrue(int(response['content-length']))
def testIssue12ProperInstantiation(self):
"""
        This test covers default Django field and widget behavior.
        It does not assert anything; if something is wrong, it will raise an error!
"""
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s %(hidden_field)s %(text_field)s'
widget = CaptchaTextInput(attrs={'class': 'required'})
CaptchaField(widget=widget)
def testTestMode_Issue15(self):
__current_test_mode_setting = settings.CAPTCHA_TEST_MODE
settings.CAPTCHA_TEST_MODE = False
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='[email protected]'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
settings.CAPTCHA_TEST_MODE = True
# Test mode, only 'PASSED' is accepted
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='[email protected]'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='passed', subject='xxx', sender='[email protected]'))
self.assertTrue(str(r.content).find('Form validated') > 0)
settings.CAPTCHA_TEST_MODE = __current_test_mode_setting
def test_get_version(self):
import captcha
captcha.get_version(True)
def test_missing_value(self):
r = self.client.get(reverse('captcha-test-non-required'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
# Empty response is okay when required is False
r = self.client.post(reverse('captcha-test-non-required'), dict(subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
# But a valid response is okay, too
r = self.client.get(reverse('captcha-test-non-required'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test-non-required'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
def test_autocomplete_off(self):
r = self.client.get(reverse('captcha-test'))
self.assertTrue('autocomplete="off"' in six.text_type(r.content))
def trivial_challenge():
return 'trivial', 'trivial'
|
the-stack_0_2624 | import asyncio
import aiosqlite
import copy
from typing import Dict
from aiosqlite.core import Connection
class DBWrapper:
"""
    This object handles HeaderBlocks and Blocks stored in the DB used by the wallet.
"""
db: Dict[str,aiosqlite.Connection]
lock: asyncio.Lock
def __init__(self, connection: Dict[str,aiosqlite.Connection]):
        self.db = connection
self.lock = asyncio.Lock()
async def begin_transaction(self, str="chia"):
cursor = await self.db[str].execute("BEGIN TRANSACTION")
await cursor.close()
async def rollback_transaction(self,str="chia"):
# Also rolls back the coin store, since both stores must be updated at once
if self.db[str].in_transaction:
cursor = await self.db[str].execute("ROLLBACK")
await cursor.close()
async def commit_transaction(self,str="chia"):
if isinstance(self.db[str], aiosqlite.Connection):
await self.db[str].commit()
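# Illustrative usage sketch (added for clarity, not part of the original module); the
# database file name is an assumption, "chia" matches the default key used above, and the
# helper is defined only for demonstration and never called:
async def _example_usage():
    connection = await aiosqlite.connect("wallet.db")
    wrapper = DBWrapper({"chia": connection})
    async with wrapper.lock:
        await wrapper.begin_transaction("chia")
        # ... perform writes through wrapper.db["chia"] here ...
        await wrapper.commit_transaction("chia")
    await connection.close()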
|
the-stack_0_2625 | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import os.path as osp
import warnings
from argparse import ArgumentParser
import cv2
import mmcv
import numpy as np
from mmpose.apis import (collect_multi_frames, extract_pose_sequence,
get_track_id, inference_pose_lifter_model,
inference_top_down_pose_model, init_pose_model,
process_mmdet_results, vis_3d_pose_result)
from mmpose.core import Smoother
from mmpose.datasets import DatasetInfo
from mmpose.models import PoseLifter, TopDown
try:
from mmdet.apis import inference_detector, init_detector
has_mmdet = True
except (ImportError, ModuleNotFoundError):
has_mmdet = False
def convert_keypoint_definition(keypoints, pose_det_dataset,
pose_lift_dataset):
"""Convert pose det dataset keypoints definition to pose lifter dataset
keypoints definition.
Args:
keypoints (ndarray[K, 2 or 3]): 2D keypoints to be transformed.
pose_det_dataset, (str): Name of the dataset for 2D pose detector.
pose_lift_dataset (str): Name of the dataset for pose lifter model.
"""
coco_style_datasets = [
'TopDownCocoDataset', 'TopDownPoseTrack18Dataset',
'TopDownPoseTrack18VideoDataset'
]
if pose_det_dataset == 'TopDownH36MDataset' and \
pose_lift_dataset == 'Body3DH36MDataset':
return keypoints
elif pose_det_dataset in coco_style_datasets and \
pose_lift_dataset == 'Body3DH36MDataset':
keypoints_new = np.zeros((17, keypoints.shape[1]))
# pelvis is in the middle of l_hip and r_hip
keypoints_new[0] = (keypoints[11] + keypoints[12]) / 2
# thorax is in the middle of l_shoulder and r_shoulder
keypoints_new[8] = (keypoints[5] + keypoints[6]) / 2
# in COCO, head is in the middle of l_eye and r_eye
# in PoseTrack18, head is in the middle of head_bottom and head_top
keypoints_new[10] = (keypoints[1] + keypoints[2]) / 2
# spine is in the middle of thorax and pelvis
keypoints_new[7] = (keypoints_new[0] + keypoints_new[8]) / 2
# rearrange other keypoints
keypoints_new[[1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \
keypoints[[12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]]
return keypoints_new
else:
raise NotImplementedError
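# Example of the conversion above (comment added for clarity; the array itself is
# arbitrary): given a COCO-style (17, 3) array `kpts` of [x, y, score] rows,
#     convert_keypoint_definition(kpts, 'TopDownCocoDataset', 'Body3DH36MDataset')
# returns a (17, 3) array in H36M order, where e.g. row 0 (pelvis) is the midpoint of
# the COCO hips (rows 11 and 12) and row 8 (thorax) the midpoint of the shoulders.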
def main():
parser = ArgumentParser()
parser.add_argument('det_config', help='Config file for detection')
parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
parser.add_argument(
'pose_detector_config',
type=str,
default=None,
help='Config file for the 1st stage 2D pose detector')
parser.add_argument(
'pose_detector_checkpoint',
type=str,
default=None,
help='Checkpoint file for the 1st stage 2D pose detector')
parser.add_argument(
'pose_lifter_config',
help='Config file for the 2nd stage pose lifter model')
parser.add_argument(
'pose_lifter_checkpoint',
help='Checkpoint file for the 2nd stage pose lifter model')
parser.add_argument(
'--video-path', type=str, default='', help='Video path')
parser.add_argument(
'--rebase-keypoint-height',
action='store_true',
help='Rebase the predicted 3D pose so its lowest keypoint has a '
'height of 0 (landing on the ground). This is useful for '
'visualization when the model do not predict the global position '
'of the 3D pose.')
parser.add_argument(
'--norm-pose-2d',
action='store_true',
help='Scale the bbox (along with the 2D pose) to the average bbox '
'scale of the dataset, and move the bbox (along with the 2D pose) to '
'the average bbox center of the dataset. This is useful when bbox '
'is small, especially in multi-person scenarios.')
parser.add_argument(
'--num-instances',
type=int,
default=-1,
help='The number of 3D poses to be visualized in every frame. If '
'less than 0, it will be set to the number of pose results in the '
'first frame.')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show visualizations.')
parser.add_argument(
'--out-video-root',
type=str,
default='vis_results',
help='Root of the output video file. '
'Default not saving the visualization video.')
parser.add_argument(
'--device', default='cuda:0', help='Device for inference')
parser.add_argument(
'--det-cat-id',
type=int,
default=1,
help='Category id for bounding box detection model')
parser.add_argument(
'--bbox-thr',
type=float,
default=0.9,
help='Bounding box score threshold')
parser.add_argument('--kpt-thr', type=float, default=0.3)
parser.add_argument(
'--use-oks-tracking', action='store_true', help='Using OKS tracking')
parser.add_argument(
'--tracking-thr', type=float, default=0.3, help='Tracking threshold')
parser.add_argument(
'--radius',
type=int,
default=8,
help='Keypoint radius for visualization')
parser.add_argument(
'--thickness',
type=int,
default=2,
help='Link thickness for visualization')
parser.add_argument(
'--smooth',
action='store_true',
help='Apply a temporal filter to smooth the pose estimation results. '
'See also --smooth-filter-cfg.')
parser.add_argument(
'--smooth-filter-cfg',
type=str,
default='configs/_base_/filters/one_euro.py',
help='Config file of the filter to smooth the pose estimation '
'results. See also --smooth.')
parser.add_argument(
'--use-multi-frames',
action='store_true',
default=False,
help='whether to use multi frames for inference in the 2D pose'
'detection stage. Default: False.')
parser.add_argument(
'--online',
action='store_true',
default=False,
help='inference mode. If set to True, can not use future frame'
'information when using multi frames for inference in the 2D pose'
'detection stage. Default: False.')
assert has_mmdet, 'Please install mmdet to run the demo.'
args = parser.parse_args()
assert args.show or (args.out_video_root != '')
assert args.det_config is not None
assert args.det_checkpoint is not None
video = mmcv.VideoReader(args.video_path)
assert video.opened, f'Failed to load video file {args.video_path}'
# First stage: 2D pose detection
print('Stage 1: 2D pose detection.')
print('Initializing model...')
person_det_model = init_detector(
args.det_config, args.det_checkpoint, device=args.device.lower())
pose_det_model = init_pose_model(
args.pose_detector_config,
args.pose_detector_checkpoint,
device=args.device.lower())
assert isinstance(pose_det_model, TopDown), 'Only "TopDown"' \
'model is supported for the 1st stage (2D pose detection)'
# frame index offsets for inference, used in multi-frame inference setting
if args.use_multi_frames:
assert 'frame_indices_test' in pose_det_model.cfg.data.test.data_cfg
indices = pose_det_model.cfg.data.test.data_cfg['frame_indices_test']
pose_det_dataset = pose_det_model.cfg.data['test']['type']
# get datasetinfo
dataset_info = pose_det_model.cfg.data['test'].get('dataset_info', None)
if dataset_info is None:
warnings.warn(
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
else:
dataset_info = DatasetInfo(dataset_info)
pose_det_results_list = []
next_id = 0
pose_det_results = []
# whether to return heatmap, optional
return_heatmap = False
# return the output of some desired layers,
# e.g. use ('backbone', ) to return backbone feature
output_layer_names = None
print('Running 2D pose detection inference...')
for frame_id, cur_frame in enumerate(mmcv.track_iter_progress(video)):
pose_det_results_last = pose_det_results
# test a single image, the resulting box is (x1, y1, x2, y2)
mmdet_results = inference_detector(person_det_model, cur_frame)
# keep the person class bounding boxes.
person_det_results = process_mmdet_results(mmdet_results,
args.det_cat_id)
if args.use_multi_frames:
frames = collect_multi_frames(video, frame_id, indices,
args.online)
# make person results for current image
pose_det_results, _ = inference_top_down_pose_model(
pose_det_model,
frames if args.use_multi_frames else cur_frame,
person_det_results,
bbox_thr=args.bbox_thr,
format='xyxy',
dataset=pose_det_dataset,
dataset_info=dataset_info,
return_heatmap=return_heatmap,
outputs=output_layer_names)
# get track id for each person instance
pose_det_results, next_id = get_track_id(
pose_det_results,
pose_det_results_last,
next_id,
use_oks=args.use_oks_tracking,
tracking_thr=args.tracking_thr)
pose_det_results_list.append(copy.deepcopy(pose_det_results))
# Second stage: Pose lifting
print('Stage 2: 2D-to-3D pose lifting.')
print('Initializing model...')
pose_lift_model = init_pose_model(
args.pose_lifter_config,
args.pose_lifter_checkpoint,
device=args.device.lower())
assert isinstance(pose_lift_model, PoseLifter), \
'Only "PoseLifter" model is supported for the 2nd stage ' \
'(2D-to-3D lifting)'
pose_lift_dataset = pose_lift_model.cfg.data['test']['type']
if args.out_video_root == '':
save_out_video = False
else:
os.makedirs(args.out_video_root, exist_ok=True)
save_out_video = True
if save_out_video:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = video.fps
writer = None
# convert keypoint definition
for pose_det_results in pose_det_results_list:
for res in pose_det_results:
keypoints = res['keypoints']
res['keypoints'] = convert_keypoint_definition(
keypoints, pose_det_dataset, pose_lift_dataset)
# load temporal padding config from model.data_cfg
if hasattr(pose_lift_model.cfg, 'test_data_cfg'):
data_cfg = pose_lift_model.cfg.test_data_cfg
else:
data_cfg = pose_lift_model.cfg.data_cfg
# build pose smoother for temporal refinement
if args.smooth:
smoother = Smoother(filter_cfg=args.smooth_filter_cfg, keypoint_dim=3)
else:
smoother = None
num_instances = args.num_instances
print('Running 2D-to-3D pose lifting inference...')
for i, pose_det_results in enumerate(
mmcv.track_iter_progress(pose_det_results_list)):
# extract and pad input pose2d sequence
pose_results_2d = extract_pose_sequence(
pose_det_results_list,
frame_idx=i,
causal=data_cfg.causal,
seq_len=data_cfg.seq_len,
step=data_cfg.seq_frame_interval)
# 2D-to-3D pose lifting
pose_lift_results = inference_pose_lifter_model(
pose_lift_model,
pose_results_2d=pose_results_2d,
dataset=pose_lift_dataset,
with_track_id=True,
image_size=video.resolution,
norm_pose_2d=args.norm_pose_2d)
# Pose processing
pose_lift_results_vis = []
for idx, res in enumerate(pose_lift_results):
keypoints_3d = res['keypoints_3d']
# exchange y,z-axis, and then reverse the direction of x,z-axis
keypoints_3d = keypoints_3d[..., [0, 2, 1]]
keypoints_3d[..., 0] = -keypoints_3d[..., 0]
keypoints_3d[..., 2] = -keypoints_3d[..., 2]
# rebase height (z-axis)
if args.rebase_keypoint_height:
keypoints_3d[..., 2] -= np.min(
keypoints_3d[..., 2], axis=-1, keepdims=True)
res['keypoints_3d'] = keypoints_3d
# add title
det_res = pose_det_results[idx]
instance_id = det_res['track_id']
res['title'] = f'Prediction ({instance_id})'
# only visualize the target frame
res['keypoints'] = det_res['keypoints']
res['bbox'] = det_res['bbox']
res['track_id'] = instance_id
pose_lift_results_vis.append(res)
# Smoothing
if smoother:
pose_lift_results = smoother.smooth(pose_lift_results)
# Visualization
if num_instances < 0:
num_instances = len(pose_lift_results_vis)
img_vis = vis_3d_pose_result(
pose_lift_model,
result=pose_lift_results_vis,
img=video[i],
out_file=None,
radius=args.radius,
thickness=args.thickness,
num_instances=num_instances)
if save_out_video:
if writer is None:
writer = cv2.VideoWriter(
osp.join(args.out_video_root,
f'vis_{osp.basename(args.video_path)}'), fourcc,
fps, (img_vis.shape[1], img_vis.shape[0]))
writer.write(img_vis)
if save_out_video:
writer.release()
if __name__ == '__main__':
main()
|
the-stack_0_2626 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ArgumentUsageError,
ClientRequestError,
InvalidArgumentValueError)
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_09_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_09_01.models import AgentPool
from azure.mgmt.containerservice.v2020_09_01.models import AgentPoolUpgradeSettings
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentityUserAssignedIdentitiesValue
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_managed_clusters
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import ADDONS
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
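# Illustrative usage sketch, kept as a comment so nothing runs at import time (the binary
# name is an arbitrary example): which() returns the absolute path of the first matching
# executable on PATH, or None when no match exists.
#
#   kubectl_path = which('kubectl')
#   if kubectl_path is None:
#       raise CLIError('kubectl executable not found in PATH')
#   subprocess.call([kubectl_path, 'version', '--client'])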
def wait_then_open(url):
"""
    Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up before opening the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
        raise CLIError('Cannot find the kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
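# Illustrative usage sketch, comment only (the URL and destination path are hypothetical):
# _urlretrieve() downloads with the SSL context picked by _ssl_context(), and the caller
# typically marks the downloaded file executable afterwards.
#
#   _urlretrieve('https://example.com/downloads/kubectl', '/tmp/kubectl')
#   os.chmod('/tmp/kubectl', os.stat('/tmp/kubectl').st_mode | stat.S_IXUSR)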
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
import zipfile
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError('Unsupported system "{}". Cannot download the DC/OS CLI for this platform.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError('Unsupported system "{}". Cannot download kubectl for this platform.'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. Two options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
        raise CLIError('Unsupported system "{}". Cannot download kubelogin for this platform.'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. Two options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
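# Illustrative sketch, comment only (inputs are made up): the generated DNS prefix is
# '<name[:10]>-<resource_group[:16]>-<subscription_id[:6]>' with non-alphanumeric
# characters stripped, and a leading 'a' is prepended when the name does not start with
# a letter.
#
#   _get_default_dns_prefix('my_cluster!', 'my-rg', '0b1f6471-1bf0-4dda-aec3-111122223333')
#   # -> 'mycluster-my-rg-0b1f64'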
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
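# Illustrative sketch, comment only (credentials are placeholders): with --windows the
# helper returns a plain dict matching the ARM template's windowsProfile shape, otherwise
# it returns None.
#
#   _generate_windows_profile(True, 'azureuser', 'Passw0rd!')
#   # -> {'adminUsername': 'azureuser', 'adminPassword': 'Passw0rd!'}
#   _generate_windows_profile(False, 'azureuser', '')
#   # -> None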
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
msi_client = get_msi_client(cli_ctx)
pattern = '/subscriptions/.*?/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)'
resource_id = resource_id.lower()
match = re.search(pattern, resource_id)
if match:
resource_group_name = match.group(1)
identity_name = match.group(2)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
raise ClientRequestError(ex.message)
return identity.client_id
raise InvalidArgumentValueError("Cannot parse identity name from provided resource id {}.".format(resource_id))
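# Illustrative sketch, comment only (the resource id below is hypothetical): the helper
# expects a full user-assigned identity ARM id and resolves it to the identity's client id.
#
#   _get_user_assigned_identity_client_id(
#       cmd.cli_ctx,
#       '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/'
#       'Microsoft.ManagedIdentity/userAssignedIdentities/my-identity')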
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
     'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for the agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
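# Illustrative sketch, comment only (ids and secrets are placeholders): the
# acsServicePrincipal.json cache maps a subscription id to the service principal/secret
# pair last stored for it.
#
#   store_acs_service_principal('00000000-0000-0000-0000-000000000000',
#                               'client-secret-value', 'http://my-sp-name')
#   load_acs_service_principal('00000000-0000-0000-0000-000000000000')
#   # -> {'client_secret': 'client-secret-value', 'service_principal': 'http://my-sp-name'}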
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
if validate:
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
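# Illustrative sketch, comment only (the kubeconfig fragments are made up): _handle_merge()
# appends new named entries and, when `replace` is true, overwrites an existing entry with
# the same name instead of prompting.
#
#   existing = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://old'}}]}
#   addition = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://new'}}]}
#   _handle_merge(existing, addition, 'clusters', True)
#   # existing['clusters'] now holds the entry pointing at 'https://new'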
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
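# Illustrative sketch, comment only (the subscription id is a placeholder): an explicit
# scope wins, a resource group expands to its full ARM scope, and the bare subscription
# scope is the fallback.
#
#   _build_role_scope('my-rg', None, '00000000-0000-0000-0000-000000000000')
#   # -> '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
#   _build_role_scope(None, None, '00000000-0000-0000-0000-000000000000')
#   # -> '/subscriptions/00000000-0000-0000-0000-000000000000'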
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
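# Illustrative sketch, comment only: _update_dict() is a non-mutating merge where the
# second dict wins on key collisions; it is used to layer user-supplied profiles on top
# of generated defaults.
#
#   _update_dict({'count': 3, 'vmSize': 'Standard_D2_v2'}, {'count': 5})
#   # -> {'count': 5, 'vmSize': 'Standard_D2_v2'}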
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
cmd.cli_ctx.cloud.endpoints.portal + # Azure Portal URL (https://portal.azure.com for public cloud)
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
        raise CLIError('Cannot find the kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
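# Illustrative sketch, comment only: the nodepool name must fit within 12 characters
# before the RP appends its own suffix, so longer names are truncated and an empty name
# falls back to the default.
#
#   _trim_nodepoolname('')                      # -> 'nodepool1'
#   _trim_nodepoolname('verylongnodepoolname')  # -> 'verylongnode'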
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=False,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
# To avoid that windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
                # If the user enters an empty value here, the admin_username validation in
                # ManagedClusterWindowsProfile will still fail later.
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
# Skip create service principal profile for the cluster if the cluster
# enables managed identity and customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not(enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
logger.warning('The cluster is an MSI cluster using system assigned identity, '
'please manually grant Network Contributor role to the '
'system assigned identity after the cluster is created, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity')
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError('--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
    # Add the AAD session key to the request header.
    # If principal_obj is None (which can happen when the cluster enables managed identity),
    # the header is not needed, so it is fine to skip it.
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
                                   'you can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
                                   'in MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if (CONST_MONITORING_ADDON_NAME in instance.addon_profiles and
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled):
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile and
not enable_ahub and
not disable_ahub):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--load-balancer-managed-outbound-ip-count" or'
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or'
'"--load-balancer-outbound-ports" or'
'"--load-balancer-idle-timeout" or'
'"--attach-acr" or "--detach-acr" or'
'"--uptime-sla" or'
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per node pool autoscaler settings.')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
if enable_ahub and disable_ahub:
raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This is only a client-side convenience so customers can run "az aks upgrade" to upgrade all
        # node pools of a cluster. The SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. A node image only upgrade '
                               'can only be applied to a VirtualMachineScaleSets cluster.')
_upgrade_single_nodepool_image_version(True, client, resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
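# Thin wrapper over the agent pool client's upgrade_node_image_version call for a single node pool.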
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
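# Enable or disable the requested addons on an existing managed cluster, normalizing addon names
# and addon-specific configuration.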
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {}" '
                                   'before enabling it again.'.format(resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
else:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
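# Translate the --enable-addons values passed at create time into ManagedClusterAddonProfile entries;
# unrecognized addon names raise a CLIError.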
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None, aci_subnet_name=None, vnet_subnet_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
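# Find or create the subscription's default Log Analytics workspace for the monitoring addon. The workspace lives
# in a region-mapped resource group, for example DefaultResourceGroup-EUS with a workspace named
# DefaultWorkspace-<subscription-id>-EUS.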
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
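# Deploy the ContainerInsights solution into the Log Analytics workspace referenced by the monitoring addon
# configuration.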
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
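# Resolve the ACR by resource ID or by name across resource groups, then grant (or, with detach=True, revoke)
# the acrpull role for the given client ID.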
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
upgrade_settings=upgradeSettings,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False):
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
managed_cluster_client = cf_managed_clusters(cmd.cli_ctx)
return _upgrade_single_nodepool_image_version(no_wait,
managed_cluster_client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
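# Create (or, with detach=True, delete) the acrpull role assignment for the client ID, scoped to the registry.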
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
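# Return the service principal and client secret for the cluster, creating a new service principal
# (and capturing the AAD session key) when one is not supplied.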
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is set temporarily because Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment first, before saving it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
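    # Normalize the workspace resource ID: trim whitespace, ensure a leading '/', and drop any trailing '/'.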
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation is not supported on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually add the master pool name, because validation complains otherwise
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
the-stack_0_2627 | from operator import methodcaller
from readers import FileReader
COM = "COM"
YOU = "YOU"
SAN = "SAN"
def main():
raw_orbits = list(map(methodcaller("split", ")"), map(str.strip, FileReader.read_input_as_list())))
orbits = {o[1]: o[0] for o in raw_orbits}
you_planets = set_of_planets_to_home(YOU, orbits, set())
santa_planets = set_of_planets_to_home(SAN, orbits, set())
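    # Each set holds every object on the path from YOU/SAN back to COM (including
    # the "YOU"/"SAN" entries themselves). Objects shared by both paths cancel out
    # in the symmetric difference; subtracting 2 removes the YOU and SAN entries,
    # leaving only the orbital transfers needed between the two.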
print(f"Total number jumps to santa: {len(you_planets ^ santa_planets) - 2}")
def set_of_planets_to_home(planet, orbits, planets):
if planet in orbits:
planets.add(planet)
if orbits[planet] == COM:
print(f"{len(planets)} planets to home")
return planets
return set_of_planets_to_home(orbits[planet], orbits, planets)
print(f"This is odd, did not expect to get here! Processing: {planet}")
return planets
if __name__ == "__main__":
main()
|
the-stack_0_2628 | from cohortextractor import StudyDefinition, patients, codelist, codelist_from_csv # NOQA
study = StudyDefinition(
default_expectations={
"date": {"earliest": "1900-01-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.5,
},
population=patients.registered_with_one_practice_between(
"2019-02-01", "2020-02-01"
),
age=patients.age_as_of(
"2019-09-01",
return_expectations={
"rate": "universal",
"int": {"distribution": "population_ages"},
},
),
)
|
the-stack_0_2630 | '''Get and put messages on with IBM MQ queues.
User is based on `pymqi` for communicating with IBM MQ. However `pymqi` uses native libraries which `gevent` (used by `locust`) cannot patch,
which causes any calls in `pymqi` to block the rest of `locust`. To get around this, the user implementation communicates with a stand-alone
process via zmq, which in turn communicates with IBM MQ.
`async-messaged` starts automagically when a scenario uses `MessageQueueUser` and `pymqi` dependencies are installed.
## Request methods
Supports the following request methods:
* send
* put
* get
* receive
## Format
Format of `host` is the following:
``` plain
mq://<hostname>:<port>/?QueueManager=<queue manager name>&Channel=<channel name>
```
`endpoint` in the request is the name of an MQ queue. This can also be combined with an expression, if
a specific message is to be retrieved from the queue. The format of endpoint is:
``` plain
queue:<queue_name>[, expression:<expression>]
```
Where `<expression>` can be an XPath or jsonpath expression, depending on the specified content type. See the example below.
## Examples
Example of how to use it in a scenario:
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01"
Then put request "test/queue-message.j2.json" with name "queue-message" to endpoint "queue:INCOMING.MESSAGES"
```
### Get message
Default behavior is to fail directly if there is no message on the queue. If the request should wait until a message is available,
set the time it should wait with `message.wait` (seconds) context variable.
To keep the connection alive during longer waiting periods, a heartbeat interval can be configured using the
`connection.heartbeat_interval` (seconds) context variable (default 300).
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "message.wait" to "5"
Then get request with name "get-queue-message" from endpoint "queue:INCOMING.MESSAGES"
```
In this example, the request will not fail if there is a message on queue within 5 seconds.
### Get message with expression
When specifying an expression, the messages on the queue are first browsed. If any message matches the expression, it is
later consumed from the queue. If no matching message was found during browsing, it is repeated again after a slight delay,
up until the specified `message.wait` seconds has elapsed. To use expressions, a content type must be specified for the get
request, e.g. `application/xml`:
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "message.wait" to "5"
Then get request with name "get-specific-queue-message" from endpoint "queue:INCOMING.MESSAGES, expression: //document[@id='abc123']"
And set response content type to "application/xml"
```
### Authentication
#### Username and password
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mqm:[email protected]/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "auth.username" to "<username>"
And set context variable "auth.password" to "<password>"
```
#### With TLS
A [key repository](https://www.ibm.com/docs/en/ibm-mq/7.5?topic=wstulws-setting-up-key-repository-unix-linux-windows-systems)
(3 files; `.kdb`, `.rdb` and `.sth`) for the user is needed, and is specified with `auth.key_file` excluding the file extension.
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mqm:[email protected]/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "auth.username" to "<username>"
And set context variable "auth.password" to "<password>"
And set context variable "auth.key_file" to "<path to key file, excl. file extension>"
```
The default SSL cipher is `ECDHE_RSA_AES_256_GCM_SHA384`; change it by setting the `auth.ssl_cipher` context variable.
The default certificate label is set to `auth.username`; change it by setting the `auth.cert_label` context variable.
'''
import logging
from typing import Dict, Any, Generator, Tuple, Optional, cast
from urllib.parse import urlparse, parse_qs, unquote
from contextlib import contextmanager
from time import perf_counter as time
from zmq.sugar.constants import NOBLOCK as ZMQ_NOBLOCK, REQ as ZMQ_REQ
from zmq.error import Again as ZMQAgain
import zmq.green as zmq
from locust.exception import StopUser
from locust.env import Environment
from gevent import sleep as gsleep
from grizzly_extras.async_message import AsyncMessageContext, AsyncMessageRequest, AsyncMessageResponse, AsyncMessageError
from grizzly_extras.arguments import get_unsupported_arguments, parse_arguments
from ..types import GrizzlyResponse, RequestDirection, RequestType
from ..tasks import RequestTask
from ..utils import merge_dicts
from .base import GrizzlyUser, ResponseHandler, RequestLogger
from . import logger
# not used here, but needed for the sanity check
try:
# do not fail grizzly if ibm mq dependencies are missing, some might
# not be interested in MessageQueueUser.
import pymqi # pylint: disable=unused-import
except:
from grizzly_extras import dummy_pymqi as pymqi
class MessageQueueUser(ResponseHandler, RequestLogger, GrizzlyUser):
_context: Dict[str, Any] = {
'auth': {
'username': None,
'password': None,
'key_file': None,
'cert_label': None,
'ssl_cipher': None
},
'message': {
'wait': None,
}
}
__dependencies__ = set(['async-messaged'])
am_context: AsyncMessageContext
worker_id: Optional[str]
zmq_context = zmq.Context()
zmq_client: zmq.Socket
zmq_url = 'tcp://127.0.0.1:5554'
def __init__(self, environment: Environment, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> None:
if pymqi.__name__ == 'grizzly_extras.dummy_pymqi':
pymqi.raise_for_error(self.__class__)
super().__init__(environment, *args, **kwargs)
# Get configuration values from host string
parsed = urlparse(self.host or '')
if parsed.scheme != 'mq':
raise ValueError(f'"{parsed.scheme}" is not a supported scheme for {self.__class__.__name__}')
if parsed.hostname is None or len(parsed.hostname) < 1:
raise ValueError(f'{self.__class__.__name__}: hostname is not specified in {self.host}')
if parsed.username is not None or parsed.password is not None:
raise ValueError(f'{self.__class__.__name__}: username and password should be set via context variables "auth.username" and "auth.password"')
if parsed.query == '':
raise ValueError(f'{self.__class__.__name__} needs QueueManager and Channel in the query string')
port = parsed.port or 1414
self.am_context = {
'url': self.host or '',
'connection': f'{parsed.hostname}({port})',
}
params = parse_qs(parsed.query)
if 'QueueManager' not in params:
raise ValueError(f'{self.__class__.__name__} needs QueueManager in the query string')
if 'Channel' not in params:
raise ValueError(f'{self.__class__.__name__} needs Channel in the query string')
self.am_context.update({
'queue_manager': unquote(params['QueueManager'][0]),
'channel': unquote(params['Channel'][0]),
})
# Get configuration values from context
self._context = merge_dicts(super().context(), self.__class__._context)
auth_context = self._context.get('auth', {})
username = auth_context.get('username', None)
self.am_context.update({
'username': username,
'password': auth_context.get('password', None),
'key_file': auth_context.get('key_file', None),
'cert_label': auth_context.get('cert_label', None) or username,
'ssl_cipher': auth_context.get('ssl_cipher', None) or 'ECDHE_RSA_AES_256_GCM_SHA384',
'message_wait': self._context.get('message', {}).get('wait', None),
'heartbeat_interval': self._context.get('connection', {}).get('heartbeat_interval', None),
})
self.worker_id = None
# silence uamqp loggers
for uamqp_logger_name in ['uamqp', 'uamqp.c_uamqp']:
logging.getLogger(uamqp_logger_name).setLevel(logging.ERROR)
def request(self, request: RequestTask) -> GrizzlyResponse:
request_name, endpoint, payload = self.render(request)
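        # action_context wraps one round-trip to the async-messaged process: it sends
        # the request over zmq, polls for the reply without blocking other greenlets,
        # and finally fires the locust response/request events with the outcome.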
@contextmanager
def action_context(am_request: AsyncMessageRequest, name: str) -> Generator[Dict[str, Any], None, None]:
exception: Optional[Exception] = None
action: Dict[str, Any] = {
'failure_exception': None,
'meta': False,
'payload': None,
'metadata': None,
}
response: Optional[AsyncMessageResponse] = None
start_time = time()
try:
yield action
self.zmq_client.send_json(am_request)
# do not block all other "threads", just it self
while True:
try:
response = cast(AsyncMessageResponse, self.zmq_client.recv_json(flags=ZMQ_NOBLOCK))
break
except ZMQAgain:
gsleep(0.1)
except Exception as e:
exception = e
finally:
total_time = int((time() - start_time) * 1000) # do not include event handling in request time
if response is not None:
if self.worker_id is None:
self.worker_id = response['worker']
else:
assert self.worker_id == response['worker'], f'worker changed from {self.worker_id} to {response["worker"]}'
mq_response_time = response.get('response_time', 0)
delta = total_time - mq_response_time
if delta > 100: # @TODO: what is a suitable value?
logger.warning(f'{self.__class__.__name__}: communicating with async-messaged took {delta} ms')
if not response['success'] and exception is None:
exception = AsyncMessageError(response['message'])
else:
response = {}
action['metadata'] = response.get('metadata', None)
action['payload'] = response.get('payload', None)
try:
if not action.get('meta', False):
self.response_event.fire(
name=name,
request=request,
context=(
response.get('metadata', None),
response.get('payload', None),
),
user=self,
exception=exception,
)
except Exception as e:
if exception is None:
exception = e
finally:
self.environment.events.request.fire(
request_type=RequestType.from_string(am_request['action']),
name=name,
response_time=total_time,
response_length=response.get('response_length', None) or 0,
context=self._context,
exception=exception,
)
failure_exception = action.get('failure_exception', None)
action = {
'payload': action['payload'],
'metadata': action['metadata'],
}
if exception is not None and failure_exception is not None:
try:
self.zmq_client.disconnect(self.zmq_url)
except:
pass
raise failure_exception()
name = f'{request.scenario.identifier} {request_name}'
# connect to queue manager at first request
if self.worker_id is None:
with action_context({
'action': RequestType.CONNECT(),
'context': self.am_context
}, self.am_context['connection']) as action:
action.update({
'meta': True,
'failure_exception': request.scenario.failure_exception,
})
self.zmq_client = self.zmq_context.socket(ZMQ_REQ)
self.zmq_client.connect(self.zmq_url)
am_request: AsyncMessageRequest = {
'action': request.method.name,
'worker': self.worker_id,
'context': {
'endpoint': endpoint,
},
'payload': payload,
}
am_request['context']['content_type'] = request.response.content_type.name.lower()
with action_context(am_request, name) as action:
action['failure_exception'] = StopUser
# Parse the endpoint to validate queue name / expression parts
try:
arguments = parse_arguments(endpoint, ':')
except ValueError as e:
raise RuntimeError(str(e)) from e
if 'queue' not in arguments:
raise RuntimeError('queue name must be prefixed with queue:')
unsupported_arguments = get_unsupported_arguments(['queue', 'expression'], arguments)
if len(unsupported_arguments) > 0:
raise RuntimeError(f'arguments {", ".join(unsupported_arguments)} is not supported')
if 'expression' in arguments and request.method.direction != RequestDirection.FROM:
raise RuntimeError('argument "expression" is not allowed when sending to an endpoint')
action['failure_exception'] = request.scenario.failure_exception
return action['metadata'], action['payload']
|
the-stack_0_2632 | #!/usr/bin/env python3
import warnings
from copy import deepcopy
import torch
from .. import settings
from ..distributions import MultivariateNormal
from ..likelihoods import _GaussianLikelihoodBase
from ..utils.broadcasting import _mul_broadcast_shape
from .exact_prediction_strategies import prediction_strategy
from .gp import GP
class ExactGP(GP):
r"""
The base class for any Gaussian process latent function to be used in conjunction
with exact inference.
:param torch.Tensor train_inputs: (size n x d) The training features :math:`\mathbf X`.
:param torch.Tensor train_targets: (size n) The training targets :math:`\mathbf y`.
:param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood that defines
the observational distribution. Since we're using exact inference, the likelihood must be Gaussian.
The :meth:`forward` function should describe how to compute the prior latent distribution
on a given input. Typically, this will involve a mean and kernel function.
The result must be a :obj:`~gpytorch.distributions.MultivariateNormal`.
Calling this model will return the posterior of the latent Gaussian process when conditioned
on the training data. The output will be a :obj:`~gpytorch.distributions.MultivariateNormal`.
Example:
>>> class MyGP(gpytorch.models.ExactGP):
>>> def __init__(self, train_x, train_y, likelihood):
>>> super().__init__(train_x, train_y, likelihood)
>>> self.mean_module = gpytorch.means.ZeroMean()
>>> self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
>>>
>>> def forward(self, x):
>>> mean = self.mean_module(x)
>>> covar = self.covar_module(x)
>>> return gpytorch.distributions.MultivariateNormal(mean, covar)
>>>
>>> # train_x = ...; train_y = ...
>>> likelihood = gpytorch.likelihoods.GaussianLikelihood()
>>> model = MyGP(train_x, train_y, likelihood)
>>>
>>> # test_x = ...;
>>> model(test_x) # Returns the GP latent function at test_x
>>> likelihood(model(test_x)) # Returns the (approximate) predictive posterior distribution at test_x
"""
def __init__(self, train_inputs, train_targets, likelihood):
if train_inputs is not None and torch.is_tensor(train_inputs):
train_inputs = (train_inputs,)
if train_inputs is not None and not all(torch.is_tensor(train_input) for train_input in train_inputs):
raise RuntimeError("Train inputs must be a tensor, or a list/tuple of tensors")
if not isinstance(likelihood, _GaussianLikelihoodBase):
raise RuntimeError("ExactGP can only handle Gaussian likelihoods")
super(ExactGP, self).__init__()
if train_inputs is not None:
self.train_inputs = tuple(tri.unsqueeze(-1) if tri.ndimension() == 1 else tri for tri in train_inputs)
self.train_targets = train_targets
else:
self.train_inputs = None
self.train_targets = None
self.likelihood = likelihood
self.prediction_strategy = None
@property
def train_targets(self):
return self._train_targets
@train_targets.setter
def train_targets(self, value):
object.__setattr__(self, "_train_targets", value)
def _apply(self, fn):
if self.train_inputs is not None:
self.train_inputs = tuple(fn(train_input) for train_input in self.train_inputs)
self.train_targets = fn(self.train_targets)
return super(ExactGP, self)._apply(fn)
def local_load_samples(self, samples_dict, memo, prefix):
"""
Replace the model's learned hyperparameters with samples from a posterior distribution.
"""
# Pyro always puts the samples in the first batch dimension
num_samples = next(iter(samples_dict.values())).size(0)
self.train_inputs = tuple(tri.unsqueeze(0).expand(num_samples, *tri.shape) for tri in self.train_inputs)
self.train_targets = self.train_targets.unsqueeze(0).expand(num_samples, *self.train_targets.shape)
super().local_load_samples(samples_dict, memo, prefix)
def set_train_data(self, inputs=None, targets=None, strict=True):
"""
Set training data (does not re-fit model hyper-parameters).
:param torch.Tensor inputs: The new training inputs.
:param torch.Tensor targets: The new training targets.
:param bool strict: (default True) If `True`, the new inputs and
targets must have the same shape, dtype, and device
as the current inputs and targets. Otherwise, any shape/dtype/device are allowed.
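
        Example (illustrative sketch; assumes an existing ``model`` and new tensors ``new_x``, ``new_y``):
            >>> model.set_train_data(new_x, new_y, strict=False)
            >>> model.train()  # hyperparameters can be re-optimized on the new data if desired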
"""
if inputs is not None:
if torch.is_tensor(inputs):
inputs = (inputs,)
inputs = tuple(input_.unsqueeze(-1) if input_.ndimension() == 1 else input_ for input_ in inputs)
if strict:
for input_, t_input in zip(inputs, self.train_inputs or (None,)):
for attr in {"shape", "dtype", "device"}:
expected_attr = getattr(t_input, attr, None)
found_attr = getattr(input_, attr, None)
if expected_attr != found_attr:
msg = "Cannot modify {attr} of inputs (expected {e_attr}, found {f_attr})."
msg = msg.format(attr=attr, e_attr=expected_attr, f_attr=found_attr)
raise RuntimeError(msg)
self.train_inputs = inputs
if targets is not None:
if strict:
for attr in {"shape", "dtype", "device"}:
expected_attr = getattr(self.train_targets, attr, None)
found_attr = getattr(targets, attr, None)
if expected_attr != found_attr:
msg = "Cannot modify {attr} of targets (expected {e_attr}, found {f_attr})."
msg = msg.format(attr=attr, e_attr=expected_attr, f_attr=found_attr)
raise RuntimeError(msg)
self.train_targets = targets
self.prediction_strategy = None
def get_fantasy_model(self, inputs, targets, **kwargs):
"""
Returns a new GP model that incorporates the specified inputs and targets as new training data.
Using this method is more efficient than updating with `set_train_data` when the number of inputs is relatively
small, because any computed test-time caches will be updated in linear time rather than computed from scratch.
.. note::
If `targets` is a batch (e.g. `b x m`), then the GP returned from this method will be a batch mode GP.
If `inputs` is of the same (or lesser) dimension as `targets`, then it is assumed that the fantasy points
are the same for each target batch.
:param torch.Tensor inputs: (`b1 x ... x bk x m x d` or `f x b1 x ... x bk x m x d`) Locations of fantasy
observations.
:param torch.Tensor targets: (`b1 x ... x bk x m` or `f x b1 x ... x bk x m`) Labels of fantasy observations.
:return: An `ExactGP` model with `n + m` training examples, where the `m` fantasy examples have been added
and all test-time caches have been updated.
:rtype: ~gpytorch.models.ExactGP
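
        Example (illustrative sketch; assumes ``model`` has already produced a posterior and that ``new_x``/``new_y`` exist):
            >>> model.eval()
            >>> _ = model(test_x)  # populates the test-time caches required below
            >>> fantasy_model = model.get_fantasy_model(new_x, new_y)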
"""
if self.prediction_strategy is None:
raise RuntimeError(
"Fantasy observations can only be added after making predictions with a model so that "
"all test independent caches exist. Call the model on some data first!"
)
model_batch_shape = self.train_inputs[0].shape[:-2]
if self.train_targets.dim() > len(model_batch_shape) + 1:
raise RuntimeError("Cannot yet add fantasy observations to multitask GPs, but this is coming soon!")
if not isinstance(inputs, list):
inputs = [inputs]
inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in inputs]
target_batch_shape = targets.shape[:-1]
input_batch_shape = inputs[0].shape[:-2]
tbdim, ibdim = len(target_batch_shape), len(input_batch_shape)
if not (tbdim == ibdim + 1 or tbdim == ibdim):
raise RuntimeError(
f"Unsupported batch shapes: The target batch shape ({target_batch_shape}) must have either the "
f"same dimension as or one more dimension than the input batch shape ({input_batch_shape})"
)
# Check whether we can properly broadcast batch dimensions
err_msg = (
f"Model batch shape ({model_batch_shape}) and target batch shape "
f"({target_batch_shape}) are not broadcastable."
)
_mul_broadcast_shape(model_batch_shape, target_batch_shape, error_msg=err_msg)
if len(model_batch_shape) > len(input_batch_shape):
input_batch_shape = model_batch_shape
if len(model_batch_shape) > len(target_batch_shape):
target_batch_shape = model_batch_shape
# If input has no fantasy batch dimension but target does, we can save memory and computation by not
# computing the covariance for each element of the batch. Therefore we don't expand the inputs to the
# size of the fantasy model here - this is done below, after the evaluation and fast fantasy update
train_inputs = [tin.expand(input_batch_shape + tin.shape[-2:]) for tin in self.train_inputs]
train_targets = self.train_targets.expand(target_batch_shape + self.train_targets.shape[-1:])
full_inputs = [
torch.cat([train_input, input.expand(input_batch_shape + input.shape[-2:])], dim=-2)
for train_input, input in zip(train_inputs, inputs)
]
full_targets = torch.cat([train_targets, targets.expand(target_batch_shape + targets.shape[-1:])], dim=-1)
try:
fantasy_kwargs = {"noise": kwargs.pop("noise")}
except KeyError:
fantasy_kwargs = {}
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
# Copy model without copying training data or prediction strategy (since we'll overwrite those)
old_pred_strat = self.prediction_strategy
old_train_inputs = self.train_inputs
old_train_targets = self.train_targets
old_likelihood = self.likelihood
self.prediction_strategy = None
self.train_inputs = None
self.train_targets = None
self.likelihood = None
new_model = deepcopy(self)
self.prediction_strategy = old_pred_strat
self.train_inputs = old_train_inputs
self.train_targets = old_train_targets
self.likelihood = old_likelihood
new_model.likelihood = old_likelihood.get_fantasy_likelihood(**fantasy_kwargs)
new_model.prediction_strategy = old_pred_strat.get_fantasy_strategy(
inputs, targets, full_inputs, full_targets, full_output, **fantasy_kwargs
)
# if the fantasies are at the same points, we need to expand the inputs for the new model
if tbdim == ibdim + 1:
new_model.train_inputs = [fi.expand(target_batch_shape + fi.shape[-2:]) for fi in full_inputs]
else:
new_model.train_inputs = full_inputs
new_model.train_targets = full_targets
return new_model
def train(self, mode=True):
if mode:
self.prediction_strategy = None
return super(ExactGP, self).train(mode)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
self.prediction_strategy = None
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __call__(self, *args, **kwargs):
train_inputs = list(self.train_inputs) if self.train_inputs is not None else []
inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in args]
# Training mode: optimizing
if self.training:
if self.train_inputs is None:
raise RuntimeError(
"train_inputs, train_targets cannot be None in training mode. "
"Call .eval() for prior predictions, or call .set_train_data() to add training data."
)
if settings.debug.on():
if not all(torch.equal(train_input, input) for train_input, input in zip(train_inputs, inputs)):
raise RuntimeError("You must train on the training inputs!")
res = super().__call__(*inputs, **kwargs)
return res
# Prior mode
elif settings.prior_mode.on() or self.train_inputs is None or self.train_targets is None:
full_inputs = args
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
if settings.debug().on():
if not isinstance(full_output, MultivariateNormal):
raise RuntimeError("ExactGP.forward must return a MultivariateNormal")
return full_output
# Posterior mode
else:
if settings.debug.on():
if all(torch.equal(train_input, input) for train_input, input in zip(train_inputs, inputs)):
warnings.warn(
"The input matches the stored training data. Did you forget to call model.train()?", UserWarning
)
# Get the terms that only depend on training data
if self.prediction_strategy is None:
train_output = super().__call__(*train_inputs, **kwargs)
                # Create the prediction strategy for conditioning on the training data
self.prediction_strategy = prediction_strategy(
train_inputs=train_inputs,
train_prior_dist=train_output,
train_labels=self.train_targets,
likelihood=self.likelihood,
)
# Concatenate the input to the training input
full_inputs = []
batch_shape = train_inputs[0].shape[:-2]
for train_input, input in zip(train_inputs, inputs):
# Make sure the batch shapes agree for training/test data
if batch_shape != train_input.shape[:-2]:
batch_shape = _mul_broadcast_shape(batch_shape, train_input.shape[:-2])
train_input = train_input.expand(*batch_shape, *train_input.shape[-2:])
if batch_shape != input.shape[:-2]:
batch_shape = _mul_broadcast_shape(batch_shape, input.shape[:-2])
train_input = train_input.expand(*batch_shape, *train_input.shape[-2:])
input = input.expand(*batch_shape, *input.shape[-2:])
full_inputs.append(torch.cat([train_input, input], dim=-2))
# Get the joint distribution for training/test data
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
if settings.debug().on():
if not isinstance(full_output, MultivariateNormal):
raise RuntimeError("ExactGP.forward must return a MultivariateNormal")
full_mean, full_covar = full_output.loc, full_output.lazy_covariance_matrix
# Determine the shape of the joint distribution
batch_shape = full_output.batch_shape
joint_shape = full_output.event_shape
tasks_shape = joint_shape[1:] # For multitask learning
test_shape = torch.Size([joint_shape[0] - self.prediction_strategy.train_shape[0], *tasks_shape])
# Make the prediction
with settings._use_eval_tolerance():
predictive_mean, predictive_covar = self.prediction_strategy.exact_prediction(full_mean, full_covar)
# Reshape predictive mean to match the appropriate event shape
predictive_mean = predictive_mean.view(*batch_shape, *test_shape).contiguous()
return full_output.__class__(predictive_mean, predictive_covar)
|
the-stack_0_2634 | # -*- coding: utf-8 -*-
# file: training.py
# time: 2021/5/26 0026
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
########################################################################################################################
# train and evaluate on your own apc_datasets (need train and test apc_datasets) #
# your custom dataset_utils should have the continue polarity labels like [0,N-1] for N categories #
########################################################################################################################
from pyabsa.functional import Trainer
from pyabsa.functional import APCConfigManager
from pyabsa.functional import ABSADatasetList
from pyabsa.functional import APCModelList
config = APCConfigManager.get_apc_config_chinese()
config.evaluate_begin = 4
config.dropout = 0.5
config.l2reg = 0.0001
config.model = APCModelList.FAST_LCF_BERT
save_path = 'state_dict'
chinese_sets = ABSADatasetList.Chinese
sent_classifier = Trainer(config=config, # set config=None to use default model
dataset=chinese_sets, # train set and test set will be automatically detected
auto_device=True # automatic choose CUDA or CPU
)
|
the-stack_0_2635 | import asyncio
import datetime
import logging
import time
from collections import defaultdict
from contextlib import suppress
from datetime import timedelta
from io import BytesIO
from typing import Any, Iterable, NoReturn, Optional, Set
import discord
import prettytable
import pytz
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import box, pagify
from tsutils.enums import Server, StarterGroup
from tsutils.formatting import normalize_server_name
from tsutils.helper_classes import DummyObject
from tsutils.helper_functions import conditional_iterator, repeating_timer
from padevents.autoevent_mixin import AutoEvent
from padevents.enums import DungeonType, EventLength
from padevents.events import Event, EventList, SERVER_TIMEZONES
logger = logging.getLogger('red.padbot-cogs.padevents')
SUPPORTED_SERVERS = ["JP", "NA", "KR"]
GROUPS = ['red', 'blue', 'green']
class PadEvents(commands.Cog, AutoEvent):
"""Pad Event Tracker"""
def __init__(self, bot, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.config = Config.get_conf(self, identifier=940373775)
self.config.register_global(sent={}, last_daychange=None)
self.config.register_guild(pingroles={})
self.config.register_channel(guerrilla_servers=[], daily_servers=[], do_aep_post=True)
self.config.register_user(dmevents=[])
# Load event data
self.events = set()
self.started_events = set()
self.fake_uid = -time.time()
self._event_loop = bot.loop.create_task(self.reload_padevents())
self._refresh_loop = bot.loop.create_task(self.do_loop())
self._daily_event_loop = bot.loop.create_task(self.show_daily_info())
async def red_get_data_for_user(self, *, user_id):
"""Get a user's personal data."""
aeds = await self.config.user_from_id(user_id).dmevents()
if aeds:
data = f"You have {len(aeds)} AEDs stored. Use" \
f" {(await self.bot.get_valid_prefixes())[0]}aed list to see what they are.\n"
else:
data = f"No data is stored for user with ID {user_id}."
return {"user_data.txt": BytesIO(data.encode())}
async def red_delete_data_for_user(self, *, requester, user_id):
"""Delete a user's personal data."""
await self.config.user_from_id(user_id).clear()
def cog_unload(self):
# Manually nulling out database because the GC for cogs seems to be pretty shitty
self.events = set()
self.started_events = set()
self._event_loop.cancel()
self._refresh_loop.cancel()
self._daily_event_loop.cancel()
async def reload_padevents(self) -> NoReturn:
await self.bot.wait_until_ready()
with suppress(asyncio.CancelledError):
async for _ in repeating_timer(60 * 60):
try:
await self.refresh_data()
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Error in loop:")
async def do_loop(self) -> NoReturn:
await self.bot.wait_until_ready()
with suppress(asyncio.CancelledError):
async for _ in repeating_timer(10):
try:
await self.do_autoevents()
await self.do_eventloop()
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Error in loop:")
async def show_daily_info(self) -> NoReturn:
async def is_day_change():
curserver = self.get_most_recent_day_change()
oldserver = self.config.last_daychange
if curserver != await oldserver():
await oldserver.set(curserver)
return curserver
await self.bot.wait_until_ready()
with suppress(asyncio.CancelledError):
async for server in conditional_iterator(is_day_change, poll_interval=10):
try:
await self.do_daily_post(server)
await self.do_autoevent_summary(server)
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Error in loop:")
async def refresh_data(self):
dbcog: Any = self.bot.get_cog('DBCog')
await dbcog.wait_until_ready()
scheduled_events = dbcog.database.get_all_events()
new_events = set()
for se in scheduled_events:
try:
new_events.add(Event(se))
except Exception as ex:
logger.exception("Refresh error:")
self.events = self.coalesce_event_data(new_events)
self.started_events = {ev.key for ev in new_events if ev.is_started()}
async with self.config.sent() as seen:
for key, value in [*seen.items()]:
if value < time.time() - 60 * 60:
del seen[key]
async def do_eventloop(self):
events = filter(lambda e: e.is_started() and e.key not in self.started_events, self.events)
daily_refresh_servers = set()
for event in events:
self.started_events.add(event.key)
if event.event_length != EventLength.limited:
continue
for cid, data in (await self.config.all_channels()).items():
if (channel := self.bot.get_channel(cid)) is None \
or event.server not in data['guerrilla_servers']:
continue
role_name = f'{event.server}_group_{event.group_long_name()}'
role = channel.guild.get_role(role_name)
if role and role.mentionable:
message = f"{role.mention} {event.clean_dungeon_name} is starting"
else:
message = box(f"Server {event.server}, group {event.group_long_name()}:"
f" {event.clean_dungeon_name}")
with suppress(discord.Forbidden):
await channel.send(message, allowed_mentions=discord.AllowedMentions(roles=True))
async def do_daily_post(self, server):
msg = self.make_active_text(server)
for cid, data in (await self.config.all_channels()).items():
if (channel := self.bot.get_channel(cid)) is None \
or server not in data['daily_servers']:
continue
for page in pagify(msg, delims=['\n\n']):
with suppress(discord.Forbidden):
await channel.send(box(page))
async def do_autoevent_summary(self, server):
events = EventList(self.events).with_server(server).today_only('NA')
for gid, data in (await self.config.all_guilds()).items():
if (guild := self.bot.get_guild(gid)) is None:
continue
channels = defaultdict(list)
for key, aep in data.get('pingroles', {}).items():
for channel in aep['channels']:
if channel is not None:
channels[channel].append(aep)
for cid, aeps in channels.items():
if (channel := self.bot.get_channel(cid)) is None:
continue
if not await self.config.channel(channel).do_aep_post():
continue
aepevents = events.with_func(lambda e: any(self.event_matches_autoevent(e, ae) for ae in aeps))
if not aepevents:
continue
msg = self.make_full_guerrilla_output('AEP Event', aepevents)
for page in pagify(msg, delims=['\n\n']):
with suppress(discord.Forbidden):
await channel.send(box(page))
@commands.group(aliases=['pde'])
@checks.mod_or_permissions(manage_guild=True)
async def padevents(self, ctx):
"""PAD event tracking"""
@padevents.command()
@checks.is_owner()
async def testevent(self, ctx, server: Server, seconds: int = 0, group='red'):
server = server.value
if group.lower() not in ('red', 'blue', 'green'):
group = None
dbcog: Any = self.bot.get_cog('DBCog')
await dbcog.wait_until_ready()
# TODO: Don't use this awful importing hack
dg_module = __import__('.'.join(dbcog.__module__.split('.')[:-1]) + ".models.scheduled_event_model")
timestamp = int((datetime.datetime.now(pytz.utc) + timedelta(seconds=seconds)).timestamp())
self.fake_uid -= 1
te = dg_module.models.scheduled_event_model.ScheduledEventModel(
event_id=self.fake_uid,
server_id=SUPPORTED_SERVERS.index(server),
event_type_id=-1,
start_timestamp=timestamp,
end_timestamp=timestamp + 60,
group_name=group and group.lower(),
dungeon_model=DummyObject(
name_en='fake_dungeon_name',
clean_name_en='fake_dungeon_name',
dungeon_type=DungeonType.ThreePlayer,
dungeon_id=1,
)
)
self.events.add(Event(te))
await ctx.tick()
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def addchannel(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).guerrilla_servers() as guerillas:
if server in guerillas:
return await ctx.send("Channel already active.")
guerillas.append(server)
await ctx.send("Channel now active.")
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def rmchannel(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).guerrilla_servers() as guerillas:
if server not in guerillas:
return await ctx.send("Channel already inactive.")
guerillas.remove(server)
await ctx.send("Channel now inactive.")
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def addchanneldaily(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).daily_servers() as dailies:
if server in dailies:
return await ctx.send("Channel already active.")
dailies.append(server)
await ctx.send("Channel now active.")
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def rmchanneldaily(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).daily_servers() as dailies:
if server not in dailies:
return await ctx.send("Channel already inactive.")
dailies.remove(server)
await ctx.send("Channel now inactive.")
@padevents.command()
@checks.mod_or_permissions(manage_guild=True)
async def active(self, ctx, server: Server):
server = server.value
msg = self.make_active_text(server)
for page in pagify(msg, delims=['\n\n']):
await ctx.send(box(page))
def make_active_text(self, server):
server = normalize_server_name(server)
server_events = EventList(self.events).with_server(server)
active_events = server_events.active_only()
events_today = server_events.today_only(server)
active_special = active_events.with_dungeon_type(DungeonType.Special)
msg = server + " Events - " + datetime.datetime.now(SERVER_TIMEZONES[server]).strftime('%A, %B %-e')
ongoing_events = active_events.with_length(EventLength.weekly, EventLength.special)
if ongoing_events:
msg += "\n\n" + self.make_active_output('Ongoing Events', ongoing_events)
active_dailies_events = active_events.with_length(EventLength.daily)
if active_dailies_events:
msg += "\n\n" + self.make_daily_output('Daily Dungeons', active_dailies_events)
limited_events = events_today.with_length(EventLength.limited)
if limited_events:
msg += "\n\n" + self.make_full_guerrilla_output('Limited Events', limited_events)
return msg
def make_daily_output(self, table_name, event_list):
tbl = prettytable.PrettyTable([table_name])
tbl.hrules = prettytable.HEADER
tbl.vrules = prettytable.NONE
tbl.align[table_name] = "l"
for e in event_list:
tbl.add_row([e.clean_dungeon_name])
return tbl.get_string()
def make_active_output(self, table_name, event_list):
tbl = prettytable.PrettyTable(["Time", table_name])
tbl.hrules = prettytable.HEADER
tbl.vrules = prettytable.NONE
tbl.align[table_name] = "l"
tbl.align["Time"] = "r"
for e in event_list:
tbl.add_row([e.end_from_now_full_min().strip(), e.clean_dungeon_name])
return tbl.get_string()
def make_active_guerrilla_output(self, table_name: str, event_list: EventList) -> str:
tbl = prettytable.PrettyTable([table_name, "Group", "Time"])
tbl.hrules = prettytable.HEADER
tbl.vrules = prettytable.NONE
tbl.align[table_name] = "l"
tbl.align["Time"] = "r"
for e in event_list:
tbl.add_row([e.clean_dungeon_name, e.group, e.end_from_now_full_min().strip()])
return tbl.get_string()
def make_full_guerrilla_output(self, table_name, event_list):
events_by_name = defaultdict(set)
for event in event_list:
events_by_name[event.clean_dungeon_name].add(event)
rows = []
for name, events in events_by_name.items():
events = sorted(events, key=lambda e: e.open_datetime)
events_by_group = {group: [] for group in GROUPS}
for event in events:
if event.group is not None:
events_by_group[event.group].append(event)
else:
for group in GROUPS:
events_by_group[group].append(event)
while True:
row = []
for group in GROUPS:
if len(events_by_group[group]) == 0:
row.append('')
else:
# Get the timestamp of the earliest event in this group in PST
start = events_by_group[group].pop(0).open_datetime.astimezone(pytz.timezone('US/Pacific'))
row.append(start.strftime("%H:%M"))
if not any(row):
break
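                # When all three groups start at the same time, show one time plus '='
                # placeholders (explained in the table header) to keep the row compact.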
if row[0] == row[1] == row[2]:
rows.append([name, row[0], '=', '='])
else:
rows.append([name] + row)
header = "Times are shown in Pacific Time\n= means same for all groups\n"
table = prettytable.PrettyTable([table_name, 'Red', 'Blue', 'Green'])
table.align[table_name] = "l"
table.hrules = prettytable.HEADER
table.vrules = prettytable.ALL
for r in rows:
table.add_row(r)
return header + table.get_string() + "\n"
@commands.command(aliases=['events'])
async def eventsna(self, ctx, group: StarterGroup = None):
"""Display upcoming daily events for NA."""
await self.do_partial(ctx, Server.NA, group)
@commands.command()
async def eventsjp(self, ctx, group: StarterGroup = None):
"""Display upcoming daily events for JP."""
await self.do_partial(ctx, Server.JP, group)
@commands.command()
async def eventskr(self, ctx, group: StarterGroup = None):
"""Display upcoming daily events for KR."""
await self.do_partial(ctx, Server.KR, group)
async def do_partial(self, ctx, server: Server, group: StarterGroup = None):
server = server.value
if group is not None:
group = GROUPS[group.value]
events = EventList(self.events)
events = events.with_server(server)
events = events.with_dungeon_type(DungeonType.SoloSpecial, DungeonType.Special)
events = events.with_length(EventLength.limited)
active_events = sorted(events.active_only(), key=lambda e: (e.open_datetime, e.dungeon_name), reverse=True)
pending_events = sorted(events.pending_only(), key=lambda e: (e.open_datetime, e.dungeon_name), reverse=True)
if group is not None:
active_events = [e for e in active_events if e.group == group.lower()]
pending_events = [e for e in pending_events if e.group == group.lower()]
group_to_active_event = {e.group: e for e in active_events}
group_to_pending_event = {e.group: e for e in pending_events}
active_events.sort(key=lambda e: (GROUPS.index(e.group or 'red'), e.open_datetime))
pending_events.sort(key=lambda e: (GROUPS.index(e.group or 'red'), e.open_datetime))
if len(active_events) == 0 and len(pending_events) == 0:
await ctx.send("No events available for " + server)
return
output = "**Events for {}**".format(server)
if len(active_events) > 0:
output += "\n\n" + "` Remaining Dungeon - Ending Time`"
for e in active_events:
output += "\n" + e.to_partial_event(self)
if len(pending_events) > 0:
output += "\n\n" + "` Dungeon - ETA`"
for e in pending_events:
output += "\n" + e.to_partial_event(self)
for page in pagify(output):
await ctx.send(page)
def get_most_recent_day_change(self):
now = datetime.datetime.utcnow().time()
if now < datetime.time(8):
return "JP"
elif now < datetime.time(15):
return "NA"
elif now < datetime.time(16):
return "KR"
else:
return "JP"
def coalesce_event_data(self, events: Iterable[Event]) -> Set[Event]:
all_events = set()
grouped = defaultdict(lambda: {})
for event in events:
if event.group is None:
all_events.add(event)
continue
key = (event.open_datetime, event.close_datetime, event.server, event.dungeon.dungeon_id)
grouped[key][event.group] = event
for _, grouped_events in grouped.items():
if len(grouped_events) != 3:
all_events.update(grouped_events.values())
continue
grouped_events['red'].group = None
all_events.add(grouped_events['red'])
return all_events
|
the-stack_0_2636 | from setuptools import setup, find_packages
import io
import os
here = os.path.abspath(os.path.dirname(__file__))
# Avoids IDE errors, but actual version is read from version.py
__version__ = None
exec(open('rasa_core/version.py').read())
# Get the long description from the README file
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
tests_requires = [
"pytest",
"pytest-pep8",
"pytest-services",
"pytest-cov",
"pytest-xdist",
"pytest-twisted<1.6",
"treq",
"freezegun",
]
install_requires = [
'jsonpickle',
'six',
'redis',
'fakeredis',
'nbsphinx',
'pandoc',
'future',
'numpy>=1.13',
'typing>=3.6',
'requests',
'graphviz',
'Keras',
'tensorflow>=1.7',
'h5py',
'apscheduler',
'tqdm',
'ConfigArgParse',
'networkx',
'fbmessenger>=5.0.0',
'pykwalify<=1.6.0',
'coloredlogs',
'ruamel.yaml',
'flask',
'scikit-learn',
'rasa_nlu>=0.12.0,<0.13.0',
'slackclient',
'python-telegram-bot',
'twilio',
'mattermostwrapper',
'colorhash',
]
extras_requires = {
'test': tests_requires
}
setup(
name='rasa-core',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
# supported python versions
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries",
],
packages=find_packages(exclude=["tests", "tools"]),
version=__version__,
install_requires=install_requires,
tests_require=tests_requires,
extras_require=extras_requires,
include_package_data=True,
description="Machine learning based dialogue engine "
"for conversational software.",
long_description=long_description,
long_description_content_type="text/markdown",
author='Rasa Technologies GmbH',
author_email='[email protected]',
maintainer="Tom Bocklisch",
maintainer_email="[email protected]",
license='Apache 2.0',
keywords="nlp machine-learning machine-learning-library bot bots "
"botkit rasa conversational-agents conversational-ai chatbot"
"chatbot-framework bot-framework",
url="https://rasa.ai",
download_url="https://github.com/RasaHQ/rasa_core/archive/{}.tar.gz".format(__version__),
project_urls={
'Bug Reports': 'https://github.com/rasahq/rasa_core/issues',
'Source': 'https://github.com/rasahq/rasa_core',
},
)
print("\nWelcome to Rasa Core!")
print("If any questions please visit documentation page https://core.rasa.com")
print("or join community chat on https://gitter.im/RasaHQ/rasa_core")
|
the-stack_0_2637 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a parser for the Android SMS database.
Android SMS messages are stored in SQLite database files named mmssms.db.
"""
from plaso.events import time_events
from plaso.lib import eventdata
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class AndroidSmsEvent(time_events.JavaTimeEvent):
"""Convenience class for an Android SMS event."""
DATA_TYPE = 'android:messaging:sms'
def __init__(self, java_time, identifier, address, sms_read, sms_type, body):
"""Initializes the event object.
Args:
java_time: The Java time value.
identifier: The row identifier.
      address: The phone number associated with the sender/receiver.
      sms_read: Read or Unread.
      sms_type: Sent or Received.
body: Content of the SMS text message.
"""
super(AndroidSmsEvent, self).__init__(
java_time, eventdata.EventTimestamp.CREATION_TIME)
self.offset = identifier
self.address = address
self.sms_read = sms_read
self.sms_type = sms_type
self.body = body
class AndroidSmsPlugin(interface.SQLitePlugin):
"""Parse Android SMS database."""
NAME = 'android_sms'
DESCRIPTION = u'Parser for Android text messages SQLite database files.'
# Define the needed queries.
QUERIES = [
('SELECT _id AS id, address, date, read, type, body FROM sms',
'ParseSmsRow')]
# The required tables.
REQUIRED_TABLES = frozenset(['sms'])
SMS_TYPE = {
1: u'RECEIVED',
2: u'SENT'}
SMS_READ = {
0: u'UNREAD',
1: u'READ'}
def ParseSmsRow(self, parser_context, row, query=None, **unused_kwargs):
"""Parses an SMS row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Extract and lookup the SMS type and read status.
sms_type = self.SMS_TYPE.get(row['type'], u'UNKNOWN')
sms_read = self.SMS_READ.get(row['read'], u'UNKNOWN')
event_object = AndroidSmsEvent(
row['date'], row['id'], row['address'], sms_read, sms_type, row['body'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
sqlite.SQLiteParser.RegisterPlugin(AndroidSmsPlugin)
|
the-stack_0_2639 | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on asciidoc.
You will need, of course, to install asciidoc
"""
import codecs
import os
import subprocess
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, write_metadata
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # NOQA
class CompileAsciiDoc(PageCompiler):
"""Compile asciidoc into HTML."""
name = "asciidoc"
demote_headers = True
def compile_html(self, source, dest, is_two_file=True):
makedirs(os.path.dirname(dest))
binary = self.site.config.get('ASCIIDOC_BINARY', 'asciidoc')
try:
subprocess.check_call((binary, '-b', 'html5', '-s', '-o', dest, source))
except OSError as e:
            if e.strerror == 'No such file or directory':
req_missing(['asciidoc'], 'build this site (compile with asciidoc)', python=False)
def create_post(self, path, **kw):
content = kw.pop('content', 'Write your post here.')
one_file = kw.pop('onefile', False) # NOQA
is_page = kw.pop('is_page', False) # NOQA
metadata = OrderedDict()
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with codecs.open(path, "wb+", "utf8") as fd:
if one_file:
fd.write("////\n")
fd.write(write_metadata(metadata))
fd.write("////\n")
fd.write(content)
|
the-stack_0_2640 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nefi2.model.algorithms._alg import Algorithm, FloatSlider, CheckBox
import cv2
__authors__ = {"Sebastian Schattner": "[email protected]"}
class AlgBody(Algorithm):
"""Color enhancement algorithm implementation"""
def __init__(self):
Algorithm.__init__(self)
self.name = "Color Enhance"
self.parent = "Preprocessing"
self.left_pct = FloatSlider("left percentage", 0.0, 10.0, 0.1, 2.5)
self.right_pct = FloatSlider("right percentage", 0.0, 10.0, 0.1, 2.5)
self.channel1 = CheckBox("channel1", True)
self.channel2 = CheckBox("channel2", True)
self.channel3 = CheckBox("channel3", True)
self.float_sliders.append(self.left_pct)
self.float_sliders.append(self.right_pct)
# self.checkboxes.append(self.channel1)
# self.checkboxes.append(self.channel2)
# self.checkboxes.append(self.channel3)
def process(self, args):
channels = cv2.split(args[0])
if self.channel1.value:
channels[0] = self.compute_channels(channels[0])
if self.channel2.value:
channels[1] = self.compute_channels(channels[1])
if self.channel3.value:
channels[2] = self.compute_channels(channels[2])
self.result['img'] = cv2.merge(channels)
def compute_channels(self, image_channel):
vmin = 0
vmax = 255
hist = cv2.calcHist([image_channel], [0], None, [256], [0, 256])
cdf = hist.cumsum()
for i, e in list(enumerate(cdf)):
if e > image_channel.size * (self.left_pct.value / 100):
if i != 0:
vmin = i-1
break
for i, e in list(enumerate(cdf)):
if e > image_channel.size * (1 - (self.right_pct.value / 100)):
vmax = i
break
if vmax != vmin:
for i in range(image_channel.shape[0]):
for j in range(image_channel.shape[1]):
pix = image_channel.item(i, j)
if pix < vmin:
image_channel.itemset((i, j), vmin)
elif pix > vmax:
image_channel.itemset((i, j), vmax)
vmin_ij = image_channel.item(i, j) - vmin
image_channel.itemset((i, j), vmin_ij * 255 / (vmax-vmin))
return image_channel
if __name__ == '__main__':
pass
|
the-stack_0_2642 | import sys
def _up_to(args):
try:
n_str = args[1]
return int(n_str) + 1
    except (IndexError, ValueError):
        # Fall back to the default when no valid count argument is given.
        return 25
def main(up_to):
for n in mod_3(range(1, up_to)):
print(n)
def mod_3(numbers):
for number in numbers:
if number % 3 == 0:
yield "Mod3"
else:
yield f"{number}"
if __name__ == "__main__":
up_to = _up_to(sys.argv)
main(up_to)
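# Example (a sketch): running `python <this_script>.py 6` prints
# 1, 2, Mod3, 4, 5, Mod3 on separate lines.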
|
the-stack_0_2643 | # Copyright 2013-2014 eNovance <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
import fixtures
import glanceclient
import mock
import novaclient
from oslotest import base
from ceilometer import nova_client
from ceilometer import service
class TestNovaClient(base.BaseTestCase):
def setUp(self):
super(TestNovaClient, self).setUp()
self.CONF = service.prepare_service([], [])
self._flavors_count = 0
self._images_count = 0
self.nv = nova_client.Client(self.CONF)
self.useFixture(fixtures.MockPatchObject(
self.nv.nova_client.flavors, 'get',
side_effect=self.fake_flavors_get))
self.useFixture(fixtures.MockPatchObject(
self.nv.glance_client.images, 'get',
side_effect=self.fake_images_get))
def fake_flavors_get(self, *args, **kwargs):
self._flavors_count += 1
a = mock.MagicMock()
a.id = args[0]
if a.id == 1:
a.name = 'm1.tiny'
elif a.id == 2:
a.name = 'm1.large'
else:
raise novaclient.exceptions.NotFound('foobar')
return a
def fake_images_get(self, *args, **kwargs):
self._images_count += 1
a = mock.MagicMock()
a.id = args[0]
image_details = {
1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)),
2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)),
3: ('rhel-6-x64', None),
4: ('rhel-6-x64', dict()),
5: ('rhel-6-x64', dict(kernel_id=11)),
6: ('rhel-6-x64', dict(ramdisk_id=21))
}
if a.id in image_details:
a.name = image_details[a.id][0]
a.metadata = image_details[a.id][1]
else:
raise glanceclient.exc.HTTPNotFound('foobar')
return a
@staticmethod
def fake_servers_list(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 1}
a.image = {'id': 1}
b = mock.MagicMock()
b.id = 43
b.flavor = {'id': 2}
b.image = {'id': 2}
return [a, b]
def test_instance_get_all_by_host(self):
with mock.patch.object(self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list):
instances = self.nv.instance_get_all_by_host('foobar')
self.assertEqual(2, len(instances))
self.assertEqual('m1.tiny', instances[0].flavor['name'])
self.assertEqual('ubuntu-12.04-x86', instances[0].image['name'])
self.assertEqual(11, instances[0].kernel_id)
self.assertEqual(21, instances[0].ramdisk_id)
def test_instance_get_all(self):
with mock.patch.object(self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list):
instances = self.nv.instance_get_all()
self.assertEqual(2, len(instances))
self.assertEqual(42, instances[0].id)
self.assertEqual(1, instances[0].flavor['id'])
self.assertEqual(1, instances[0].image['id'])
@staticmethod
def fake_servers_list_unknown_flavor(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 666}
a.image = {'id': 1}
return [a]
def test_instance_get_all_by_host_unknown_flavor(self):
with mock.patch.object(
self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list_unknown_flavor):
instances = self.nv.instance_get_all_by_host('foobar')
self.assertEqual(1, len(instances))
self.assertEqual('unknown-id-666', instances[0].flavor['name'])
@staticmethod
def fake_servers_list_unknown_image(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 1}
a.image = {'id': 666}
return [a]
@staticmethod
def fake_servers_list_image_missing_metadata(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 1}
a.image = {'id': args[0]}
return [a]
@staticmethod
def fake_instance_image_missing(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 666}
a.image = None
return [a]
def test_instance_get_all_by_host_unknown_image(self):
with mock.patch.object(
self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list_unknown_image):
instances = self.nv.instance_get_all_by_host('foobar')
self.assertEqual(1, len(instances))
self.assertEqual('unknown-id-666', instances[0].image['name'])
def test_with_flavor_and_image(self):
results = self.nv._with_flavor_and_image(self.fake_servers_list())
instance = results[0]
self.assertEqual(2, len(results))
self.assertEqual('ubuntu-12.04-x86', instance.image['name'])
self.assertEqual('m1.tiny', instance.flavor['name'])
self.assertEqual(11, instance.kernel_id)
self.assertEqual(21, instance.ramdisk_id)
def test_with_flavor_and_image_unknown_image(self):
instances = self.fake_servers_list_unknown_image()
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertEqual('unknown-id-666', instance.image['name'])
self.assertNotEqual(instance.flavor['name'], 'unknown-id-666')
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_unknown_flavor(self):
instances = self.fake_servers_list_unknown_flavor()
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertEqual('unknown-id-666', instance.flavor['name'])
self.assertEqual(0, instance.flavor['vcpus'])
self.assertEqual(0, instance.flavor['ram'])
self.assertEqual(0, instance.flavor['disk'])
self.assertNotEqual(instance.image['name'], 'unknown-id-666')
self.assertEqual(11, instance.kernel_id)
self.assertEqual(21, instance.ramdisk_id)
def test_with_flavor_and_image_none_metadata(self):
instances = self.fake_servers_list_image_missing_metadata(3)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_missing_metadata(self):
instances = self.fake_servers_list_image_missing_metadata(4)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_missing_ramdisk(self):
instances = self.fake_servers_list_image_missing_metadata(5)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertEqual(11, instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_missing_kernel(self):
instances = self.fake_servers_list_image_missing_metadata(6)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertEqual(21, instance.ramdisk_id)
def test_with_flavor_and_image_no_cache(self):
results = self.nv._with_flavor_and_image(self.fake_servers_list())
self.assertEqual(2, len(results))
self.assertEqual(2, self._flavors_count)
self.assertEqual(2, self._images_count)
def test_with_flavor_and_image_cache(self):
results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2)
self.assertEqual(4, len(results))
self.assertEqual(2, self._flavors_count)
self.assertEqual(2, self._images_count)
def test_with_flavor_and_image_unknown_image_cache(self):
instances = self.fake_servers_list_unknown_image()
results = self.nv._with_flavor_and_image(instances * 2)
self.assertEqual(2, len(results))
self.assertEqual(1, self._flavors_count)
self.assertEqual(1, self._images_count)
for instance in results:
self.assertEqual('unknown-id-666', instance.image['name'])
self.assertNotEqual(instance.flavor['name'], 'unknown-id-666')
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_missing_image_instance(self):
instances = self.fake_instance_image_missing()
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.image)
self.assertIsNone(instance.ramdisk_id)
def test_with_nova_http_log_debug(self):
self.CONF.set_override("nova_http_log_debug", True)
self.nv = nova_client.Client(self.CONF)
self.assertIsNotNone(self.nv.nova_client.client.logger)
def test_with_max_timing_buffer(self):
self.CONF.set_override("max_timing_buffer", 300)
self.nv = nova_client.Client(self.CONF)
# TO DO (dbadea): remove condition after updating nova_client
if hasattr(self.nv.nova_client, 'get_timings_max_len'):
self.assertEqual(300, self.nv.nova_client.get_timings_max_len())
|
the-stack_0_2644 | import os
import datetime
import sys
import logging
from flask import Flask, render_template
from logging.config import dictConfig
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from prometheus_client import make_wsgi_app, Summary, Counter
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'formatter': 'default'
}},
'root': {
'level': 'INFO',
'handlers': ['wsgi']
}
})
c = Counter('my_failures', 'Description of counter')
app = Flask(__name__)
@app.route('/')
def index():
app.logger.info('Request at %s ', datetime.datetime.now())
return render_template('index.html')
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
'/metrics': make_wsgi_app()
})
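# With the dispatcher above, Prometheus metrics are served on /metrics; e.g. a
# hypothetical local run: `curl http://localhost:8080/metrics` returns the text
# exposition format, including the `my_failures` counter defined above.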
if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
|
the-stack_0_2646 | import logging
from multiprocessing import cpu_count, Pool
from bg_backend.bitglitter.config.palettefunctions import _return_palette
from bg_backend.bitglitter.utilities.filemanipulation import create_default_output_folder
from bg_backend.bitglitter.utilities.gui.messages import write_frame_count_http, write_save_path_http
from bg_backend.bitglitter.write.render.headerencode import metadata_header_encode, custom_palette_header_encode, \
stream_header_encode
from bg_backend.bitglitter.write.render.framestategenerator import frame_state_generator
from bg_backend.bitglitter.write.render.renderutilities import draw_frame, total_frames_estimator
class RenderHandler:
def __init__(self,
# Basic Setup
stream_name, stream_description, working_dir, default_output_path, crypto_key, scrypt_n, scrypt_r,
scrypt_p,
# Stream Rendering
block_height, block_width, pixel_width, stream_palette_id, max_cpu_cores,
# Header
stream_sha256, size_in_bytes, compression_enabled, encryption_enabled, file_mask_enabled,
datetime_started, bg_version, manifest, protocol_version,
# Render Output
output_mode, output_path, stream_name_file_output,
# Statistics
save_statistics
):
self.blocks_wrote = 0
self.total_frames = 0
write_save_path_http(str(output_path))
# Pre render
logging.info('Beginning pre-render processes...')
create_default_output_folder(default_output_path)
initializer_palette = _return_palette(palette_id='1')
initializer_palette_b = _return_palette('11')
stream_palette = _return_palette(palette_id=stream_palette_id)
initializer_palette_dict = initializer_palette.return_encoder()
initializer_palette_dict_b = initializer_palette_b.return_encoder()
stream_palette_dict = stream_palette.return_encoder()
metadata_header_bytes, metadata_header_hash_bytes = metadata_header_encode(file_mask_enabled, crypto_key,
scrypt_n, scrypt_r, scrypt_p,
bg_version, stream_name,
datetime_started,
stream_description, manifest)
palette_header_bytes = b''
palette_header_hash_bytes = b''
if stream_palette.is_custom:
palette_header_bytes, palette_header_hash_bytes = custom_palette_header_encode(stream_palette)
self.total_frames = total_frames_estimator(block_height, block_width, len(metadata_header_bytes),
len(palette_header_bytes), size_in_bytes, stream_palette,
output_mode)
write_frame_count_http(self.total_frames)
stream_header = stream_header_encode(size_in_bytes, self.total_frames, compression_enabled,
encryption_enabled, file_mask_enabled, len(metadata_header_bytes),
metadata_header_hash_bytes, len(palette_header_bytes),
palette_header_hash_bytes)
logging.info('Pre-render complete.')
# Render
if max_cpu_cores == 0 or max_cpu_cores >= cpu_count():
pool_size = cpu_count()
else:
pool_size = max_cpu_cores
self.total_operations = self.total_frames * (1 + int(output_mode != 'image'))
with Pool(processes=pool_size) as worker_pool:
logging.info(f'Beginning rendering on {pool_size} CPU core(s)...')
count = 1
for frame_encode in worker_pool.imap(draw_frame, frame_state_generator(block_height, block_width,
pixel_width, protocol_version, initializer_palette, stream_palette,
output_mode, output_path, stream_name_file_output, working_dir,
self.total_frames, stream_header, metadata_header_bytes,
palette_header_bytes, stream_sha256, initializer_palette_dict,
initializer_palette_dict_b, stream_palette_dict, default_output_path,
stream_name, save_statistics, self.total_operations), chunksize=1):
pass
logging.info('Rendering frames complete.')
|
the-stack_0_2647 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian
from a2c_ppo_acktr.utils import init, init_null
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Policy(nn.Module):
def __init__(self, obs_shape, action_space, base, base_kwargs=None):
super(Policy, self).__init__()
self.base = base
if action_space.__class__.__name__ == "Discrete":
num_outputs = action_space.n
self.dist = Categorical(self.base.output_size, num_outputs)
elif action_space.__class__.__name__ == "Box":
num_outputs = action_space.shape[0]
self.dist = DiagGaussian(self.base.output_size, num_outputs)
elif action_space.__class__.__name__ == "MultiBinary":
num_outputs = action_space.shape[0]
self.dist = Bernoulli(self.base.output_size, num_outputs)
else:
raise NotImplementedError
@property
def is_recurrent(self):
return self.base.is_recurrent
@property
def recurrent_hidden_state_size(self):
"""Size of rnn_hx."""
return self.base.recurrent_hidden_state_size
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def act(self, inputs, rnn_hxs, masks, deterministic=False, eps=0., rand_action_mask=None,
rand_actions=None):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
if rand_action_mask is not None and rand_action_mask.sum() > 0:
if rand_actions is None:
action[rand_action_mask] = torch.randint(0, dist.probs.size(1),
(rand_action_mask.sum(),),
device=action.device)
else:
action[rand_action_mask] = rand_actions
elif eps > 0:
rand_act = torch.rand(dist.probs.size(0), 1) < eps
action[rand_act] = torch.randint(0, dist.probs.size(1), (rand_act.sum(),),
device=action.device)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action, action_log_probs, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
value, _, _ = self.base(inputs, rnn_hxs, masks)
return value
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
def __init__(self, recurrent, recurrent_input_size, hidden_size):
super(NNBase, self).__init__()
self._hidden_size = hidden_size
self._recurrent = recurrent
if recurrent:
self.gru = nn.GRU(recurrent_input_size, hidden_size)
for name, param in self.gru.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
nn.init.orthogonal_(param)
@property
def is_recurrent(self):
return self._recurrent
@property
def recurrent_hidden_state_size(self):
if self._recurrent:
return self._hidden_size
return 1
@property
def output_size(self):
return self._hidden_size
def _forward_gru(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
x = x.squeeze(0)
hxs = hxs.squeeze(0)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0) \
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
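            # Worked example (a sketch): with T=6 and a zero mask only at t=2,
            # has_zeros becomes [0, 2, 6], so the GRU runs over x[0:2] and then
            # x[2:6], with the hidden state reset at t=2 via masks[2].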
hxs = hxs.unsqueeze(0)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
rnn_scores, hxs = self.gru(
x[start_idx:end_idx],
hxs * masks[start_idx].view(1, -1, 1))
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.view(T * N, -1)
hxs = hxs.squeeze(0)
return x, hxs
class CNNBase(NNBase):
def __init__(self, cfg, obs_space, action_space):
num_inputs = obs_space[0]
recurrent = cfg.recurrent
hidden_size = cfg.hidden_size
use_init = cfg.use_init
super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)
if use_init:
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
else:
init_ = lambda m: init_null(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
self.main = nn.Sequential(
init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())
if use_init:
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
else:
init_ = lambda m: init_null(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = self.main(inputs / 255.0)
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
return self.critic_linear(x), x, rnn_hxs
class MLPBase(NNBase):
def __init__(self, cfg, obs_space, action_space):
num_inputs = obs_space[0]
recurrent = cfg.recurrent
hidden_size = cfg.hidden_size
super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)
if recurrent:
num_inputs = hidden_size
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.actor = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
self.critic = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
hidden_critic = self.critic(x)
hidden_actor = self.actor(x)
return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
|
the-stack_0_2648 | from .activity import Activity, CashPayment, Trade, TradeFlags
from .balance import AccountBalance
from .cash import Currency, Cash
from .instrument import (
Instrument,
Stock,
Bond,
Option,
OptionType,
FutureOption,
Future,
Forex,
)
from .quote import Quote
from .position import Position
from . import converter
__all__ = [
"AccountBalance",
"Activity",
"Bond",
"Cash",
"CashPayment",
"converter",
"Currency",
"Forex",
"Future",
"FutureOption",
"Instrument",
"Option",
"OptionType",
"Position",
"Quote",
"Stock",
"Trade",
"TradeFlags",
]
|
the-stack_0_2649 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests pertaining to line/branch test coverage for the Firecracker code base.
# TODO
- The coverage target should be put in `s3://spec.firecracker` and updated automatically.
"""
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host # pylint: disable=import-error
import host_tools.proc as proc
# AMD has a slightly different coverage due to
# the appearance of the brand string. On Intel,
# this contains the frequency while on AMD it does not.
# Check out the cpuid crate. In the future other
# differences may appear.
COVERAGE_DICT = {"Intel": 85.12, "AMD": 84.35, "ARM": 83.18}
PROC_MODEL = proc.proc_type()
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
"""kcov will aggregate coverage data in this file."""
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
"""Regex for extracting number of total covered lines found by kcov."""
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
"""Regex for extracting number of total executable lines found by kcov."""
@pytest.mark.timeout(400)
def test_coverage(test_session_root_path, test_session_tmp_path):
"""Test line coverage with kcov.
The result is extracted from the $KCOV_COVERAGE_FILE file created by kcov
after a coverage run.
"""
proc_model = [item for item in COVERAGE_DICT if item in PROC_MODEL]
assert len(proc_model) == 1, "Could not get processor model!"
coverage_target_pct = COVERAGE_DICT[proc_model[0]]
exclude_pattern = (
'${CARGO_HOME:-$HOME/.cargo/},'
'build/,'
'tests/,'
'usr/lib/gcc,'
'lib/x86_64-linux-gnu/,'
'test_utils.rs,'
# The following files/directories are auto-generated
'bootparam.rs,'
'elf.rs,'
'mpspec.rs,'
'msr_index.rs,'
'_gen'
)
exclude_region = '\'mod tests {\''
target = "{}-unknown-linux-musl".format(platform.machine())
cmd = (
'RUSTFLAGS="{}" CARGO_TARGET_DIR={} cargo kcov --all '
'--target {} --output {} -- '
'--exclude-pattern={} '
'--exclude-region={} --verify'
).format(
host.get_rustflags(),
os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
target,
test_session_tmp_path,
exclude_pattern,
exclude_region
)
# By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
    # to kcov. To pass other arguments, we need to include the defaults.
utils.run_cmd(cmd)
coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
with open(coverage_file) as cov_output:
contents = cov_output.read()
covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
coverage = covered_lines / total_lines * 100
print("Number of executable lines: {}".format(total_lines))
print("Number of covered lines: {}".format(covered_lines))
print("Thus, coverage is: {:.2f}%".format(coverage))
coverage_low_msg = (
'Current code coverage ({:.2f}%) is below the target ({}%).'
.format(coverage, coverage_target_pct)
)
min_coverage = coverage_target_pct - COVERAGE_MAX_DELTA
assert coverage >= min_coverage, coverage_low_msg
# Get the name of the variable that needs updating.
namespace = globals()
cov_target_name = [name for name in namespace if namespace[name]
is COVERAGE_DICT][0]
coverage_high_msg = (
'Current code coverage ({:.2f}%) is above the target ({}%).\n'
'Please update the value of {}.'
.format(coverage, coverage_target_pct, cov_target_name)
)
assert coverage - coverage_target_pct <= COVERAGE_MAX_DELTA,\
coverage_high_msg
|
the-stack_0_2650 | from thespian.system.utilis import withPossibleInitArgs
class NoArgs(object):
def __init__(self):
self.ready = True
class ReqArgs(object):
def __init__(self, requirements):
self.ready = True
self.reqs = requirements
class PossibleReqArgs(object):
def __init__(self, requirements=None):
self.ready = True
self.reqs = requirements
class CapArgs(object):
def __init__(self, capabilities):
self.ready = True
self.caps = capabilities
class PossibleCapArgs(object):
def __init__(self, capabilities=None):
self.ready = True
self.caps = capabilities
class CapReqArgs(object):
def __init__(self, capabilities, requirements):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class CapPossibleReqArgs(object):
def __init__(self, capabilities, requirements=None):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class PossibleCapPossibleReqArgs(object):
def __init__(self, requirements=None, capabilities=None):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class CapFooArgs(object):
def __init__(self, foo=None, capabilities=None):
self.ready = True
self.caps = capabilities
class ReqCapFooArgs(object):
def __init__(self, requirements=None, foo=None, capabilities=None):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class ReqFooArgs(object):
def __init__(self, requirements=None, foo=None):
self.ready = True
self.reqs = requirements
wpa = withPossibleInitArgs(capabilities={'caps':'here', 'capa':'bilities'},
requirements={'reqs':'requirements', 'r':True})
def test_noargs():
obj = wpa.create(NoArgs)
assert obj
assert not hasattr(obj, 'caps')
assert not hasattr(obj, 'reqs')
def test_reqargs():
obj = wpa.create(ReqArgs)
assert obj
assert not hasattr(obj, 'caps')
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_possiblereqargs():
obj = wpa.create(PossibleReqArgs)
assert obj
assert not hasattr(obj, 'caps')
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_reqfooargs():
obj = wpa.create(ReqFooArgs)
assert obj
assert not hasattr(obj, 'caps')
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_capargs():
obj = wpa.create(CapArgs)
assert obj
assert not hasattr(obj, 'reqs')
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
def test_possiblecapargs():
obj = wpa.create(PossibleCapArgs)
assert obj
assert not hasattr(obj, 'reqs')
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
def test_capfooargs():
obj = wpa.create(CapFooArgs)
assert obj
assert not hasattr(obj, 'reqs')
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
def test_capreqargs():
obj = wpa.create(CapReqArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_cappossiblereqargs():
obj = wpa.create(CapPossibleReqArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_possiblecappossiblereqargs():
    obj = wpa.create(PossibleCapPossibleReqArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_reqcapfooargs():
obj = wpa.create(ReqCapFooArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
|
the-stack_0_2652 | #! /usr/bin/env python
# coding=utf-8
import os
import pandas as pd
import urllib
import xml.etree.ElementTree as ET
import io
import itertools as IT
# Copyright © 2016 Joachim Muth <[email protected]>
#
# Distributed under terms of the MIT license.
class Scraper:
"""
Scraper for parlament.ch
scraper.get(table_name): get the table, write it in csv file, return a pandas.data_frame
"""
def __init__(self, time_out=10, language='FR'):
self.tables = {'party': 'Party',
'person': 'Person',
'member_council': 'MemberCouncil',
'council': 'Council'}
self.url_base = "https://ws.parlament.ch/odata.svc/"
self.url_count = "$count"
self.url_lang_filter = "$filter=Language%20eq%20'" + language + "'"
self.folder = "data"
self.time_out = time_out
self.limit_api = 1000
def get(self, table_name):
"""
Load the table_name from parlament.ch
Write a csv file in self.folder / table_name
:return (pandas.data_frame): table
"""
table_size = self.count(table_name)
if table_name == 'BusinessRole':
df = self._inner_get_business_role(table_name)
elif table_name == 'BusinessStatus':
df = self._inner_get_big_table_skip(table_name)
elif table_size > 10000:
df = self._inner_get_big_table_ids(table_name)
elif table_size > 900:
df = self._inner_get_big_table_skip(table_name)
else:
df = self._inner_get_small_table(table_name)
self._inner_write_file(df, table_name)
return df
def count(self, table_name):
"""
Count request for parlament.ch server
:param table_name:
:return: number of entries in table_name
"""
url = self.url_base + table_name + "/$count?$filter=Language%20eq%20'FR'"
with urllib.request.urlopen(url) as response:
n = response.read()
# get the number from the bytes
n = int(str(n).split("'")[1])
return n
def _inner_get_and_parse(self, url):
"""
Send GET request to parlament.ch and parse the return XML file to a pandas.data_frame
:param url: (str) GET url request
:return: (pandas.data_frame) parsed XML
"""
print("GET:", url)
with urllib.request.urlopen(url) as url:
s = url.read()
# root = ET.fromstring(s)
root = self._inner_error_handling_xmlfromstring(s)
dict_ = {}
base = "{http://www.w3.org/2005/Atom}"
# base = self.base_url
for child in root.iter(base + 'entry'):
for children in child.iter(base + 'content'):
for properties in children:
for subject in properties:
# print(subject.text)
s = subject.tag.split('}')
if s[1] in dict_:
dict_[s[1]].append(subject.text)
else:
dict_[s[1]] = [subject.text]
data = pd.DataFrame(dict_)
return data
def _inner_error_handling_xmlfromstring(self, content):
""" Print XML if error while parsing (mainly due to server API timeout)"""
try:
tree = ET.fromstring(content)
except ET.ParseError as err:
lineno, column = err.position
line = next(IT.islice(io.BytesIO(content), lineno))
caret = '{:=>{}}'.format('^', column)
err.msg = '{}\n{}\n{}'.format(err, line, caret)
raise
return tree
def _inner_write_file(self, table, table_name):
""" Write table in csv file inside self.folder / table_name"""
self._inner_check_folder()
table.to_csv(self.folder + '/' + table_name + '.csv')
def _inner_get_big_table_skip(self, table_name):
"""
        Loop URL requests over the table in steps of 1000 and load data until the end is reached.
        Times out after self.time_out iterations.
        :param table_name: name of the requested table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = self.url_lang_filter
# loop parameters
limit_api = self.limit_api
data_frames = []
i = 0
top = 1000
skip = 0
while True:
url = base + table_name + '?' + "$top=" + str(top) + \
'&' + language + \
'&' + "$skip=" + str(skip)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
if df.shape == (0, 0):
break
# stop after 10 iteration to avoid swiss police to knock at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", top)
break
data_frames.append(df)
# top += limit_api
skip += limit_api
i += 1
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
def _inner_get_big_table_ids(self, table_name):
"""
"skip" odata attribute leads to time out the parlament.ch server. Here we use id's to get directly intervals of
items.
Less safe than "skip version, because could stop if a big ID interval is not used (normally not the case)
Loop URL request on table by step of 1000 id's and load data until reaches the end
Time Out after 10 iterations
:param table_name: Name of the wished table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = self.url_lang_filter
id_from = "ID%20ge%20"
id_to = "%20and%20ID%20lt%20"
# loop parameters
limit_api = self.limit_api
data_frames = []
id_ = self._inner_get_smaller_id(table_name)
i = 0
n_downloaded = 0
expected_size = self.count(table_name)
while True:
url = base + table_name + '?' + language + '%20and%20' + id_from + str(id_) + id_to + str(id_ + limit_api)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
# if df.shape == (0, 0):
# break
# add number of elements downloaded
n_downloaded += df.shape[0]
# stop when downloaded the whole table
if n_downloaded >= expected_size:
break
# stop after 10 iteration to avoid swiss police to knock at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", id_)
break
data_frames.append(df)
id_ += limit_api
i += 1
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
def _inner_get_small_table(self, table_name):
"""
Simple get request with language filer
:param table_name:
:return:
"""
url = self.url_base + table_name + '?' + self.url_lang_filter
df = self._inner_get_and_parse(url)
self._inner_check_size(df, table_name)
return df
def _inner_check_size(self, df, table_name):
expected_size = self.count(table_name)
if df.shape[0] != expected_size:
print("[ERROR] in scraping table", table_name, "expected size of", expected_size, "but is", df.shape[0])
else:
print("[OK] table " + table_name + " correctly scraped, df.shape = ", df.shape[0], "as expected")
def _inner_check_folder(self):
""" check if folder exists to avoid error and create it if not """
if not os.path.exists(self.folder):
os.makedirs(self.folder)
def _inner_get_smaller_id(self, table_name):
url = self.url_base + table_name + '?' + self.url_lang_filter
df = self._inner_get_and_parse(url)
return int(df.ID[0])
def _inner_get_business_role(self, table_name):
"""
Special case of Table BusinessRole which has non-trivial ID.
        Filter results based on BusinessNumber (which is an arbitrary attribute) and iterate over it.
        At each iteration, run the same skip-based download loop as _inner_get_big_table_skip.
Time Out after self.time_out iterations
:param table_name: Name of the wished table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = "$filter=Language%20eq%20%27FR%27"
id_ = 19000000
step_id = 10000
# id filter (too long)
id_from = "BusinessNumber%20ge%20"
id_to = "%20and%20BusinessNumber%20lt%20"
# loop parameters
data_frames = []
i = 0
top = 1000
skip = 0
limit_api = self.limit_api
while True:
while True:
url = base + table_name + '?' + "$top=" + str(top) + \
'&' + language + \
'%20and%20' + id_from + str(id_) + id_to + str(id_ + step_id) + \
'&' + "$skip=" + str(skip)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
if df.shape == (0, 0):
break
# # stop when we reach the end of the data
# url_count = base + table_name + "/$count?" + "$top=" + str(top) + \
# '&' + language + \
# '&' + id_from + str(id_) + id_to + str(id_ + step_id) + \
# '&' + "$skip=" + str(skip)
# print(self._inner_url_count(url_count))
# if self._inner_url_count(url_count) == 0:
# break
# stop after 10 iteration to avoid swiss police to knock at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", top)
break
data_frames.append(df)
# top += limit_api
skip += limit_api
i += 1
if id_ > 22000000:
break
id_ += step_id
skip = 0
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
|
the-stack_0_2653 | ### Define a class to receive the characteristics of each line detection
import numpy as np
class Line( ):
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = [ ]
#average x values of the fitted line over the last n iterations
self.bestx = None
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
#polynomial coefficients for the most recent fit
self.current_fit = [ np.array( [ False ] ) ]
#self.current_fit = None
#radius of curvature of the line in some units
self.radius_of_curvature = None
#distance in meters of vehicle center from the line
self.line_base_pos = None
#difference in fit coefficients between last and new fits
self.diff = np.array( [ 0, 0, 0 ], dtype='float' )
#number of detected pixels
self.px_count = None
def add_fitted_line( self, fit, indices ):
# add a new fit to the line, up to n
if fit is not None:
if self.best_fit is not None:
# if a best fit, compare to previous
self.diff = abs( fit - self.best_fit )
if ( self.diff[0] > 0.001 or self.diff[1] > 1.0 or self.diff[2] > 100. ) \
and len( self.current_fit ) > 0:
# break if bad fit unless no current fits
self.detected = False
else:
self.detected = True
self.px_count = np.count_nonzero( indices )
# keep most recent fits
if len( self.current_fit ) > 5:
self.current_fit = self.current_fit[len( self.current_fit )-5:]
# clear out initial false entries
if self.current_fit == [ ] or len( self.current_fit[0] ) != 1:
self.current_fit.append( fit )
else:
self.current_fit[0] = fit
self.best_fit = np.average( self.current_fit, axis=0 )
else:
# or remove one from the history, if not found
self.detected = False
if len( self.current_fit ) > 0:
# throw out oldest fit
self.current_fit = self.current_fit[ :len( self.current_fit ) - 1 ]
if len( self.current_fit ) > 0:
# if there are still any fits in the queue, best_fit is their average
self.best_fit = np.average( self.current_fit, axis=0 )
### |
the-stack_0_2654 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import bleu_hook
from tensor2tensor.utils import mlperf_log
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TranslateProblem(text_problems.Text2TextProblem):
"""Base class for translation problems."""
def is_generate_per_split(self):
return True
@property
def approx_vocab_size(self):
return 2**15
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
raise NotImplementedError()
def vocab_data_files(self):
"""Files to be passed to get_or_generate_vocab."""
return self.source_data_files(problem.DatasetSplit.TRAIN)
def generate_samples(self, data_dir, tmp_dir, dataset_split):
datasets = self.source_data_files(dataset_split)
tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev"
data_path = compile_data(tmp_dir, datasets, "%s-compiled-%s" % (self.name,
tag))
return text_problems.text2text_txt_iterator(data_path + ".lang1",
data_path + ".lang2")
def generate_text_for_vocab(self, data_dir, tmp_dir):
return generator_utils.generate_lines_for_vocab(tmp_dir,
self.vocab_data_files())
@property
def decode_hooks(self):
return [compute_bleu_summaries]
def compute_bleu_summaries(hook_args):
"""Compute BLEU core summaries using the decoder output.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
A list of tf.Summary values if hook_args.hparams contains the
reference file and the translated file.
"""
decode_hparams = hook_args.decode_hparams
if (decode_hparams.decode_reference is None or
decode_hparams.decode_to_file is None):
return None
values = []
bleu = 100 * bleu_hook.bleu_wrapper(
decode_hparams.decode_reference, decode_hparams.decode_to_file)
values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu))
tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu))
if hook_args.hparams.mlperf_mode:
current_step = decode_hparams.mlperf_decode_step
mlperf_log.transformer_print(
key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)
mlperf_log.transformer_print(
key=mlperf_log.EVAL_ACCURACY,
value={
"epoch": max(current_step // decode_hparams.iterations_per_loop - 1,
0),
"value": bleu
})
mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
if bleu >= decode_hparams.mlperf_threshold:
decode_hparams.set_hparam("mlperf_success", True)
return values
def _preprocess_sgm(line, is_sgm):
"""Preprocessing to strip tags in SGM files."""
if not is_sgm:
return line
# In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
if line.startswith("<srcset") or line.startswith("</srcset"):
return ""
if line.startswith("<doc") or line.startswith("</doc"):
return ""
if line.startswith("<p>") or line.startswith("</p>"):
return ""
# Strip <seg> tags.
line = line.strip()
if line.startswith("<seg") and line.endswith("</seg>"):
i = line.index(">")
return line[i + 1:-6] # Strip first <seg ...> and last </seg>.
def compile_data(tmp_dir, datasets, filename):
"""Concatenate all `datasets` and save to `filename`."""
filename = os.path.join(tmp_dir, filename)
lang1_fname = filename + ".lang1"
lang2_fname = filename + ".lang2"
if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname,
lang2_fname)
return filename
with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile:
with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile:
for dataset in datasets:
url = dataset[0]
compressed_filename = os.path.basename(url)
compressed_filepath = os.path.join(tmp_dir, compressed_filename)
if url.startswith("http"):
generator_utils.maybe_download(tmp_dir, compressed_filename, url)
if dataset[1][0] == "tsv":
_, src_column, trg_column, glob_pattern = dataset[1]
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
if not filenames:
# Capture *.tgz and *.tar.gz too.
mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
with tarfile.open(compressed_filepath, mode) as corpus_tar:
corpus_tar.extractall(tmp_dir)
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
for tsv_filename in filenames:
if tsv_filename.endswith(".gz"):
new_filename = tsv_filename.strip(".gz")
generator_utils.gunzip_file(tsv_filename, new_filename)
tsv_filename = new_filename
with tf.gfile.Open(tsv_filename) as tsv_file:
for line in tsv_file:
if line and "\t" in line:
parts = line.split("\t")
source, target = parts[src_column], parts[trg_column]
source, target = source.strip(), target.strip()
if source and target:
lang1_resfile.write(source)
lang1_resfile.write("\n")
lang2_resfile.write(target)
lang2_resfile.write("\n")
else:
lang1_filename, lang2_filename = dataset[1]
lang1_filepath = os.path.join(tmp_dir, lang1_filename)
lang2_filepath = os.path.join(tmp_dir, lang2_filename)
is_sgm = (
lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm"))
if not (tf.gfile.Exists(lang1_filepath) and
tf.gfile.Exists(lang2_filepath)):
# For .tar.gz and .tgz files, we read compressed.
mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
with tarfile.open(compressed_filepath, mode) as corpus_tar:
corpus_tar.extractall(tmp_dir)
if lang1_filepath.endswith(".gz"):
new_filepath = lang1_filepath.strip(".gz")
generator_utils.gunzip_file(lang1_filepath, new_filepath)
lang1_filepath = new_filepath
if lang2_filepath.endswith(".gz"):
new_filepath = lang2_filepath.strip(".gz")
generator_utils.gunzip_file(lang2_filepath, new_filepath)
lang2_filepath = new_filepath
for example in text_problems.text2text_txt_iterator(
lang1_filepath, lang2_filepath):
line1res = _preprocess_sgm(example["inputs"], is_sgm)
line2res = _preprocess_sgm(example["targets"], is_sgm)
if line1res and line2res:
lang1_resfile.write(line1res)
lang1_resfile.write("\n")
lang2_resfile.write(line2res)
lang2_resfile.write("\n")
return filename
class TranslateDistillProblem(TranslateProblem):
"""Base class for translation problems."""
def is_generate_per_split(self):
return True
def example_reading_spec(self):
data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)}
if self.has_inputs:
data_fields["inputs"] = tf.VarLenFeature(tf.int64)
# hack: ignoring true targets and putting dist_targets in targets
data_items_to_decoders = {
"inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
"targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
}
return (data_fields, data_items_to_decoders)
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
"""Get vocab for distill problems."""
# We assume that vocab file is present in data_dir directory where the
# data generated will be stored.
vocab_filepath = os.path.join(data_dir, self.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
return encoder
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
vocab = self.get_or_create_vocab(data_dir, tmp_dir)
# For each example, encode the text and append EOS ID.
for sample in generator:
if self.has_inputs:
sample["inputs"] = vocab.encode(sample["inputs"])
sample["inputs"].append(text_encoder.EOS_ID)
sample["targets"] = vocab.encode(sample["targets"])
sample["targets"].append(text_encoder.EOS_ID)
sample["dist_targets"] = vocab.encode(sample["dist_targets"])
sample["dist_targets"].append(text_encoder.EOS_ID)
yield sample
def generate_samples(self, data_dir, tmp_dir, dataset_split):
data_path = self.source_data_files(dataset_split)
assert tf.gfile.Exists(data_path)
return text_problems.text2text_distill_iterator(data_path + "inputs",
data_path + "gold",
data_path + "prediction")
|
the-stack_0_2657 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prepare dataset for keras model benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from official.utils.misc import model_helpers # pylint: disable=g-bad-import-order
# Default values for dataset.
_NUM_CHANNELS = 3
_NUM_CLASSES = 1000
def _get_default_image_size(model):
"""Provide default image size for each model."""
image_size = (224, 224)
if model in ["inceptionv3", "xception", "inceptionresnetv2"]:
image_size = (299, 299)
elif model in ["nasnetlarge"]:
image_size = (331, 331)
return image_size
def generate_synthetic_input_dataset(model, batch_size):
"""Generate synthetic dataset."""
image_size = _get_default_image_size(model)
image_shape = (batch_size,) + image_size + (_NUM_CHANNELS,)
label_shape = (batch_size, _NUM_CLASSES)
dataset = model_helpers.generate_synthetic_data(
input_shape=tf.TensorShape(image_shape),
label_shape=tf.TensorShape(label_shape),
)
return dataset
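# Hedged usage sketch (not part of the original script): shows how the helper
# above would typically be called. The model name and batch size are arbitrary
# assumptions; for a 224x224 model the synthetic tensors come out as images of
# shape (32, 224, 224, 3) and one-hot labels of shape (32, 1000).
def _synthetic_dataset_sketch():
  return generate_synthetic_input_dataset("resnet50", batch_size=32)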
class Cifar10Dataset(object):
"""CIFAR10 dataset, including train and test set.
  Each sample consists of a 32x32 color image, and each label is one of 10 classes.
"""
def __init__(self, batch_size):
"""Initializes train/test datasets.
Args:
batch_size: int, the number of batch size.
"""
self.input_shape = (32, 32, 3)
self.num_classes = 10
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train, y_test = y_train.astype(np.int64), y_test.astype(np.int64)
y_train = tf.keras.utils.to_categorical(y_train, self.num_classes)
y_test = tf.keras.utils.to_categorical(y_test, self.num_classes)
self.train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(2000).batch(batch_size).repeat()
self.test_dataset = tf.data.Dataset.from_tensor_slices(
(x_test, y_test)).shuffle(2000).batch(batch_size).repeat()
|
the-stack_0_2658 | # Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for geometry_utils."""
import jax
import jax.numpy as jnp
from jax3d.projects.nesf.nerfstatic.utils import geometry_utils as geom
from jax3d.projects.nesf.nerfstatic.utils import types
import numpy as np
def _make_rays(origin, direction):
n, _ = origin.shape
return types.Rays(scene_id=jnp.zeros((n, 1), dtype=jnp.int32),
origin=origin,
direction=direction)
def test_scale():
transform = geom.Scale(scale=jnp.array([1, 2, 3]))
rays = _make_rays(origin=np.array([[1, 1, 0]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0, 1], 2) # pytype: disable=attribute-error
def test_rotate():
transform = geom.Rotate(axis=jnp.array([0, 0, 1]), radians=np.pi/2)
rays = _make_rays(origin=np.array([[1, 1, 0]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.array([-1, 1, 0])) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([1/np.sqrt(2), 1/np.sqrt(2), 0]))
def test_translate():
transform = geom.Translate(offset=jnp.array([1, 2, 3]))
rays = _make_rays(origin=np.array([[1, 1, 0]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.array([2, 3, 3])) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([1/np.sqrt(2), -1/np.sqrt(2), 0]))
def test_compose():
transform = geom.Compose(transforms=[
geom.Scale(scale=np.array([1, 2, 1])),
geom.Rotate(axis=np.array([0, 0, 1]), radians=np.pi/2),
geom.Translate(offset=jnp.array([1, -1, 3])),
])
rays = _make_rays(origin=np.array([[1, 0.5, -3]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.zeros(3)) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([2/np.sqrt(2), 1/np.sqrt(2), 0]))
def test_inverse():
transform = geom.Inverse(transform=geom.Compose(transforms=[
geom.Scale(scale=np.array([1, 2, 1])),
geom.Rotate(axis=np.array([0, 0, 1]), radians=np.pi/2),
geom.Translate(offset=jnp.array([1, -1, 3])),
]))
rays = _make_rays(origin=np.array([[0, 0, 0]]),
direction=np.array([[1.4142135, 0.70710677, 0.]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.array([1, 0.5, -3])) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([1/np.sqrt(2), -1/np.sqrt(2), 0]))
def test_identity():
transform = geom.Identity()
rays = _make_rays(origin=np.array([[0, 0, 0]]),
direction=np.array([[1.4142135, 0.70710677, 0.]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
def test_sample_points():
n = 20
k = 7
sample_points = types.SamplePoints(
scene_id=np.random.randint(0, 5, size=(n, 1)),
position=np.random.randn(n, k, 3),
direction=np.random.randn(n, 3))
rays = geom._sample_points_to_rays(sample_points)
sample_points2 = geom._rays_to_sample_points(rays, sample_points.batch_shape)
jax.tree_map(np.testing.assert_allclose, sample_points, sample_points2)
|
the-stack_0_2659 | from src.if_else import if_else
from src.secint import secint as s
def maximum(quotients):
"""
Returns both the maximum quotient and the index of the maximum in an
oblivious sequence.
Only works for quotients that have positive numerator and denominator.
"""
def max(previous, current):
(maximum, index_of_maximum, index) = previous
is_new_maximum = ge_quotient(current, maximum)
index_of_maximum = if_else(is_new_maximum, index, index_of_maximum)
maximum = tuple(if_else(is_new_maximum,
list(current),
list(maximum)))
return (maximum, index_of_maximum, index + 1)
neutral = (s(0), s(0))
initial = (neutral, s(0), s(0))
maximum, index_of_maximum, _ = quotients.reduce(neutral, max, initial)
return maximum, index_of_maximum
def ge_quotient(left, right):
"""
Returns whether the left quotient is greater than or equal than the right
quotient.
Only works for quotients that have positive numerator and denominator.
"""
(a, b) = left
(c, d) = right
return a * d >= b * c
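# Hedged illustration (not part of the original module): ge_quotient() compares
# a/b >= c/d by cross-multiplying, which avoids a division that would be costly
# on secret-shared values. The plain-integer inputs below are assumptions used
# only to show the arithmetic; the real code operates on secint shares.
def _ge_quotient_demo():
    assert ge_quotient((3, 4), (2, 3))        # 3/4 >= 2/3 because 3*3 >= 4*2
    assert not ge_quotient((1, 2), (2, 3))    # 1/2 <  2/3 because 1*3 <  2*2
    return True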
|
the-stack_0_2665 | """
A CapitalT class and methods that use the Cross class.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Jun Fan.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the test functions. As you implement CapitalT method uncomment the appropriate tests. """
# --------------------------------------------------------------
# Uncomment only 1 test at a time as you develop your code.
# --------------------------------------------------------------
print('Un-comment the calls in MAIN one by one')
print(' to run the testing code as you complete the TODOs.')
run_test_simple_t()
run_test_set_colors()
run_test_move_by()
run_test_clone()
def run_test_simple_t():
""" Tests for the __init__ method and attach_to method. See the simple_t PDF for expected output. """
print()
print('--------------------------------------------------')
print('Testing __init__ and attach_to ')
print('--------------------------------------------------')
window = rg.RoseWindow(600, 400, 'Test 1 - Simple Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
print("Expected: Point(250.0, 40.0) Point(350.0, 60.0)")
print("Actual: ", t1.h_rect.get_upper_left_corner(), t1.h_rect.get_lower_right_corner())
print("Expected: Point(290.0, 40.0) Point(310.0, 240.0)")
print("Actual: ", t1.v_rect.get_upper_left_corner(), t1.v_rect.get_lower_right_corner())
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.attach_to(window)
window.render()
print("See graphics window and compare to the simple_t PDF")
window.close_on_mouse_click()
def run_test_set_colors():
""" Tests for the set_colors method. See the set_colors PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 2 - Colorful Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_colors('red', 'magenta')
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.set_colors('green', 'purple')
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.set_colors('blue', 'gray')
t3.attach_to(window)
window.render()
window.close_on_mouse_click()
def run_test_move_by():
""" Tests for the move_by method. See the move_by PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 3 - Moving T')
little_red_t = CapitalT(rg.Point(300, 50), 60, 80, 5)
little_red_t.set_colors('red', 'gray')
little_red_t.attach_to(window)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
for k in range(40):
little_red_t.move_by(5, -2)
window.render(0.05)
window.close_on_mouse_click()
def run_test_clone():
""" Tests for the clone method. See the clone PDF for expected output. """
window = rg.RoseWindow(650, 400, 'Test 4 - Cloning Ts')
first_t = CapitalT(rg.Point(75, 50), 80, 80, 40)
first_t.set_colors('blue', 'cyan')
for k in range(6):
t = first_t.clone()
if k < 2:
t.set_colors('white', 'black')
t.move_by(100 * k, 20 * k)
t.attach_to(window)
first_t.move_by(0, 200)
first_t.attach_to(window)
window.render()
window.close_on_mouse_click()
########################################################################
# The CapitalT class (and its methods) begins here.
########################################################################
class CapitalT(object):
""" Manages a CapitalT graphics object which is made up of two rectangles. """
def __init__(self, intersection_center, width, height, letter_thickness):
"""
What comes in:
-- self
-- an rg.Point for the intersection center of the CapitalT
-- This point is also center of the horizontal rectangle.
-- a int for the width of the CapitalT (the width of the horizontal rectangle)
-- a int for the height of the CapitalT (the height of the vertical rectangle)
-- a int for the thickness of each rectangle (the letter's thickness)
What goes out: Nothing (i.e., None).
Side effects: Sets two instance variables named:
-- h_rect (to represent the horizontal rectangle in the T, the top bar)
-- v_rect (to represent the vertical rectangle in the T, the | part of the T)
*** See the dimensions PDF for the exact placement of the rectangles in the T. ***
Each rectangle is an rg.Rectangle. Unlike prior modules you are NOT
allowed to make any other instance variables. You may only use
exactly these two and must figure out how to do the problem with ONLY
those two instance variables.
Example:
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
-- t1.h_rect would have an upper left corner of (250, 40)
-- t1.h_rect would have an lower right corner of (350, 60)
-- t1.v_rect would have an upper left corner of (290, 40)
-- t1.v_rect would have an lower right corner of (310, 240)
Type hints:
:type intersection_center: rg.Point
:type width: int
:type height: int
:type letter_thickness: int
"""
# --------------------------------------------------------------
# DONE: 3.
# READ the above specification, including the Example.
# Implement this method
# Note: you will need to also implement attach_to before testing
# --------------------------------------------------------------
self.intersection = intersection_center.clone()
self.width = width
self.height = height
self.thickness = letter_thickness
self.h_rect = rg.Rectangle(rg.Point(self.intersection.x + 0.5 * self.width,
self.intersection.y + 0.5 * self.thickness),
rg.Point(self.intersection.x - 0.5 * self.width,
self.intersection.y - 0.5 * self.thickness))
self.v_rect = rg.Rectangle(rg.Point(self.intersection.x - 0.5 * self.thickness,
self.intersection.y - 0.5 * self.thickness),
rg.Point(self.intersection.x + 0.5 * self.thickness,
self.intersection.y + self.height - 0.5 * self.thickness))
def attach_to(self, window):
"""
What comes in:
-- self
-- an rg.RoseWindow
What goes out: Nothing (i.e., None).
Side effects:
-- Attaches both instance rectangles to the given window.
-- Hint: Attach h_rect second to make it draw in front of v_rect
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
Type hints:
:type window: rg.RoseWindow
"""
# --------------------------------------------------------------
# DONE: 4.
# READ the above specification, including the Example.
# Implement and test this method by looking at the console and
# the graphics window (compare it to simple_t.pdf)
# --------------------------------------------------------------
self.v_rect.attach_to(window)
self.h_rect.attach_to(window)
window.render()
def set_colors(self, fill_color, outline_color):
"""
What comes in:
-- self
-- a string that represents a valid rosegraphics color
-- a string that represents a valid rosegraphics color
What goes out: Nothing (i.e., None).
Side effects:
-- sets the fill_color of both rectangles to the given fill color
-- sets the outline_color of both rectangles to the given outline color
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_color('red', 'blue')
Type hints:
:type fill_color: str
:type outline_color: str
"""
# --------------------------------------------------------------
# DONE: 5.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# set_colors.pdf.
# --------------------------------------------------------------
self.h_rect.fill_color = fill_color
self.h_rect.outline_color = outline_color
self.v_rect.fill_color = fill_color
self.v_rect.outline_color = outline_color
def move_by(self, dx, dy):
"""
What comes in:
-- self
-- an int amount to move in the x direction
-- an int amount to move in the y direction
What goes out: Nothing (i.e., None).
Side effects:
-- Moves both h_rect and v_rect the specified dx and dy amounts.
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
window.render(0.5)
t1.move_by(100, 200) # Moves the T 100 pixels right and 200 down.
window.render() # necessary to see the change
Type hints:
:type dx: int
:type dy: int
"""
# --------------------------------------------------------------
# DONE: 6.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# move_by.pdf. Note: the pdf shows the different locations
# that the T moves through, but there is only 1 T at any moment.
# --------------------------------------------------------------
        self.h_rect.corner_1.move_by(dx, dy)
        self.v_rect.corner_1.move_by(dx, dy)
        self.h_rect.corner_2.move_by(dx, dy)
        self.v_rect.corner_2.move_by(dx, dy)
        # Keep the stored intersection point in sync so clone() reflects the current position.
        self.intersection.move_by(dx, dy)
def clone(self):
"""
What comes in:
-- self
What goes out:
-- Returns a new CapitalT that is located in the same position as
this CapitalT with the same colors for the rectangles.
Side effects:
-- None
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_color('red', 'blue')
t2 = t1.clone() # t2 is at the same location WITH THE SAME COLORS
Type hints:
:rtype: CapitalT
"""
# --------------------------------------------------------------
# DONE: 7.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# clone.pdf.
# --------------------------------------------------------------
        a = CapitalT(self.intersection.clone(), self.width, self.height, self.thickness)
        a.h_rect.fill_color = self.h_rect.fill_color
        a.h_rect.outline_color = self.h_rect.outline_color
        a.v_rect.fill_color = self.v_rect.fill_color
        a.v_rect.outline_color = self.v_rect.outline_color
        return a
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
the-stack_0_2668 | from xception_model import XceptionModel
from glob import glob
import sys
sys.path.append('../')
# Main
def main():
# Setup parameters
data_dir = "../../data/"
images_dir = data_dir + "assets/images/"
checkpoint_dir = data_dir + "saved_models/"
weights_path = data_dir + "saved_models/best_xception_model.hdf5"
breeds_path = data_dir + "assets/dog_breeds.txt"
bottleneck_features_path = data_dir + "assets/bottleneck_features/DogXceptionData.npz"
xception_model = XceptionModel()
# Train model
# xception_model.learn(images_dir,
# bottleneck_file=None,
# checkpoint_dir=checkpoint_dir)
# Load Pretrained weights
xception_model.load_pretrained_model(weights_path, breeds_path)
img_path1 = "/Users/irvinodjuana/Desktop/rosie.png"
img_path2 = "/Users/irvinodjuana/Downloads/cat2.jpeg"
img_path3 = "/Users/irvinodjuana/Downloads/linkedin_pic.png"
# Test breed predictions
predictions = xception_model.predict_file(img_path1)
print(predictions)
# Test dog detection
print("Rosie is a dog: ", xception_model.detect_dog(img_path1)) # True
print("Cat is a dog: ", xception_model.detect_dog(img_path2)) # False
print("Irvino is a dog: ", xception_model.detect_dog(img_path3)) # False
# count = 0
# dogs = 0
# for file in glob(images_dir + "test/**/*.jpg")[:20]:
# count += 1
# if xception_model.detect_dog(file):
# dogs += 1
# print(f"Percentage of dogs detected in train: {dogs}/{count}")
if __name__ == "__main__":
main()
|
the-stack_0_2670 | import logging
import growattServer
import datetime
logger = logging.getLogger(__name__.rsplit(".")[-1])
class Growatt:
# Growatt EMS Module
# Fetches Consumption and Generation details from Growatt API
import requests
import time
cacheTime = 10
config = None
configConfig = None
configGrowatt = None
batterySOC = 0
consumedW = 0
fetchFailed = False
generatedW = 0
lastFetch = 0
master = None
password = None
session = None
status = False
timeout = 2
username = None
useBatteryAt = None
useBatteryTill = None
batteryMaxOutput = None
dischargingTill = None
useBatteryBefore = None
now = None
def __init__(self, master):
self.master = master
self.config = master.config
self.configConfig = master.config.get("config", {})
self.configGrowatt = master.config["sources"].get("Growatt", {})
self.password = self.configGrowatt.get("password", "")
self.status = self.configGrowatt.get("enabled", False)
self.username = self.configGrowatt.get("username", "")
self.useBatteryAt = float(self.configGrowatt.get("useBatteryAt", 0))
self.useBatteryTill = float(self.configGrowatt.get("useBatteryTill", 0))
self.batteryMaxOutput = float(self.configGrowatt.get("batteryMaxOutput", 0))
timestring = self.configGrowatt.get("useBatteryBefore", "00:00")
timelist = timestring.split(":")
self.useBatteryBefore = datetime.time(int(timelist[0]), int(timelist[1]))
        self.dischargingTill = self.useBatteryAt
self.now = datetime.datetime.now().time()
# Unload if this module is disabled or misconfigured
if (not self.status) or (not self.username or not self.password):
self.master.releaseModule("lib.TWCManager.EMS", "Growatt")
return None
def getConsumption(self): # gets called by TWCManager.py
if not self.status:
logger.debug("EMS Module Disabled. Skipping getConsumption")
return 0
# Perform updates if necessary
self.update()
# Return consumption value
return self.consumedW
def getGeneration(self): # gets called by TWCManager.py
if not self.status:
logger.debug("EMS Module Disabled. Skipping getGeneration")
return 0
# Perform updates if necessary
self.update()
# Return generation value
return self.generatedW
def getGenerationValues(self):
if not self.status:
logger.debug("EMS Module Disabled. Skipping getGeneration")
return 0
api = growattServer.GrowattApi()
try:
logger.debug("Fetching Growatt EMS sensor values")
login_response = api.login(self.username, self.password)
except Exception as e:
logger.log(
logging.INFO4, "Error connecting to Growatt to fetching sensor values"
)
logger.debug(str(e))
self.fetchFailed = True
return False
if not login_response:
logger.log(logging.INFO4, "Empty Response from Growatt API")
return False
if login_response:
plant_list = api.plant_list(login_response["userId"])["data"][0]
plant_ID = plant_list["plantId"]
inverter = api.device_list(plant_ID)[0]
deviceAilas = inverter["deviceAilas"]
status = api.mix_system_status(deviceAilas, plant_ID)
plant_info = api.plant_info(plant_ID)
device = plant_info["deviceList"][0]
device_sn = device["deviceSn"]
mix_status = api.mix_system_status(device_sn, plant_ID)
self.batterySOC = float(mix_status["SOC"])
gen_calc = float(status["pPv1"]) + float(status["pPv2"])
gen_calc *= 1000
gen_api = float(status["ppv"]) * 1000
inTime = (
self.now > datetime.time(00, 00) and self.now < self.useBatteryBefore
)
            if self.dischargingTill < self.batterySOC and inTime:
                self.dischargingTill = self.useBatteryTill
                self.generatedW = gen_api + self.batteryMaxOutput
            else:
                self.dischargingTill = self.useBatteryAt
self.generatedW = gen_api
self.consumedW = float(status["pLocalLoad"]) * 1000
else:
logger.log(logging.INFO4, "No response from Growatt API")
def setCacheTime(self, cacheTime):
self.cacheTime = cacheTime
def setTimeout(self, timeout):
self.timeout = timeout
def update(self):
# Update function - determine if an update is required
self.now = datetime.datetime.now().time()
if (int(self.time.time()) - self.lastFetch) > self.cacheTime:
# Cache has expired. Fetch values from Growatt.
self.getGenerationValues()
# Update last fetch time
if self.fetchFailed is not True:
self.lastFetch = int(self.time.time())
return True
else:
# Cache time has not elapsed since last fetch, serve from cache.
return False
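# Hedged illustration (not part of TWCManager): update() above implements a
# simple time-based cache around getGenerationValues(). The standalone sketch
# below mirrors that pattern with plain functions; the names and the 10-second
# window are assumptions for illustration only.
def _cached_fetch_sketch(fetch, state, cache_time=10):
    import time as _time
    now = int(_time.time())
    if (now - state.get("lastFetch", 0)) > cache_time:
        state["value"] = fetch()
        state["lastFetch"] = now
        return True
    return False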
|
the-stack_0_2672 | import os
import time
from argparse import ArgumentParser
from django.conf import settings
from django.core.management import call_command
from rest_base.commands import BaseCommand
from rest_base.settings import base_settings
class Command(BaseCommand):
help = (
'Load predefined model instances'
)
def add_arguments(self, parser: ArgumentParser):
parser.add_argument('model', type=str, help='Specifies the model to load in the format of app_label.ModelName')
parser.add_argument(
'-f', '--filename', nargs='?', type=str, help='Specifies the file name of dumps (default: ModelName.json)')
def handle(self, *args, **options):
model: str = options['model']
filename: str = options['filename']
if filename is None:
filename = f"{model.split('.')[-1]}.json"
try:
base_dir = settings.BASE_DIR
except AttributeError as e:
raise AttributeError('BASE_DIR must be defined in Django settings') from e
path = os.path.join(base_dir, base_settings.PREDEFINED_ROOT, filename)
t = time.time()
self.log(f'load {model} instances from:')
self.log(path)
call_command('loaddata', path)
self.log(f'done ({time.time() - t:.2f} s)')
|
the-stack_0_2675 | from pathlib import Path
import mlflow
import tensorflow as tf
import yaml
from loguru import logger
from tensorflow.keras.models import load_model
from utils import get_sorted_runs
with open("configs/params.yaml") as reproducibility_params:
mlflow_config = yaml.safe_load(reproducibility_params)["mlflow"]
experiment_name = mlflow_config["experiment_name"]
def load_model_artifact() -> tf.keras.Model:
"""Load artifacts for a particular `run_id`.
Args:
run_id (str): ID of the run to load model artifacts from.
Returns:
Artifacts needed for inference.
"""
all_runs = get_sorted_runs(
experiment_name=experiment_name,
order_by=["metrics.val_loss ASC"],
)
print(
all_runs[
[
"run_id",
"tags.mlflow.runName",
"metrics.val_categorical_accuracy",
"metrics.val_loss",
]
],
)
best_run = all_runs.iloc[0]["run_id"]
logger.info(f"Best run id is : {best_run}")
# Load model
run = mlflow.get_run(run_id=best_run)
homedir = Path(run.info.artifact_uri).parent.parent.parent.parent
root = Path(run.info.artifact_uri).relative_to(homedir)
model_url = Path(root) / Path("model/data") / "model.h5"
model = load_model(model_url)
logger.info(f"Model loaded from {run.info}")
return model
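# Hedged illustration (not part of the original script): the pathlib juggling in
# load_model_artifact() rebuilds a repo-relative model path from an absolute
# artifact_uri. The URI below is a made-up example, not a real MLflow run.
def _artifact_path_sketch():
    artifact_uri = Path("/home/user/project/mlruns/0/abc123/artifacts")
    homedir = artifact_uri.parent.parent.parent.parent    # /home/user/project
    root = artifact_uri.relative_to(homedir)               # mlruns/0/abc123/artifacts
    return Path(root) / Path("model/data") / "model.h5"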
if __name__ == "__main__":
load_model_artifact()
|
the-stack_0_2676 | from typing import Dict
from .base import APIObject, APIList, Session, getSessionType, DEFAULT_URL, q
from .kv import KV
from . import users
from . import objects
from .notifications import Notifications
from functools import partial
class App(APIObject):
props = {"name", "description", "icon", "settings", "settings_schema"}
def __init__(
self, access_token: str, url: str = DEFAULT_URL, session="sync", cached_data={}
):
appid = "self"
if isinstance(session, Session):
# Treat the session as already initialized, meaning that the access token is actually
# the app id
appid = access_token
super().__init__(
f"api/apps/{q(appid)}", {"app": appid}, session, cached_data=cached_data
)
else:
# Initialize the app object as a direct API
s = getSessionType(session, "self", url)
s.setAccessToken(access_token)
super().__init__("api/apps/self", {"app": appid}, s)
# The objects belonging to the app
self.objects = objects.Objects({"app": appid}, self.session)
# Key-value store associated with the app
self.kv = KV(f"api/kv/apps/{q(appid)}", self.session)
@property
def owner(self):
return self.session.f(
self.read(), lambda x: users.User(x["owner"], self.session)
)
class Apps(APIList):
def __init__(self, constraints: Dict, session: Session):
super().__init__("api/apps", constraints, session)
def __getitem__(self, item):
return self._getitem(
item, f=lambda x: App(x["id"], session=self.session, cached_data=x)
)
def __call__(self, **kwargs):
return self._call(
f=lambda x: [
App(xx["id"], session=self.session, cached_data=xx) for xx in x
],
**kwargs,
)
def create(self, name, **kwargs):
return self._create(
f=lambda x: App(x["id"], session=self.session, cached_data=x),
**{"name": name, **kwargs},
)
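# Hedged usage sketch (not part of the original module): shows how the Apps
# collection above might be used. The session object, app name and description
# are assumptions for illustration and are not validated against a live server;
# depending on the session type the calls below may return awaitables.
def _apps_usage_sketch(session):
    apps = Apps({}, session)
    created = apps.create("example app", description="created from the sketch")
    return created, apps()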
|
the-stack_0_2678 | # coding=utf-8
from subprocess import PIPE, DEVNULL, STDOUT, check_output, check_call, CalledProcessError
from utilities import mongolog, command_success, command_error, filedel
import os
import re
import datetime
from pprint import pprint
import inspect
#import urllib.parse
externalreposdir = "/etc/apt/sources.list.d/"
def aptupdate():
logid = mongolog( locals() )
try:
command = ['apt-get', 'update']
check_call(command)
except CalledProcessError as e:
return command_error( e, command, logid )
return command_success( logid=logid )
#If summary is True, the returned list also includes each package's short description
def listinstalled( summary=False ):
options = '-f=${binary:Package};${Version};${Architecture}' + ( ';${binary:Summary}\n' if summary else '\n' )
command = ['dpkg-query', options, '-W']
try:
output = check_output(command, stderr=PIPE, universal_newlines=True).splitlines()
except CalledProcessError as e:
return command_error( e, command )
except FileNotFoundError as e:
return command_error( e, command )
    #List of keys for the package information
keys = ['name', 'version', 'architecture']
if summary: keys.append('summary')
    #Will hold a list of dictionaries describing every package installed on the system
pkgs = list()
    #Fill the pkgs list with the parsed values
for i in output:
appinfo = i.split(';')
pkgs.append( dict( zip(keys, appinfo) ) )
return command_success( data=pkgs )
#Search for a package. If namesonly is True (default) the search only matches package names
def aptsearch( pkgname, namesonly=True ):
#Cannot search on empty string
if not pkgname:
        return command_error( returncode=255, stderr='Empty search string not allowed' )
command = ['apt-cache', 'search', pkgname]
if namesonly: command.append('--names-only')
try:
output = check_output(command, stderr=PIPE, universal_newlines=True).splitlines()
except CalledProcessError as e:
return command_error( e, command )
keys = ['name', 'desc']
pkgs = list()
for i in output:
appinfo = i.split(' - ')
pkgs.append( dict( zip(keys, appinfo) ) )
return command_success( data=pkgs )
#The onlydependences option is used by other functions in this same file
#Shows package information
#Returns: List
def aptshow(pkgname, onlydependences=False):
mode = 'depends' if onlydependences else 'show'
try:
command = ['apt-cache', mode, pkgname]
output = check_output(command, stderr=PIPE, universal_newlines=True)
except CalledProcessError as e:
return command_error( e, command )
if onlydependences:
#Remove the first line (header)
toreturn = re.sub('^.*\\n', '', output)
else:
#On multiple results only keep the first one
output = output.split('\n\n')[0]
output = output.splitlines() #<-- We use splitlines() here because onlydependences does not need a split-lined output
#Check whether the package is installed or not
isinstalled = None
try:
command = ['dpkg', '-s', pkgname]
check_call(command)
except CalledProcessError as e:
isinstalled = False
if isinstalled is None: isinstalled = True
#Removing useless lines
linestomantain = ['Package:', 'Version:', 'Priority:', 'Section:', 'Origin:', 'Installed-Size:', 'Depends:', 'Description', ' ']
output = list( filter( lambda line: any( line.startswith(s) for s in linestomantain), output ) )
#Merging all of descrition lines
i = 0
n = len(output)
while i < n:
if output[i].startswith(' '):
output[i-1] = output[i-1] + output[i] #<-- Merge lines
del output[i] #<-- Delete current line
n -= 1
else:
i += 1
#Converting list to dictionary
toreturn = dict()
for line in output:
dictelems = line.split(':', maxsplit=1)
toreturn.update({ dictelems[0] : dictelems[1] })
#Is this package installed?
toreturn.update({ 'Installed' : 1 if isinstalled else 0 })
return command_success( data=toreturn )
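#Hedged illustration (not part of the original module): the while-loop in
#aptshow() folds dpkg description continuation lines (lines starting with a
#space) into the preceding line. The sample lines below are made up.
def _merge_continuation_lines_demo():
    lines = ['Description: a tool', ' that spans', ' three lines', 'Version: 1.0']
    i = 0
    n = len(lines)
    while i < n:
        if lines[i].startswith(' '):
            lines[i - 1] = lines[i - 1] + lines[i]
            del lines[i]
            n -= 1
        else:
            i += 1
    return lines  #['Description: a tool that spans three lines', 'Version: 1.0']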
def aptinstall(pkgname):
logid = mongolog( locals(), {'dependencies' : aptshow(pkgname,onlydependences=True)} )
command = ['apt-get', 'install', '-y', pkgname]
environ = {'DEBIAN_FRONTEND': 'noninteractive', 'PATH': os.environ.get('PATH')}
try:
check_call( command, env=environ ) #, stdout=open(os.devnull, 'wb'), stderr=STDOUT)
except CalledProcessError:
return command_error( returncode=14, stderr='Package installation error. Package name: "'+pkgname+'"', logid=logid )
return command_success( logid=logid )
#Allows user to remove system packages using apt-get remove.
#If purge == True then launch "apt-get remove --purge" instead
def aptremove(pkgname, purge=False):
logid = mongolog( locals(), {'dependencies' : aptshow(pkgname,onlydependences=True)} )
command = ['apt-get', 'purge' if purge else 'remove', '-y', pkgname]
environ = {'DEBIAN_FRONTEND': 'noninteractive', 'PATH': os.environ.get('PATH')}
try:
check_call( command, env=environ ) #stdout=open(os.devnull, 'wb'), stderr=STDOUT)
except CalledProcessError as e:
return command_error( e, command, logid )
return command_success( logid=logid )
#Returns external repos added to system in folder /etc/apt/sources.list.d/
def getexternalrepos():
repospath = '/etc/apt/sources.list.d/'
reposfiles = os.listdir(repospath)
    #Drop files that end with '.save' from the list
reposfiles = list( filter( lambda item: not item.endswith('.save'), reposfiles ) )
#List to return
repos = list()
for filename in reposfiles:
with open(repospath + filename) as opened:
repos.append({
'filename': filename,
'lines': opened.read()
})
return command_success( data=repos )
def getreponame(): return 'nomodo-' + datetime.datetime.now().strftime('%Y%m%d%H%M%S')
#Writes the repo line to the given .list file under /etc/apt/sources.list.d/ and returns the command result
def addrepo( content, name ):
logid = mongolog( locals() )
filename = '/etc/apt/sources.list.d/' + name + '.list'
repofile = open( filename, 'a')
repofile.write(content + '\n')
repofile.close()
return command_success( logid=logid )
def removerepofile(filename):
    result = filedel( externalreposdir + filename )
    filedel( externalreposdir + filename + '.save' ) #Ignores errors if the file does not exist, discarding the return dictionary
logid = mongolog( locals() )
repospath = '/etc/apt/sources.list.d/'
try:
os.remove(repospath + filename + '.list')
os.remove(repospath + filename + '.list.save')
except FileNotFoundError:
return command_error( returncode=10, stderr='File to remove not found: "'+repospath+'"', logid=logid )
if result['returncode'] is 0:
return command_succes( logid=logid )
else:
return result
|
the-stack_0_2679 | # -*- coding: utf-8 -*-
# Created by restran on 2016/12/4
# https://github.com/RyanKung/rc4-python3/blob/master/rc4/rc4.py
__all__ = ['encrypt', 'decrypt']
def crypt(data: bytes, key: bytes) -> bytes:
"""RC4 algorithm"""
x = 0
box = list(range(256))
for i in range(256):
x = (x + int(box[i]) + int(key[i % len(key)])) % 256
box[i], box[x] = box[x], box[i]
print(len(data))
x = y = 0
out = []
for char in data:
x = (x + 1) % 256
y = (y + box[x]) % 256
box[x], box[y] = box[y], box[x]
t = char ^ box[(box[x] + box[y]) % 256]
out.append(t)
return bytes(bytearray(out))
def encrypt(data: str, key: str) -> bytes:
"""RC4 encryption with random salt and final encoding"""
data = crypt(data.encode(), key.encode())
return data
def decrypt(data: bytes, key: str) -> bytes:
"""RC4 decryption of encoded data"""
return crypt(data, key.encode())
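# Hedged sanity check (not part of the original file): RC4 is symmetric, so
# running crypt() twice with the same key restores the plaintext. The sample
# bytes below are illustrative only.
def _rc4_roundtrip_demo():
    key = b'welcometoicqedu'
    plaintext = b'hello rc4'
    assert crypt(crypt(plaintext, key), key) == plaintext
    return True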
def main():
    # The data to be encrypted
data = 'UUyFTj8PCzF6geFn6xgBOYSvVTrbpNU4OF9db9wMcPD1yDbaJw=='
    # The key
key = 'welcometoicqedu'
    # Encrypt
encoded_data = encrypt(data=data, key=key)
print(encoded_data)
    # Decrypt
decoded_data = decrypt(data=encoded_data, key=key)
print(decoded_data)
if __name__ == '__main__':
main()
|
the-stack_0_2680 | import os
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class SmoothDialog(QDialog):
def __init__(self, parent=None, flag=0):
super(SmoothDialog, self).__init__(parent)
self.flag = flag
self.setWindowTitle('smoothDialog')
        # Add widgets to the layout
layout = QVBoxLayout(self)
self.form = QFormLayout(self)
if flag == 0:
self.smoothTypeLabel = QLabel(self)
self.smoothTypeLabel.setText("滤波类型")
self.smoothTypeCB = QComboBox(self)
self.smoothTypeCB.addItem("均值滤波")
self.smoothTypeCB.addItem("方框滤波")
self.smoothTypeCB.addItem("高斯滤波")
self.smoothTypeCB.addItem("中值滤波")
self.smoothTypeCB.addItem("双边滤波")
self.smoothTypeCB.currentIndexChanged.connect(self.selectionchange)
self.ksizeLabel = QLabel(self)
self.ksizeLabel.setText("滤波核大小")
self.ksizeLabel.setFocus()
self.ksizeLine = QLineEdit(self)
self.ksizeLine.setProperty("name", "smoothKsizeLine")
self.ksizeLine.setPlaceholderText("滤波核形如(5,5)")
self.form.addRow(self.smoothTypeLabel, self.smoothTypeCB)
self.form.addRow(self.ksizeLabel, self.ksizeLine)
else:
# self.ksizeLabel = QLabel(self)
# self.ksizeLabel.setText("修正值")
# self.ksizeLine = QLineEdit(self)
self.kernelLabel = QPushButton(self)
self.kernelLabel.setText("导入卷积核")
self.kernelLabel.clicked.connect(self.importImage)
self.kernelLine = QLineEdit(self)
# self.form.addRow(self.ksizeLabel, self.ksizeLine)
self.form.addRow(self.kernelLabel, self.kernelLine)
layout.addLayout(self.form)
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout.addWidget(buttons)
def getData(self):
if self.flag == 0:
if self.smoothTypeCB.currentText() == "均值滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text()
elif self.smoothTypeCB.currentText() == "方框滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text(), self.ddepthLine.text()
elif self.smoothTypeCB.currentText() == "高斯滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text(), self.sigmaXLine.text(), self.sigmaYLine.text()
elif self.smoothTypeCB.currentText() == "中值滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text()
else:
return self.smoothTypeCB.currentText(), self.ksizeLine.text(), self.sigmaXLine.text(), self.sigmaYLine.text()
else:
return self.kernelLine.text()
def selectionchange(self, i):
for row in range(int(self.form.count() / 2) - 1, 1, -1):
self.form.removeRow(row)
self.ksizeLabel.setText("滤波核大小")
if i == 1:
self.ddepthLabel = QLabel(self)
self.ddepthLabel.setText("处理结果图像的图像深度")
self.ddepthLine = QLineEdit(self)
self.form.addRow(self.ddepthLabel, self.ddepthLine)
elif i == 2:
self.sigmaXLabel = QLabel(self)
self.sigmaXLabel.setText("卷积核在水平方向上的标准差")
self.sigmaXLine = QLineEdit(self)
self.sigmaXLine.setText("0")
self.sigmaYLabel = QLabel(self)
self.sigmaYLabel.setText("卷积核在垂直方向上的标准差")
self.sigmaYLine = QLineEdit(self)
self.sigmaYLine.setText("0")
self.form.addRow(self.sigmaXLabel, self.sigmaXLine)
self.form.addRow(self.sigmaYLabel, self.sigmaYLine)
elif i == 3:
self.ksizeLine.setPlaceholderText("滤波核大小,形如5")
elif i == 4:
self.ksizeLabel.setText("以当前像素点为中心点的直径")
self.ksizeLine.setPlaceholderText("空间距离参数")
self.sigmaXLabel = QLabel(self)
self.sigmaXLabel.setText("颜色差值范围")
self.sigmaXLine = QLineEdit(self)
self.sigmaYLabel = QLabel(self)
self.sigmaYLabel.setText("坐标空间中sigma值")
self.sigmaYLine = QLineEdit(self)
self.form.addRow(self.sigmaXLabel, self.sigmaXLine)
self.form.addRow(self.sigmaYLabel, self.sigmaYLine)
def importImage(self):
imgName, imgType = QFileDialog.getOpenFileName(self, "上传核", os.getcwd(), "All Files(*)")
self.kernelLine.setText(imgName)
|
the-stack_0_2681 | import random
import string
from importlib import import_module
from typing import List
from protobuf_gen.transpiler import build, BuildProps, InputModule, OutputModule
def _load_protoc_mods(
output_files: List[InputModule],
root_autogen: str,
):
# we just need to say "map this definition module to a new one"
output_mods: List[OutputModule] = []
clear_mods = []
for of in output_files:
to_import = root_autogen + '.' + of.mod
m = import_module(to_import)
clear_mods.append(m)
output_mods += [of.to_output(m.DESCRIPTOR)]
return output_mods
def wrap(
output_dir_wrappers='./wrappers',
root_autogen='autogen',
output_files: List[InputModule] = None,
):
build(
BuildProps(
root_autogen,
_load_protoc_mods(
output_files,
root_autogen,
)
),
outdir=output_dir_wrappers
)
|
the-stack_0_2682 | from unittest.mock import patch
import pytest
from click.testing import CliRunner
from todoman.cli import cli
from todoman.configuration import ConfigurationException
from todoman.configuration import load_config
def test_explicit_nonexistant(runner):
result = CliRunner().invoke(
cli,
env={"TODOMAN_CONFIG": "/nonexistant"},
catch_exceptions=True,
)
assert result.exception
assert "Configuration file /nonexistant does not exist" in result.output
def test_xdg_nonexistant(runner):
with patch("xdg.BaseDirectory.xdg_config_dirs", ["/does-not-exist"]):
result = CliRunner().invoke(
cli,
catch_exceptions=True,
)
assert result.exception
assert "No configuration file found" in result.output
def test_xdg_existant(runner, tmpdir, config):
with tmpdir.mkdir("todoman").join("config.py").open("w") as f:
with config.open() as c:
f.write(c.read())
with patch("xdg.BaseDirectory.xdg_config_dirs", [str(tmpdir)]):
result = CliRunner().invoke(
cli,
catch_exceptions=True,
)
assert not result.exception
assert not result.output.strip()
def test_sane_config(config, runner, tmpdir):
config.write(
'color = "auto"\n'
'date_format = "%Y-%m-%d"\n'
f'path = "{tmpdir}"\n'
f'cache_path = "{tmpdir.join("cache.sqlite")}"\n'
)
result = runner.invoke(cli)
# This is handy for debugging breakage:
if result.exception:
print(result.output)
raise result.exception
assert not result.exception
def test_invalid_color(config, runner):
config.write('color = 12\npath = "/"\n')
result = runner.invoke(cli, ["list"])
assert result.exception
assert (
"Error: Bad color setting. Invalid type (expected str, got int)."
in result.output
)
def test_invalid_color_arg(config, runner):
config.write('path = "/"\n')
result = runner.invoke(cli, ["--color", "12", "list"])
assert result.exception
assert "Usage:" in result.output
def test_missing_path(config, runner):
config.write('color = "auto"\n')
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Error: Missing 'path' setting." in result.output
@pytest.mark.xfail(reason="Not implemented")
def test_extra_entry(config, runner):
config.write("color = auto\ndate_format = %Y-%m-%d\npath = /\nblah = false\n")
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Error: Invalid configuration entry" in result.output
@pytest.mark.xfail(reason="Not implemented")
def test_extra_section(config, runner):
config.write("date_format = %Y-%m-%d\npath = /\n[extra]\ncolor = auto\n")
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Invalid configuration section" in result.output
def test_missing_cache_dir(config, runner, tmpdir):
cache_dir = tmpdir.join("does").join("not").join("exist")
cache_file = cache_dir.join("cache.sqlite")
config.write(f'path = "{tmpdir}/*"\ncache_path = "{cache_file}"\n')
result = runner.invoke(cli)
assert not result.exception
assert cache_dir.isdir()
assert cache_file.isfile()
def test_date_field_in_time_format(config, runner, tmpdir):
config.write('path = "/"\ntime_format = "%Y-%m-%d"\n')
result = runner.invoke(cli)
assert result.exception
assert (
"Found date component in `time_format`, please use `date_format` for that."
in result.output
)
def test_date_field_in_time(config, runner, tmpdir):
config.write('path = "/"\ndate_format = "%Y-%d-:%M"\n')
result = runner.invoke(cli)
assert result.exception
assert (
"Found time component in `date_format`, please use `time_format` for that."
in result.output
)
def test_colour_validation_auto(config):
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
):
cfg = load_config()
assert cfg["color"] == "auto"
def test_colour_validation_always(config):
config.write("color = 'always'\n", "a")
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
):
cfg = load_config()
assert cfg["color"] == "always"
def test_colour_validation_invalid(config):
config.write("color = 'on_weekends_only'\n", "a")
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
), pytest.raises(ConfigurationException):
load_config()
|
the-stack_0_2684 | from conans.client.generators.cmake import DepsCppCmake
from conans.model import Generator
class CMakePathsGenerator(Generator):
name = "cmake_paths"
@property
def filename(self):
return "conan_paths.cmake"
@property
def content(self):
lines = []
# The CONAN_XXX_ROOT variables are needed because the FindXXX.cmake or XXXConfig.cmake
# in a package could have been "patched" with the `cmake.patch_config_paths()`
# replacing absolute paths with CONAN_XXX_ROOT variables.
for _, dep_cpp_info in self.deps_build_info.dependencies:
var_name = "CONAN_{}_ROOT".format(dep_cpp_info.get_name(self.name).upper())
lines.append('set({} {})'.format(var_name, DepsCppCmake(dep_cpp_info,
self.name).rootpath))
# We want to prioritize the FindXXX.cmake files:
# 1. First the files found in the packages
# 2. The previously set (by default CMAKE_MODULE_PATH is empty)
# 3. The "install_folder" ones, in case there is no FindXXX.cmake, try with the install dir
# if the user used the "cmake_find_package" will find the auto-generated
# 4. The CMake installation dir/Modules ones.
deps = DepsCppCmake(self.deps_build_info, self.name)
lines.append("set(CMAKE_MODULE_PATH {deps.build_paths} ${{CMAKE_MODULE_PATH}} "
"${{CMAKE_CURRENT_LIST_DIR}})".format(deps=deps))
lines.append("set(CMAKE_PREFIX_PATH {deps.build_paths} ${{CMAKE_PREFIX_PATH}} "
"${{CMAKE_CURRENT_LIST_DIR}})".format(deps=deps))
return "\n".join(lines)
|
the-stack_0_2685 | #!/usr/bin/env python
from setuptools import setup
VERSION = "0.1.2"
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="target-couchbase",
version=VERSION,
description="Load data on Couchbase via singer.io",
long_description=long_description,
long_description_content_type="text/markdown",
author="Daigo Tanaka, Anelen Co., LLC",
url="https://github.com/anelendata/target_couchbase",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
install_requires=[
"singer-python>=5.2.0",
"couchbase>=3.0.8",
"simplejson==3.11.1",
"setuptools>=40.3.0",
],
entry_points="""
[console_scripts]
target-couchbase=target_couchbase:main
""",
packages=["target_couchbase"],
package_data={
# Use MANIFEST.ini
},
include_package_data=True
)
|
the-stack_0_2686 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 11:52:26 2018
@author: student
"""
import sys
import random
# import sys, random
class BankAccount():
min_acc_balance = 0
def __init__(self):
self.acc_balance = 0
def get_details(self, acc_type='Savings'):
self.name = input('Enter customer name: ')
self.acc_number = random.randint(100, 150)
self.acc_type = acc_type
def deposit_amount(self, amount):
self.acc_balance += amount
def withdraw_amount(self, amount):
        if self.acc_balance >= amount:
self.acc_balance -= amount
else:
print('Insufficient Funds to withdraw')
def get_balance(self):
return self.acc_balance
def display_acc_details(self):
print(self.acc_number,"\t", self.name,"\t", self.get_balance())
def Display(accounts):
for account in accounts.values():
account.display_acc_details()
def main():
accounts = {}
while True:
print('1. Open the bank account')
print('2. Withdraw amount')
print('3. Deposit amount')
print('4. Display details')
print('5. Exit')
choice = int(input('Enter choice: '))
if choice == 1:
account = BankAccount()
account.get_details()
accounts[account.acc_number] = account
elif choice == 2:
acc = int(input('Withdraw account: '))
amount = int(input('Enter amount to withdraw: '))
accounts[acc].withdraw_amount(amount)
elif choice == 3:
acc = int(input('Deposit account: '))
amount = int(input('Enter amount to deposit: '))
accounts[acc].deposit_amount(amount)
elif choice == 4:
print('Account\tName\tBalance')
Display(accounts)
else:
sys.exit('Done')
if __name__ == '__main__':
main() |
the-stack_0_2687 | # bsl, 2016
import xbmc
import xbmcaddon
import json
import sys
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
REMOTE_DBG = False
if REMOTE_DBG:
try:
import pydevd
pydevd.settrace(stdoutToServer=True, stderrToServer=True)
    except:
        import xbmcgui
        xbmcgui.Dialog().ok(__addonname__, "debug mode not working")
sys.exit(1)
req = xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Settings.GetSettings","id":1}')
jsonRPCRes = json.loads(req)
settingsList = jsonRPCRes["result"]["settings"]
audioSetting = [item for item in settingsList if item["id"] == "audiooutput.audiodevice"][0]
audioDeviceOptions = audioSetting["options"]
activeAudioDeviceValue = audioSetting["value"]
activeAudioDeviceId = [index for (index, option) in enumerate(audioDeviceOptions) if option["value"] == activeAudioDeviceValue][0]
nextIndex = ( activeAudioDeviceId + 1 ) % len(audioDeviceOptions)
nextValue = audioDeviceOptions[nextIndex]["value"]
nextName = audioDeviceOptions[nextIndex]["label"]
changeReq = xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Settings.SetSettingValue","params":{"setting":"audiooutput.audiodevice","value":"%s"},"id":1}' % nextValue)
try:
    changeResJson = json.loads(changeReq)
if changeResJson["result"] != True:
raise Exception
except:
sys.stderr.write("Error switching audio output device")
raise Exception
xbmc.executebuiltin('Notification("%s","Output-Device: %s",2000,"%s")' % (__addonname__, nextName, __icon__ ))
|
the-stack_0_2689 | # shared global variables to be imported from model also
import numpy as np
import os
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "o"
class DataSet(object):
def __init__(self, filepath, vocab_words=None, vocab_tags=None, max_iter=None, lower=True, allow_unk=True):
self.filepath = filepath
self.max_iter = max_iter
# Process data setting
self.vocab_words = vocab_words
self.vocab_tags = vocab_tags
# default setting
self.length = None
self.lower = lower
self.allow_unk = allow_unk
# assign function
if self.vocab_words and self.vocab_tags:
self.proc_word = process_vocab(self.vocab_words, lower=self.lower, allow_unk=self.allow_unk) # define a funciton
self.proc_tag = process_vocab(self.vocab_tags, lower=self.lower, allow_unk=self.allow_unk) # define a funciton
def __iter__(self):
num_iter = 0 # represent sentence target
with open(self.filepath, encoding="utf8", errors='ignore') as f:
next(f) # Pass the column name line
sent_id = None
words, tags = [], []
for line in f:
line = line.strip().split()
sent_tmp_id = line[1]
# print("sentence:",sent_tmp_id)
# judge initial sentence id status
                if not sent_id: # initial assignment of sentence id
sent_id = sent_tmp_id
# judge if same sentence
if sent_tmp_id != sent_id: # new sentence
yield words, tags # change sentence, yield iterator
num_iter += 1
                    if self.max_iter is not None and num_iter > self.max_iter:
break
sent_id = sent_tmp_id
words, tags = [], []
try:
word, tag = line[2], line[4]
if self.lower == True:
word, tag = word.lower(), tag.lower()
except:
print("wrong line,line content:", line)
continue
if self.vocab_words and self.vocab_tags:
word = self.proc_word(word)
tag = self.proc_tag(tag)
words.append(word)
tags.append(tag)
# print(word,tag)
def __len__(self):
"""Iterates once over the corpus to set and store length"""
if self.length is None:
self.length = 0
for _ in self:
self.length += 1
return self.length
def get_trimmed_glove_vectors(filename):
"""
Args:
filename: path to the npz file
Returns:
matrix of embeddings (np array)
"""
try:
with np.load(filename) as data:
return data["embeddings"]
except IOError:
        raise IOError("Unable to locate file {}".format(filename))
def process_vocab(vocab, lower=True, allow_unk=True):
def proc(key):
if lower:
key = key.lower()
if key.isdigit():
key = '$NUM$'
if key in vocab:
key = vocab[key]
else:
if allow_unk:
#print(key,"not in vocab ")
key = vocab["$UNK$"]
else:
raise Exception("unknow key is not allowed, sth is wrong")
return key # return a processed key eg: I to I's id 30
return proc # return a function
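# Hedged illustration (not part of the original module): process_vocab() returns
# a closure that lower-cases words, maps digit strings to $NUM$ and unknown
# words to $UNK$. The toy vocabulary below is an assumption for illustration.
def _process_vocab_demo():
    vocab = {"$UNK$": 0, "$NUM$": 1, "hello": 2}
    proc = process_vocab(vocab)
    return proc("Hello"), proc("1234"), proc("unseen")  # -> (2, 1, 0)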
def build_vocabs(datasets):
"""Build vocabulary from an iterable of datasets objects
Args:
datasets: a list of dataset objects
Returns:
a set of all the words in the dataset
"""
print("Building vocab...")
vocab_words = set()
vocab_tags = set()
for dataset in datasets:
for words, tags in dataset:
vocab_words.update(words)
vocab_tags.update(tags)
print("- done. {} tokens".format(len(vocab_words)))
return vocab_words, vocab_tags
def build_glove_vocab(filename):
"""Load vocab from file
Args:
filename: path to the glove vectors
Returns:
vocab: set() of strings
"""
print("Building vocab...")
vocab = set()
with open(filename) as f:
for line in f:
word = line.strip().split(' ')[0]
vocab.add(word)
print("- done. {} tokens".format(len(vocab)))
return vocab
def write_vocab(vocab, filename):
"""Writes a vocab to a file
Writes one word per line.
Args:
vocab: iterable that yields word
filename: path to vocab file
Returns:
write a word per line
"""
print("Writing vocab...")
with open(filename, "w") as f:
for i, word in enumerate(vocab):
if i != len(vocab) - 1:
f.write("{}\n".format(word))
else:
f.write(word)
print("- done. {} tokens".format(len(vocab)))
def load_vocab(filename):
"""Loads vocab from a file
Args:
filename: (string) the format of the file must be one word per line.
Returns:
d: dict[word] = index
"""
try:
d = dict()
with open(filename) as f:
for idx, word in enumerate(f):
word = word.strip()
d[word] = idx
except IOError:
        raise IOError("Unable to locate file {}".format(filename))
return d
def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
"""Saves glove vectors in numpy array
Args:
vocab: dictionary vocab[word] = index
glove_filename: a path to a glove file
trimmed_filename: a path where to store a matrix in npy
dim: (int) dimension of embeddings
"""
embeddings = np.zeros([len(vocab), dim])
with open(glove_filename) as f:
for line in f:
line = line.strip().split(' ')
word = line[0]
embedding = [float(x) for x in line[1:]]
if word in vocab:
word_idx = vocab[word]
embeddings[word_idx] = np.asarray(embedding)
np.savez_compressed(trimmed_filename, embeddings=embeddings)
"""RNN part"""
def _pad_sequences(sequences, pad_tok, max_length):
"""
Args:
sequences: a generator of list or tuple
pad_tok: the char to pad with
Returns:
a list of list where each sublist has same length
"""
sequence_padded, sequence_length = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:max_length] + [pad_tok] * max(max_length - len(seq), 0)
sequence_padded += [seq_]
sequence_length += [min(len(seq), max_length)]
return sequence_padded, sequence_length
def pad_sequences(sequences, pad_tok, nlevels=1):
"""
Args:
sequences: a generator of list or tuple
pad_tok: the char to pad with
nlevels: "depth" of padding, for the case where we have characters ids
Returns:
a list of list where each sublist has same length
"""
if nlevels == 1:
max_length = max(map(lambda x: len(x), sequences))
sequence_padded, sequence_length = _pad_sequences(sequences,
pad_tok, max_length)
elif nlevels == 2:
max_length_word = max([max(map(lambda x: len(x), seq))
for seq in sequences])
sequence_padded, sequence_length = [], []
for seq in sequences:
# all words are same length now
sp, sl = _pad_sequences(seq, pad_tok, max_length_word)
sequence_padded += [sp]
sequence_length += [sl]
max_length_sentence = max(map(lambda x: len(x), sequences))
sequence_padded, _ = _pad_sequences(sequence_padded,
[pad_tok] * max_length_word, max_length_sentence)
sequence_length, _ = _pad_sequences(sequence_length, 0,
max_length_sentence)
return sequence_padded, sequence_length
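# Hedged illustration (not part of the original module): with nlevels=1,
# pad_sequences() pads every sequence to the longest one in the batch. The toy
# batch below is an assumption for illustration.
def _pad_sequences_demo():
    padded, lengths = pad_sequences([[1, 2], [3, 4, 5]], pad_tok=0)
    return padded, lengths  # -> ([[1, 2, 0], [3, 4, 5]], [2, 3])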
def minibatches(data, minibatch_size):
"""
Args:
data: generator of (sentence, tags) tuples
minibatch_size: (int)
Yields:
list of tuples
"""
x_batch, y_batch = [], []
for (x, y) in data:
if len(x_batch) == minibatch_size:
yield x_batch, y_batch
x_batch, y_batch = [], []
if type(x[0]) == tuple:
x = zip(*x)
x_batch += [x]
y_batch += [y]
if len(x_batch) != 0:
yield x_batch, y_batch
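# Hedged illustration (not part of the original module): minibatches() yields
# full batches first and a final partial batch if items are left over. The toy
# data below is an assumption for illustration.
def _minibatches_demo():
    data = [([1], [0]), ([2], [1]), ([3], [0])]
    return list(minibatches(data, 2))
    # -> [([[1], [2]], [[0], [1]]), ([[3]], [[0]])]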
def get_chunk_type(tok, idx_to_tag):
"""
Args:
tok: id of token, ex 4
idx_to_tag: dictionary {4: "B-PER", ...}
Returns:
tuple: "B", "PER"
"""
tag_name = idx_to_tag[tok]
tag_class = tag_name.split('-')[0]
tag_type = tag_name.split('-')[-1]
return tag_class, tag_type
def get_chunks(seq, tags):
"""Given a sequence of tags, group entities and their position
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunk_type, chunk_start, chunk_end)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = tags[NONE]
idx_to_tag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunk_type, chunk_start = None, None
for i, tok in enumerate(seq):
# End of a chunk 1
if tok == default and chunk_type is not None:
# Add a chunk.
chunk = (chunk_type, chunk_start, i)
chunks.append(chunk)
chunk_type, chunk_start = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)
if chunk_type is None:
chunk_type, chunk_start = tok_chunk_type, i
elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
chunk = (chunk_type, chunk_start, i)
chunks.append(chunk)
chunk_type, chunk_start = tok_chunk_type, i
else:
pass
# end condition
if chunk_type is not None:
chunk = (chunk_type, chunk_start, len(seq))
chunks.append(chunk)
return chunks
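# Example (illustrative; assumes the module-level NONE constant is the "O" tag,
# which must therefore be present in `tags`):
#     >>> tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3, "O": 1}
#     >>> get_chunks([4, 5, 1, 3], tags)
#     [('PER', 0, 2), ('LOC', 3, 4)]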
|
the-stack_0_2690 | #!/usr/bin/env python3
import json
from typing import List
import urllib3
from blessings import Terminal
from github import Github
from github.Repository import Repository
from utils import get_env_var, timestamped_print
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print = timestamped_print
REVIEWERS = ['bziser', 'GuyAfik', 'yucohen']
MARKETPLACE_CONTRIBUTION_PR_AUTHOR = 'xsoar-bot'
WELCOME_MSG = 'Thank you for your contribution. Your generosity and caring are unrivaled! Rest assured - our content ' \
'wizard @{selected_reviewer} will very shortly look over your proposed changes.'
WELCOME_MSG_WITH_GFORM = 'Thank you for your contribution. Your generosity and caring are unrivaled! Make sure to ' \
'register your contribution by filling the [Contribution Registration]' \
'(https://forms.gle/XDfxU4E61ZwEESSMA) form, ' \
                         'so our content wizard @{selected_reviewer} will know he can start reviewing the proposed ' \
'changes.'
def determine_reviewer(potential_reviewers: List[str], repo: Repository) -> str:
"""Checks the number of open 'Contribution' PRs that have either been assigned to a user or a review
    was requested from them, for each potential reviewer, and returns the one with the fewest such PRs
Args:
potential_reviewers (List): The github usernames from which a reviewer will be selected
repo (Repository): The relevant repo
Returns:
str: The github username to assign to a PR
"""
label_to_consider = 'contribution'
pulls = repo.get_pulls(state='OPEN')
assigned_prs_per_potential_reviewer = {reviewer: 0 for reviewer in potential_reviewers}
for pull in pulls:
# we only consider 'Contribution' prs when computing who to assign
pr_labels = [label.name.casefold() for label in pull.labels]
if label_to_consider not in pr_labels:
continue
assignees = {assignee.login for assignee in pull.assignees}
requested_reviewers, _ = pull.get_review_requests()
reviewers_info = {requested_reviewer.login for requested_reviewer in requested_reviewers}
combined_list = assignees.union(reviewers_info)
for reviewer in potential_reviewers:
if reviewer in combined_list:
assigned_prs_per_potential_reviewer[reviewer] = assigned_prs_per_potential_reviewer.get(reviewer, 0) + 1
selected_reviewer = sorted(assigned_prs_per_potential_reviewer, key=assigned_prs_per_potential_reviewer.get)[0]
return selected_reviewer
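# Selection sketch (illustrative only; the counts below are hypothetical rather
# than derived from a live Repository): the reviewer with the fewest open
# contribution PRs is chosen, ties resolved by the order of REVIEWERS.
#     >>> counts = {'bziser': 2, 'GuyAfik': 1, 'yucohen': 1}
#     >>> sorted(counts, key=counts.get)[0]
#     'GuyAfik'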
def main():
"""Handles External PRs (PRs from forks)
Performs the following operations:
1. If the external PR's base branch is master we create a new branch and set it as the base branch of the PR.
2. Labels the PR with the "Contribution" label. (Adds the "Hackathon" label where applicable.)
3. Assigns a Reviewer.
4. Creates a welcome comment
Will use the following env vars:
- CONTENTBOT_GH_ADMIN_TOKEN: token to use to update the PR
- EVENT_PAYLOAD: json data from the pull_request event
"""
t = Terminal()
payload_str = get_env_var('EVENT_PAYLOAD')
if not payload_str:
raise ValueError('EVENT_PAYLOAD env variable not set or empty')
payload = json.loads(payload_str)
print(f'{t.cyan}Processing PR started{t.normal}')
org_name = 'demisto'
repo_name = 'content'
gh = Github(get_env_var('CONTENTBOT_GH_ADMIN_TOKEN'), verify=False)
content_repo = gh.get_repo(f'{org_name}/{repo_name}')
pr_number = payload.get('pull_request', {}).get('number')
pr = content_repo.get_pull(pr_number)
# Add 'Contribution' Label to PR
contribution_label = 'Contribution'
pr.add_to_labels(contribution_label)
print(f'{t.cyan}Added "Contribution" label to the PR{t.normal}')
# check base branch is master
if pr.base.ref == 'master':
print(f'{t.cyan}Determining name for new base branch{t.normal}')
branch_prefix = 'contrib/'
new_branch_name = f'{branch_prefix}{pr.head.label.replace(":", "_")}'
        existing_branches = content_repo.get_git_matching_refs(f'heads/{branch_prefix}')
        # str.lstrip removes a set of characters, not a prefix, so strip the 'refs/heads/' prefix explicitly
        potential_conflicting_branch_names = [branch.ref.replace('refs/heads/', '', 1) for branch in existing_branches]
# make sure new branch name does not conflict with existing branch name
while new_branch_name in potential_conflicting_branch_names:
# append or increment digit
if not new_branch_name[-1].isdigit():
new_branch_name += '-1'
else:
digit = str(int(new_branch_name[-1]) + 1)
new_branch_name = f'{new_branch_name[:-1]}{digit}'
master_branch_commit_sha = content_repo.get_branch('master').commit.sha
# create new branch
print(f'{t.cyan}Creating new branch "{new_branch_name}"{t.normal}')
content_repo.create_git_ref(f'refs/heads/{new_branch_name}', master_branch_commit_sha)
# update base branch of the PR
pr.edit(base=new_branch_name)
print(f'{t.cyan}Updated base branch of PR "{pr_number}" to "{new_branch_name}"{t.normal}')
# assign reviewers / request review from
reviewer_to_assign = determine_reviewer(REVIEWERS, content_repo)
pr.add_to_assignees(reviewer_to_assign)
pr.create_review_request(reviewers=[reviewer_to_assign])
print(f'{t.cyan}Assigned user "{reviewer_to_assign}" to the PR{t.normal}')
print(f'{t.cyan}Requested review from user "{reviewer_to_assign}"{t.normal}')
# create welcome comment (only users who contributed through Github need to have that contribution form filled)
message_to_send = WELCOME_MSG if pr.user.login == MARKETPLACE_CONTRIBUTION_PR_AUTHOR else WELCOME_MSG_WITH_GFORM
body = message_to_send.format(selected_reviewer=reviewer_to_assign)
pr.create_issue_comment(body)
print(f'{t.cyan}Created welcome comment{t.normal}')
if __name__ == "__main__":
main()
|