the-stack_0_7810
"""
@name: Modules/House/Lighting/lighting.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2010-2020 by D. Brian Kimmel
@note: Created on Apr 2, 2010
@license: MIT License
@summary: Handle the home lighting system automation.
PyHouse.House.Lighting.
Buttons
Controllers
Lights
Outlets
"""
__updated__ = '2020-02-21'
__version_info__ = (20, 1, 25)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
# Import PyHouse files
from Modules.Core.Config.config_tools import Api as configApi
from Modules.House.Lighting import MODULES, LightingClass
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Lighting ')
class LocalConfig:
"""
"""
m_config = None
m_pyhouse_obj = None
m_schedule_altered = False
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
self.m_config = configApi(p_pyhouse_obj)
self.m_schedule_altered = False
def _update_lighting_from_yaml(self, _p_pyhouse_obj, p_node_yaml):
"""
"""
l_lighting = {}
try:
l_yaml = p_node_yaml['Lighting']
except:
LOG.error('The "Lighting" tag is missing in the "lighting.yaml" file!')
return None
for l_key, l_val in l_yaml.items():
LOG.debug('\n\tKey: {}\n\tValue: {}'.format(l_key, PrettyFormatAny.form(l_val, 'Lighting.Update', 190)))
return l_lighting # For testing.
def load_yaml_config(self, p_pyhouse_obj):
""" Read the lighting.yaml file.
It contains lighting data for the house.
"""
pass
# ----------
def save_yaml_config(self, _p_pyhouse_obj):
"""
"""
LOG.info('Saving Config - Version:{}'.format(__version__))
class Api:
""" Handles all the components of the lighting sub-system.
"""
m_config_tools = None
m_local_config = None
m_pyhouse_obj = None
m_module_apis = None
m_plm = None  # Used by Control(); assumed to be assigned elsewhere when a PLM controller is present.
def __init__(self, p_pyhouse_obj) -> None:
LOG.info("Initialing - Version:{}".format(__version__))
self.m_pyhouse_obj = p_pyhouse_obj
self._add_storage()
self.m_local_config = LocalConfig(p_pyhouse_obj)
self.m_config_tools = configApi(p_pyhouse_obj)
l_path = 'Modules.House.Lighting.'
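# Find which lighting sub-modules (Buttons, Controllers, Lights, Outlets) are
# present and import each one's Api; they are dispatched to by name later on.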
l_modules = self.m_config_tools.find_module_list(MODULES)
self.m_module_apis = self.m_config_tools.import_module_list(l_modules, l_path)
p_pyhouse_obj.House.Lighting._Apis = self.m_module_apis
LOG.info("Initialized - Version:{}".format(__version__))
def _add_storage(self) -> None:
self.m_pyhouse_obj.House.Lighting = LightingClass()
def LoadConfig(self):
LOG.info('Loading all Lighting config files.')
LOG.debug(PrettyFormatAny.form(self.m_module_apis, 'Apis'))
for l_module in self.m_module_apis.values():
l_module.LoadConfig()
LOG.info('Loaded Lighting config files.')
def Start(self):
LOG.info("Starting.")
for l_module in self.m_module_apis.values():
l_module.Start()
LOG.info("Started.")
def SaveConfig(self):
LOG.info('SaveConfig')
for l_module in self.m_module_apis.values():
l_module.SaveConfig()
LOG.info("Saved Lighting Config.")
return
def Stop(self):
for l_module in self.m_module_apis.values():
l_module.Stop()
LOG.info("Stopped.")
def Control(self, p_device_obj, p_controller_obj, p_control):
"""
Insteon specific version of control light
All that Insteon can control is Brightness and Fade Rate.
@param p_controller_obj: optional ==> ControllerInformation
@param p_device_obj: the device being controlled
@param p_control: the idealized light control params
"""
if self.m_plm is None:
LOG.info('No PLM was defined - Quitting.')
return
self.m_plm.Control(p_device_obj, p_controller_obj, p_control)
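# A minimal usage sketch (illustrative only; the l_api / l_light / l_controller /
# l_control names below are assumptions, in practice these objects come from the
# loaded Lighting config and the scheduling code):
#
#     l_api = Api(p_pyhouse_obj)
#     l_api.LoadConfig()
#     l_api.Start()
#     l_api.Control(l_light, l_controller, l_control)  # e.g. dim a light via the PLM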
def MqttDispatch(self, p_msg):
"""
"""
LOG.debug(PrettyFormatAny.form(p_msg, 'Msg'))
p_msg.LogMessage += '\tLighting: {}\n'.format(self.m_pyhouse_obj.House.Name)
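# Peel the next segment off the unprocessed MQTT topic (e.g. 'lights',
# 'controllers') and hand the message to that sub-module's Api if one is loaded.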
l_topic = p_msg.UnprocessedTopic[0].lower()
p_msg.UnprocessedTopic = p_msg.UnprocessedTopic[1:]
if l_topic in self.m_module_apis:
self.m_module_apis[l_topic].MqttDispatch(p_msg)
else:
p_msg.LogMessage += '\tUnknown sub-topic: "{}"'.format(l_topic)
LOG.warning('Unknown lighting Topic: {}\n\tTopic: {}\n\tMessage: {}'.format(l_topic, p_msg.Topic, p_msg.Payload))
LOG.debug(PrettyFormatAny.form(self.m_module_apis, 'Modules'))
# ## END DBK
the-stack_0_7811
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import os
import weakref
from absl.testing import parameterized
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import save as saved_model_save
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
class InterfaceTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAddVariable(self):
obj = NonLayerTrackable()
with self.assertRaisesRegex(ValueError, "do not specify shape"):
trackable_utils.add_variable(
obj, name="shape_specified_twice", shape=[], initializer=1)
constant_initializer = trackable_utils.add_variable(
obj, name="constant_initializer", initializer=1)
with variable_scope.variable_scope("some_variable_scope"):
ones_initializer = trackable_utils.add_variable(
obj,
name="ones_initializer",
shape=[2],
initializer=init_ops.ones_initializer(dtype=dtypes.float32))
bare_initializer = trackable_utils.add_variable(
obj,
name="bare_initializer",
shape=[2, 2],
dtype=dtypes.float64,
initializer=init_ops.zeros_initializer)
# Even in graph mode, there are no naming conflicts between objects, only
# naming conflicts within an object.
other_duplicate = resource_variable_ops.ResourceVariable(
name="duplicate", initial_value=1.)
duplicate = trackable_utils.add_variable(
obj, name="duplicate", shape=[])
with self.assertRaisesRegex(ValueError, "'duplicate'.*already declared"):
trackable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(trackable_utils.gather_initializers(obj))
self.assertEqual("constant_initializer:0", constant_initializer.name)
self.assertEqual(1, self.evaluate(constant_initializer))
self.assertEqual("some_variable_scope/ones_initializer:0",
ones_initializer.name)
self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
self.assertAllEqual([[0., 0.],
[0., 0.]], self.evaluate(bare_initializer))
self.assertEqual("a_variable:0", obj.a_variable.name)
self.assertEqual("duplicate:0", other_duplicate.name)
if context.executing_eagerly():
# When executing eagerly, there's no uniquification of variable names. The
# checkpoint name will be the same.
self.assertEqual("duplicate:0", duplicate.name)
else:
# The .name attribute may be globally influenced, but the checkpoint name
# won't be (tested below).
self.assertEqual("duplicate_1:0", duplicate.name)
named_variables, _, _ = (
graph_view.ObjectGraphView(obj).serialize_object_graph())
expected_checkpoint_names = (
"a_variable/.ATTRIBUTES/VARIABLE_VALUE",
"bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"duplicate/.ATTRIBUTES/VARIABLE_VALUE",
"ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
)
six.assertCountEqual(
self, expected_checkpoint_names, [v.name for v in named_variables])
def testInitNotCalled(self):
class NoInit(tracking.AutoTrackable):
def __init__(self):
pass
# __init__ for Trackable will be called implicitly.
trackable_utils.add_variable(NoInit(), "var", shape=[])
def testShapeDtype(self):
root = tracking.AutoTrackable()
v1 = trackable_utils.add_variable(
root, name="v1", initializer=3., dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v1.dtype)
v2 = trackable_utils.add_variable(
root,
name="v2",
shape=[3],
initializer=init_ops.ones_initializer,
dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v2.dtype)
self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
class _MirroringSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
tensor = self._primary_variable.read_value()
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=tensor,
slice_spec="",
name=name)
super(_MirroringSaveable, self).__init__(
tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
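# Return a factory rather than a built SaveableObject: the checkpointing
# machinery calls the factory at save/restore time, so reading the primary
# variable is deferred until it is actually needed.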
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class CheckpointingTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
checkpoint = trackable_utils.Checkpoint(v=v)
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
self.evaluate(v.non_dep_variable.assign(44.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(45.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
saver = saver_lib.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testAssertConsumedNoCheckpoint(self):
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(v.initializer)
ckpt = trackable_utils.Checkpoint(v=v)
self.evaluate(trackable_utils.gather_initializers(ckpt))
save_path = ckpt.save(file_prefix=prefix)
status = ckpt.restore(save_path=save_path)
del ckpt
status.assert_consumed()
def testDeepCopyCheckpoint(self):
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variables_lib.Variable(1.)
original_ckpt = trackable_utils.Checkpoint(v=v)
copied_ckpt = copy.deepcopy(original_ckpt)
copied_ckpt.v.assign(2.)
self.assertAllClose(1., v)
save_path = copied_ckpt.save(file_prefix=prefix)
original_ckpt.restore(save_path=save_path).assert_consumed()
self.assertAllClose(2., v)
@test_util.run_in_graph_and_eager_modes
def testPassingCheckpointOptions(self):
localhost = "/job:localhost/device:CPU:0"
options = checkpoint_options.CheckpointOptions(
experimental_io_device=localhost)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(v.initializer)
ckpt = trackable_utils.Checkpoint(v=v)
self.evaluate(trackable_utils.gather_initializers(ckpt))
save_path = ckpt.save(file_prefix=prefix, options=options)
status = ckpt.restore(save_path=save_path, options=options)
del ckpt
status.assert_consumed()
# In graph mode, verify that the save and restore ops were set to run on
# localhost.
if not context.executing_eagerly():
for op in ops.get_default_graph().get_operations():
if op.type in ("SaveV2", "RestoreV2"):
self.assertEqual(localhost, op.device)
@test_util.run_in_graph_and_eager_modes
def testFreezing(self):
with test_util.use_gpu():
# Save an object-based checkpoint using a frozen saver
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
self.evaluate(v.assign(3))
# Create the save counter so assert_consumed doesn't complain about it not
# existing in the checkpoint on restore.
self.evaluate(checkpoint.save_counter.assign(12))
saver = trackable_utils.frozen_saver(checkpoint)
with ops.device("cpu:0"):
prefix_tensor = constant_op.constant(prefix)
self.evaluate(saver.save(prefix_tensor))
self.evaluate(v.assign(10))
# Use the frozen saver to restore the same object graph
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore using another frozen saver on an identical object graph
del v, checkpoint, saver
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
saver = trackable_utils.frozen_saver(checkpoint)
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore as an object-based checkpoint
del v, checkpoint, saver
checkpoint = trackable_utils.Checkpoint()
status = checkpoint.restore(prefix)
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
if context.executing_eagerly():
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
self.assertEqual(0, self.evaluate(v))
checkpoint.v = v
status.assert_consumed().run_restore_ops()
self.assertEqual(3, self.evaluate(v))
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
@test_util.run_in_graph_and_eager_modes
def testCustomNumbering(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(step=step)
self.evaluate(step.initializer)
for i in range(5):
path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step)))
expected_suffix = "-%d" % (2 * i,)
if not path.endswith(expected_suffix):
self.fail("%s should have suffix %s" % (path, expected_suffix))
self.evaluate(step.assign_add(2))
def testPartialRestoreWarningAttribute(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
with test.mock.patch.object(logging, "warning") as mock_log:
# Note: Unlike in testPartialRestoreWarningObject, the warning actually
# prints immediately here, since all of the objects have been created
# and there's no deferred restoration sitting around.
partial_root.restore(save_path)
self.assertEqual(3., partial_root.v2.numpy())
del partial_root
self.assertIsNone(weak_partial_root())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v1", messages)
self.assertNotIn("(root).v2", messages)
self.assertIn("expect_partial()", messages)
def testAttributeException(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
status = partial_root.restore(save_path)
with self.assertRaisesRegex(AssertionError,
r"Unused attributes(.|\n)*\(root\).v1"):
status.assert_consumed()
def testSilencePartialWarning(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
weak_v1 = weakref.ref(partial_root.v1)
partial_root.restore(save_path).expect_partial()
self.assertEqual(2., partial_root.v1.numpy())
with test.mock.patch.object(logging, "warning") as mock_log:
del partial_root
self.assertIsNone(weak_partial_root())
self.assertIsNone(weak_v1())
self.assertEmpty(mock_log.call_args_list)
def _get_checkpoint_name(self, name):
root = tracking.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNumberedPath(self):
root = tracking.AutoTrackable()
leaf = tracking.AutoTrackable()
root.leaf = leaf
trackable_utils.add_variable(leaf, name="v", shape=[])
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name)
@test_util.run_in_graph_and_eager_modes
def testLocalNameValidation(self):
root = tracking.AutoTrackable()
leaf = tracking.AutoTrackable()
# Dots are escaped, which avoids conflicts with reserved names.
root._track_trackable(leaf, name=".ATTRIBUTES")
trackable_utils.add_variable(trackable=leaf, name="a", shape=[])
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE",
named_variable.name)
@test_util.run_in_graph_and_eager_modes
def testLateDependencyTracking(self):
class Dependency(tracking.AutoTrackable):
def build(self):
self.var = trackable_utils.add_variable(
self, "var", initializer=0.)
class LateDependencies(trackable_utils.Checkpoint):
def add_dep(self):
self.dep = Dependency()
self.dep.build()
original = LateDependencies()
original.add_dep()
self.evaluate(state_ops.assign(original.dep.var, 123.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = original.save(checkpoint_prefix)
load_into = LateDependencies()
status = load_into.restore(save_path)
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
load_into.add_dep()
status.assert_consumed()
status.assert_existing_objects_matched().run_restore_ops()
self.assertEqual(123., self.evaluate(load_into.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDepAfterVar(self):
class Dependency(tracking.AutoTrackable):
def build(self):
self.var = trackable_utils.add_variable(
self, "var", initializer=0.)
class DepAfterVar(trackable_utils.Checkpoint):
def add_dep(self):
dep = Dependency()
dep.build()
self.dep = dep
dep_after_var = DepAfterVar()
dep_after_var.add_dep()
self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = dep_after_var.save(checkpoint_prefix)
loaded_dep_after_var = DepAfterVar()
status = loaded_dep_after_var.restore(save_path)
loaded_dep_after_var.add_dep()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
@test_util.run_in_graph_and_eager_modes
def testOverlappingRestores(self):
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep = tracking.AutoTrackable()
save_root.dep.var = trackable_utils.add_variable(
save_root.dep, name="var", initializer=0.)
self.evaluate(state_ops.assign(save_root.dep.var, 12.))
first_path = save_root.save(os.path.join(checkpoint_directory, "first"))
self.evaluate(state_ops.assign(save_root.dep.var, 13.))
second_path = save_root.save(os.path.join(checkpoint_directory, "second"))
first_root = trackable_utils.Checkpoint()
second_root = trackable_utils.Checkpoint()
first_status = first_root.restore(first_path)
second_status = second_root.restore(second_path)
load_dep = tracking.AutoTrackable()
load_dep.var = trackable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(13., self.evaluate(load_dep.var))
# Try again with the order of the restore() reversed. The last restore
# determines the final value.
first_root = trackable_utils.Checkpoint()
second_root = trackable_utils.Checkpoint()
second_status = second_root.restore(second_path)
first_status = first_root.restore(first_path)
load_dep = tracking.AutoTrackable()
load_dep.var = trackable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes
def testAmbiguousLoad(self):
# Not OK to split one checkpoint object into two
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep_one = tracking.AutoTrackable()
save_root.dep_two = tracking.AutoTrackable()
dep_three = tracking.AutoTrackable()
save_root.dep_one.dep_three = dep_three
save_root.dep_two.dep_three = dep_three
trackable_utils.add_variable(dep_three, name="var", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(save_root))
save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
load_root = trackable_utils.Checkpoint()
status = load_root.restore(save_path)
load_root.dep_one = tracking.AutoTrackable()
load_root.dep_two = tracking.AutoTrackable()
load_root.dep_one.dep_three = tracking.AutoTrackable()
load_root.dep_two.dep_three = tracking.AutoTrackable()
trackable_utils.add_variable(
load_root.dep_one.dep_three, name="var", initializer=0.)
trackable_utils.add_variable(
load_root.dep_two.dep_three, name="var", initializer=0.)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
@test_util.run_in_graph_and_eager_modes
def testObjectsCombined(self):
# Currently fine to load two checkpoint objects into one Python object
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep_one = tracking.AutoTrackable()
save_root.dep_two = tracking.AutoTrackable()
trackable_utils.add_variable(
save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
trackable_utils.add_variable(
save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
self.evaluate(trackable_utils.gather_initializers(save_root))
save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
load_root = trackable_utils.Checkpoint()
load_root.dep_one = tracking.AutoTrackable()
load_root.dep_two = load_root.dep_one
v1 = trackable_utils.add_variable(
load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
v2 = trackable_utils.add_variable(
load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
status = load_root.restore(
save_path).assert_consumed().assert_existing_objects_matched()
status.run_restore_ops()
self.assertEqual(32., self.evaluate(v1))
self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def testEmptyContainersIgnored(self):
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint(a=[])
path = save_root.save(checkpoint_directory)
load_root = trackable_utils.Checkpoint(b=[])
load_root.dep = []
load_root.dep.append([])
status = load_root.restore(path)
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
@test_util.run_in_graph_and_eager_modes
def testDependencyLoop(self):
# Note: this test creates garbage during eager execution because it
# purposefully creates a reference cycle.
first = trackable_utils.Checkpoint()
second = trackable_utils.Checkpoint()
first.second = second
second.first = first
first.v = trackable_utils.add_variable(
first, "v1", initializer=[3., 1., 4.])
second.v = trackable_utils.add_variable(
second, "v2", initializer=[1., 1., 2., 3.])
self.evaluate(trackable_utils.gather_initializers(first))
checkpoint_directory = self.get_temp_dir()
save_path = first.save(os.path.join(checkpoint_directory, "ckpt"))
# Test deferred loading
first_load = trackable_utils.Checkpoint()
status = first_load.restore(save_path)
second_load = tracking.AutoTrackable()
first_load.second = second_load
second_load.first = first_load
with self.assertRaises(AssertionError):
status.assert_consumed()
first_load.v = trackable_utils.add_variable(
first_load, "v1", shape=[3])
second_load.v = trackable_utils.add_variable(
second_load, "v2", shape=[4])
status.assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
# Test loading when variables have already been created
self.evaluate(first_load.v.assign([2., 7., 1.]))
self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
status = first_load.restore(save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first = trackable_utils.Checkpoint()
first.var1 = variables_lib.Variable(0., name="outside_var")
first.var2 = variables_lib.Variable(0., name="blah")
self.evaluate(first.var1.assign(4.))
self.evaluate(first.var2.assign(8.))
save_path = first.save(checkpoint_prefix)
second = trackable_utils.Checkpoint()
second.var2 = variables_lib.Variable(0., name="blah")
status = second.restore(save_path)
recreated_var1 = variables_lib.Variable(0., name="outside_var")
status.run_restore_ops()
self.assertEqual(8., self.evaluate(second.var2))
self.evaluate(recreated_var1.assign(-2.))
self.assertEqual(-2., self.evaluate(recreated_var1))
second.var1 = recreated_var1
status.run_restore_ops()
self.assertEqual(4., self.evaluate(recreated_var1))
@test_util.run_in_graph_and_eager_modes
def testCheckpointState(self):
# No checkpoints are deleted by default
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
saver = trackable_utils.Checkpoint(obj=obj)
for _ in range(10):
saver.save(checkpoint_prefix)
expected_filenames = ["checkpoint"]
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
@test_util.run_in_graph_and_eager_modes
def testCheckpointStateChangingVarList(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
checkpoint = trackable_utils.Checkpoint(obj=obj)
looped_variables = []
for iteration in range(10):
new_variable = resource_variable_ops.ResourceVariable(iteration)
self.evaluate(new_variable.initializer)
setattr(checkpoint, "var_%d" % iteration, new_variable)
checkpoint.save(checkpoint_prefix)
looped_variables.append(new_variable)
expected_filenames = ["checkpoint"]
# We've copied the saver each time, but checkpoint management should still
# be consistent. Nothing gets deleted.
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
self.assertEqual(
checkpoint_prefix + "-10",
checkpoint_management.latest_checkpoint(checkpoint_directory))
# The checkpoint list only contains the most recent checkpoint, but they're
# all on disk. This means we won't eventually run into proto size limits.
self.assertEqual(
[checkpoint_prefix + "-10"],
(checkpoint_management.get_checkpoint_state(checkpoint_directory)
.all_model_checkpoint_paths))
for v in looped_variables:
self.evaluate(v.assign(314))
checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
self.assertEqual(314, self.evaluate(checkpoint.var_9))
self.assertEqual(314, self.evaluate(checkpoint.var_8))
self.assertEqual(314, self.evaluate(checkpoint.var_6))
self.assertEqual(5, self.evaluate(checkpoint.var_5))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
self.assertEqual(9, self.evaluate(checkpoint.var_9))
self.assertEqual(8, self.evaluate(checkpoint.var_8))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
@test_util.run_in_graph_and_eager_modes
def test_restore_after_adding_empty_trackable_data_structure(self):
model = NonLayerTrackable()
checkpoint = trackable_utils.Checkpoint(model=model)
checkpoint.restore(None).initialize_or_restore()
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
del model, checkpoint
model = NonLayerTrackable()
model.dict = {"a": 1}
model.list = {"b": 1}
checkpoint = trackable_utils.Checkpoint(model=model)
load_status = checkpoint.restore(save_path)
load_status.assert_existing_objects_matched().run_restore_ops()
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_path_str_from_function(self):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
@def_function.function
def _write_checkpoint():
save_path = save_checkpoint.write(checkpoint_prefix)
return save_path
self.evaluate([save_checkpoint.v.initializer])
self.evaluate(_write_checkpoint())
load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.))
# Use read() instead of restore() which allows us to check that all
# existing objects were loaded.
status = load_checkpoint.read(checkpoint_prefix)
status.assert_existing_objects_matched()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(1., self.evaluate(load_checkpoint.v))
self.evaluate(save_checkpoint.v.assign(3.))
self.evaluate(_write_checkpoint())
self.evaluate(save_checkpoint.v.assign(0.))
status = load_checkpoint.read(checkpoint_prefix)
status.assert_existing_objects_matched()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(3., self.evaluate(load_checkpoint.v))
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_path_tensor_from_function(self):
# Same as the previous test, but the path is a tensor not a python string.
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
checkpoint_prefix_tensor = constant_op.constant(checkpoint_prefix)
save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
@def_function.function
def _write_checkpoint(prefix):
save_path = save_checkpoint.write(prefix)
return save_path
self.evaluate([save_checkpoint.v.initializer])
self.evaluate(_write_checkpoint(checkpoint_prefix_tensor))
load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.))
# Use read() instead of restore() which allows us to check that all
# existing objects were loaded.
status = load_checkpoint.read(checkpoint_prefix)
status.assert_existing_objects_matched()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(1., self.evaluate(load_checkpoint.v))
self.evaluate(save_checkpoint.v.assign(3.))
self.evaluate(_write_checkpoint(checkpoint_prefix_tensor))
self.evaluate(save_checkpoint.v.assign(0.))
status = load_checkpoint.read(checkpoint_prefix)
status.assert_existing_objects_matched()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(3., self.evaluate(load_checkpoint.v))
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_path_tensor_does_not_exist_from_function(self):
# Same as the previous test, but the path is a tensor not a python string.
checkpoint_prefix = os.path.join(
self.get_temp_dir(), "DOES_NOT_EXIST", "ckpt")
checkpoint_prefix_tensor = constant_op.constant(checkpoint_prefix)
save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
@def_function.function
def _write_checkpoint(prefix):
save_path = save_checkpoint.write(prefix)
return save_path
self.evaluate([save_checkpoint.v.initializer])
with self.assertRaises(errors_impl.NotFoundError):
self.evaluate(_write_checkpoint(checkpoint_prefix_tensor))
def test_initialize_with_data_structures(self):
checkpoint = trackable_utils.Checkpoint(
a=[variables_lib.Variable(0.), variables_lib.Variable(1.)],
b={"a": variables_lib.Variable(2.), "b": variables_lib.Variable(3.)})
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
load_checkpoint = trackable_utils.Checkpoint(
a=[variables_lib.Variable(4.), variables_lib.Variable(5.)],
b={"a": variables_lib.Variable(6.), "b": variables_lib.Variable(7.)})
load_checkpoint.restore(save_path)
self.assertAllClose(self.evaluate(load_checkpoint.a), [0, 1])
self.assertAllClose(self.evaluate(load_checkpoint.b), {"a": 2, "b": 3})
def _create_trackable(self):
class Model(tracking.AutoTrackable):
def __init__(self):
self.v = variables_lib.Variable(2.)
def __call__(self, x):
return self.v * x
return Model()
def test_initialize_with_root_object(self):
model = self._create_trackable()
input_value = constant_op.constant([[3.]])
expected_output = self.evaluate(model(input_value))
model.deferred_variable = variables_lib.Variable(5.)
checkpoint = trackable_utils.Checkpoint(model)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
new_model = self._create_trackable()
load_checkpoint = trackable_utils.Checkpoint(new_model)
load_checkpoint.restore(save_path)
self.assertAllClose(expected_output, new_model(input_value))
new_model.deferred_variable = variables_lib.Variable(1.)
self.assertEqual(self.evaluate(new_model.deferred_variable), 5)
def test_initialize_with_root_object_and_kwargs(self):
model = self._create_trackable()
model.v.assign(3.)
separate_variable = variables_lib.Variable(5.)
with self.assertRaisesRegex(ValueError, "root.v already exists"):
trackable_utils.Checkpoint(model, v=separate_variable)
checkpoint = trackable_utils.Checkpoint(
model, separate_variable=separate_variable)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
# Case 1: Loading checkpoint with same configuration.
new_model = self._create_trackable()
separate_variable = variables_lib.Variable(1.)
load_checkpoint = trackable_utils.Checkpoint(
new_model, separate_variable=separate_variable)
load_checkpoint.restore(save_path).assert_consumed()
self.assertEqual(self.evaluate(new_model.v), 3)
self.assertEqual(self.evaluate(separate_variable), 5)
self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1)
# Case 2: Loading checkpoint where v and separate_variable are swapped:
# v is not attached to the root, while separate variable is attached to root
new_model = tracking.AutoTrackable()
new_model.separate_variable = variables_lib.Variable(200.)
v = variables_lib.Variable(100.)
load_checkpoint = trackable_utils.Checkpoint(new_model, v=v)
load_checkpoint.restore(save_path).assert_consumed()
self.assertEqual(self.evaluate(v), 3)
self.assertEqual(self.evaluate(new_model.separate_variable), 5)
self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1)
# Case 3: Loading checkpoint where no root object is specified
separate_variable = variables_lib.Variable(200.)
v = variables_lib.Variable(100.)
load_checkpoint = trackable_utils.Checkpoint(
v=v, separate_variable=separate_variable)
load_checkpoint.restore(save_path).assert_consumed()
self.assertEqual(self.evaluate(v), 3)
self.assertEqual(self.evaluate(new_model.separate_variable), 5)
self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1)
def test_checkpoint_saved_model_compatibility(self):
model = self._create_trackable()
input_value = constant_op.constant([[3.]])
expected_output = self.evaluate(model(input_value))
model.deferred_variable = variables_lib.Variable(5.)
saved_model_dir = os.path.join(self.get_temp_dir(), "saved_model")
saved_model_save.save(model, saved_model_dir)
new_model = self._create_trackable()
load_checkpoint = trackable_utils.Checkpoint(new_model)
with self.assertRaisesRegex(
errors_impl.NotFoundError,
"Error when restoring from checkpoint or SavedModel"):
load_checkpoint.restore(saved_model_dir + "no").expect_partial()
load_checkpoint.restore(saved_model_dir).expect_partial()
self.assertAllClose(expected_output, new_model(input_value))
new_model.deferred_variable = variables_lib.Variable(1.)
self.assertEqual(self.evaluate(new_model.deferred_variable), 5)
def test_deferred_dependency_avoids_reference_cycles(self):
# Tests that there are no reference cycles when running garbage collection.
# Python uses reference counts as the primary garbage collector, which will
# not delete and finalize (__del__) objects in a cycle. The deletion is
# eventually triggered by gc, which only runs when the garbage has reached
# a certain threshold.
delete_counter = 0
class TrackableWithDel(tracking.AutoTrackable):
def __del__(self):
nonlocal delete_counter
delete_counter += 1
x = tracking.AutoTrackable()
x.v = variables_lib.Variable(100.)
x.has_del = TrackableWithDel()
checkpoint = trackable_utils.Checkpoint(x)
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
self.assertEqual(delete_counter, 0)
del checkpoint
del x
self.assertEqual(delete_counter, 1)
no_v = tracking.AutoTrackable()
no_v.has_del = TrackableWithDel()
checkpoint = trackable_utils.Checkpoint(no_v)
checkpoint.restore(save_path).expect_partial()
del checkpoint
del no_v
self.assertEqual(delete_counter, 2)
def test_defer_objects_with_values_only(self):
# Tests that deferred dependencies are only added if the node in the
# object graph has children or checkpointed values.
root = tracking.AutoTrackable()
root.branch_with_value = tracking.AutoTrackable()
root.branch_with_value.v = variables_lib.Variable(5.0)
root.branch_no_value = tracking.AutoTrackable()
root.branch_no_value.child = tracking.AutoTrackable()
root.v = variables_lib.Variable(1.0)
checkpoint = trackable_utils.Checkpoint(model=root)
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
new_root = tracking.AutoTrackable()
checkpoint = trackable_utils.Checkpoint(model=new_root)
checkpoint.restore(save_path)
# root should have two nodes with values/children (`branch_with_value`/`v`).
self.assertLen(new_root._deferred_dependencies, 2)
new_root.branch_no_value = tracking.AutoTrackable()
self.assertLen(new_root._deferred_dependencies, 2)
new_root.branch_with_value = tracking.AutoTrackable()
self.assertLen(new_root._deferred_dependencies, 1)
new_root.v = variables_lib.Variable(1.0)
self.assertEmpty(new_root._deferred_dependencies)
class TemplateTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore_nested(self):
def _inner_template():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer())
return v
def _outer_template():
first_inner = template.make_template("i1", _inner_template)
second_inner = template.make_template("i2", _inner_template)
v1 = first_inner()
v2 = second_inner()
v3 = second_inner()
return (first_inner, second_inner), (v1, v2, v3)
with variable_scope.variable_scope("ignored"):
save_template = template.make_template("s1", _outer_template)
save_root = trackable_utils.Checkpoint(my_template=save_template)
(inner_template_one, inner_template_two), _ = save_template()
self.evaluate(inner_template_one.variables[0].assign([20.]))
self.evaluate(inner_template_two.variables[0].assign([25.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _outer_template)
load_root = trackable_utils.Checkpoint(my_template=load_template)
status = load_root.restore(save_path)
(inner_template_one, inner_template_two), (v1, v2, v3) = load_template()
outer_template_dependencies = load_root.my_template._trackable_children()
self.assertLen(outer_template_dependencies, 2)
self.assertDictEqual({"i1": inner_template_one, "i2": inner_template_two},
outer_template_dependencies)
self.assertLen(inner_template_one._trackable_children(), 1)
self.assertIn("v", inner_template_one._trackable_children())
self.assertLen(inner_template_two._trackable_children(), 1)
self.assertIn("v", inner_template_two._trackable_children())
status.assert_consumed().run_restore_ops()
self.assertAllEqual([20.], self.evaluate(v1))
self.assertAllEqual([25.], self.evaluate(v2))
self.assertAllEqual([25.], self.evaluate(v3))
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
the-stack_0_7812
# -*- coding: utf-8 -*-
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from .recurrent import _generate_dropout_mask
from .recurrent import _standardize_args
import numpy as np
import warnings
from ..engine.base_layer import InputSpec, Layer
from ..utils import conv_utils
from ..legacy import interfaces
from ..legacy.layers import Recurrent, ConvRecurrent2D
from .recurrent import RNN
from ..utils.generic_utils import has_arg
class ConvRNN2D(RNN):
"""Base class for convolutional-recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the number of channels of the recurrent state
(which should be the same as the number of channels of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
input_shape: Use this argument to specify the shape of the
input when this layer is the first one in a model.
# Input shape
5D tensor with shape:
`(samples, timesteps, channels, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, rows, cols, channels)` if data_format='channels_last'.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
- if `return_sequences`: 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'.
- else, 4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
- if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
- if functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100, 100, 32)`.
Note that the number of rows and columns should be specified too.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
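# Example
A minimal, illustrative sketch (the import path, shapes and hyper-parameters
here are assumptions chosen for the example, not requirements): wrap a
`ConvLSTM2DCell` from this module in `ConvRNN2D`, much as the `ConvLSTM2D`
layer does internally.
```python
from keras.layers import Input
from keras.models import Model
from keras.layers.convolutional_recurrent import ConvRNN2D, ConvLSTM2DCell

# 10 timesteps of 64x64 single-channel frames (assuming data_format='channels_last')
inputs = Input(shape=(10, 64, 64, 1))
cell = ConvLSTM2DCell(filters=32, kernel_size=(3, 3), padding='same')
# return_sequences=True -> 5D output (samples, 10, 64, 64, 32)
outputs = ConvRNN2D(cell, return_sequences=True)(inputs)
model = Model(inputs, outputs)
```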
"""
def __init__(self, cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if unroll:
raise TypeError('Unrolling isn\'t possible with '
'convolutional RNNs.')
if isinstance(cell, (list, tuple)):
# The StackedConvRNN2DCells isn't implemented yet.
raise TypeError('It is not possible at the moment to '
'stack convolutional cells.')
super(ConvRNN2D, self).__init__(cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs)
self.input_spec = [InputSpec(ndim=5)]
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif cell.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(rows,
cell.kernel_size[0],
padding=cell.padding,
stride=cell.strides[0],
dilation=cell.dilation_rate[0])
cols = conv_utils.conv_output_length(cols,
cell.kernel_size[1],
padding=cell.padding,
stride=cell.strides[1],
dilation=cell.dilation_rate[1])
if cell.data_format == 'channels_first':
output_shape = input_shape[:2] + (cell.filters, rows, cols)
elif cell.data_format == 'channels_last':
output_shape = input_shape[:2] + (rows, cols, cell.filters)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == 'channels_first':
output_shape += [(input_shape[0], cell.filters, rows, cols)
for _ in range(2)]
elif cell.data_format == 'channels_last':
output_shape += [(input_shape[0], rows, cols, cell.filters)
for _ in range(2)]
return output_shape
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:]
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == 'channels_first':
ch_dim = 1
elif self.cell.data_format == 'channels_last':
ch_dim = 3
if not [spec.shape[ch_dim] for spec in self.state_spec] == state_size:
raise ValueError(
'An initial_state was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'However `cell.state_size` is '
'{}'.format([spec.shape for spec in self.state_spec], self.cell.state_size))
else:
if self.cell.data_format == 'channels_first':
self.state_spec = [InputSpec(shape=(None, dim, None, None))
for dim in state_size]
elif self.cell.data_format == 'channels_last':
self.state_spec = [InputSpec(shape=(None, None, None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
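# Convolving this all-zero tensor with an all-zero kernel below is purely a
# shape trick: it produces a zero state with the spatial size and channel
# count the recurrent state needs after the cell's convolution.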
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
initial_state = self.cell.input_conv(initial_state,
K.zeros(tuple(shape)),
padding=self.cell.padding)
# Fix for Theano because it needs
# K.int_shape to work in call() with initial_state.
keras_shape = list(K.int_shape(inputs))
keras_shape.pop(1)
if K.image_data_format() == 'channels_first':
indices = 2, 3
else:
indices = 1, 2
for i, j in enumerate(indices):
keras_shape[j] = conv_utils.conv_output_length(
keras_shape[j],
shape[i],
padding=self.cell.padding,
stride=self.cell.strides[i],
dilation=self.cell.dilation_rate[i])
initial_state._keras_shape = keras_shape
if hasattr(self.cell.state_size, '__len__'):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(
inputs, initial_state, constants, self._num_constants)
if initial_state is None and constants is None:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
self.state_spec = []
for state in initial_state:
try:
shape = K.int_shape(state)
# Fix for Theano
except TypeError:
shape = tuple(None for _ in range(K.ndim(state)))
self.state_spec.append(InputSpec(shape=shape))
additional_specs += self.state_spec
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
self.constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]):
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors')
if K.is_keras_tensor(additional_inputs[0]):
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
full_input_spec = self.input_spec + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(ConvRNN2D, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' +
str(len(initial_state)) +
' initial states.')
timesteps = K.int_shape(inputs)[1]
kwargs = {}
if has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:]
states = states[:-self._num_constants]
return self.cell.call(inputs, states, constants=constants,
**kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
input_length=timesteps)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append((self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
state_shape = self.compute_output_shape(input_shape)
if self.return_state:
state_shape = state_shape[0]
if self.return_sequences:
state_shape = state_shape[:1] + state_shape[2:]
if None in state_shape:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the time dimension by passing a '
'`batch_shape` argument to your Input layer.\n'
'The same thing goes for the number of rows and columns.')
# helper function
def get_tuple_shape(nb_channels):
result = list(state_shape)
if self.cell.data_format == 'channels_first':
result[1] = nb_channels
elif self.cell.data_format == 'channels_last':
result[3] = nb_channels
else:
raise KeyError
return tuple(result)
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros(get_tuple_shape(dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros(get_tuple_shape(self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros(get_tuple_shape(dim)))
else:
K.set_value(self.states[0],
np.zeros(get_tuple_shape(self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' +
str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != get_tuple_shape(dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str(get_tuple_shape(dim)) +
', found shape=' + str(value.shape))
# TODO: consider batch calls to `set_value`.
K.set_value(state, value)
class ConvLSTM2DCell(Layer):
"""Cell class for the ConvLSTM2D layer.
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2DCell, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = (self.filters, self.filters)
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
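        # A single kernel holds the weights of all four LSTM gates (input,
        # forget, cell, output), hence the `* 4` on the output-channel
        # dimension; it is sliced back into per-gate kernels further below.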
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.filters,), *args, **kwargs),
initializers.Ones()((self.filters,), *args, **kwargs),
self.bias_initializer((self.filters * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(shape=(self.filters * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :, :, :self.filters]
self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
self.kernel_f = self.kernel[:, :, :, self.filters: self.filters * 2]
self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters: self.filters * 2]
self.kernel_c = self.kernel[:, :, :, self.filters * 2: self.filters * 3]
self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2: self.filters * 3]
self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.filters]
self.bias_f = self.bias[self.filters: self.filters * 2]
self.bias_c = self.bias[self.filters * 2: self.filters * 3]
self.bias_o = self.bias[self.filters * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
K.ones_like(inputs),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
K.ones_like(states[1]),
self.recurrent_dropout,
training=training,
count=4)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x_i = self.input_conv(inputs_i, self.kernel_i, self.bias_i,
padding=self.padding)
x_f = self.input_conv(inputs_f, self.kernel_f, self.bias_f,
padding=self.padding)
x_c = self.input_conv(inputs_c, self.kernel_c, self.bias_c,
padding=self.padding)
x_o = self.input_conv(inputs_o, self.kernel_o, self.bias_o,
padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i,
self.recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f,
self.recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c,
self.recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o,
self.recurrent_kernel_o)
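        # ConvLSTM gate equations (see the reference in the ConvLSTM2D
        # docstring below), with sigma = `recurrent_activation` and
        # tanh = `activation` under the default settings:
        #   i = sigma(x_i + h_i)                    input gate
        #   f = sigma(x_f + h_f)                    forget gate
        #   c = f * c_tm1 + i * tanh(x_c + h_c)     updated cell state
        #   o = sigma(x_o + h_o)                    output gate
        #   h = o * tanh(c)                         new hidden state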
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h, c]
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(x, w, strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b,
data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(x, w, strides=(1, 1),
padding='same',
data_format=self.data_format)
return conv_out
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2DCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvLSTM2D(ConvRNN2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
# Arguments
filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step
(see [activations](../activations.md)).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
# Input shape
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, channels, rows, cols)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, rows, cols, channels)`
# Output shape
- if `return_sequences`
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- else
            - if data_format='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- if data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
        where output_row and output_col depend on the shape of the filter
        and the padding
# Raises
ValueError: in case of invalid constructor arguments.
# References
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
    The current implementation does not include the feedback loop on the
    cell's output.
"""
@interfaces.legacy_convlstm2d_support
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
cell = ConvLSTM2DCell(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(ConvLSTM2D, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(ConvLSTM2D, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2D, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
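# A minimal usage sketch (illustrative only; it assumes `Sequential` is
# imported from `keras.models`, and the filter count and input shape are
# arbitrary). It stacks a ConvLSTM2D layer on 5D channels_last input
# `(samples, time, rows, cols, channels)` as described in the docstring above:
#
#     model = Sequential()
#     model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
#                          input_shape=(None, 40, 40, 1),
#                          padding='same', return_sequences=True))
#     model.compile(optimizer='adadelta', loss='binary_crossentropy')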
|
the-stack_0_7813 | import asyncio
import json
import logging.config
import os
from types import SimpleNamespace
from aiohttp import web
from utils.middleware import (
app_info_factory,
auth_factory,
data_factory,
logger_factory,
response_factory,
)
import blog.app, homepage.app
# import blog.handler, blog.api
# import homepage.handler, homepage.api
import utils.orm as orm
from utils import coroweb
from utils.jinja_filter import *
from utils.utils import init_jinja2
def init_logging(  # initialize logging configuration
default_path="conf/logging.json", default_level=logging.INFO
):
path = default_path
if os.path.exists(path):
with open(path, "r") as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
async def init(loop):  # initialize and start the servers
init_logging()
with open("conf/conf.json", "r") as f:
configs = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
await orm.create_pool(loop=loop, **configs.db.__dict__)
# app = web.Application(middlewares=[logger_factory, auth_factory, response_factory])
# app.COOKIE_NAME = "Mume"
# app._COOKIE_KEY = configs.session.secret
# app._INVITATION_KEY = configs.session.key
# init_jinja2(
# app,
# os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates"),
# filters={"datetime": datetime_filter},
# )
# app.add_routes(homepage.handler.routes)
# # app.add_routes(homepage.api.routes)
# blog_app = web.Application()
# blog_app.add_routes(blog.handler.routes)
# blog_app.add_routes(blog.api.routes)
# app.router.add_static(
# "/static/",
# os.path.join(os.path.dirname(os.path.abspath(__file__)), "../static"),
# )
# app.add_subapp("/blog", blog_app)
# runner = web.AppRunner(app)
# await runner.setup()
# site = web.TCPSite(runner, "localhost", 9000)
# logging.info("server started at http://localhost:9000")
# await site.start()
homepage_app = homepage.app.init()
runner = web.AppRunner(homepage_app)
await runner.setup()
site = web.TCPSite(runner, "localhost", 9000)
# site = web.UnixSite(runner, "/tmp/Mume_blog.sock")
logging.info("server started at http://localhost:9000")
await site.start()
blog_app = blog.app.init()
runner = web.AppRunner(blog_app)
await runner.setup()
site = web.TCPSite(runner, "localhost", 9001)
# site = web.UnixSite(runner, "/tmp/Mume_blog.sock")
logging.info("server started at http://localhost:9001")
await site.start()
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
the-stack_0_7815 | import yaml
import torch
import torch.nn as nn
import argparse
import pprint
from typing import List, Dict
from pathlib import Path
from tqdm import tqdm
from torch.utils.data import DataLoader
from model import Generator, Discriminator, Vgg19
from dataset import BuildDataset, noise_generate
from visualize import Visualizer
from loss import SPADELossCalculator
from utils import session
class Trainer:
def __init__(self,
config,
outdir,
outdir_fix,
modeldir,
data_path,
sketch_path,
):
self.train_config = config["train"]
self.data_config = config["dataset"]
model_config = config["model"]
self.loss_config = config["loss"]
self.outdir = outdir
self.outdir_fix = outdir_fix
self.modeldir = modeldir
self.dataset = BuildDataset(data_path,
sketch_path,
self.data_config["line_method"],
self.data_config["extension"],
self.data_config["train_size"],
self.data_config["valid_size"],
self.data_config["color_space"],
self.data_config["line_space"]
)
print(self.dataset)
gen = Generator(model_config["generator"]["in_ch"],
self.train_config["latent_dim"])
self.gen, self.gen_opt = self._setting_model_optim(gen,
model_config["generator"])
dis = Discriminator(multi_patterns=model_config["discriminator"]["multi"])
self.dis, self.dis_opt = self._setting_model_optim(dis,
model_config["discriminator"])
self.vgg = Vgg19(requires_grad=False)
self.vgg.cuda()
self.vgg.eval()
self.lossfunc = SPADELossCalculator()
self.visualizer = Visualizer()
self.l_dim = self.train_config["latent_dim"]
@staticmethod
def _setting_model_optim(model: nn.Module,
config: Dict):
model.cuda()
if config["mode"] == "train":
model.train()
elif config["mode"] == "eval":
model.eval()
optimizer = torch.optim.Adam(model.parameters(),
lr=config["lr"],
betas=(config["b1"], config["b2"]))
return model, optimizer
@staticmethod
def _build_dict(loss_dict: Dict[str, float],
epoch: int,
num_epochs: int) -> Dict[str, str]:
report_dict = {}
report_dict["epoch"] = f"{epoch}/{num_epochs}"
for k, v in loss_dict.items():
report_dict[k] = f"{v:.6f}"
return report_dict
@staticmethod
def _valid_prepare(dataset,
validsize: int,
l_dim: int) -> List[torch.Tensor]:
c_val, l_val, m_val, c_fix, l_fix, m_fix = dataset.valid(validsize)
x_val = torch.cat([l_val, m_val], dim=1)
x_fix = torch.cat([l_fix, m_fix], dim=1)
z_fix = noise_generate(validsize, l_dim)
return [x_val, l_val, m_val, c_val], [x_fix, l_fix, m_fix, c_fix], z_fix
def _eval(self,
l_dim: int,
z_fix: torch.Tensor,
iteration: int,
validsize: int,
v_list: List[torch.Tensor],
fix_list: List[torch.Tensor]):
torch.save(self.gen.state_dict(),
f"{self.modeldir}/generator_{iteration}.pt")
torch.save(self.dis.state_dict(),
f"{self.modeldir}/discriminator_{iteration}.pt")
with torch.no_grad():
y_fix = self.gen(z_fix, fix_list[0])
z = noise_generate(validsize, l_dim)
y = self.gen(z, v_list[0])
self.visualizer(fix_list[1:], y_fix,
self.outdir_fix, iteration, validsize)
self.visualizer(v_list[1:], y,
self.outdir, iteration, validsize)
def _iter(self, data):
color, line, mask = data
color = color.cuda()
line = line.cuda()
mask = mask.cuda()
loss = {}
x = torch.cat([line, mask], dim=1)
batchsize = x.size(0)
z = noise_generate(batchsize, self.l_dim)
# Discriminator update
y = self.gen(z, x)
dis_loss = self.loss_config["adv"] * self.lossfunc.adversarial_disloss(self.dis,
y.detach(),
color)
self.dis_opt.zero_grad()
dis_loss.backward()
self.dis_opt.step()
# Generator update
y = self.gen(z, x)
gen_adv_loss = self.loss_config["adv"] * self.lossfunc.adversarial_genloss(self.dis, y)
content_loss = self.loss_config["content"] * self.lossfunc.content_loss(y, color)
pef_loss = self.loss_config["pe"] * self.lossfunc.positive_enforcing_loss(y)
gen_loss = gen_adv_loss + content_loss + pef_loss
self.gen_opt.zero_grad()
gen_loss.backward()
self.gen_opt.step()
loss["loss_adv_dis"] = dis_loss.item()
loss["loss_adv_gen"] = gen_adv_loss.item()
loss["loss_content"] = content_loss.item()
loss["loss_pef"] = pef_loss.item()
return loss
def __call__(self):
iteration = 0
v_list, fix_list, z_fix = self._valid_prepare(self.dataset,
self.train_config["validsize"],
self.l_dim)
for epoch in range(self.train_config["epoch"]):
dataloader = DataLoader(self.dataset,
batch_size=self.train_config["batchsize"],
shuffle=True,
drop_last=True)
with tqdm(total=len(self.dataset)) as pbar:
for index, data in enumerate(dataloader):
iteration += 1
loss_dict = self._iter(data)
report_dict = self._build_dict(loss_dict,
epoch,
self.train_config["epoch"])
pbar.update(self.train_config["batchsize"])
pbar.set_postfix(**report_dict)
if iteration % self.train_config["snapshot_interval"] == 1:
self._eval(self.l_dim,
z_fix,
iteration,
self.train_config["validsize"],
v_list,
fix_list)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SPADE colorization")
parser.add_argument('--session', type=str, default='spade', help="session name")
parser.add_argument('--data_path', type=Path, help="path containing color images")
parser.add_argument('--sketch_path', type=Path, help="path containing sketch images")
args = parser.parse_args()
outdir, outdir_fix, modeldir = session(args.session)
with open("param.yaml", "r") as f:
config = yaml.safe_load(f)
pprint.pprint(config)
trainer = Trainer(config,
outdir,
outdir_fix,
modeldir,
args.data_path,
args.sketch_path)
trainer()
|
the-stack_0_7817 | import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import Sequential
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score
class TrainingModel:
def __init__(self, input_shape):
self.model = Sequential()
self.model.add(Dense(64, activation='relu', input_shape=input_shape))
self.model.add(Dropout(0.3))
self.model.add(Dense(128, activation='relu'))
self.model.add(Dropout(0.3))
self.model.add(Dense(128, activation='relu'))
self.model.add(Dense(1, activation='sigmoid'))
self.model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
def fit(self, data, label):
self.model.fit(data, label, epochs=1, batch_size=128, verbose=0)
    def predict(self, data):
        # `Sequential.predict_classes` was removed in newer TensorFlow
        # releases; thresholding the sigmoid output at 0.5 is equivalent.
        return (self.model.predict(data) > 0.5).astype("int32")
def evaluate(self, X_test, y_test, print_report=True):
y_predicted = self.predict(X_test)
        y_predicted_probs = self.model.predict(X_test)
if print_report:
self.print_report(y_test, y_predicted, y_predicted_probs)
else:
accuracy = accuracy_score(y_test, y_predicted)
report = classification_report(y_test, y_predicted, output_dict=True)
auc_score = roc_auc_score(y_test, y_predicted_probs)
matrix = confusion_matrix(y_test, y_predicted)
return {
'accuracy': accuracy,
'auc_score': auc_score,
**report['weighted avg'],
}
def print_report(self, test, predicted, predicted_probs):
accuracy = accuracy_score(test, predicted)
report = classification_report(test, predicted)
matrix = confusion_matrix(test, predicted)
print('Accuracy score: {:.5f}'.format(accuracy))
print('-' * 20)
print('Confusion Matrix:')
print(matrix)
print('-' * 20)
print(report)
print('-' * 20)
        print('AUC score: {:.5f}'.format(roc_auc_score(test, predicted_probs)))
|
the-stack_0_7820 | import torch
import torch.nn as nn
import torch.nn.functional as F
class RelNMS(nn.Module):
def __init__(self, cfg):
super(RelNMS, self).__init__()
self.fg_iou_threshold = 0.7
self.bg_iou_threshold = 0.3
self.nms_threshold = 0.5
self.top_k_proposals = cfg.RELPN.DPN.NUM_DURATION_PROPOSALS
self.anchor = None
def forward(self, relationness, duration_proposals):
        # NOTE: the proposal scoring / NMS step is not implemented here yet.
        # As a placeholder (assumption), return the inputs unchanged.
        return relationness, duration_proposals
|
the-stack_0_7821 | # Copyright 2020 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with the local dataset cache. Parts of this file is adapted from the AllenNLP library at
https://github.com/allenai/allennlp.
"""
import copy
import fnmatch
import functools
import importlib.util
import io
import json
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import types
from collections import OrderedDict, UserDict
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from functools import partial, wraps
from hashlib import sha256
from itertools import chain
from pathlib import Path
from types import ModuleType
from typing import Any, BinaryIO, ContextManager, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from uuid import uuid4
from zipfile import ZipFile, is_zipfile
import numpy as np
from packaging import version
import requests
from filelock import FileLock
from huggingface_hub import HfFolder, Repository, create_repo, list_repo_files, whoami
from requests.exceptions import HTTPError
from transformers.utils.logging import tqdm
from transformers.utils.versions import importlib_metadata
from . import __version__
from .utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
try:
_torch_version = importlib_metadata.version("torch")
logger.info(f"PyTorch version {_torch_version} available.")
except importlib_metadata.PackageNotFoundError:
_torch_available = False
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
_tf_available = importlib.util.find_spec("tensorflow") is not None
if _tf_available:
candidates = (
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"intel-tensorflow-avx512",
"tensorflow-rocm",
"tensorflow-macos",
)
_tf_version = None
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for pkg in candidates:
try:
_tf_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
_tf_available = _tf_version is not None
if _tf_available:
if version.parse(_tf_version) < version.parse("2"):
logger.info(f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum.")
_tf_available = False
else:
logger.info(f"TensorFlow version {_tf_version} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
_flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
if _flax_available:
try:
_jax_version = importlib_metadata.version("jax")
_flax_version = importlib_metadata.version("flax")
logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
except importlib_metadata.PackageNotFoundError:
_flax_available = False
else:
_flax_available = False
_datasets_available = importlib.util.find_spec("datasets") is not None
try:
# Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version
# AND checking it has an author field in the metadata that is HuggingFace.
_ = importlib_metadata.version("datasets")
_datasets_metadata = importlib_metadata.metadata("datasets")
if _datasets_metadata.get("author", "") != "HuggingFace Inc.":
_datasets_available = False
except importlib_metadata.PackageNotFoundError:
_datasets_available = False
_detectron2_available = importlib.util.find_spec("detectron2") is not None
try:
_detectron2_version = importlib_metadata.version("detectron2")
logger.debug(f"Successfully imported detectron2 version {_detectron2_version}")
except importlib_metadata.PackageNotFoundError:
_detectron2_available = False
_faiss_available = importlib.util.find_spec("faiss") is not None
try:
_faiss_version = importlib_metadata.version("faiss")
logger.debug(f"Successfully imported faiss version {_faiss_version}")
except importlib_metadata.PackageNotFoundError:
try:
_faiss_version = importlib_metadata.version("faiss-cpu")
logger.debug(f"Successfully imported faiss version {_faiss_version}")
except importlib_metadata.PackageNotFoundError:
_faiss_available = False
_coloredlogs_available = importlib.util.find_spec("coloredlogs") is not None
try:
    _coloredlogs_available = importlib_metadata.version("coloredlogs")
    logger.debug(f"Successfully imported coloredlogs version {_coloredlogs_available}")
except importlib_metadata.PackageNotFoundError:
_coloredlogs_available = False
sympy_available = importlib.util.find_spec("sympy") is not None
try:
_sympy_available = importlib_metadata.version("sympy")
logger.debug(f"Successfully imported sympy version {_sympy_available}")
except importlib_metadata.PackageNotFoundError:
_sympy_available = False
_tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None
try:
_tf2onnx_version = importlib_metadata.version("tf2onnx")
logger.debug(f"Successfully imported tf2onnx version {_tf2onnx_version}")
except importlib_metadata.PackageNotFoundError:
_tf2onnx_available = False
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
try:
    _onnx_version = importlib_metadata.version("onnx")
    logger.debug(f"Successfully imported onnx version {_onnx_version}")
except importlib_metadata.PackageNotFoundError:
_onnx_available = False
_scatter_available = importlib.util.find_spec("torch_scatter") is not None
try:
_scatter_version = importlib_metadata.version("torch_scatter")
logger.debug(f"Successfully imported torch-scatter version {_scatter_version}")
except importlib_metadata.PackageNotFoundError:
_scatter_available = False
_pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None
try:
_pytorch_quantization_version = importlib_metadata.version("pytorch_quantization")
logger.debug(f"Successfully imported pytorch-quantization version {_pytorch_quantization_version}")
except importlib_metadata.PackageNotFoundError:
_pytorch_quantization_available = False
_soundfile_available = importlib.util.find_spec("soundfile") is not None
try:
_soundfile_version = importlib_metadata.version("soundfile")
logger.debug(f"Successfully imported soundfile version {_soundfile_version}")
except importlib_metadata.PackageNotFoundError:
_soundfile_available = False
_tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None
try:
_tensorflow_probability_version = importlib_metadata.version("tensorflow_probability")
logger.debug(f"Successfully imported tensorflow-probability version {_tensorflow_probability_version}")
except importlib_metadata.PackageNotFoundError:
_tensorflow_probability_available = False
_timm_available = importlib.util.find_spec("timm") is not None
try:
_timm_version = importlib_metadata.version("timm")
logger.debug(f"Successfully imported timm version {_timm_version}")
except importlib_metadata.PackageNotFoundError:
_timm_available = False
_torchaudio_available = importlib.util.find_spec("torchaudio") is not None
try:
_torchaudio_version = importlib_metadata.version("torchaudio")
logger.debug(f"Successfully imported torchaudio version {_torchaudio_version}")
except importlib_metadata.PackageNotFoundError:
_torchaudio_available = False
_phonemizer_available = importlib.util.find_spec("phonemizer") is not None
try:
_phonemizer_version = importlib_metadata.version("phonemizer")
logger.debug(f"Successfully imported phonemizer version {_phonemizer_version}")
except importlib_metadata.PackageNotFoundError:
_phonemizer_available = False
_pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None
try:
_pyctcdecode_version = importlib_metadata.version("pyctcdecode")
logger.debug(f"Successfully imported pyctcdecode version {_pyctcdecode_version}")
except importlib_metadata.PackageNotFoundError:
_pyctcdecode_available = False
_librosa_available = importlib.util.find_spec("librosa") is not None
try:
_librosa_version = importlib_metadata.version("librosa")
logger.debug(f"Successfully imported librosa version {_librosa_version}")
except importlib_metadata.PackageNotFoundError:
_librosa_available = False
torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
old_default_cache_path = os.path.join(torch_cache_home, "transformers")
# New default cache, shared with the Datasets library
hf_cache_home = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
default_cache_path = os.path.join(hf_cache_home, "transformers")
# Onetime move from the old location to the new one if no ENV variable has been set.
if (
os.path.isdir(old_default_cache_path)
and not os.path.isdir(default_cache_path)
and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ
and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ
and "TRANSFORMERS_CACHE" not in os.environ
):
logger.warning(
"In Transformers v4.0.0, the default path to cache downloaded models changed from "
"'~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have overridden "
"and '~/.cache/torch/transformers' is a directory that exists, we're moving it to "
"'~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should "
"only see this message once."
)
shutil.move(old_default_cache_path, default_cache_path)
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules"
SESSION_ID = uuid4().hex
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", False) in ENV_VARS_TRUE_VALUES
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
_staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES
_default_endpoint = "https://moon-staging.huggingface.co" if _staging_mode else "https://huggingface.co"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", _default_endpoint)
HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}"
# This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
TORCH_FX_REQUIRED_VERSION = version.parse("1.9")
TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION = version.parse("1.8")
_is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False
def is_offline_mode():
return _is_offline_mode
def is_torch_available():
return _torch_available
def is_pyctcdecode_available():
return _pyctcdecode_available
def is_librosa_available():
return _librosa_available
def is_torch_cuda_available():
if is_torch_available():
import torch
return torch.cuda.is_available()
else:
return False
def is_torch_bf16_available():
if not is_torch_available():
return False
import torch
# since currently no utility function is available we build our own.
# some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51
# with additional check for torch version
# to succeed:
# 1. the hardware needs to support bf16 (arch >= Ampere)
# 2. torch >= 1.10 (1.9 should be enough for AMP API has changed in 1.10, so using 1.10 as minimal)
# 3. CUDA >= 11
# 4. torch.autocast exists
# XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's
# really only correct for the 0th gpu (or currently set default device if different from 0)
if not torch.cuda.is_available() or torch.version.cuda is None:
return False
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split(".")[0]) < 11:
return False
if version.parse(torch.__version__) < version.parse("1.10"):
return False
if not hasattr(torch, "autocast"):
return False
return True
def is_torch_tf32_available():
if not is_torch_available():
return False
import torch
if not torch.cuda.is_available() or torch.version.cuda is None:
return False
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split(".")[0]) < 11:
return False
if version.parse(torch.__version__) < version.parse("1.7"):
return False
return True
_torch_fx_available = _torch_onnx_dict_inputs_support_available = False
if _torch_available:
torch_version = version.parse(importlib_metadata.version("torch"))
_torch_fx_available = (torch_version.major, torch_version.minor) == (
TORCH_FX_REQUIRED_VERSION.major,
TORCH_FX_REQUIRED_VERSION.minor,
)
_torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION
def is_torch_fx_available():
return _torch_fx_available
def is_torch_onnx_dict_inputs_support_available():
return _torch_onnx_dict_inputs_support_available
def is_tf_available():
return _tf_available
def is_coloredlogs_available():
return _coloredlogs_available
def is_tf2onnx_available():
return _tf2onnx_available
def is_onnx_available():
return _onnx_available
def is_flax_available():
return _flax_available
def is_torch_tpu_available():
if not _torch_available:
return False
# This test is probably enough, but just in case, we unpack a bit.
if importlib.util.find_spec("torch_xla") is None:
return False
if importlib.util.find_spec("torch_xla.core") is None:
return False
return importlib.util.find_spec("torch_xla.core.xla_model") is not None
def is_datasets_available():
return _datasets_available
def is_detectron2_available():
return _detectron2_available
def is_rjieba_available():
return importlib.util.find_spec("rjieba") is not None
def is_psutil_available():
return importlib.util.find_spec("psutil") is not None
def is_py3nvml_available():
return importlib.util.find_spec("py3nvml") is not None
def is_apex_available():
return importlib.util.find_spec("apex") is not None
def is_faiss_available():
return _faiss_available
def is_scipy_available():
return importlib.util.find_spec("scipy") is not None
def is_sklearn_available():
if importlib.util.find_spec("sklearn") is None:
return False
return is_scipy_available() and importlib.util.find_spec("sklearn.metrics")
def is_sentencepiece_available():
return importlib.util.find_spec("sentencepiece") is not None
def is_protobuf_available():
if importlib.util.find_spec("google") is None:
return False
return importlib.util.find_spec("google.protobuf") is not None
def is_tokenizers_available():
return importlib.util.find_spec("tokenizers") is not None
def is_vision_available():
return importlib.util.find_spec("PIL") is not None
def is_pytesseract_available():
return importlib.util.find_spec("pytesseract") is not None
def is_spacy_available():
return importlib.util.find_spec("spacy") is not None
def is_ftfy_available():
return importlib.util.find_spec("ftfy") is not None
def is_in_notebook():
try:
# Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py
get_ipython = sys.modules["IPython"].get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
if "VSCODE_PID" in os.environ:
raise ImportError("vscode")
return importlib.util.find_spec("IPython") is not None
except (AttributeError, ImportError, KeyError):
return False
def is_scatter_available():
return _scatter_available
def is_pytorch_quantization_available():
return _pytorch_quantization_available
def is_tensorflow_probability_available():
return _tensorflow_probability_available
def is_pandas_available():
return importlib.util.find_spec("pandas") is not None
def is_sagemaker_dp_enabled():
# Get the sagemaker specific env variable.
sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
sagemaker_params = json.loads(sagemaker_params)
if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
def is_sagemaker_mp_enabled():
# Get the sagemaker specific mp parameters from smp_options variable.
smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
smp_options = json.loads(smp_options)
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
mpi_options = json.loads(mpi_options)
if not mpi_options.get("sagemaker_mpi_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
def is_training_run_on_sagemaker():
return "SAGEMAKER_JOB_NAME" in os.environ
def is_soundfile_availble():
return _soundfile_available
def is_timm_available():
return _timm_available
def is_torchaudio_available():
return _torchaudio_available
def is_speech_available():
# For now this depends on torchaudio but the exact dependency might evolve in the future.
return _torchaudio_available
def is_phonemizer_available():
return _phonemizer_available
def torch_only_method(fn):
def wrapper(*args, **kwargs):
if not _torch_available:
raise ImportError(
"You need to install pytorch to use this method or class, "
"or activate it with environment variables USE_TORCH=1 and USE_TF=0."
)
else:
return fn(*args, **kwargs)
return wrapper
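# Illustrative usage sketch (the decorated function below is hypothetical, not
# part of this module): wrapping a callable with `torch_only_method` turns a
# missing-PyTorch situation into the explanatory ImportError above.
#
#     @torch_only_method
#     def count_parameters(model):
#         return sum(p.numel() for p in model.parameters())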
# docstyle-ignore
DATASETS_IMPORT_ERROR = """
{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:
```
pip install datasets
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install datasets
```
then restarting your kernel.
Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current
working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or
that python file if that's the case.
"""
# docstyle-ignore
TOKENIZERS_IMPORT_ERROR = """
{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
```
pip install tokenizers
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install tokenizers
```
"""
# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
that match your environment.
"""
# docstyle-ignore
PROTOBUF_IMPORT_ERROR = """
{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
that match your environment.
"""
# docstyle-ignore
FAISS_IMPORT_ERROR = """
{0} requires the faiss library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones
that match your environment.
"""
# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""
# docstyle-ignore
SKLEARN_IMPORT_ERROR = """
{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
```
pip install -U scikit-learn
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install -U scikit-learn
```
"""
# docstyle-ignore
TENSORFLOW_IMPORT_ERROR = """
{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the
installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
"""
# docstyle-ignore
DETECTRON2_IMPORT_ERROR = """
{0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the
installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones
that match your environment.
"""
# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
"""
# docstyle-ignore
SCATTER_IMPORT_ERROR = """
{0} requires the torch-scatter library but it was not found in your environment. You can install it with pip as
explained here: https://github.com/rusty1s/pytorch_scatter.
"""
# docstyle-ignore
PYTORCH_QUANTIZATION_IMPORT_ERROR = """
{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:
`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`
"""
# docstyle-ignore
TENSORFLOW_PROBABILITY_IMPORT_ERROR = """
{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as
explained here: https://github.com/tensorflow/probability.
"""
# docstyle-ignore
PANDAS_IMPORT_ERROR = """
{0} requires the pandas library but it was not found in your environment. You can install it with pip as
explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.
"""
# docstyle-ignore
PHONEMIZER_IMPORT_ERROR = """
{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:
`pip install phonemizer`
"""
# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip:
`pip install scipy`
"""
# docstyle-ignore
SPEECH_IMPORT_ERROR = """
{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:
`pip install torchaudio`
"""
# docstyle-ignore
TIMM_IMPORT_ERROR = """
{0} requires the timm library but it was not found in your environment. You can install it with pip:
`pip install timm`
"""
# docstyle-ignore
VISION_IMPORT_ERROR = """
{0} requires the PIL library but it was not found in your environment. You can install it with pip:
`pip install pillow`
"""
# docstyle-ignore
PYTESSERACT_IMPORT_ERROR = """
{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:
`pip install pytesseract`
"""
# docstyle-ignore
PYCTCDECODE_IMPORT_ERROR = """
{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:
`pip install pyctcdecode`
"""
BACKENDS_MAPPING = OrderedDict(
[
("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),
("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)),
("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),
("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),
("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)),
("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)),
("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)),
("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
("speech", (is_speech_available, SPEECH_IMPORT_ERROR)),
("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)),
("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
("timm", (is_timm_available, TIMM_IMPORT_ERROR)),
("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
("vision", (is_vision_available, VISION_IMPORT_ERROR)),
("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
]
)
def requires_backends(obj, backends):
if not isinstance(backends, (list, tuple)):
backends = [backends]
name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
if not all(BACKENDS_MAPPING[backend][0]() for backend in backends):
raise ImportError("".join([BACKENDS_MAPPING[backend][1].format(name) for backend in backends]))
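# Illustrative usage sketch (the function name below is hypothetical, not part of this module):
# a callable that needs optional backends can guard itself before doing any work, so users get
# the install hints above instead of a bare ImportError.
#
#     def summarize_table(df):
#         requires_backends(summarize_table, ["pandas", "sklearn"])
#         ...  # safe to use pandas / scikit-learn past this point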
class DummyObject(type):
"""
Metaclass for the dummy objects. Any class inheriting from it will raise the ImportError generated by
`requires_backends` each time a user tries to access any attribute of that class.
"""
def __getattr__(cls, key):
if key.startswith("_"):
return super().__getattr__(cls, key)
requires_backends(cls, cls._backends)
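# Illustrative sketch of how a dummy placeholder is typically declared (the class name is made
# up): any attribute access on it re-raises the backend-specific ImportError from above.
#
#     class SomeTorchOnlyModel(metaclass=DummyObject):
#         _backends = ["torch"]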
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_start_docstrings_to_model_forward(*docstr):
def docstring_decorator(fn):
docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
class_name = f"[`{fn.__qualname__.split('.')[0]}`]"
intro = f" The {class_name} forward method, overrides the `__call__` special method."
note = r"""
<Tip>
Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`]
instance afterwards instead of this since the former takes care of running the pre and post processing steps while
the latter silently ignores them.
</Tip>
"""
fn.__doc__ = intro + note + docstring
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
return fn
return docstring_decorator
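# Minimal sketch of the docstring decorators above (function and text are made up). Decorators
# apply bottom-up, so the final `__doc__` is intro + original docstring + closing note.
#
#     @add_start_docstrings("Shared intro paragraph.\n")
#     @add_end_docstrings("\nShared closing note.")
#     def forward(self, input_ids):
#         """Model-specific middle part of the docstring."""
#         ...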
PT_RETURN_INTRODUCTION = r"""
Returns:
[`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of
`torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
elements depending on the configuration ([`{config_class}`]) and inputs.
"""
TF_RETURN_INTRODUCTION = r"""
Returns:
[`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if
`return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the
configuration ([`{config_class}`]) and inputs.
"""
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
def _convert_output_args_doc(output_args_doc):
"""Convert output_args_doc to display properly."""
# Split output_arg_doc in blocks argument/description
indent = _get_indent(output_args_doc)
blocks = []
current_block = ""
for line in output_args_doc.split("\n"):
# If the indent is the same as the beginning, the line is the name of new arg.
if _get_indent(line) == indent:
if len(current_block) > 0:
blocks.append(current_block[:-1])
current_block = f"{line}\n"
else:
# Otherwise it's part of the description of the current arg.
# We need to remove 2 spaces from the indentation.
current_block += f"{line[2:]}\n"
blocks.append(current_block[:-1])
# Format each block for proper rendering
for i in range(len(blocks)):
blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
return "\n".join(blocks)
def _prepare_output_docstrings(output_type, config_class, min_indent=None):
"""
Prepares the return part of the docstring using `output_type`.
"""
output_docstring = output_type.__doc__
# Remove the head of the docstring to keep the list of args only
lines = output_docstring.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
i += 1
if i < len(lines):
params_docstring = "\n".join(lines[(i + 1) :])
params_docstring = _convert_output_args_doc(params_docstring)
# Add the return introduction
full_output_type = f"{output_type.__module__}.{output_type.__name__}"
intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
intro = intro.format(full_output_type=full_output_type, config_class=config_class)
result = intro + params_docstring
# Apply minimum indent if necessary
if min_indent is not None:
lines = result.split("\n")
# Find the indent of the first nonempty line
i = 0
while len(lines[i]) == 0:
i += 1
indent = len(_get_indent(lines[i]))
# If too small, add indentation to all nonempty lines
if indent < min_indent:
to_add = " " * (min_indent - indent)
lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
result = "\n".join(lines)
return result
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example of single-label classification:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
Example of multi-label classification:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> import torch
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_SPEECH_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
PT_SPEECH_CTC_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> # transcribe speech
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription[0]
{expected_output}
```
```python
>>> with processor.as_target_processor():
... inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids
>>> # compute loss
>>> loss = model(**inputs).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SPEECH_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_ids = torch.argmax(logits, dim=-1).item()
>>> predicted_label = model.config.id2label[predicted_class_ids]
>>> predicted_label
{expected_output}
```
```python
>>> # compute loss - target_label is e.g. "down"
>>> target_label = model.config.id2label[0]
>>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]])
>>> loss = model(**inputs).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SPEECH_FRAME_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate)
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> probabilities = torch.sigmoid(logits[0])
>>> # labels is a one-hot array of shape (num_frames, num_speakers)
>>> labels = (probabilities > 0.5).long()
>>> labels[0].tolist()
{expected_output}
```
"""
PT_SPEECH_XVECTOR_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(
... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True
... )
>>> with torch.no_grad():
... embeddings = model(**inputs).embeddings
>>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()
>>> # the resulting embeddings can be used for cosine similarity-based retrieval
>>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)
>>> similarity = cosine_sim(embeddings[0], embeddings[1])
>>> threshold = 0.7 # the optimal threshold is dataset-dependent
>>> if similarity < threshold:
... print("Speakers are not the same!")
>>> round(similarity.item(), 2)
{expected_output}
```
"""
PT_VISION_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = feature_extractor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
PT_VISION_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = feature_extractor(image, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
{expected_output}
```
"""
PT_SAMPLE_DOCSTRINGS = {
"SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": PT_MASKED_LM_SAMPLE,
"LMHead": PT_CAUSAL_LM_SAMPLE,
"BaseModel": PT_BASE_MODEL_SAMPLE,
"SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
"CTC": PT_SPEECH_CTC_SAMPLE,
"AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
"AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
"AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
"VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
"ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
}
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> input_ids = inputs["input_ids"]
>>> inputs["labels"] = tf.reshape(
... tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))
... )  # Batch size 1
>>> outputs = model(inputs)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> input_dict = tokenizer(question, text, return_tensors="tf")
>>> outputs = model(input_dict)
>>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0])
>>> answer = " ".join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0] + 1])
```
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
>>> outputs = model(inputs)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
TF_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf")
>>> inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
>>> outputs = model(inputs)
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
TF_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs.logits
```
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs.logits
```
"""
TF_SAMPLE_DOCSTRINGS = {
"SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": TF_MASKED_LM_SAMPLE,
"LMHead": TF_CAUSAL_LM_SAMPLE,
"BaseModel": TF_BASE_MODEL_SAMPLE,
}
FLAX_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="jax")
>>> outputs = model(**inputs)
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```
"""
FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
FLAX_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True)
>>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})
>>> logits = outputs.logits
```
"""
FLAX_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
>>> outputs = model(**inputs)
>>> # retrieve logits for the next token
>>> next_token_logits = outputs.logits[:, -1]
```
"""
FLAX_SAMPLE_DOCSTRINGS = {
"SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": FLAX_MASKED_LM_SAMPLE,
"BaseModel": FLAX_BASE_MODEL_SAMPLE,
"LMHead": FLAX_CAUSAL_LM_SAMPLE,
}
def add_code_sample_docstrings(
*docstr,
processor_class=None,
checkpoint=None,
output_type=None,
config_class=None,
mask="[MASK]",
model_cls=None,
modality=None,
expected_output="",
expected_loss="",
):
def docstring_decorator(fn):
# model_class defaults to function's class if not specified otherwise
model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls
if model_class[:2] == "TF":
sample_docstrings = TF_SAMPLE_DOCSTRINGS
elif model_class[:4] == "Flax":
sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
else:
sample_docstrings = PT_SAMPLE_DOCSTRINGS
# putting all kwargs for docstrings in a dict to be used
# with the `.format(**doc_kwargs)`. Note that string might
# be formatted with non-existing keys, which is fine.
doc_kwargs = dict(
model_class=model_class,
processor_class=processor_class,
checkpoint=checkpoint,
mask=mask,
expected_output=expected_output,
expected_loss=expected_loss,
)
if "SequenceClassification" in model_class and modality == "audio":
code_sample = sample_docstrings["AudioClassification"]
elif "SequenceClassification" in model_class:
code_sample = sample_docstrings["SequenceClassification"]
elif "QuestionAnswering" in model_class:
code_sample = sample_docstrings["QuestionAnswering"]
elif "TokenClassification" in model_class:
code_sample = sample_docstrings["TokenClassification"]
elif "MultipleChoice" in model_class:
code_sample = sample_docstrings["MultipleChoice"]
elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
code_sample = sample_docstrings["MaskedLM"]
elif "LMHead" in model_class or "CausalLM" in model_class:
code_sample = sample_docstrings["LMHead"]
elif "CTC" in model_class:
code_sample = sample_docstrings["CTC"]
elif "AudioFrameClassification" in model_class:
code_sample = sample_docstrings["AudioFrameClassification"]
elif "XVector" in model_class and modality == "audio":
code_sample = sample_docstrings["AudioXVector"]
elif "Model" in model_class and modality == "audio":
code_sample = sample_docstrings["SpeechBaseModel"]
elif "Model" in model_class and modality == "vision":
code_sample = sample_docstrings["VisionBaseModel"]
elif "Model" in model_class or "Encoder" in model_class:
code_sample = sample_docstrings["BaseModel"]
elif "ImageClassification" in model_class:
code_sample = sample_docstrings["ImageClassification"]
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
func_doc = (fn.__doc__ or "") + "".join(docstr)
output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
built_doc = code_sample.format(**doc_kwargs)
fn.__doc__ = func_doc + output_doc + built_doc
return fn
return docstring_decorator
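# Illustrative decoration sketch (checkpoint, classes and signature below are placeholders): in a
# modeling file, a task head's `forward` is typically decorated like this so that the matching
# sample above gets appended to its docstring.
#
#     @add_code_sample_docstrings(
#         processor_class="AutoTokenizer",
#         checkpoint="some-org/some-checkpoint",
#         output_type=SequenceClassifierOutput,
#         config_class="SomeConfig",
#     )
#     def forward(self, input_ids=None, labels=None):
#         ...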
def replace_return_docstrings(output_type=None, config_class=None):
def docstring_decorator(fn):
func_doc = fn.__doc__
lines = func_doc.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
indent = len(_get_indent(lines[i]))
lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
func_doc = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, "
f"current docstring is:\n{func_doc}"
)
fn.__doc__ = func_doc
return fn
return docstring_decorator
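# Illustrative sketch (docstring content is made up): the decorated function keeps an empty
# `Returns:` placeholder, which this decorator expands from `output_type.__doc__`.
#
#     @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class="SomeConfig")
#     def forward(self, input_ids=None):
#         r"""
#         labels (`torch.LongTensor`, *optional*):
#             Labels for computing the loss.
#
#         Returns:
#         """
#         ...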
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def hf_bucket_url(
model_id: str, filename: str, subfolder: Optional[str] = None, revision: Optional[str] = None, mirror=None
) -> str:
"""
Resolve a model identifier, a file name, and an optional revision id, to a huggingface.co-hosted url, redirecting
to Cloudfront (a Content Delivery Network, or CDN) for large files.
Cloudfront is replicated over the globe so downloads are way faster for the end user (and it also lowers our
bandwidth costs).
Cloudfront aggressively caches files by default (default TTL is 24 hours); however, this is not an issue here
because we migrated to a git-based versioning system on huggingface.co, so we now store the files on S3/Cloudfront
in a content-addressable way (i.e., the file name is its hash). Using content-addressable filenames means cache
can't ever be stale.
In terms of client-side caching from this library, we base our caching on the objects' ETag. An object's ETag is:
its sha1 if stored in git, or its sha256 if stored in git-lfs. Files cached locally from transformers before v3.5.0
are not shared with those new files, because the cached file's name contains a hash of the url (which changed).
"""
if subfolder is not None:
filename = f"{subfolder}/{filename}"
if mirror:
if mirror in ["tuna", "bfsu"]:
raise ValueError("The Tuna and BFSU mirrors are no longer available. Try removing the mirror argument.")
legacy_format = "/" not in model_id
if legacy_format:
return f"{mirror}/{model_id}-{filename}"
else:
return f"{mirror}/{model_id}/{filename}"
if revision is None:
revision = "main"
return HUGGINGFACE_CO_PREFIX.format(model_id=model_id, revision=revision, filename=filename)
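# Illustrative sketch, assuming the default huggingface.co endpoint baked into
# `HUGGINGFACE_CO_PREFIX` (defined earlier): the resolved URL typically looks like
#
#     hf_bucket_url("bert-base-uncased", "config.json")
#     # -> "https://huggingface.co/bert-base-uncased/resolve/main/config.json"
#     hf_bucket_url("bert-base-uncased", "config.json", revision="v1.0")
#     # -> "https://huggingface.co/bert-base-uncased/resolve/v1.0/config.json"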
def url_to_filename(url: str, etag: Optional[str] = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's,
delimited by a period. If the url ends with .h5 (Keras HDF5 weights), '.h5' is appended to the name so that TF 2.0 can
identify it as an HDF5 file (see
https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
"""
url_bytes = url.encode("utf-8")
filename = sha256(url_bytes).hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
filename += "." + sha256(etag_bytes).hexdigest()
if url.endswith(".h5"):
filename += ".h5"
return filename
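# Sketch of the resulting filename shape (actual hash digests elided):
#
#     url_to_filename(url)                 # -> "<sha256(url)>"
#     url_to_filename(url, etag='"abc"')   # -> "<sha256(url)>.<sha256(etag)>"
#     url_to_filename(h5_url, etag="...")  # -> "<sha256(url)>.<sha256(etag)>.h5"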
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be `None`) stored for *filename*. Raise `EnvironmentError` if *filename* or its
stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError(f"file {cache_path} not found")
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError(f"file {meta_path} not found")
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:
"""
Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url,
etag, size_MB)`. Filenames in `cache_dir` are used to get the metadata for each model; only urls ending with *.bin*
are added.
Args:
cache_dir (`Union[str, Path]`, *optional*):
The cache directory to search for models within. Will default to the transformers cache if unset.
Returns:
List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)`
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
elif isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cached_models = []
for file in os.listdir(cache_dir):
if file.endswith(".json"):
meta_path = os.path.join(cache_dir, file)
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
if url.endswith(".bin"):
size_MB = os.path.getsize(meta_path[: -len(".json")]) / 1e6  # drop the ".json" suffix (str.strip would remove a char set)
cached_models.append((url, etag, size_MB))
return cached_models
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
extract_compressed_file=False,
force_extract=False,
use_auth_token: Union[bool, str, None] = None,
local_files_only=False,
) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file
and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and
then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
force_download: if True, re-download the file even if it's already cached in the cache dir.
resume_download: if True, resume the download if incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
use_auth_token: Optional string or boolean to use as Bearer token for remote files. If True,
will get token from ~/.huggingface.
extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
file into a folder alongside the archive.
force_extract: if True and extract_compressed_file is True and the archive was already extracted,
re-extract the archive and overwrite the folder where it was extracted.
Return:
Local path (string) of file or if networking is off, last version of file cached on disk.
Raises:
In case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
use_auth_token=use_auth_token,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError(f"file {url_or_filename} not found")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError(f"Archive format of {output_path} could not be identified")
return output_path_extracted
return output_path
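# Illustrative usage sketch (the URL below is a placeholder): a remote file is downloaded into
# the cache and its local path returned, while an existing local path is passed through as-is.
#
#     local_config = cached_path("https://huggingface.co/some-org/some-model/resolve/main/config.json")
#     same_path = cached_path("/path/to/local/config.json")  # must already exist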
def define_sagemaker_information():
try:
instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json()
dlc_container_used = instance_data["Image"]
dlc_tag = instance_data["Image"].split(":")[1]
except Exception:
dlc_container_used = None
dlc_tag = None
sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
runs_distributed_training = "sagemaker_distributed_dataparallel_enabled" in sagemaker_params
account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None
sagemaker_object = {
"sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None),
"sm_region": os.getenv("AWS_REGION", None),
"sm_number_gpu": os.getenv("SM_NUM_GPUS", 0),
"sm_number_cpu": os.getenv("SM_NUM_CPUS", 0),
"sm_distributed_training": runs_distributed_training,
"sm_deep_learning_container": dlc_container_used,
"sm_deep_learning_container_tag": dlc_tag,
"sm_account_id": account_id,
}
return sagemaker_object
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
"""
Formats a user-agent string with basic info about a request.
"""
ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_tf_available():
ua += f"; tensorflow/{_tf_version}"
if DISABLE_TELEMETRY:
return ua + "; telemetry/off"
if is_training_run_on_sagemaker():
ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items())
# CI will set this value to True
if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent, dict):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
class RepositoryNotFoundError(HTTPError):
"""
Raised when trying to access a hf.co URL with an invalid repository name, or with a private repo name the user does
not have access to.
"""
class EntryNotFoundError(HTTPError):
"""Raised when trying to access a hf.co URL with a valid repository and revision but an invalid filename."""
class RevisionNotFoundError(HTTPError):
"""Raised when trying to access a hf.co URL with a valid repository but an invalid revision."""
def _raise_for_status(request):
"""
Internal version of `request.raise_for_status()` that will refine a potential HTTPError.
"""
if "X-Error-Code" in request.headers:
error_code = request.headers["X-Error-Code"]
if error_code == "RepoNotFound":
raise RepositoryNotFoundError(f"404 Client Error: Repository Not Found for url: {request.url}")
elif error_code == "EntryNotFound":
raise EntryNotFoundError(f"404 Client Error: Entry Not Found for url: {request.url}")
elif error_code == "RevisionNotFound":
raise RevisionNotFoundError(f"404 Client Error: Revision Not Found for url: {request.url}")
request.raise_for_status()
def http_get(url: str, temp_file: BinaryIO, proxies=None, resume_size=0, headers: Optional[Dict[str, str]] = None):
"""
Download remote file. Do not gobble up errors.
"""
headers = copy.deepcopy(headers)
if resume_size > 0:
headers["Range"] = f"bytes={resume_size}-"
r = requests.get(url, stream=True, proxies=proxies, headers=headers)
_raise_for_status(r)
content_length = r.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
# `tqdm` behavior is determined by `utils.logging.is_progress_bar_enabled()`
# and can be set using `utils.logging.enable/disable_progress_bar()`
progress = tqdm(
unit="B",
unit_scale=True,
unit_divisor=1024,
total=total,
initial=resume_size,
desc="Downloading",
)
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(
url: str,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
use_auth_token: Union[bool, str, None] = None,
local_files_only=False,
) -> Optional[str]:
"""
Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the
path to the cached file.
Return:
Local path (string) of file or if networking is off, last version of file cached on disk.
Raises:
In case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
headers = {"user-agent": http_user_agent(user_agent)}
if isinstance(use_auth_token, str):
headers["authorization"] = f"Bearer {use_auth_token}"
elif use_auth_token:
token = HfFolder.get_token()
if token is None:
raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.")
headers["authorization"] = f"Bearer {token}"
url_to_download = url
etag = None
if not local_files_only:
try:
r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=etag_timeout)
_raise_for_status(r)
etag = r.headers.get("X-Linked-Etag") or r.headers.get("ETag")
# We favor a custom header indicating the etag of the linked resource, and
# we fallback to the regular etag header.
# If we don't have any of those, raise an error.
if etag is None:
raise OSError(
"Distant resource does not have an ETag, we won't be able to reliably ensure reproducibility."
)
# In case of a redirect,
# save an extra redirect on the request.get call,
# and ensure we download the exact atomic version even if it changed
# between the HEAD and the GET (unlikely, but hey).
if 300 <= r.status_code <= 399:
url_to_download = r.headers["Location"]
except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
# Actually raise for those subclasses of ConnectionError
raise
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# Otherwise, our Internet connection is down.
# etag is None
pass
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# etag is None == we don't have a connection or we passed local_files_only.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir), filename.split(".")[0] + ".*")
if not file.endswith(".json") and not file.endswith(".lock")
]
if len(matching_files) > 0:
return os.path.join(cache_dir, matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise FileNotFoundError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
else:
raise ValueError(
"Connection error, and we cannot find the requested files in the cached path."
" Please try again or make sure your Internet connection is on."
)
# From now on, etag is not None.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
# If the download just completed while the lock was activated.
if os.path.exists(cache_path) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager() -> "io.BufferedWriter":
with open(incomplete_path, "ab") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, mode="wb", dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
http_get(url_to_download, temp_file, proxies=proxies, resume_size=resume_size, headers=headers)
logger.info(f"storing {url} in cache at {cache_path}")
os.replace(temp_file.name, cache_path)
# NamedTemporaryFile creates a file with hardwired 0600 perms (ignoring umask), so fixing it.
umask = os.umask(0o666)
os.umask(umask)
os.chmod(cache_path, 0o666 & ~umask)
logger.info(f"creating metadata file for {cache_path}")
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
def get_file_from_repo(
path_or_repo: Union[str, os.PathLike],
filename: str,
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: bool = False,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
):
"""
Tries to locate a file in a local folder and repo, downloads and cache it if necessary.
Args:
path_or_repo (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a model repo on huggingface.co.
- a path to a *directory* potentially containing the file.
filename (`str`):
The name of the file to locate in `path_or_repo`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
`Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the
file does not exist.
Examples:
```python
# Download a tokenizer configuration from huggingface.co and cache.
tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json")
# This model does not have a tokenizer config so the result will be None.
tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json")
```"""
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
path_or_repo = str(path_or_repo)
if os.path.isdir(path_or_repo):
resolved_file = os.path.join(path_or_repo, filename)
return resolved_file if os.path.isfile(resolved_file) else None
else:
resolved_file = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=None)
try:
# Load from URL or cache if already cached
resolved_file = cached_path(
resolved_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
except RepositoryNotFoundError as err:
logger.error(err)
raise EnvironmentError(
f"{path_or_repo} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to "
"pass a token having permission to this repo with `use_auth_token` or log in with "
"`huggingface-cli login` and pass `use_auth_token=True`."
)
except RevisionNotFoundError as err:
logger.error(err)
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
"for this model name. Check the model page at "
f"'https://huggingface.co/{path_or_repo}' for available revisions."
)
except EnvironmentError:
# The repo and revision exist, but the file does not or there was a connection error fetching it.
return None
return resolved_file
def has_file(
path_or_repo: Union[str, os.PathLike],
filename: str,
revision: Optional[str] = None,
mirror: Optional[str] = None,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
):
"""
Checks if a repo contains a given file without downloading it. Works for remote repos and local folders.
<Tip warning={false}>
This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for
this repo, but will return False for regular connection errors.
</Tip>
"""
if os.path.isdir(path_or_repo):
return os.path.isfile(os.path.join(path_or_repo, filename))
url = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=mirror)
headers = {"user-agent": http_user_agent()}
if isinstance(use_auth_token, str):
headers["authorization"] = f"Bearer {use_auth_token}"
elif use_auth_token:
token = HfFolder.get_token()
if token is None:
raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.")
headers["authorization"] = f"Bearer {token}"
r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10)
try:
_raise_for_status(r)
return True
except RepositoryNotFoundError as e:
logger.error(e)
raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.")
except RevisionNotFoundError as e:
logger.error(e)
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
)
except requests.HTTPError:
# We return false for EntryNotFoundError (logical) as well as any connection error.
return False
def get_list_of_files(
path_or_repo: Union[str, os.PathLike],
revision: Optional[str] = None,
use_auth_token: Optional[Union[bool, str]] = None,
local_files_only: bool = False,
) -> List[str]:
"""
Gets the list of files inside `path_or_repo`.
Args:
path_or_repo (`str` or `os.PathLike`):
Can be either the id of a repo on huggingface.co or a path to a *directory*.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
local_files_only (`bool`, *optional*, defaults to `False`):
Whether or not to only rely on local files and not to attempt to download any files.
<Tip warning={true}>
This API is not optimized, so calling it a lot may result in connection errors.
</Tip>
Returns:
`List[str]`: The list of files available in `path_or_repo`.
"""
path_or_repo = str(path_or_repo)
# If path_or_repo is a folder, we just return what is inside (subdirectories included).
if os.path.isdir(path_or_repo):
list_of_files = []
for path, dir_names, file_names in os.walk(path_or_repo):
list_of_files.extend([os.path.join(path, f) for f in file_names])
return list_of_files
# Can't grab the files if we are on offline mode.
if is_offline_mode() or local_files_only:
return []
# Otherwise we grab the token and use the list_repo_files method.
if isinstance(use_auth_token, str):
token = use_auth_token
elif use_auth_token is True:
token = HfFolder.get_token()
else:
token = None
try:
return list_repo_files(path_or_repo, revision=revision, token=token)
except HTTPError as e:
raise ValueError(
f"{path_or_repo} is not a local path or a model identifier on the model Hub. Did you make a typo?"
) from e
class cached_property(property):
"""
Descriptor that mimics @property but caches output in member variable.
From tensorflow_datasets. Built into functools from Python 3.8.
"""
def __get__(self, obj, objtype=None):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
attr = "__cached_" + self.fget.__name__
cached = getattr(obj, attr, None)
if cached is None:
cached = self.fget(obj)
setattr(obj, attr, cached)
return cached
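# Minimal usage sketch (class, property and helper are made up): the first access computes and
# stores the value on the instance, later accesses reuse the cached result.
#
#     class TrainingSetup:
#         @cached_property
#         def device(self):
#             return expensive_device_lookup()  # hypothetical helper, runs only once per instance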
def torch_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_torch_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires PyTorch.")
return wrapper
def tf_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_tf_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires TF.")
return wrapper
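# Illustrative sketch (class and method are made up): decorating a method so that calling it
# without the corresponding framework installed raises a clear ImportError instead of failing later.
#
#     class SomeArguments:
#         @torch_required
#         def to_tensor(self, values):
#             import torch
#
#             return torch.tensor(values)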
def is_torch_fx_proxy(x):
if is_torch_fx_available():
import torch.fx
return isinstance(x, torch.fx.Proxy)
return False
def is_tensor(x):
"""
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
"""
if is_torch_fx_proxy(x):
return True
if is_torch_available():
import torch
if isinstance(x, torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(x, tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(x, (jnp.ndarray, Tracer)):
return True
return isinstance(x, np.ndarray)
def _is_numpy(x):
return isinstance(x, np.ndarray)
def _is_torch(x):
import torch
return isinstance(x, torch.Tensor)
def _is_torch_device(x):
import torch
return isinstance(x, torch.device)
def _is_tensorflow(x):
import tensorflow as tf
return isinstance(x, tf.Tensor)
def _is_jax(x):
import jax.numpy as jnp # noqa: F811
return isinstance(x, jnp.ndarray)
def to_py_obj(obj):
"""
    Convert a TensorFlow tensor, PyTorch tensor, NumPy array or Python list to a Python list.
"""
if isinstance(obj, (dict, UserDict)):
return {k: to_py_obj(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and _is_tensorflow(obj):
return obj.numpy().tolist()
elif is_torch_available() and _is_torch(obj):
return obj.detach().cpu().tolist()
elif is_flax_available() and _is_jax(obj):
return np.asarray(obj).tolist()
elif isinstance(obj, (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def to_numpy(obj):
"""
    Convert a TensorFlow tensor, PyTorch tensor, NumPy array or Python list to a NumPy array.
"""
if isinstance(obj, (dict, UserDict)):
return {k: to_numpy(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return np.array(obj)
elif is_tf_available() and _is_tensorflow(obj):
return obj.numpy()
elif is_torch_available() and _is_torch(obj):
return obj.detach().cpu().numpy()
elif is_flax_available() and _is_jax(obj):
return np.asarray(obj)
else:
return obj
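# Illustrative sketch (not in the original module): both converters recurse through
# dicts and lists, so nested containers of framework tensors come out as plain data.
def _conversion_demo():
    nested = {"ids": np.array([[1, 2], [3, 4]]), "mask": [np.int64(1), np.int64(0)]}
    as_lists = to_py_obj(nested)  # {"ids": [[1, 2], [3, 4]], "mask": [1, 0]}
    as_arrays = to_numpy(nested)  # {"ids": 2x2 ndarray, "mask": array([1, 0])}
    return as_lists, as_arrays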
class ModelOutput(OrderedDict):
"""
Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a
tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
python dictionary.
<Tip warning={true}>
You can't unpack a `ModelOutput` directly. Use the [`~file_utils.ModelOutput.to_tuple`] method to convert it to a
    tuple beforehand.
</Tip>
"""
def __post_init__(self):
class_fields = fields(self)
# Safety and consistency checks
if not len(class_fields):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
first_field = getattr(self, class_fields[0].name)
other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(first_field):
if isinstance(first_field, dict):
iterator = first_field.items()
first_field_iterator = True
else:
try:
iterator = iter(first_field)
first_field_iterator = True
except TypeError:
first_field_iterator = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for element in iterator:
if (
not isinstance(element, (list, tuple))
or not len(element) == 2
or not isinstance(element[0], str)
):
break
setattr(self, element[0], element[1])
if element[1] is not None:
self[element[0]] = element[1]
elif first_field is not None:
self[class_fields[0].name] = first_field
else:
for field in class_fields:
v = getattr(self, field.name)
if v is not None:
self[field.name] = v
def __delitem__(self, *args, **kwargs):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def setdefault(self, *args, **kwargs):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def pop(self, *args, **kwargs):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def update(self, *args, **kwargs):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__(self, k):
if isinstance(k, str):
inner_dict = {k: v for (k, v) in self.items()}
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self, name, value):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(name, value)
super().__setattr__(name, value)
def __setitem__(self, key, value):
# Will raise a KeyException if needed
super().__setitem__(key, value)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(key, value)
def to_tuple(self) -> Tuple[Any]:
"""
Convert self to a tuple containing all the attributes/keys that are not `None`.
"""
return tuple(self[k] for k in self.keys())
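# Illustrative sketch (hypothetical class, not a real transformers output): a minimal
# ModelOutput subclass showing that `None` fields are dropped and that dict-style and
# tuple-style access return the same underlying objects.
from dataclasses import dataclass
@dataclass
class _DemoOutput(ModelOutput):
    logits: Optional[np.ndarray] = None
    hidden_states: Optional[np.ndarray] = None
def _model_output_demo():
    out = _DemoOutput(logits=np.zeros((1, 2)))
    # Only "logits" shows up as a key; hidden_states was left as None.
    return list(out.keys()), out["logits"] is out[0]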
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
def _missing_(cls, value):
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
)
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an
IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
"""
Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
tab-completion in an IDE.
"""
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
JAX = "jax"
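# Illustrative sketch (not in the original module): ExplicitEnum turns an unknown value
# into a readable error that lists the valid choices instead of the terse Enum default.
def _explicit_enum_demo():
    assert PaddingStrategy("longest") is PaddingStrategy.LONGEST
    try:
        PaddingStrategy("left")  # not a valid member
    except ValueError as err:
        return str(err)  # mentions "longest", "max_length" and "do_not_pad"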
class _LazyModule(ModuleType):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
# Very heavily inspired by optuna.integration._IntegrationModule
# https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
super().__init__(name)
self._modules = set(import_structure.keys())
self._class_to_module = {}
for key, values in import_structure.items():
for value in values:
self._class_to_module[value] = key
# Needed for autocompletion in an IDE
self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
self.__file__ = module_file
self.__spec__ = module_spec
self.__path__ = [os.path.dirname(module_file)]
self._objects = {} if extra_objects is None else extra_objects
self._name = name
self._import_structure = import_structure
# Needed for autocompletion in an IDE
def __dir__(self):
result = super().__dir__()
# The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
# they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
for attr in self.__all__:
if attr not in result:
result.append(attr)
return result
def __getattr__(self, name: str) -> Any:
if name in self._objects:
return self._objects[name]
if name in self._modules:
value = self._get_module(name)
elif name in self._class_to_module.keys():
module = self._get_module(self._class_to_module[name])
value = getattr(module, name)
else:
raise AttributeError(f"module {self.__name__} has no attribute {name}")
setattr(self, name, value)
return value
def _get_module(self, module_name: str):
try:
return importlib.import_module("." + module_name, self.__name__)
except Exception as e:
raise RuntimeError(
f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its traceback):\n{e}"
) from e
def __reduce__(self):
return (self.__class__, (self._name, self.__file__, self._import_structure))
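# Illustrative sketch (hypothetical wiring, not from this module): the pattern a package
# __init__.py follows with _LazyModule; constructing it imports nothing, attribute access does.
def _lazy_module_sketch():
    import_structure = {"file_utils": ["ModelOutput"]}  # submodule -> public names
    lazy = _LazyModule("transformers", "transformers/__init__.py", import_structure)
    # __all__ is filled eagerly for IDE autocompletion; the submodule is still unimported.
    return lazy.__all__  # ["file_utils", "ModelOutput"]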
def copy_func(f):
"""Returns a copy of a function f."""
# Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
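# Illustrative sketch (not in the original module): the copy is a distinct function
# object that keeps the original's defaults, name and other metadata.
def _copy_func_demo():
    def greet(name="world"):
        return f"hello {name}"
    clone = copy_func(greet)
    return clone is not greet and clone() == greet() and clone.__name__ == "greet"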
def is_local_clone(repo_path, repo_url):
"""
Checks if the folder in `repo_path` is a local clone of `repo_url`.
"""
# First double-check that `repo_path` is a git repo
if not os.path.exists(os.path.join(repo_path, ".git")):
return False
test_git = subprocess.run("git branch".split(), cwd=repo_path)
if test_git.returncode != 0:
return False
# Then look at its remotes
remotes = subprocess.run(
"git remote -v".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=repo_path,
).stdout
return repo_url in remotes.split()
class PushToHubMixin:
"""
A Mixin containing the functionality to push a model or tokenizer to the hub.
"""
def push_to_hub(
self,
repo_path_or_name: Optional[str] = None,
repo_url: Optional[str] = None,
use_temp_dir: bool = False,
commit_message: Optional[str] = None,
organization: Optional[str] = None,
private: Optional[bool] = None,
use_auth_token: Optional[Union[bool, str]] = None,
**model_card_kwargs
) -> str:
"""
Upload the {object_files} to the 🤗 Model Hub while synchronizing a local clone of the repo in
`repo_path_or_name`.
Parameters:
repo_path_or_name (`str`, *optional*):
Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case
the repository will have the name of that local folder). If not specified, will default to the name
given by `repo_url` and a local directory with that name will be created.
repo_url (`str`, *optional*):
Specify this in case you want to push to an existing repository in the hub. If unspecified, a new
repository will be created in your namespace (unless you specify an `organization`) with `repo_name`.
use_temp_dir (`bool`, *optional*, defaults to `False`):
Whether or not to clone the distant repo in a temporary directory or in `repo_path_or_name` inside the
current working directory. This will slow things down if you are making changes in an existing repo
since you will need to clone the repo before every push.
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"add {object}"`.
organization (`str`, *optional*):
Organization in which you want to push your {object} (you must be a member of this organization).
private (`bool`, *optional*):
Whether or not the repository created should be private (requires a paying subscription).
use_auth_token (`bool` or `str`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`). Will default to `True` if
`repo_url` is not specified.
Returns:
`str`: The url of the commit of your {object} in the given repository.
Examples:
```python
from transformers import {object_class}
{object} = {object_class}.from_pretrained("bert-base-cased")
# Push the {object} to your namespace with the name "my-finetuned-bert" and have a local clone in the
# *my-finetuned-bert* folder.
{object}.push_to_hub("my-finetuned-bert")
# Push the {object} to your namespace with the name "my-finetuned-bert" with no local clone.
{object}.push_to_hub("my-finetuned-bert", use_temp_dir=True)
# Push the {object} to an organization with the name "my-finetuned-bert" and have a local clone in the
# *my-finetuned-bert* folder.
{object}.push_to_hub("my-finetuned-bert", organization="huggingface")
# Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.
{object}.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")
```
"""
if use_temp_dir:
# Make sure we use the right `repo_name` for the `repo_url` before replacing it.
if repo_url is None:
if use_auth_token is None:
use_auth_token = True
repo_name = Path(repo_path_or_name).name
repo_url = self._get_repo_url_from_name(
repo_name, organization=organization, private=private, use_auth_token=use_auth_token
)
repo_path_or_name = tempfile.mkdtemp()
# Create or clone the repo. If the repo is already cloned, this just retrieves the path to the repo.
repo = self._create_or_get_repo(
repo_path_or_name=repo_path_or_name,
repo_url=repo_url,
organization=organization,
private=private,
use_auth_token=use_auth_token,
)
# Save the files in the cloned repo
self.save_pretrained(repo_path_or_name)
if hasattr(self, "history") and hasattr(self, "create_model_card"):
# This is a Keras model and we might be able to fish out its History and make a model card out of it
base_model_card_args = {
"output_dir": repo_path_or_name,
"model_name": Path(repo_path_or_name).name,
}
base_model_card_args.update(model_card_kwargs)
self.create_model_card(**base_model_card_args)
# Commit and push!
url = self._push_to_hub(repo, commit_message=commit_message)
# Clean up! Clean up! Everybody everywhere!
if use_temp_dir:
shutil.rmtree(repo_path_or_name)
return url
@staticmethod
def _get_repo_url_from_name(
repo_name: str,
organization: Optional[str] = None,
private: bool = None,
use_auth_token: Optional[Union[bool, str]] = None,
) -> str:
if isinstance(use_auth_token, str):
token = use_auth_token
elif use_auth_token:
token = HfFolder.get_token()
if token is None:
raise ValueError(
"You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and "
"entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own "
"token as the `use_auth_token` argument."
)
else:
token = None
# Special provision for the test endpoint (CI)
return create_repo(
token,
repo_name,
organization=organization,
private=private,
repo_type=None,
exist_ok=True,
)
@classmethod
def _create_or_get_repo(
cls,
repo_path_or_name: Optional[str] = None,
repo_url: Optional[str] = None,
organization: Optional[str] = None,
private: bool = None,
use_auth_token: Optional[Union[bool, str]] = None,
) -> Repository:
if repo_path_or_name is None and repo_url is None:
raise ValueError("You need to specify a `repo_path_or_name` or a `repo_url`.")
if use_auth_token is None and repo_url is None:
use_auth_token = True
if repo_path_or_name is None:
repo_path_or_name = repo_url.split("/")[-1]
if repo_url is None and not os.path.exists(repo_path_or_name):
repo_name = Path(repo_path_or_name).name
repo_url = cls._get_repo_url_from_name(
repo_name, organization=organization, private=private, use_auth_token=use_auth_token
)
# Create a working directory if it does not exist.
if not os.path.exists(repo_path_or_name):
os.makedirs(repo_path_or_name)
repo = Repository(repo_path_or_name, clone_from=repo_url, use_auth_token=use_auth_token)
repo.git_pull()
return repo
@classmethod
def _push_to_hub(cls, repo: Repository, commit_message: Optional[str] = None) -> str:
if commit_message is None:
if "Tokenizer" in cls.__name__:
commit_message = "add tokenizer"
elif "Config" in cls.__name__:
commit_message = "add config"
else:
commit_message = "add model"
return repo.push_to_hub(commit_message=commit_message)
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
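# Illustrative sketch (not in the original module): with an explicit token and an
# organization, get_full_repo_name is pure string formatting and never calls the Hub.
def _full_repo_name_demo():
    # Without `organization`, the username would be looked up via whoami(token).
    return get_full_repo_name("my-model", organization="my-org", token="hf_dummy_token")  # "my-org/my-model"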
class ContextManagers:
"""
Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers`
in the `fastcore` library.
"""
def __init__(self, context_managers: List[ContextManager]):
self.context_managers = context_managers
self.stack = ExitStack()
def __enter__(self):
for context_manager in self.context_managers:
self.stack.enter_context(context_manager)
def __exit__(self, *args, **kwargs):
self.stack.__exit__(*args, **kwargs)
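# Illustrative sketch (not in the original module): entering several context managers
# at once; the underlying ExitStack unwinds them in reverse order on exit.
def _context_managers_demo(path_a, path_b):
    handles = [open(path_a, "w"), open(path_b, "w")]
    with ContextManagers(handles):
        # Both files are open inside the block and are closed together on exit.
        handles[0].write("a")
        handles[1].write("b")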
|
the-stack_0_7822 | from pathlib import Path
import moderngl_window
from moderngl_window import geometry
class Gradient(moderngl_window.WindowConfig):
title = "Gradient"
resource_dir = (Path(__file__) / "../resources").absolute()
aspect_ratio = None
window_size = 720, 720
resizable = False
samples = 16
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.quad_fs = geometry.quad_fs()
self.gradient_program = self.load_program("gradient_shader.glsl")
self.gradient_program["wnd_size"].value = self.wnd.buffer_size
# change this if you want it to go faster/slower
self.gradient_program["speed"].value = 7.5
def render(self, time: float, frame_time: float) -> None:
self.gradient_program["time"].value = time
self.quad_fs.render(self.gradient_program)
if __name__ == "__main__":
Gradient.run()
|
the-stack_0_7823 | from __future__ import unicode_literals
import re
import sys
import six
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
unix_time_millis,
)
from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
DuplicateTagKeys,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
undo_clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
ACTION_MAP = {
"BUCKET": {
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
        },
        "DELETE": {
            "uploadId": "AbortMultipartUpload",
            "versionId": "DeleteObjectVersion",
            "DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
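# The nested map above resolves (resource type, HTTP method, query parameter) into the
# action name that _set_action() records in self.data["Action"] before the authorization
# check; the "DEFAULT" entry is used when no recognized query parameter is present.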
def parse_key_name(pth):
# strip the first '/' left by urlparse
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
    # Golang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty)
# Python sends a request as url/?delete (treating it as a flag)
# https://github.com/spulec/moto/issues/2937
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
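# Illustrative sketch (not in the original module): the request shapes the helper above
# accepts, using a minimal stand-in for the incoming request object.
def _is_delete_keys_sketch():
    class _FakeRequest:
        query_string = "delete"
    fake = _FakeRequest()
    assert is_delete_keys(fake, "/?delete", "my-bucket")  # Python SDKs send ?delete as a flag
    assert is_delete_keys(fake, "/?delete=", "my-bucket")  # Go SDK sends ?delete= as key=value
    assert is_delete_keys(fake, "/", "my-bucket")  # flag only present in the query string
    assert not is_delete_keys(fake, "/?uploads", "my-bucket")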
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.get_all_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or
# (4) kubernetes host names
return False
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
if isinstance(response, six.string_types):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, six.binary_type):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, six.binary_type):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
def _bucket_response_head(self, bucket_name):
try:
self.backend.get_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
location = bucket.location
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=bucket.rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_bucket_public_access_block(
bucket_name
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
versions = self.backend.get_bucket_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
latest_versions = self.backend.get_bucket_latest_versions(
bucket_name=bucket_name
)
key_list = []
delete_marker_list = []
for version in versions:
if isinstance(version, FakeKey):
key_list.append(version)
else:
delete_marker_list.append(version)
template = self.response_template(S3_BUCKET_GET_VERSIONS)
key_list.sort(key=lambda r: (r.name, -unix_time_millis(r.last_modified)))
return (
200,
{},
template.render(
key_list=key_list,
delete_marker_list=delete_marker_list,
latest_versions=latest_versions,
bucket=bucket,
prefix="",
max_keys=1000,
delimiter="",
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, six.binary_type):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.prefix_query(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
def _set_action(self, action_resource_type, method, querystring):
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token is not None and continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, six.binary_type):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
result_keys, result_folders = self.backend.prefix_query(
bucket, prefix, delimiter
)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
# sort the combination of folders and keys into lexicographical order
all_keys = result_keys + result_folders
all_keys.sort(key=self._get_name)
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
@staticmethod
def _get_name(key):
if isinstance(key, FakeKey):
return key.name
else:
return key
@staticmethod
def _split_truncated_keys(truncated_keys):
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
def _truncate_result(self, result_keys, max_keys):
if len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
# If there is only one rule, xmldict returns just the item
rules = [rules]
self.backend.set_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.set_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.set_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
if region_name == DEFAULT_REGION_NAME:
# us-east-1 has different behavior
new_bucket = self.backend.get_bucket(bucket_name)
else:
raise
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.set_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
bucket = self.backend.get_bucket(bucket_name)
bucket.delete_lifecycle()
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_bucket_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
bucket = self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
f = request.files["file"].stream.read()
if "success_action_redirect" in form:
response_headers["Location"] = form["success_action_redirect"]
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.set_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = []
error_names = []
for object_ in objects:
key_name = object_["Key"]
version_id = object_.get("VersionId", None)
success, _ = self.backend.delete_object(
bucket_name, undo_clean_key_name(key_name), version_id=version_id
)
if success:
deleted_objects.append((key_name, version_id))
else:
error_names.append(key_name)
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, six.string_types):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
# Because we patch the requests library the boto/boto3 API
# requests go through this method but so do
# `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
# Here we deny public access to private files by checking the
# ACL and checking for the mere presence of an Authorization
# header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
# when the data is being passed as a file
if request.files and not body:
for _, value in request.files.items():
body = value.stream.read()
if body is None:
body = b""
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion(version_id)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag != if_match:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(obj=key)
if "tagging" in query:
tags = self.backend.get_key_tags(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.set_part(bucket_name, upload_id, part_number, body)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "acl" in query:
key = self.backend.get_object(bucket_name, key_name)
# TODO: Support the XML-based ACL format
key.set_acl(acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
# Copy key
            # The copy source may include a quoted "?versionId=abc" query string,
            # so we need to parse the unquoted string first.
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, six.binary_type):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_key(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.set_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
# The owner is needed for some reason...
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
def _acl_from_headers(self, headers):
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="([^"]+)"', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = (
0 # Tripwire -- if this is not ever set, then there were no notifications
)
for name, arn_string in notification_fields:
# 1st verify that the proper notification configuration has been passed in (with an ARN that is close
# to being correct -- nothing too complex in the ARN logic):
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
# 2nd, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, bucket_name, query, key_name):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName("Part")
prev = 0
for p in ps:
pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
def _key_response_post(self, request, body, bucket_name, query, key_name):
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart.id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
upload_id = query["uploadId"][0]
key = self.backend.complete_multipart(bucket_name, upload_id, body)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
def _invalid_headers(self, url, headers):
"""
Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False
"""
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False
S3ResponseInstance = ResponseObject(s3_backend)
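# Minimal sketch (not an original part of moto): how the header/URL consistency
# check in ResponseObject._invalid_headers behaves. The presigned URL and the
# header values below are made-up examples.
def _example_invalid_headers_check():
    url = "https://bucket.s3.amazonaws.com/file.txt?content-type=app%2Fjson&Signature=xyz"
    # The URL-embedded content-type matches the header, so nothing is flagged.
    assert not S3ResponseInstance._invalid_headers(url, {"Content-Type": "app/json"})
    # A mismatched Content-Type header is reported as invalid.
    assert S3ResponseInstance._invalid_headers(url, {"Content-Type": "text/plain"})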
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }}</NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>
<IsTruncated>false</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>False</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketEncryptionStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
</Rule>
{% endfor %}
</BucketEncryptionStatus>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
the-stack_0_7824
import json
import os
from convlab2.util.multiwoz.state import default_state
from convlab2.dst.rule.multiwoz.dst_util import normalize_value
from convlab2.dst.dst import DST
from convlab2.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA
class RuleDST(DST):
"""Rule based DST which trivially updates new values from NLU result to states.
Attributes:
state(dict):
Dialog state. Function ``convlab2.util.multiwoz.state.default_state`` returns a default state.
value_dict(dict):
It helps check whether ``user_act`` has correct content.
"""
def __init__(self):
DST.__init__(self)
self.state = default_state()
path = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
path = os.path.join(path, 'data/multiwoz/value_dict.json')
self.value_dict = json.load(open(path))
def update(self, user_act=None):
"""
update belief_state, request_state
:param user_act:
:return:
"""
self.state['user_action'] = user_act
for intent, domain, slot, value in user_act:
domain = domain.lower()
intent = intent.lower()
if domain in ['unk', 'general', 'booking']:
continue
if intent == 'inform':
k = REF_SYS_DA[domain.capitalize()].get(slot, slot)
if k is None:
continue
try:
assert domain in self.state['belief_state']
except:
raise Exception('Error: domain <{}> not in new belief state'.format(domain))
domain_dic = self.state['belief_state'][domain]
assert 'semi' in domain_dic
assert 'book' in domain_dic
if k in domain_dic['semi']:
nvalue = normalize_value(self.value_dict, domain, k, value)
self.state['belief_state'][domain]['semi'][k] = nvalue
elif k in domain_dic['book']:
self.state['belief_state'][domain]['book'][k] = value
elif k.lower() in domain_dic['book']:
self.state['belief_state'][domain]['book'][k.lower()] = value
elif k == 'trainID' and domain == 'train':
self.state['belief_state'][domain]['book'][k] = normalize_value(self.value_dict, domain, k, value)
else:
# raise Exception('unknown slot name <{}> of domain <{}>'.format(k, domain))
with open('unknown_slot.log', 'a+') as f:
f.write('unknown slot name <{}> of domain <{}>\n'.format(k, domain))
elif intent == 'request':
k = REF_SYS_DA[domain.capitalize()].get(slot, slot)
if domain not in self.state['request_state']:
self.state['request_state'][domain] = {}
if k not in self.state['request_state'][domain]:
self.state['request_state'][domain][k] = 0
return self.state
def init_session(self):
"""Initialize ``self.state`` with a default state, which ``tatk.util.multiwoz.state.default_state`` returns."""
self.state = default_state()
if __name__ == '__main__':
# from tatk.dst.rule.multiwoz import RuleDST
dst = RuleDST()
# Action is a list of quadruples: [intent, domain, slot, value].
# The intent may be "Inform" or "Request".
# The domain may be one of ('Attraction', 'Hospital', 'Booking', 'Hotel', 'Restaurant', 'Taxi', 'Train', 'Police').
# For example, in the action below the first quadruple has intent "Inform" and domain "Hotel",
# with slot "Area" and value "east"; the second informs that slot "Stars" has value "4".
action = [
["Inform", "Hotel", "Area", "east"],
["Inform", "Hotel", "Stars", "4"]
]
# method `update` updates the attribute `state` of tracker, and returns it.
state = dst.update(action)
assert state == dst.state
assert state == {'user_action': [["Inform", "Hotel", "Area", "east"], ["Inform", "Hotel", "Stars", "4"]],
'system_action': [],
'belief_state': {'police': {'book': {'booked': []}, 'semi': {}},
'hotel': {'book': {'booked': [], 'people': '', 'day': '', 'stay': ''},
'semi': {'name': '',
'area': 'east',
'parking': '',
'pricerange': '',
'stars': '4',
'internet': '',
'type': ''}},
'attraction': {'book': {'booked': []},
'semi': {'type': '', 'name': '', 'area': ''}},
'restaurant': {'book': {'booked': [], 'people': '', 'day': '', 'time': ''},
'semi': {'food': '', 'pricerange': '', 'name': '', 'area': ''}},
'hospital': {'book': {'booked': []}, 'semi': {'department': ''}},
'taxi': {'book': {'booked': []},
'semi': {'leaveAt': '',
'destination': '',
'departure': '',
'arriveBy': ''}},
'train': {'book': {'booked': [], 'people': ''},
'semi': {'leaveAt': '',
'destination': '',
'day': '',
'arriveBy': '',
'departure': ''}}},
'request_state': {},
'terminated': False,
'history': []}
# Please call `init_session` before a new dialog. This initializes the attribute `state` of the tracker
# with a default state, which `convlab2.util.multiwoz.state.default_state` returns. You needn't call it
# before the first dialog, because the tracker gets a default state in its constructor.
dst.init_session()
action = [["Inform", "Train", "Arrive", "19:45"]]
state = dst.update(action)
assert state == {'user_action': [["Inform", "Train", "Arrive", "19:45"]],
'system_action': [],
'belief_state': {'police': {'book': {'booked': []}, 'semi': {}},
'hotel': {'book': {'booked': [], 'people': '', 'day': '', 'stay': ''},
'semi': {'name': '',
'area': '',
'parking': '',
'pricerange': '',
'stars': '',
'internet': '',
'type': ''}},
'attraction': {'book': {'booked': []},
'semi': {'type': '', 'name': '', 'area': ''}},
'restaurant': {'book': {'booked': [], 'people': '', 'day': '', 'time': ''},
'semi': {'food': '', 'pricerange': '', 'name': '', 'area': ''}},
'hospital': {'book': {'booked': []}, 'semi': {'department': ''}},
'taxi': {'book': {'booked': []},
'semi': {'leaveAt': '',
'destination': '',
'departure': '',
'arriveBy': ''}},
'train': {'book': {'booked': [], 'people': ''},
'semi': {'leaveAt': '',
'destination': '',
'day': '',
'arriveBy': '19:45',
'departure': ''}}},
'request_state': {},
'terminated': False,
'history': []}
the-stack_0_7825
"""Utility functions to handle downloaded files."""
import glob
import os
import pathlib
from hashlib import md5
def get_next_name(file_path: str) -> str:
"""
Get next available name to download file.
Parameters
----------
file_path: str
Absolute path of the file for which the next available name should be generated.
Returns
-------
str
Absolute path of the next available name for the file.
"""
posix_path = pathlib.Path(file_path)
counter: int = 1
new_file_name: str = os.path.join("{0}", "{1}-copy{2}{3}")
while os.path.isfile(
new_file_name.format(
posix_path.parent,
posix_path.stem,
counter,
"".join(posix_path.suffixes),
)
):
counter += 1
return new_file_name.format(
posix_path.parent,
posix_path.stem,
counter,
"".join(posix_path.suffixes),
)
def manage_duplicate_file(file_path: str):
"""
Check if a file is duplicate.
Compare the md5 of files with copy name pattern
and remove if the md5 hash is same.
Parameters
----------
file_path: str
Absolute path of the file for which duplicates need to be managed.
Returns
-------
str
Absolute path of the duplicate managed file.
"""
# pylint: disable = R1732
posix_path = pathlib.Path(file_path)
file_base_name: str = "".join(posix_path.stem.split("-copy")[0])
name_pattern: str = f"{posix_path.parent}/{file_base_name}*"
# Reason for using `str.translate()`
# https://stackoverflow.com/q/22055500/6730439
old_files: list = glob.glob(
name_pattern.translate({ord("["): "[[]", ord("]"): "[]]"})
)
if file_path in old_files:
old_files.remove(file_path)
current_file_md5: str = md5(open(file_path, "rb").read()).hexdigest()
for old_file_path in old_files:
old_file_md5: str = md5(open(old_file_path, "rb").read()).hexdigest()
if current_file_md5 == old_file_md5:
os.remove(file_path)
return old_file_path
return file_path
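# Illustrative usage sketch (not part of the original module). It exercises both
# helpers against a throwaway temporary directory; the file name "report.txt"
# is an arbitrary example.
if __name__ == "__main__":
    import shutil
    import tempfile

    demo_dir = tempfile.mkdtemp()
    try:
        original = os.path.join(demo_dir, "report.txt")
        with open(original, "w") as handle:
            handle.write("same content")

        # First free copy name, e.g. "<demo_dir>/report-copy1.txt".
        copy_path = get_next_name(original)
        print(copy_path)

        # An identical copy is detected via md5 and removed again; the path of
        # the surviving duplicate is returned.
        shutil.copyfile(original, copy_path)
        print(manage_duplicate_file(copy_path))
    finally:
        shutil.rmtree(demo_dir)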
the-stack_0_7826
import pytest
from nyr.interpreter.interpreter import Interpreter
from nyr.parser.parser import Parser
def testUninitializedVariable():
ast = Parser().parse("let x;")
env = Interpreter().interpret(ast)
assert env == {'x': None}
@pytest.mark.parametrize(
("code"), (
pytest.param("let x; let y;", id="seperate"),
pytest.param("let x, y;", id="merged"),
),
)
def testMultipleUninitializedVariables(code: str):
ast = Parser().parse(code)
env = Interpreter().interpret(ast)
assert env == {"x": None, "y": None}
@pytest.mark.parametrize(
("code", "expected"), (
pytest.param(
'let string = "I am a string!";',
{"string": "I am a string!"},
id="string",
),
pytest.param(
"let int = 42;",
{"int": 42},
id="int",
),
pytest.param(
"let float = 3.14159;",
{"float": 3.14159},
id="float",
),
pytest.param(
"let bool = false;",
{"bool": False},
id="bool",
),
pytest.param(
"let none = null;",
{"none": None},
id="none",
),
),
)
def testTypeAssignments(code: str, expected):
ast = Parser().parse(code)
env = Interpreter().interpret(ast)
assert env == expected
def testMixedInitialize():
ast = Parser().parse("let x, y = 7, z;")
env = Interpreter().interpret(ast)
assert env == {
'x': None,
'y': 7,
'z': None,
}
def testAssignWithBinaryExpr():
ast = Parser().parse("""
let x = 4;
let y = 7;
let z = x + y;
""")
env = Interpreter().interpret(ast)
assert env == {
"x": 4,
"y": 7,
"z": 11,
}
@pytest.mark.parametrize(
("code"), (
pytest.param("let x; let x;", id="seperate"),
pytest.param("let x, x;", id="merged"),
),
)
def testVarExists(code: str):
ast = Parser().parse(code)
# FIXME: Wrong error returned from code
with pytest.raises(Exception, match='Unknown variable "None"'):
Interpreter().interpret(ast)
def testVarNotExists():
ast = Parser().parse("x = 4;")
with pytest.raises(Exception, match='Variable "x" does not exist in available scope'):
Interpreter().interpret(ast)
the-stack_0_7827
import abc
import glob
import os
from typing import (Any, Dict, List, Optional)
import importlib
import redis
from pkg_resources import resource_filename
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class DevEnvMonitor(abc.ABC):
"""Class to monitor a development environments for the need to start Activity Monitor Instances"""
@staticmethod
def get_dev_env_name() -> List[str]:
"""Method to return a list of name(s) of the development environment that this class interfaces with.
Should be the value used in the `name` attribute of the Dev Env Environment Component"""
raise NotImplementedError
@abc.abstractmethod
def run(self, dev_env_monitor_key: str) -> None:
"""Method called in a periodically scheduled async worker that should check the dev env and manage Activity
Monitor Instances as needed
Args:
dev_env_monitor_key(str): The unique string used as the key in redis to track this DevEnvMonitor instance
"""
raise NotImplementedError
class DevEnvMonitorManager(object):
"""Class to manage creating DevEnvMonitor instances"""
def __init__(self, database=1) -> None:
# Get available monitor classes from redis or register available classes
# Redis is used to store this information to reduce the overhead of re-registering all classes every time
# DevEnvMonitorManager is instantiated, which happens often, both in the LabManager API and in async workers
redis_conn = redis.Redis(db=database)
data = redis_conn.hgetall('##AVAILABLE_DEV_ENV_MONITOR_CLASSES##')
if data:
# Load the class info from redis
# TODO: verify if loading from imports is actually faster than using this redis cache implementation
result_dict = {}
for key in data:
# Decode from bytes to strings if needed
value = data[key]
if type(key) == bytes:
key = key.decode('utf-8')
if type(value) == bytes:
value = value.decode('utf-8')
module_name, class_name = value.rsplit('.', 1)
# load the module
m = importlib.import_module(module_name)
# get the class and store
result_dict[key] = getattr(m, class_name)
self.available_monitors = result_dict
else:
self.available_monitors = self._register_monitor_classes()
for key in self.available_monitors:
logger.info("Registering DevEnvMonitor Class: {} for {}".format(self.available_monitors[key], key))
redis_conn.hset('##AVAILABLE_DEV_ENV_MONITOR_CLASSES##', key,
"{}.{}".format(self.available_monitors[key].__module__,
self.available_monitors[key].__name__))
def _register_monitor_classes(self) -> Dict[str, Any]:
"""Private method to register all available Dev Env Monitor classes
Creates a dictionary of the form {development environment name: monitor class name, ...}
Returns:
dict
"""
# Dynamically find files to import that start with monitor_*
monitor_dir = os.path.join(resource_filename('gtmcore', 'activity'), 'monitors')
for module_name in glob.glob('{}{}monitor_*'.format(monitor_dir, os.path.sep)):
filename = os.path.basename(module_name)
importlib.import_module("gtmcore.activity.monitors.{}".format(filename.split(".py")[0]))
all_monitor_classes = [cls for cls in DevEnvMonitor.__subclasses__()]
register_data: Dict[str, Any] = {}
for cls in all_monitor_classes:
dev_env_name = cls.get_dev_env_name()
if any([(name in register_data) for name in dev_env_name]):
msg = "Two Development Environment Monitors attempting to register for a single Dev Env:"
msg = "{}\n Dev Env: {}".format(msg, dev_env_name)
msg = "{}\n Class 1: {}".format(msg, [register_data[n] for n in dev_env_name])
msg = "{}\n Class 2: {}".format(msg, cls)
raise ValueError(msg)
# New Dev Env. Register it for all supported dev envs
for name in dev_env_name:
register_data[name] = cls
return register_data
def is_available(self, dev_env_name: str) -> bool:
"""Method to test if a dev env monitor is available for a given development environment name
Args:
dev_env_name(str): Name of a development environment to monitor
Returns:
bool
"""
return dev_env_name in self.available_monitors
def get_monitor_instance(self, dev_env_name: str) -> Optional[DevEnvMonitor]:
"""Method to get a Dev Env Monitor instance based on the Dev Env name
Args:
dev_env_name(str): Name of a development environment to monitor
Returns:
DevEnvMonitor
"""
if self.is_available(dev_env_name):
return self.available_monitors[dev_env_name]()
else:
return None
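# Illustrative sketch (not part of the original module): what a concrete monitor
# could look like. The "hypothetical-ide" name and the no-op run() body are
# assumptions for demonstration only. The class is built inside a factory
# function so that importing this module does not add it to
# DevEnvMonitor.__subclasses__() and hence to the registered monitors.
def _example_monitor_class():
    class ExampleDevEnvMonitor(DevEnvMonitor):
        """Toy monitor that only logs when polled."""

        @staticmethod
        def get_dev_env_name() -> List[str]:
            # Must match the `name` attribute of a Dev Env Environment Component.
            return ["hypothetical-ide"]

        def run(self, dev_env_monitor_key: str) -> None:
            # A real monitor would inspect the dev env here and start or stop
            # Activity Monitor instances as needed.
            logger.info("Polled dev env monitor: {}".format(dev_env_monitor_key))

    return ExampleDevEnvMonitor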
the-stack_0_7828
"""Module for encoding and decoding length delimited fields"""
# Copyright (c) 2018-2022 NCC Group Plc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import copy
import sys
import six
import logging
from google.protobuf.internal import wire_format, encoder, decoder
import blackboxprotobuf.lib
from blackboxprotobuf.lib.types import varint
from blackboxprotobuf.lib.exceptions import (
EncoderException,
DecoderException,
TypedefException,
)
def encode_string(value):
"""Encode a string as a length delimited byte array"""
try:
value = six.ensure_text(value)
except TypeError as exc:
six.raise_from(
EncoderException("Error encoding string to message: %r" % value), exc
)
return encode_bytes(value)
def encode_bytes(value):
"""Encode a length delimited byte array"""
if isinstance(value, bytearray):
value = bytes(value)
try:
value = six.ensure_binary(value)
except TypeError as exc:
six.raise_from(
EncoderException("Error encoding bytes to message: %r" % value), exc
)
encoded_length = varint.encode_varint(len(value))
return encoded_length + value
def decode_bytes(buf, pos):
"""Decode a length delimited bytes array from buf"""
length, pos = varint.decode_varint(buf, pos)
end = pos + length
try:
return buf[pos:end], end
except IndexError as exc:
six.raise_from(
DecoderException(
(
"Error decoding bytes. Decoded length %d is longer than bytes"
" available %d"
)
% (length, len(buf) - pos)
),
exc,
)
def encode_bytes_hex(value):
"""Encode a length delimited byte array represented by a hex string"""
try:
return encode_bytes(binascii.unhexlify(value))
except (TypeError, binascii.Error) as exc:
six.raise_from(
EncoderException("Error encoding hex bytestring %s" % value), exc
)
def decode_bytes_hex(buf, pos):
"""Decode a length delimited byte array from buf and return a hex encoded string"""
value, pos = decode_bytes(buf, pos)
return binascii.hexlify(value), pos
def decode_string(value, pos):
"""Decode a length delimited byte array as a string"""
length, pos = varint.decode_varint(value, pos)
end = pos + length
try:
# backslash escaping isn't reversible easily
return value[pos:end].decode("utf-8"), end
except (TypeError, UnicodeDecodeError) as exc:
six.raise_from(
DecoderException("Error decoding UTF-8 string %s" % value[pos:end]), exc
)
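# Quick sketch (not part of the original module) of the wire format the helpers
# above produce and consume: a varint length prefix followed by the raw bytes.
def _example_length_delimited_round_trip():
    encoded = encode_bytes(b"hi")
    assert encoded == b"\x02hi"  # 0x02 length prefix + payload
    assert decode_bytes(encoded, 0) == (b"hi", 3)

    # Strings are UTF-8 encoded before receiving the same length prefix.
    assert encode_string(u"hi") == b"\x02hi"
    assert decode_string(encoded, 0) == (u"hi", 3)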
def encode_message(data, config, typedef, path=None, field_order=None):
"""Encode a Python dictionary to a binary protobuf message"""
output = bytearray()
if path is None:
path = []
skiplist = set()
if field_order is not None:
for field_number, index in field_order:
if field_number in data:
value = data[field_number]
# This will probably fail in some weird cases, and will get a weird
# encoding for packed numbers, but our main concern when ordering
# fields is that it's a default decoding, which won't be a packed number
try:
new_output = _encode_message_field(
config, typedef, path, field_number, value, selected_index=index
)
output += new_output
skiplist.add((field_number, index))
except EncoderException as exc:
logging.warn(
"Error encoding priority field: %s %s %r %r",
field_number,
index,
path,
exc,
)
for field_number, value in data.items():
new_output = _encode_message_field(
config, typedef, path, field_number, value, skiplist=skiplist
)
output += new_output
return output
def _encode_message_field(
config, typedef, path, field_number, value, selected_index=None, skiplist=None
):
# Encodes a single field of a message to the byte array
# If selected_index is passed, it will only encode a single element if value is a list
# If skiplist is passed, it should be in the form of (field_number,index)
# and this will skip encoding those elements
# Get the field number convert it as necessary
alt_field_number = None
if six.PY2:
string_types = (str, unicode)
else:
string_types = str
if isinstance(field_number, string_types):
if "-" in field_number:
field_number, alt_field_number = field_number.split("-")
for number, info in typedef.items():
if info.get("name", "") != "" and info["name"] == field_number and field_number != "":
field_number = number
break
else:
field_number = str(field_number)
field_path = path[:]
field_path.append(field_number)
if field_number not in typedef:
raise EncoderException(
"Provided field name/number %s is not valid" % (field_number),
field_path,
)
field_typedef = typedef[field_number]
# Get encoder
if "type" not in field_typedef:
raise TypedefException(
"Field %s does not have a defined type." % field_number, field_path
)
field_type = field_typedef["type"]
field_order = field_typedef.get("field_order", None)
field_encoder = None
if alt_field_number is not None:
if alt_field_number not in field_typedef["alt_typedefs"]:
raise EncoderException(
"Provided alt field name/number %s is not valid for field_number %s"
% (alt_field_number, field_number),
field_path,
)
if isinstance(field_typedef["alt_typedefs"][alt_field_number], dict):
innertypedef = field_typedef["alt_typedefs"][alt_field_number]
field_encoder = lambda data: encode_lendelim_message(
data, config, innertypedef, path=field_path, field_order=field_order
)
else:
# the alt typedef is a plain type name, not a nested message typedef;
# let the normal encoder lookup below handle it as the field's type
field_type = field_typedef["alt_typedefs"][alt_field_number]
if field_encoder is None:
if field_type == "message":
innertypedef = None
if "message_typedef" in field_typedef:
innertypedef = field_typedef["message_typedef"]
elif "message_type_name" in field_typedef:
message_type_name = field_typedef["message_type_name"]
if message_type_name not in config.known_types:
raise TypedefException(
"Message type (%s) has not been defined"
% field_typedef["message_type_name"],
field_path,
)
innertypedef = config.known_types[message_type_name]
else:
raise TypedefException(
"Could not find message typedef for %s" % field_number,
field_path,
)
field_encoder = lambda data: encode_lendelim_message(
data, config, innertypedef, path=field_path, field_order=field_order
)
else:
if field_type not in blackboxprotobuf.lib.types.ENCODERS:
raise TypedefException("Unknown type: %s" % field_type)
field_encoder = blackboxprotobuf.lib.types.ENCODERS[field_type]
if field_encoder is None:
raise TypedefException(
"Encoder not implemented for %s" % field_type, field_path
)
# Encode the tag
tag = encoder.TagBytes(
int(field_number), blackboxprotobuf.lib.types.WIRETYPES[field_type]
)
output = bytearray()
try:
# Handle repeated values
if isinstance(value, list) and not field_type.startswith("packed_"):
if selected_index is not None:
if selected_index >= len(value):
raise EncoderException(
"Selected index is greater than the length of values: %r %r"
% (selected_index, len(value)),
path,
)
output += tag
output += field_encoder(value[selected_index])
else:
for index, repeated in enumerate(value):
if skiplist is None or (field_number, index) not in skiplist:
output += tag
output += field_encoder(repeated)
else:
if skiplist is None or (field_number, 0) not in skiplist:
output += tag
output += field_encoder(value)
except EncoderException as exc:
exc.set_path(field_path)
six.reraise(*sys.exc_info())
return output
def decode_message(buf, config, typedef=None, pos=0, end=None, depth=0, path=None):
"""Decode a protobuf message with no length prefix"""
if end is None:
end = len(buf)
if typedef is None:
typedef = {}
else:
# Don't want to accidentally modify the original
typedef = copy.deepcopy(typedef)
if path is None:
path = []
output = {}
grouped_fields, field_order, pos = _group_by_number(buf, pos, end, path)
for (field_number, (wire_type, buffers)) in grouped_fields.items():
# wire_type should already be validated by _group_by_number
path = path[:] + [field_number]
field_outputs = None
field_typedef = typedef.get(field_number, {})
field_key = _get_field_key(field_number, typedef, path)
# Easy cases. Fixed size or bytes/string
if (
wire_type
in [
wire_format.WIRETYPE_FIXED32,
wire_format.WIRETYPE_FIXED64,
wire_format.WIRETYPE_VARINT,
]
or ("type" in field_typedef and field_typedef["type"] != "message")
):
if "type" not in field_typedef:
field_typedef["type"] = config.get_default_type(wire_type)
else:
# have a type, but make sure it matches the wiretype
if (
blackboxprotobuf.lib.types.WIRETYPES[field_typedef["type"]]
!= wire_type
):
raise DecoderException(
"Type %s from typedef did not match wiretype %s for "
"field %s" % (field_typedef["type"], wire_type, field_key),
path=path,
)
# we already have a type, just map the decoder
if field_typedef["type"] not in blackboxprotobuf.lib.types.DECODERS:
raise TypedefException(
"Got unkown type %s for field_number %s"
% (field_typedef["type"], field_number),
path=path,
)
decoder = blackboxprotobuf.lib.types.DECODERS[field_typedef["type"]]
field_outputs = [decoder(buf, 0) for buf in buffers]
# this shouldn't happen, but let's check just in case
for buf, _pos in zip(buffers, [y for _, y in field_outputs]):
assert len(buf) == _pos
field_outputs = [value for (value, _) in field_outputs]
if len(field_outputs) == 1:
output[field_key] = field_outputs[0]
else:
output[field_key] = field_outputs
elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
_try_decode_lendelim_fields(
buffers, field_key, field_typedef, output, config
)
# Save the field typedef/type back to the typedef
typedef[field_number] = field_typedef
return output, typedef, field_order, pos
def _group_by_number(buf, pos, end, path):
# Parse through the whole message and split into buffers based on wire
# type and organized by field number. This forces us to parse the whole
# message at once, but I think we're doing that anyway. This catches size
# errors early as well, which is usually the best indicator of if it's a
# protobuf message or not.
# Returns a dictionary like:
# {
# "2": (<wiretype>, [<data>])
# }
output_map = {}
field_order = []
while pos < end:
# Read in a field
tag, pos = varint.decode_uvarint(buf, pos)
field_number, wire_type = wire_format.UnpackTag(tag)
# We want field numbers as strings everywhere
field_number = str(field_number)
path = path[:] + [field_number]
if field_number in output_map and output_map[field_number][0] != wire_type:
# This should never happen
raise DecoderException(
"Field %s has mistmatched wiretypes. Previous: %s Now: %s"
% (field_number, output_map[field_number][0], wire_type),
path=path,
)
length = None
if wire_type == wire_format.WIRETYPE_VARINT:
# We actually have to read in the whole varint to figure out its size
_, new_pos = varint.decode_varint(buf, pos)
length = new_pos - pos
elif wire_type == wire_format.WIRETYPE_FIXED32:
length = 4
elif wire_type == wire_format.WIRETYPE_FIXED64:
length = 8
elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
# Read the length from the start of the message
# add on the length of the length tag as well
bytes_length, new_pos = varint.decode_varint(buf, pos)
length = bytes_length + (new_pos - pos)
elif wire_type in [
wire_format.WIRETYPE_START_GROUP,
wire_format.WIRETYPE_END_GROUP,
]:
raise DecoderException("GROUP wire types not supported", path=path)
else:
raise DecoderException("Got unkown wire type: %d" % wire_type, path=path)
if pos + length > end:
raise DecoderException(
"Decoded length for field %s goes over end: %d > %d"
% (field_number, pos + length, end),
path=path,
)
field_buf = buf[pos : pos + length]
if field_number in output_map:
output_map[field_number][1].append(field_buf)
else:
output_map[field_number] = (wire_type, [field_buf])
field_order.append((field_number, len(output_map[field_number][1]) - 1))
pos += length
return output_map, field_order, pos
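# Illustrative sketch (not from the original module): what _group_by_number
# returns for a minimal buffer. The bytes below encode field 1 as the varint
# 150 (tag 0x08, payload 0x96 0x01); the values shown assume the module's
# varint/wire_format helpers behave as they are used above.
#
#   output_map, field_order, pos = _group_by_number(b"\x08\x96\x01", 0, 3, [])
#   # output_map  == {"1": (wire_format.WIRETYPE_VARINT, [b"\x96\x01"])}
#   # field_order == [("1", 0)]
#   # pos         == 3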
def _get_field_key(field_number, typedef, path):
# Translate a field_number into a name if one is available in the typedef
if not isinstance(field_number, (int, str)):
raise EncoderException("Field key in message must be a str or int", path=path)
if isinstance(field_number, int):
field_number = str(field_number)
# handle an alt_typedef by transforming 1-1 to name-1
# I don't think this should actually be used with the current uses of
# _get_field_key
alt_field_number = None
if "-" in field_number:
field_number, alt_field_number = field_number.split("-")
if field_number in typedef and typedef[field_number].get("name", "") != "":
field_key = typedef[field_number]["name"]
else:
field_key = field_number
# Return the new field_name + alt_field_number
return field_key + ("" if alt_field_number is None else "-" + alt_field_number)
def _try_decode_lendelim_fields(
buffers, field_key, field_typedef, message_output, config
):
# This is where things get weird
# To start, since we want to decode messages and not treat every
# embedded message as bytes, we have to guess if it's a message or
# not.
# Unlike other types, we can't assume our message types are
# consistent across the tree or even within the same message.
# A field could be a bytes type that decodes to multiple different
# messages that don't have the same type definition. This is where
# 'alt_typedefs' let us say that these are the different message types
# we've seen for this one field.
# In general, if something decodes as a message once, the rest should too
# and we can enforce that across a single message, but not multiple
# messages.
# This is going to change the definition of "alt_typedefs" a bit from just
# alternate message type definitions to also allowing downgrading to
# 'bytes' or string with an 'alt_type' if it doesn't parse
try:
outputs_map = {}
field_order = []
# grab all dictionary alt_typedefs
all_typedefs = {
# we don't want this to modify in-place if it fails
key: copy.deepcopy(value)
for key, value in field_typedef.get("alt_typedefs", {}).items()
if isinstance(value, dict)
}
all_typedefs["1"] = copy.deepcopy(field_typedef.get("message_typedef", {}))
for buf in buffers:
output = None
output_typedef = None
output_typedef_num = None
new_field_order = []
for alt_typedef_num, alt_typedef in sorted(
all_typedefs.items(), key=lambda x: int(x[0])
):
try:
(
output,
output_typedef,
new_field_order,
_,
) = decode_lendelim_message(buf, config, alt_typedef)
except:
continue
output_typedef_num = alt_typedef_num
break
# try an anonymous type
# let the error propagate up if we fail this
if output is None:
output, output_typedef, new_field_order, _ = decode_lendelim_message(
buf, config, {}
)
output_typedef_num = str(
max([int(i) for i in ["0"] + list(all_typedefs.keys())]) + 1
)
# save the output or typedef we found
all_typedefs[output_typedef_num] = output_typedef
output_list = outputs_map.get(output_typedef_num, [])
output_list.append(output)
outputs_map[output_typedef_num] = output_list
# we should technically have a different field order for each instance of the data
# but that would require a very messy JSON which we're trying to avoid
if len(new_field_order) > len(field_order):
field_order = new_field_order
# was able to decode everything as a message
field_typedef["type"] = "message"
field_typedef["message_typedef"] = all_typedefs["1"]
field_typedef["field_order"] = field_order
if len(all_typedefs.keys()) > 1:
del all_typedefs["1"]
field_typedef.setdefault("alt_typedefs", {}).update(all_typedefs)
# messages get set as "key-alt_number"
for output_typedef_num, outputs in outputs_map.items():
output_field_key = field_key
if output_typedef_num != "1":
output_field_key += "-" + output_typedef_num
message_output[output_field_key] = (
outputs if len(outputs) > 1 else outputs[0]
)
# success, return
return
except DecoderException as exc:
# this should be pretty common, don't be noisy or throw an exception
logging.debug(
"Could not decode a buffer for field number %s as a message: %s",
field_key,
exc,
)
# Decoding as a message did not work, try strings and then bytes
# The bytes decoding should never fail
for target_type in ["string", config.default_binary_type]:
try:
outputs = []
decoder = blackboxprotobuf.lib.types.DECODERS[target_type]
for buf in buffers:
output, _ = decoder(buf, 0)
outputs.append(output)
# all outputs worked, this is our type
# check if there is a message type already in the typedef
if "type" in field_typedef and "message" == field_typedef["type"]:
# we already had a message type. save it as an alt_typedef
# check if we already have this type as an alt_typedef
output_typedef_nums = {
key: value
for key, value in field_typedef.setdefault(
"alt_typedefs", {}
).items()
if value == target_type
}.keys()
output_typedef_num = None
if len(output_typedef_nums) == 0:
# find the next largest alt typedef number to put this type as
output_typedef_num = str(
max([int(i) for i in ["0"] + list(all_typedefs.keys())]) + 1
)
field_typedef.setdefault("alt_typedefs", {})[
output_typedef_num
] = target_type
else:
# we already have an alt typedef for this type, reuse its number
output_typedef_num = list(output_typedef_nums)[0]
message_output[field_key + "-" + output_typedef_num] = (
outputs if len(outputs) > 1 else outputs[0]
)
else:
field_typedef["type"] = target_type
message_output[field_key] = outputs if len(outputs) > 1 else outputs[0]
return
except DecoderException:
continue
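# Illustrative sketch (hypothetical shapes, not output captured from the
# original authors): after _try_decode_lendelim_fields, a field whose buffers
# decoded as two different message shapes plus one raw fallback could end up
# with a typedef along these lines:
#
#   {
#       "type": "message",
#       "message_typedef": {...},   # the primary shape, alt typedef number "1"
#       "alt_typedefs": {
#           "2": {...},             # a second message shape
#           "3": "bytes",           # a non-message downgrade for unparseable buffers
#       },
#   }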
def encode_lendelim_message(data, config, typedef, path=None, field_order=None):
"""Encode data as a length delimited protobuf message"""
message_out = encode_message(
data, config, typedef, path=path, field_order=field_order
)
length = varint.encode_varint(len(message_out))
logging.debug("Message length encoded: %d", len(length) + len(message_out))
return length + message_out
def decode_lendelim_message(buf, config, typedef=None, pos=0, depth=0, path=None):
"""Deocde a length delimited protobuf message from buf"""
length, pos = varint.decode_varint(buf, pos)
ret = decode_message(
buf, config, typedef, pos, pos + length, depth=depth, path=path
)
return ret
def generate_packed_encoder(wrapped_encoder):
"""Generate an encoder for a packed type based on a base type encoder"""
def length_wrapper(values):
# Encode repeated values and prefix with the length
output = bytearray()
for value in values:
output += wrapped_encoder(value)
length = varint.encode_varint(len(output))
return length + output
return length_wrapper
def generate_packed_decoder(wrapped_decoder):
"""Generate an decoder for a packed type based on a base type decoder"""
def length_wrapper(buf, pos):
# Decode repeated values prefixed with the length
length, pos = varint.decode_varint(buf, pos)
end = pos + length
output = []
while pos < end:
value, pos = wrapped_decoder(buf, pos)
output.append(value)
if pos > end:
raise DecoderException(
(
"Error decoding packed field. Packed length larger than"
" buffer: decoded = %d, left = %d"
)
% (length, len(buf) - pos)
)
return output, pos
return length_wrapper
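# Illustrative sketch (assuming varint.decode_varint reads one varint and
# returns (value, new_pos), as it is used above): decoding a packed varint
# field containing [1, 2, 3], i.e. a length prefix of 3 followed by three
# one-byte varints.
#
#   decode_packed_varint = generate_packed_decoder(varint.decode_varint)
#   # decode_packed_varint(b"\x03\x01\x02\x03", 0) == ([1, 2, 3], 4)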
|
the-stack_0_7829 | #!/usr/bin/python
import os
import struct
from collections import OrderedDict
FILE_NAME = "DR.SG0" #change name to extract other file
def showData(f):
f.seek(1)
data = f.read(1)
driverId = struct.unpack("<B",(data))[0]
print("DriverId: {0}".format(driverId))
data = f.read(1)
useWeapons = struct.unpack("<B",(data))[0]
print("useWeapons: {0}".format(useWeapons))
data = f.read(1)
difficulty = struct.unpack("<B",(data))[0]
print("Difficulty: {0}".format(difficulty))
data = f.read(15)
print("SaveGame Name: {0}".format(data))
drivers=20
for index in range(drivers):
f.seek(19+108*index)
name = f.read(12)
print("#{0}".format(index))
print(" Name: {0}".format(name))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Damage: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Engine: {0}".format(data%256))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Tire: {0}".format(data%255 ))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Armour: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Car Type: {0}".format(data%255))
if index ==driverId:
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" ?: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" ?: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" ?: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Color: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Money: {0}".format(data%(255*255)))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Loan Type: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Loan Races Left: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Actual car value: {0}".format(data%(255*255)))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Face: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Points: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Rank: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Races won: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Total races: {0}".format(data%255))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" ?: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Total income: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Mines: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Spikes: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Rocket: {0}".format(data))
data = f.read(4)
data = struct.unpack("I",(data))[0]
print(" Sabotage: {0}".format(data))
else:
f.read(19*4)
print("Reading: {0}".format(FILE_NAME))
f = open(FILE_NAME, "rb")
fDest = open(FILE_NAME + "_decrypted","w+")
try:
positions = 2179
initialPosition = 0
for index in range(positions):
f.seek(index)
data = f.read(1)
if index==0:
initialPosition =struct.unpack("<B",(data))[0]
data =initialPosition
else:
# Rotate the byte left by (index % 6) bits, then undo the additive obfuscation.
byte = struct.unpack("<B", data)[0]
tmpData = (byte << index % 6) % 256
data = (tmpData | byte >> (8 - index % 6)) % 256
data = (data - 17 * index) % 256
data = (data + initialPosition) % 256
fDest.seek(index)
fDest.write(chr(data))
showData(fDest)
print("File readed: {0}".format(FILE_NAME))
finally:
fDest.close()
f.close()
|
the-stack_0_7830 | #!/usr/bin/python
proto = ["ssh", "http", "https"]
protoa = ["ssh", "http", "https"]
print(proto)
proto.append('dns') # adds 'dns' to the end of the list
protoa.append('dns') # adds 'dns' to the end of the list
print(proto)
proto2 = [22,80,443,53] # list common ports
proto.extend(proto2) # pass proto2 as argument to the extend method; its items are appended individually
print(proto)
protoa.append(proto2) # pass proto2 as argument to the append method; the whole list is nested as one element
print(protoa)
# .insert() places an item at a given index
proto.insert(2,proto2) # inserts the list proto2 at index 2
print(proto)
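# For reference (values assuming the operations above run in order):
# proto  -> ['ssh', 'http', [22, 80, 443, 53], 'https', 'dns', 22, 80, 443, 53]
# protoa -> ['ssh', 'http', 'https', 'dns', [22, 80, 443, 53]]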
|
the-stack_0_7831 | import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "config.settings.local"
) # pragma: no cover
app = Celery("dear_petition")
# Using a string here means the worker will not have to
# pickle the object when using Windows.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
class CeleryAppConfig(AppConfig):
name = "dear_petition.taskapp"
verbose_name = "Celery Config"
def ready(self):
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
@app.task(bind=True)
def debug_task(self):
print(f"Request: {self.request!r}") # pragma: no cover
|
the-stack_0_7833 | """
WHAT: A class which manages the interface with MySQL
WHY: Need to read and write data to MySQL
ASSUMES: MySQL is running per the connection parameters
FUTURE IMPROVEMENTS: Add table upload functions, and DDL creation as required
WHO: SL 2020-08-13
"""
import mysql.connector as mysql
import pandas as pd
from tqdm import tqdm
import modConfig
import logging
import os
class clsDatabase:
"""
See header
"""
def __init__(self):
"""
Constructor
"""
self.connection = None
def connect(self):
"""
Connect to MySQL
:return: None
"""
self.connection = mysql.connect(host='localhost', port=12345, database='xxx', user='root', password='xxx', autocommit=False, option_files="my.cnf")
def disconnect(self):
"""
Disconnect from MySQL
:return: None
"""
if self.connection is not None:
self.connection.close()
def reconnect(self):
"""
Reconnect to MySQL
:return: None
"""
self.disconnect()
self.connect()
def execute(self, sql, expectingReturn=False):
"""
Execute a sql query, and optionally return results as a pandas DataFrame
:param sql: Any sql statement
:param expectingReturn: True meaning return a pandas DataFrame, False meaning no return
:return: pandas DataFrame or None
"""
if expectingReturn:
return pd.read_sql(sql=sql, con=self.connection)
else:
cursor = self.connection.cursor()
cursor.execute(sql)
cursor.close()
def uploadTableViaDataFrame(self, df, tableName, clearTable=False, shouldCrashOnBadRow=True):
"""
Uploads a pandas DataFrame to a given MySQL table via insert statements
:param df: A pandas DataFrame with column names which match the target table column names
:param tableName: A MySQL table name
:param clearTable: Boolean whether to clear the table before uploading
:return: None
"""
sql = "insert into `%s`\n(`" % tableName + "`,`".join(df.columns) + "`)\nvalues\n(" + ",".join(["%s"]*len(df.columns)) + ")"
cursor = self.connection.cursor()
if clearTable:
cursor.execute("delete from `%s`" % tableName)
rowCounter = 0
for row in tqdm(df.values.tolist(), desc="Uploading table %s" % tableName, unit="row"):
try:
cursor.execute(sql, row)
except Exception as e:
if shouldCrashOnBadRow:
raise
else:
msg = "Row failure at row %s with error: %s" % (str(rowCounter), str(e))
print(msg)
logging.error(msg)
rowCounter += 1
cursor.close()
self.connection.commit()
def uploadTableViaCsvFile(self, fileName, tableName, columnNames=None, clearTable=False):
"""
Uploads a local csv file to a given MySQL table via LOAD DATA LOCAL INFILE.
NOTE Server must have local_infile turned on
set @@global.local_infile = 1
NOTE Client must have local_infile enabled in its .cnf file
[client]
allow_local_infile=ON
:param fileName: A comma separated text file, e.g. as produced by pd.to_csv(..., index=False, line_terminator='\n')
:param tableName: A MySQL table name
:param clearTable: Boolean whether to clear the table before uploading
:return: None
"""
sql = \
"""
LOAD DATA LOCAL INFILE '%s' INTO TABLE `%s`
FIELDS TERMINATED BY ','
ENCLOSED BY '"'
LINES TERMINATED BY '\n'
IGNORE 1 LINES
""" % (fileName.replace(os.sep, '/'), tableName) # os.sep is to handle windows file paths
if columnNames is not None:
sql += "\n(`" + "`,`".join(columnNames) + "`)"
sql += ";"
cursor = self.connection.cursor()
if clearTable:
cursor.execute("delete from `%s`" % tableName)
cursor.execute(sql)
cursor.close()
self.connection.commit()
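# Illustrative usage sketch (the table and file names here are hypothetical,
# and the target table must already exist with matching columns):
#
#   db = clsDatabase()
#   db.connect()
#   db.uploadTableViaCsvFile(fileName="staging_data.csv", tableName="staging_table", clearTable=True)
#   db.disconnect()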
if __name__ == '__main__':
print("Connecting to database")
db = clsDatabase()
db.connect()
print("Querying database")
df = db.execute(sql="select current_timestamp", expectingReturn=True)
print(df)
print("Disconnecting from database")
db.disconnect()
print("Done")
|
the-stack_0_7834 | import json
import logging
import sys
import unittest
from handlers.proj_schedule_initializer import lambda_handler
from handlers.proj_schedule_initializer import logger
logging.basicConfig(format='%(asctime)s %(filename)s [line:%(lineno)d] [PID:%(process)d] %(levelname)s: %(message)s',
stream=sys.stdout)
class ScheduleInitializerTestCase(unittest.TestCase):
def test_get_schedule_initializer(self):
logger.info("test get schedule initializer")
get_test_event = {
"resource": "/schedule/",
"path": "/schedule/",
"httpMethod": "GET",
"queryStringParameters": {
"pageSize": "20",
"pageNo": "0",
"userId": "test-editor"
},
"multiValueQueryStringParameters": {
"pageSize": [
"20"
],
"pageNo": [
"0"
],
"userId": [
"test-editor"
]
},
"pathParameters": {}
}
handler_response = lambda_handler(get_test_event, None)
logger.debug(json.dumps(handler_response, indent=2))
logger.debug(json.dumps(json.loads(handler_response["body"]), indent=2))
self.assertEqual(handler_response["statusCode"], 200)
logger.info("Completed!")
def test_post_schedule_initializer(self):
logger.info("test post schedule initializer")
post_test_event = {
"resource": "/schedule/",
"path": "/schedule/",
"httpMethod": "POST",
"queryStringParameters": {
"targetArea": "New York",
"userId": "test-editor"
},
"multiValueQueryStringParameters": {
"targetArea": [
"New York"
],
"userId": [
"test-editor"
],
},
"pathParameters": {}
}
handler_response = lambda_handler(post_test_event, None)
print(json.dumps(handler_response, indent=2))
self.assertEqual(handler_response["statusCode"], 200)
logger.debug(json.dumps(json.loads(handler_response["body"]), indent=2))
logger.info("Completed!")
if __name__ == '__main__':
unittest.main()
|
the-stack_0_7835 | # coding: utf-8
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import numpy as np
import tensorflow as tf
from test import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None,
"Where the training/test data is stored.")
flags.DEFINE_string("save_path", None,
"Model output directory.")
flags.DEFINE_bool("use_fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
def data_type():
return tf.float16 if FLAGS.use_fp16 else tf.float32
class PTBInput(object):
"""input 데이터"""
def __init__(self, config, data, name=None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = reader.ptb_producer(
data, batch_size, num_steps, name=name)
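# For intuition (numbers are only an example, not from the paper): with
# len(data) = 10000, batch_size = 20 and num_steps = 35, the reader yields
# (10000 // 20 - 1) // 35 = 499 // 35 = 14 mini-batches per epoch.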
class PTBModel(object):
"""PTB 모델"""
def __init__(self, is_training, config, input_):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
def lstm_cell():
# With the latest TensorFlow source code (as of Mar 27, 2017),
# the BasicLSTMCell will need a reuse parameter which is unfortunately not
# defined in TensorFlow 1.0. To maintain backwards compatibility, we add
# an argument check here:
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True)
attn_cell = lstm_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=config.keep_prob)
cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, data_type())
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = tf.unstack(inputs, num=num_steps, axis=1)
# outputs, state = tf.contrib.rnn.static_rnn(
# cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
# Reshape logits to be 3-D tensor for sequence loss
logits = tf.reshape(logits, [batch_size, num_steps, vocab_size])
# use the contrib sequence loss and average over the batches
loss = tf.contrib.seq2seq.sequence_loss(
logits,
input_.targets,
tf.ones([batch_size, num_steps], dtype=data_type()),
average_across_timesteps=False,
average_across_batch=True
)
# update the cost variables
self._cost = cost = tf.reduce_sum(loss)
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""--model flag가 small일때의 설정값들"""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""--model flag가 medium일때의 설정값들"""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""--model flag가 large일때의 설정값들"""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
print(cost)
iters += model.input.num_steps
print(iters)
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
elif FLAGS.model == "test":
return TestConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data, name="TrainInput")
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config, input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
tf.summary.scalar("Validation Loss", mvalid.cost)
with tf.name_scope("Test"):
test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mtest = PTBModel(is_training=False, config=eval_config,
input_=test_input)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
if __name__ == "__main__":
tf.app.run() |
the-stack_0_7837 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro.contrib.control_flow import cond, scan
import numpyro.distributions as dist
from numpyro.handlers import seed, substitute, trace
from numpyro.infer import MCMC, NUTS, SVI, Predictive, Trace_ELBO
from numpyro.infer.util import potential_energy
def test_scan():
def model(T=10, q=1, r=1, phi=0.0, beta=0.0):
def transition(state, i):
x0, mu0 = state
x1 = numpyro.sample("x", dist.Normal(phi * x0, q))
mu1 = beta * mu0 + x1
y1 = numpyro.sample("y", dist.Normal(mu1, r))
numpyro.deterministic("y2", y1 * 2)
return (x1, mu1), (x1, y1)
mu0 = x0 = numpyro.sample("x_0", dist.Normal(0, q))
y0 = numpyro.sample("y_0", dist.Normal(mu0, r))
_, xy = scan(transition, (x0, mu0), jnp.arange(T))
x, y = xy
return jnp.append(x0, x), jnp.append(y0, y)
T = 10
num_samples = 100
kernel = NUTS(model)
mcmc = MCMC(kernel, num_warmup=100, num_samples=num_samples)
mcmc.run(random.PRNGKey(0), T=T)
assert set(mcmc.get_samples()) == {"x", "y", "y2", "x_0", "y_0"}
mcmc.print_summary()
samples = mcmc.get_samples()
x = samples.pop("x")[0] # take 1 sample of x
# this tests for the composition of condition and substitute
# this also tests if we can use `vmap` for predictive.
future = 5
predictive = Predictive(
numpyro.handlers.condition(model, {"x": x}),
samples,
return_sites=["x", "y", "y2"],
parallel=True,
)
result = predictive(random.PRNGKey(1), T=T + future)
expected_shape = (num_samples, T + future)
assert result["x"].shape == expected_shape
assert result["y"].shape == expected_shape
assert result["y2"].shape == expected_shape
assert_allclose(result["x"][:, :T], jnp.broadcast_to(x, (num_samples, T)))
assert_allclose(result["y"][:, :T], samples["y"])
@pytest.mark.xfail(raises=RuntimeError)
def test_nested_scan_smoke():
def model():
def outer_fn(y, val):
def body_fn(z, val):
z = numpyro.sample("z", dist.Normal(z, 1))
return z, z
y = numpyro.sample("y", dist.Normal(y, 1))
_, zs = scan(body_fn, y, None, 4)
return y, zs
x = numpyro.sample("x", dist.Normal(0, 1))
_, zs = scan(outer_fn, x, None, 3)
return zs
data = jnp.arange(12).reshape((3, 4))
# we can scan but can't substitute values through multiple levels of scan
with trace(), seed(rng_seed=0), substitute(data={"z": data}):
zs = model()
assert_allclose(zs, data)
def test_scan_constrain_reparam_compatible():
def model(T, q=1, r=1, phi=0.0, beta=0.0):
x = 0.0
mu = 0.0
for i in range(T):
x = numpyro.sample(f"x_{i}", dist.LogNormal(phi * x, q))
mu = beta * mu + x
numpyro.sample(f"y_{i}", dist.Normal(mu, r))
def fun_model(T, q=1, r=1, phi=0.0, beta=0.0):
def transition(state, i):
x, mu = state
x = numpyro.sample("x", dist.LogNormal(phi * x, q))
mu = beta * mu + x
numpyro.sample("y", dist.Normal(mu, r))
return (x, mu), None
scan(transition, (0.0, 0.0), jnp.arange(T))
T = 10
params = {}
for i in range(T):
params[f"x_{i}"] = (i + 1.0) / 10
params[f"y_{i}"] = -i / 5
fun_params = {"x": jnp.arange(1, T + 1) / 10, "y": -jnp.arange(T) / 5}
actual_log_joint = potential_energy(fun_model, (T,), {}, fun_params)
expected_log_joint = potential_energy(model, (T,), {}, params)
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_without_stack():
def multiply_and_add_repeatedly(K, c_in):
def iteration(c_prev, c_in):
c_next = jnp.dot(c_prev, K) + c_in
return c_next, (c_next,)
_, (ys,) = scan(iteration, init=jnp.asarray([1.0, 0.0]), xs=c_in)
return ys
result = multiply_and_add_repeatedly(
K=jnp.asarray([[0.7, 0.3], [0.3, 0.7]]), c_in=jnp.asarray([[1.0, 0.0]])
)
assert_allclose(
result,
[[1.7, 0.3]],
)
def test_cond():
def model():
def true_fun(_):
x = numpyro.sample("x", dist.Normal(4.0))
numpyro.deterministic("z", x - 4.0)
def false_fun(_):
x = numpyro.sample("x", dist.Normal(0.0))
numpyro.deterministic("z", x)
cluster = numpyro.sample("cluster", dist.Normal())
cond(cluster > 0, true_fun, false_fun, None)
def guide():
m1 = numpyro.param("m1", 2.0)
s1 = numpyro.param("s1", 0.1, constraint=dist.constraints.positive)
m2 = numpyro.param("m2", 2.0)
s2 = numpyro.param("s2", 0.1, constraint=dist.constraints.positive)
def true_fun(_):
numpyro.sample("x", dist.Normal(m1, s1))
def false_fun(_):
numpyro.sample("x", dist.Normal(m2, s2))
cluster = numpyro.sample("cluster", dist.Normal())
cond(cluster > 0, true_fun, false_fun, None)
svi = SVI(model, guide, numpyro.optim.Adam(1e-2), Trace_ELBO(num_particles=100))
params, losses = svi.run(random.PRNGKey(0), num_steps=2500)
predictive = Predictive(
model,
guide=guide,
params=params,
num_samples=1000,
return_sites=["cluster", "x", "z"],
)
result = predictive(random.PRNGKey(0))
assert result["cluster"].shape == (1000,)
assert result["x"].shape == (1000,)
assert result["z"].shape == (1000,)
mcmc = MCMC(
NUTS(model),
num_warmup=500,
num_samples=2500,
num_chains=4,
chain_method="sequential",
)
mcmc.run(random.PRNGKey(0))
x = mcmc.get_samples()["x"]
assert x.shape == (10_000,)
assert_allclose(
[x[x > 2.0].mean(), x[x > 2.0].std(), x[x < 2.0].mean(), x[x < 2.0].std()],
[4.01, 0.965, -0.01, 0.965],
atol=0.1,
)
assert_allclose([x.mean(), x.std()], [2.0, jnp.sqrt(5.0)], atol=0.5)
|
the-stack_0_7838 | """
Pytorch models.
"""
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.functional as F
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.nn.init import xavier_uniform_
import utils
CUDA = torch.cuda.is_available()
class TSpec(nn.Module):
def __init__(self, ts_len, spec_len, hid_dim, layers, out_dims):
"""
Model that accepts, as input, a timeseries concatenated with the
spectra of that timeseries. The timeseries is fed through a 1DCNN
to extract interesting shapes in the signal. Simultaneously, the
spectra of the timeseries is analyzed by a seperate MLP head, which
learns about informative peaks in the spectra. The outputs of these
two paths are then concatenated and fed through an embedding MLP.
Finally, for the n outputs requested, single MLP layer is used to
predict either a real number (regression) or distribution
(classification).
ts_len: Number of timepoints in the timeseries (CNN->LSTM path).
spec_len: Number of frequency bins in the spectra (MLP).
hid_dim: Controls the size of all intermediate layers.
layers: Number of layers for the CNN, MLP, and embedding components.
out_dims: List of integers for the size of each output head. One
for each prediction task. Regression == 1,
Classification >= 1.
"""
super(TSpec, self).__init__()
self.ts_len = ts_len
self.spec_len = spec_len
self.hid_dim = hid_dim
self.layers = layers
self.out_dims = out_dims
# 5-layer CNN accepts the timeseries input.
# Use mean-pooling so we are more sensitive to exact mean R-R times.
# Conv --> AvgPool --> BatchNorm --> ReLU.
self.conv = nn.Sequential(
nn.Conv1d(1, hid_dim, 5),
nn.AvgPool1d(5),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Conv1d(hid_dim, hid_dim, 5),
nn.AvgPool1d(5),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Conv1d(hid_dim, hid_dim, 5),
nn.AvgPool1d(5),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Conv1d(hid_dim, hid_dim, 5),
nn.AvgPool1d(5),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Conv1d(hid_dim, hid_dim, 3),
nn.AvgPool1d(2),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
)
# n-layer MLP accepts the spectra. Linear --> Batchnorm --> ReLU
# Minimum 2-layers, first layer always embeds to FIXED neurons.
FIXED = 1000
arch = []
arch.append(nn.Linear(spec_len, FIXED))
arch.append(nn.BatchNorm1d(FIXED))
arch.append(nn.ReLU())
for i in range(layers):
if i == 0:
arch.append(nn.Linear(FIXED, hid_dim))
else:
arch.append(nn.Linear(hid_dim, hid_dim))
arch.append(nn.BatchNorm1d(hid_dim))
arch.append(nn.ReLU())
self.mlp = nn.Sequential(*arch)
# Embedding mixes the timeseries and spectral representations.
# Linear --> BatchNorm --> ReLU.
arch = []
for i in range(layers):
if i == 0:
arch.append(nn.Linear(hid_dim*2, hid_dim))
else:
arch.append(nn.Linear(hid_dim, hid_dim))
arch.append(nn.BatchNorm1d(hid_dim))
arch.append(nn.ReLU())
self.embedding = nn.Sequential(*arch)
# Output heads are a single fully connected layer.
self.outputs = nn.ModuleList([])
for out_dim in out_dims:
self.outputs.append(nn.Linear(hid_dim, out_dim))
def forward(self, X):
"""
X is size=(batch_size, ts_len+spec_len).
We use self.ts_len and self.spec_len to split X to be fed into
the CNN head and MLP head.
"""
batch_size = X.size(0)
X_time = X[:, :self.ts_len]
X_spec = X[:, self.ts_len:]
# Convolutional step on timeseries.
conv_act = self.conv(X_time.unsqueeze(1))
# Pass spectra through MLP.
mlp_act = self.mlp(X_spec)
# Hidden state is the concatenation CNN and MLP branches.
hid = torch.cat([conv_act.squeeze(), mlp_act], dim=1)
# Embed mixed representations from CNN and MLP.
y_hat = self.embedding(hid)
# Generate individual predictions from this embedding.
y_hats = []
for i, output in enumerate(self.outputs):
y_hats.append(output(y_hat))
return y_hats
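if __name__ == "__main__":
    # Minimal smoke test (sizes are hypothetical, not from the original study).
    # ts_len must reduce to a single timepoint through the conv/pool stack above
    # (e.g. 4000 -> 3996 -> 799 -> 795 -> 159 -> 155 -> 31 -> 27 -> 5 -> 3 -> 1),
    # because forward() squeezes the conv output before concatenation.
    model = TSpec(ts_len=4000, spec_len=500, hid_dim=64, layers=2, out_dims=[1, 3])
    X = torch.randn(8, 4000 + 500)  # batch of 8 concatenated (timeseries + spectra)
    y_hats = model(X)
    print([y.shape for y in y_hats])  # expect [torch.Size([8, 1]), torch.Size([8, 3])]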
|
the-stack_0_7839 | import copy
import datetime
import logging
import traceback
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from urllib.parse import urlparse
from great_expectations._version import get_versions # isort:skip
__version__ = get_versions()["version"] # isort:skip
del get_versions # isort:skip
from great_expectations.core import IDDict
from great_expectations.core.batch import BatchMarkers, BatchSpec
from great_expectations.core.batch_spec import (
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.data_context.types.base import ConcurrencyConfig
from great_expectations.exceptions import (
DatasourceKeyPairAuthBadPassphraseError,
ExecutionEngineError,
GreatExpectationsError,
InvalidBatchSpecError,
InvalidConfigError,
)
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy
from great_expectations.util import (
filter_properties_dict,
get_sqlalchemy_url,
import_library_module,
)
from great_expectations.validator.metric_configuration import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
except ImportError:
sa = None
try:
from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql import Selectable
from sqlalchemy.sql.elements import TextClause, quoted_name
except ImportError:
reflection = None
DefaultDialect = None
Selectable = None
TextClause = None
quoted_name = None
OperationalError = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
if sa:
# Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError, AttributeError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
###
# NOTE: 20210816 - jdimatteo: A convention we rely on is for SqlAlchemy dialects
# to define an attribute "dialect". A PR has been submitted to fix this upstream
# with https://github.com/googleapis/python-bigquery-sqlalchemy/pull/251. If that
# fix isn't present, add this "dialect" attribute here:
if not hasattr(pybigquery.sqlalchemy_bigquery, "dialect"):
pybigquery.sqlalchemy_bigquery.dialect = (
pybigquery.sqlalchemy_bigquery.BigQueryDialect
)
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in Azure (our CI/CD pipeline) in certain cases, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register(
"bigquery", "pybigquery.sqlalchemy_bigquery", "dialect"
)
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except (ImportError, AttributeError):
bigquery_types_tuple = None
pybigquery = None
def _get_dialect_type_module(dialect):
"""Given a dialect, returns the dialect type, which is defines the engine/system that is used to communicates
with the database/database implementation. Currently checks for RedShift/BigQuery dialects"""
if dialect is None:
logger.warning(
"No sqlalchemy dialect found; relying in top-level sqlalchemy types."
)
return sa
try:
# Redshift does not (yet) export types to top level; only recognize base SA types
if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):
return dialect.sa
except (TypeError, AttributeError):
pass
# Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
try:
if (
isinstance(
dialect,
pybigquery.sqlalchemy_bigquery.BigQueryDialect,
)
and bigquery_types_tuple is not None
):
return bigquery_types_tuple
except (TypeError, AttributeError):
pass
return dialect
class SqlAlchemyExecutionEngine(ExecutionEngine):
def __init__(
self,
name=None,
credentials=None,
data_context=None,
engine=None,
connection_string=None,
url=None,
batch_data_dict=None,
create_temp_table=True,
concurrency: Optional[ConcurrencyConfig] = None,
**kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
):
"""Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the
desired database. Also initializes the dialect to be used and configures usage statistics.
Args:
name (str): \
The name of the SqlAlchemyExecutionEngine
credentials: \
If the Execution Engine is not provided, the credentials can be used to build the Execution
Engine. If the Engine is provided, it will be used instead
data_context (DataContext): \
An object representing a Great Expectations project that can be used to access Expectation
Suites and the Project Data itself
engine (Engine): \
A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an
Engine has already been configured and should be reused. Will override Credentials
if provided.
connection_string (string): \
If neither the engines nor the credentials have been provided, a connection string can be used
to access the data. This will be overridden by both the engine and credentials if those are
provided.
url (string): \
If neither the engines, the credentials, nor the connection_string have been provided,
a url can be used to access the data. This will be overridden by all other configuration
options if any are provided.
concurrency (ConcurrencyConfig): Concurrency config used to configure the sqlalchemy engine.
"""
super().__init__(name=name, batch_data_dict=batch_data_dict)
self._name = name
self._credentials = credentials
self._connection_string = connection_string
self._url = url
self._create_temp_table = create_temp_table
if engine is not None:
if credentials is not None:
logger.warning(
"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. "
"Ignoring credentials."
)
self.engine = engine
else:
concurrency = (
concurrency if concurrency is not None else ConcurrencyConfig()
)
concurrency.add_sqlalchemy_create_engine_parameters(kwargs)
if credentials is not None:
self.engine = self._build_engine(credentials=credentials, **kwargs)
elif connection_string is not None:
self.engine = sa.create_engine(connection_string, **kwargs)
elif url is not None:
self.drivername = urlparse(url).scheme
self.engine = sa.create_engine(url, **kwargs)
else:
raise InvalidConfigError(
"Credentials or an engine are required for a SqlAlchemyExecutionEngine."
)
# Get the dialect **for purposes of identifying types**
if self.engine.dialect.name.lower() in [
"postgresql",
"mysql",
"sqlite",
"oracle",
"mssql",
]:
# These are the officially included and supported dialects by sqlalchemy
self.dialect_module = import_library_module(
module_name="sqlalchemy.dialects." + self.engine.dialect.name
)
elif self.engine.dialect.name.lower() == "snowflake":
self.dialect_module = import_library_module(
module_name="snowflake.sqlalchemy.snowdialect"
)
elif self.engine.dialect.name.lower() == "redshift":
self.dialect_module = import_library_module(
module_name="sqlalchemy_redshift.dialect"
)
elif self.engine.dialect.name.lower() == "bigquery":
self.dialect_module = import_library_module(
module_name="pybigquery.sqlalchemy_bigquery"
)
else:
self.dialect_module = None
# <WILL> 20210726 - engine_backup is used by the snowflake connector, which requires connection and engine
# to be closed and disposed separately. Currently self.engine can refer to either a Connection or Engine,
# depending on the backend. This will need to be cleaned up in an upcoming refactor, so that Engine and
# Connection can be handled separately.
self._engine_backup = None
if self.engine and self.engine.dialect.name.lower() in [
"sqlite",
"mssql",
"snowflake",
"mysql",
]:
self._engine_backup = self.engine
# sqlite/mssql temp tables only persist within a connection so override the engine
self.engine = self.engine.connect()
# Send a connect event to provide dialect type
if data_context is not None and getattr(
data_context, "_usage_statistics_handler", None
):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="execution_engine.sqlalchemy.connect",
event_payload={
"anonymized_name": handler._execution_engine_anonymizer.anonymize(
self.name
),
"sqlalchemy_dialect": self.engine.name,
},
success=True,
)
# Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values,
# and set the instance "_config" variable equal to the resulting dictionary.
self._config = {
"name": name,
"credentials": credentials,
"data_context": data_context,
"engine": engine,
"connection_string": connection_string,
"url": url,
"batch_data_dict": batch_data_dict,
"module_name": self.__class__.__module__,
"class_name": self.__class__.__name__,
}
self._config.update(kwargs)
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
@property
def credentials(self):
return self._credentials
@property
def connection_string(self):
return self._connection_string
@property
def url(self):
return self._url
def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
"""
Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a
private key path.
"""
# Update credentials with anything passed during connection time
drivername = credentials.pop("drivername")
schema_name = credentials.pop("schema_name", None)
if schema_name is not None:
logger.warning(
"schema_name specified creating a URL with schema is not supported. Set a default "
"schema on the user connecting to your database."
)
create_engine_kwargs = kwargs
connect_args = credentials.pop("connect_args", None)
if connect_args:
create_engine_kwargs["connect_args"] = connect_args
if "private_key_path" in credentials:
options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
drivername, credentials
)
else:
options = get_sqlalchemy_url(drivername, **credentials)
self.drivername = drivername
engine = sa.create_engine(options, **create_engine_kwargs)
return engine
def _get_sqlalchemy_key_pair_auth_url(
self, drivername: str, credentials: dict
) -> Tuple["sa.engine.url.URL", Dict]:
"""
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
Args:
drivername(str) - The name of the driver class
credentials(dict) - A dictionary of database credentials used to access the database
Returns:
a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
private_key_path = credentials.pop("private_key_path")
private_key_passphrase = credentials.pop("private_key_passphrase")
with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
try:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode()
if private_key_passphrase
else None,
backend=default_backend(),
)
except ValueError as e:
if "incorrect password" in str(e).lower():
raise DatasourceKeyPairAuthBadPassphraseError(
datasource_name="SqlAlchemyDatasource",
message="Decryption of key failed, was the passphrase incorrect?",
) from e
else:
raise e
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
credentials_driver_name = credentials.pop("drivername", None)
create_engine_kwargs = {"connect_args": {"private_key": pkb}}
return (
get_sqlalchemy_url(drivername or credentials_driver_name, **credentials),
create_engine_kwargs,
)
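# Illustrative construction sketch (the connection string is a placeholder,
# not taken from the project's docs): when neither an engine nor credentials
# are supplied, __init__ falls back to sa.create_engine(connection_string).
#
#   execution_engine = SqlAlchemyExecutionEngine(
#       name="my_sqlite_engine",
#       connection_string="sqlite:///path/to/my.db",
#   )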
def get_domain_records(
self,
domain_kwargs: Dict,
) -> Selectable:
"""
Uses the given domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to
obtain and/or query a batch. Returns in the format of an SqlAlchemy table/column(s) object.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
Returns:
An SqlAlchemy table/column(s) (the selectable object for obtaining data on which to compute)
"""
batch_id = domain_kwargs.get("batch_id")
if batch_id is None:
# We allow no batch id specified if there is only one batch
if self.active_batch_data:
data_object = self.active_batch_data
else:
raise GreatExpectationsError(
"No batch is specified, but could not identify a loaded batch."
)
else:
if batch_id in self.loaded_batch_data_dict:
data_object = self.loaded_batch_data_dict[batch_id]
else:
raise GreatExpectationsError(
f"Unable to find batch with batch_id {batch_id}"
)
if "table" in domain_kwargs and domain_kwargs["table"] is not None:
# TODO: Add logic to handle record_set_name once implemented
# (i.e. multiple record sets (tables) in one batch
if domain_kwargs["table"] != data_object.selectable.name:
selectable = sa.Table(
domain_kwargs["table"],
sa.MetaData(),
schema=data_object._schema_name,
)
else:
selectable = data_object.selectable
elif "query" in domain_kwargs:
raise ValueError(
"query is not currently supported by SqlAlchemyExecutionEngine"
)
else:
selectable = data_object.selectable
# Filtering by row condition.
if (
"row_condition" in domain_kwargs
and domain_kwargs["row_condition"] is not None
):
condition_parser = domain_kwargs["condition_parser"]
if condition_parser == "great_expectations__experimental__":
parsed_condition = parse_condition_to_sqlalchemy(
domain_kwargs["row_condition"]
)
selectable = sa.select(
"*", from_obj=selectable, whereclause=parsed_condition
)
else:
raise GreatExpectationsError(
"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser."
)
if "column" in domain_kwargs:
return selectable
if (
"column_A" in domain_kwargs
and "column_B" in domain_kwargs
and "ignore_row_if" in domain_kwargs
):
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
# noinspection PyPep8Naming
column_A_name = quoted_name(domain_kwargs["column_A"], quote=True)
# noinspection PyPep8Naming
column_B_name = quoted_name(domain_kwargs["column_B"], quote=True)
else:
# noinspection PyPep8Naming
column_A_name = domain_kwargs["column_A"]
# noinspection PyPep8Naming
column_B_name = domain_kwargs["column_B"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "both_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
elif ignore_row_if == "either_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
else:
if ignore_row_if not in ["neither", "never"]:
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
if ignore_row_if == "never":
warnings.warn(
f"""The correct "no-action" value of the "ignore_row_if" directive for the column pair case is \
"neither" (the use of "{ignore_row_if}" will be deprecated). Please update code accordingly.
""",
DeprecationWarning,
)
return selectable
if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs:
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
column_list = [
quoted_name(domain_kwargs[column_name], quote=True)
for column_name in domain_kwargs["column_list"]
]
else:
column_list = domain_kwargs["column_list"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "all_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
elif ignore_row_if == "any_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
else:
if ignore_row_if != "never":
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
return selectable
return selectable
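# Illustrative domain_kwargs sketch (column names are hypothetical): a
# column-pair domain that drops rows where either value is NULL would be
# passed as something like:
#
#   {
#       "batch_id": "<id of a loaded batch>",
#       "column_A": "col_a",
#       "column_B": "col_b",
#       "ignore_row_if": "either_value_is_missing",
#   }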
def get_compute_domain(
self,
domain_kwargs: Dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple[Selectable, dict, dict]:
"""Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would
like to be using, or a corresponding string value representing it. String types include "identity",
"column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the
class MetricDomainTypes.
accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when
describing the domain and simply transferred with their associated values into accessor_domain_kwargs.
Returns:
SqlAlchemy column
"""
selectable = self.get_domain_records(
domain_kwargs=domain_kwargs,
)
# Extracting value from enum if it is given for future computation
domain_type = MetricDomainTypes(domain_type)
# Warning user if accessor keys are in any domain that is not of type table, will be ignored
if (
domain_type != MetricDomainTypes.TABLE
and accessor_keys is not None
and len(list(accessor_keys)) > 0
):
logger.warning(
'Accessor keys ignored since Metric Domain Type is not "table"'
)
compute_domain_kwargs = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs = {}
if domain_type == MetricDomainTypes.TABLE:
if accessor_keys is not None and len(list(accessor_keys)) > 0:
for key in accessor_keys:
accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)
if len(domain_kwargs.keys()) > 0:
# Warn user if kwarg not "normal".
unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(
{
"batch_id",
"table",
"row_condition",
"condition_parser",
}
)
if len(unexpected_keys) > 0:
unexpected_keys_str: str = ", ".join(
map(lambda element: f'"{element}"', unexpected_keys)
)
logger.warning(
f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".'
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.COLUMN:
if "column" not in compute_domain_kwargs:
raise GreatExpectationsError(
"Column not provided in compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column"] = quoted_name(
compute_domain_kwargs.pop("column"), quote=True
)
else:
accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column")
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
if not (
"column_A" in compute_domain_kwargs
and "column_B" in compute_domain_kwargs
):
raise GreatExpectationsError(
"column_A or column_B not found within compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column_A"] = quoted_name(
compute_domain_kwargs.pop("column_A"), quote=True
)
accessor_domain_kwargs["column_B"] = quoted_name(
compute_domain_kwargs.pop("column_B"), quote=True
)
else:
accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop(
"column_A"
)
accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop(
"column_B"
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.MULTICOLUMN:
if "column_list" not in domain_kwargs:
raise GreatExpectationsError(
"column_list not found within domain_kwargs"
)
column_list = compute_domain_kwargs.pop("column_list")
if len(column_list) < 2:
raise GreatExpectationsError(
"column_list must contain at least 2 columns"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column_list"] = [
quoted_name(column_name, quote=True) for column_name in column_list
]
else:
accessor_domain_kwargs["column_list"] = column_list
return selectable, compute_domain_kwargs, accessor_domain_kwargs
# Letting selectable fall through
return selectable, compute_domain_kwargs, accessor_domain_kwargs
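# Hedged illustration of the split performed above for a COLUMN domain
# ("engine" stands for an instance of this execution engine; the table and
# column names are hypothetical): the "column" key moves into the accessor
# kwargs while everything else stays in the compute kwargs.
#
#     selectable, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
#         domain_kwargs={"table": "events", "column": "passenger_count"},
#         domain_type=MetricDomainTypes.COLUMN,
#     )
#     # compute_kwargs  -> {"table": "events"}
#     # accessor_kwargs -> {"column": "passenger_count"}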
def resolve_metric_bundle(
self,
metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]],
) -> dict:
"""For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds
bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail
if bundling the metrics together is not possible.
Args:
metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict, dict]]): \
An iterable of tuples, each pairing a MetricConfiguration (the metric's unique identifier) with its metric provider
function (the callable that actually executes the metric) and the domain and value kwargs to pass to that function.
Returns:
A dictionary of metric IDs and their corresponding now-queried values.
"""
resolved_metrics = {}
# We need a different query for each domain (where clause).
queries: Dict[Tuple, dict] = {}
for (
metric_to_resolve,
engine_fn,
compute_domain_kwargs,
accessor_domain_kwargs,
metric_provider_kwargs,
) in metric_fn_bundle:
if not isinstance(compute_domain_kwargs, IDDict):
compute_domain_kwargs = IDDict(compute_domain_kwargs)
domain_id = compute_domain_kwargs.to_id()
if domain_id not in queries:
queries[domain_id] = {
"select": [],
"ids": [],
"domain_kwargs": compute_domain_kwargs,
}
queries[domain_id]["select"].append(
engine_fn.label(metric_to_resolve.metric_name)
)
queries[domain_id]["ids"].append(metric_to_resolve.id)
for query in queries.values():
domain_kwargs = query["domain_kwargs"]
selectable = self.get_domain_records(
domain_kwargs=domain_kwargs,
)
assert len(query["select"]) == len(query["ids"])
try:
"""
If a custom query is passed, selectable will be TextClause and not formatted
as a subquery wrapped in "(subquery) alias". TextClause must first be converted
to TextualSelect using sa.columns() before it can be converted to type Subquery
"""
if isinstance(selectable, TextClause):
res = self.engine.execute(
sa.select(query["select"]).select_from(
selectable.columns().subquery()
)
).fetchall()
else:
res = self.engine.execute(
sa.select(query["select"]).select_from(selectable)
).fetchall()
logger.debug(
f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(domain_kwargs).to_id()}"
)
except OperationalError as oe:
exception_message: str = "An SQL execution Exception occurred. "
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise ExecutionEngineError(message=exception_message)
assert (
len(res) == 1
), "all bundle-computed metrics must be single-value statistics"
assert len(query["ids"]) == len(
res[0]
), "unexpected number of metrics returned"
for idx, id in enumerate(query["ids"]):
resolved_metrics[id] = convert_to_json_serializable(res[0][idx])
return resolved_metrics
def close(self):
"""
Note: Will 20210729
This is a helper function that will close and dispose Sqlalchemy objects that are used to connect to a database.
Databases like Snowflake require the connection and engine to be instantiated and closed separately, and not
doing so has caused problems with hanging connections.
Currently the ExecutionEngine does not support handling connections and engine separately, and will actually
override the engine with a connection in some cases, obfuscating which object is actually used by the
ExecutionEngine to connect to the external database. This will be handled in an upcoming refactor, which will
allow this function to eventually become:
self.connection.close()
self.engine.dispose()
More background can be found here: https://github.com/great-expectations/great_expectations/pull/3104/
"""
if self._engine_backup:
self.engine.close()
self._engine_backup.dispose()
else:
self.engine.dispose()
### Splitter methods for partitioning tables ###
def _split_on_whole_table(self, table_name: str, batch_identifiers: dict):
"""'Split' by returning the whole table"""
# return sa.column(column_name) == batch_identifiers[column_name]
return 1 == 1
def _split_on_column_value(
self, table_name: str, column_name: str, batch_identifiers: dict
):
"""Split using the values in the named column"""
return sa.column(column_name) == batch_identifiers[column_name]
def _split_on_converted_datetime(
self,
table_name: str,
column_name: str,
batch_identifiers: dict,
date_format_string: str = "%Y-%m-%d",
):
"""Convert the values in the named column to the given date_format, and split on that"""
return (
sa.func.strftime(
date_format_string,
sa.column(column_name),
)
== batch_identifiers[column_name]
)
def _split_on_divided_integer(
self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return (
sa.cast(sa.column(column_name) / divisor, sa.Integer)
== batch_identifiers[column_name]
)
def _split_on_mod_integer(
self, table_name: str, column_name: str, mod: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return sa.column(column_name) % mod == batch_identifiers[column_name]
def _split_on_multi_column_values(
self, table_name: str, column_names: List[str], batch_identifiers: dict
):
"""Split on the joint values in the named columns"""
return sa.and_(
*(
sa.column(column_name) == column_value
for column_name, column_value in batch_identifiers.items()
)
)
def _split_on_hashed_column(
self,
table_name: str,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
):
"""Split on the hashed value of the named column"""
return (
sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)
== batch_identifiers[column_name]
)
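# Hedged illustration: each splitter above returns a SQLAlchemy boolean clause
# that _build_selectable_from_batch_spec() later places in a WHERE clause. With
# hypothetical names:
#
#     clause = self._split_on_column_value(
#         table_name="events",
#         column_name="event_date",
#         batch_identifiers={"event_date": "2021-01-01"},
#     )
#     # compiles to roughly:  event_date = '2021-01-01'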
### Sampling methods ###
# _sample_using_limit
# _sample_using_random
# _sample_using_mod
# _sample_using_a_list
# _sample_using_md5
def _sample_using_mod(
self,
column_name,
mod: int,
value: int,
):
"""Take the mod of named column, and only keep rows that match the given value"""
return sa.column(column_name) % mod == value
def _sample_using_a_list(
self,
column_name: str,
value_list: list,
):
"""Match the values in the named column against value_list, and only keep the matches"""
return sa.column(column_name).in_(value_list)
def _sample_using_md5(
self,
column_name: str,
hash_digits: int = 1,
hash_value: str = "f",
):
"""Hash the values in the named column, and split on that"""
return (
sa.func.right(
sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits
)
== hash_value
)
def _build_selectable_from_batch_spec(self, batch_spec) -> Union[Selectable, str]:
table_name: str = batch_spec["table_name"]
if "splitter_method" in batch_spec:
splitter_fn = getattr(self, batch_spec["splitter_method"])
split_clause = splitter_fn(
table_name=table_name,
batch_identifiers=batch_spec["batch_identifiers"],
**batch_spec["splitter_kwargs"],
)
else:
split_clause = True
if "sampling_method" in batch_spec:
if batch_spec["sampling_method"] == "_sample_using_limit":
# SQLalchemy's semantics for LIMIT are different than normal WHERE clauses,
# so the business logic for building the query needs to be different.
if self.engine.dialect.name.lower() == "oracle":
# limit doesn't compile properly for oracle so we will append rownum to query string later
raw_query = (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
)
query = str(
raw_query.compile(
self.engine, compile_kwargs={"literal_binds": True}
)
)
query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"]
return query
else:
return (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
.limit(batch_spec["sampling_kwargs"]["n"])
)
elif batch_spec["sampling_method"] == "_sample_using_random":
num_rows: int = self.engine.execute(
sa.select([sa.func.count()])
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(split_clause)
).scalar()
p: Optional[float] = batch_spec["sampling_kwargs"]["p"] or 1.0
sample_size: int = round(p * num_rows)
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(split_clause)
.order_by(sa.func.random())
.limit(sample_size)
)
else:
sampler_fn = getattr(self, batch_spec["sampling_method"])
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(
sa.and_(
split_clause,
sampler_fn(**batch_spec["sampling_kwargs"]),
)
)
)
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(split_clause)
)
def get_batch_data_and_markers(
self, batch_spec: BatchSpec
) -> Tuple[Any, BatchMarkers]:
if not isinstance(
batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec)
):
raise InvalidBatchSpecError(
f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or
RuntimeQueryBatchSpec (illegal type "{str(type(batch_spec))}" was received).
"""
)
batch_data: Optional[SqlAlchemyBatchData] = None
batch_markers: BatchMarkers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
temp_table_name: Optional[str]
if "bigquery_temp_table" in batch_spec:
temp_table_name = batch_spec.get("bigquery_temp_table")
else:
temp_table_name = None
source_table_name = batch_spec.get("table_name", None)
source_schema_name = batch_spec.get("schema_name", None)
if isinstance(batch_spec, RuntimeQueryBatchSpec):
# query != None is already checked when RuntimeQueryBatchSpec is instantiated
query: str = batch_spec.query
batch_spec.query = "SQLQuery"
batch_data = SqlAlchemyBatchData(
execution_engine=self,
query=query,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec):
if self.engine.dialect.name.lower() == "oracle":
selectable: str = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
else:
selectable: Selectable = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
batch_data = SqlAlchemyBatchData(
execution_engine=self,
selectable=selectable,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
return batch_data, batch_markers
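# Hedged usage sketch (table, column and identifier values are hypothetical;
# SqlAlchemyDatasourceBatchSpec is assumed to be imported earlier in this
# module): a splitter and a sampler can be combined in a single batch spec and
# resolved through get_batch_data_and_markers().
#
#     batch_spec = SqlAlchemyDatasourceBatchSpec(
#         table_name="events",
#         splitter_method="_split_on_column_value",
#         splitter_kwargs={"column_name": "event_date"},
#         batch_identifiers={"event_date": "2021-01-01"},
#         sampling_method="_sample_using_mod",
#         sampling_kwargs={"column_name": "id", "mod": 10, "value": 0},
#     )
#     batch_data, batch_markers = engine.get_batch_data_and_markers(batch_spec=batch_spec)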
|
the-stack_0_7844 | import argparse
from datetime import datetime
from pathlib import Path
from .. import cli
from ..core.repository import Repository
from ..utils import create_filename
from ..utils import log
from ..utils import output_csv
from ..utils import parse_datetime
def parse_dateto(s):
return parse_datetime(s + ' 23:59:59')
def parse_datefrom(s):
return parse_datetime(s + ' 00:00:00')
def get_common_parser():
parser = argparse.ArgumentParser()
parser.set_defaults(
datefrom=None,
dateto=datetime.now(),
enable_cache=True,
nop=False,
repositories=[],
user=None,
verbose=False,
version=cli.__version__,
)
parser.add_argument(
'--from', action='store', dest='datefrom', type=parse_datefrom,
help='filter created_at FROM: e.g. 2020-04-06'
)
parser.add_argument(
'--to', action='store', dest='dateto', type=parse_dateto,
help='filter created_at TO: e.g. 2020-04-06'
)
parser.add_argument(
'--disable-cache', action='store_false', dest='enable_cache',
help='disable cache'
)
parser.add_argument(
'--nop', action='store_true',
help='use as a separator for option handling of positional argument'
)
parser.add_argument(
'--repository', nargs='*', dest='repositories',
help='set repositories'
)
parser.add_argument(
'--user', action='store',
help='set user to filter assignee of pull request'
)
parser.add_argument(
'--verbose', action='store_true',
help='set verbose mode'
)
parser.add_argument(
'--version', action='version', version=f'%(prog)s {cli.__version__}',
help='show version'
)
return parser
def get_csv_path(args, repo_name, gh, create_data):
filename = create_filename(repo_name, args.api)
path = Path(filename)
if args.enable_cache and path.exists():
log.info(f'use existent {path}')
return path
with Repository(args, gh, repo_name) as repo:
data = create_data(repo)
return output_csv(args, data, filename)
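# Hedged usage sketch of the shared parser defined above (repository names are
# hypothetical placeholders):
#
#     parser = get_common_parser()
#     args = parser.parse_args(
#         ['--from', '2020-04-01', '--to', '2020-04-06',
#          '--repository', 'org/repo-a', 'org/repo-b', '--verbose']
#     )
#     # args.datefrom / args.dateto bound the created_at filter and
#     # args.repositories == ['org/repo-a', 'org/repo-b']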
|
the-stack_0_7846 | import json
from tornado.web import RequestHandler
__author__ = 'TIF'
class BaseHandler(RequestHandler):
@property
def sched(self):
return self.application.scheduler
def from_body_get_arguments(self):
body = self.request.body
return json.loads(body)
|
the-stack_0_7848 | '''
Miscellaneous data generator utilities.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def apply_inverse_transforms(y_pred_decoded, inverse_transforms):
'''
Takes a list or Numpy array of decoded predictions and applies a given list of
transforms to them. The list of inverse transforms would usually contain the
inverter functions that some of the image transformations that come with this
data generator return. This function would normally be used to transform predictions
that were made on a transformed image back to the original image.
Arguments:
y_pred_decoded (list or array): Either a list of length `batch_size` that
contains Numpy arrays that contain the predictions for each batch item
or a Numpy array. If this is a list of Numpy arrays, the arrays would
usually have the shape `(num_predictions, 6)`, where `num_predictions`
is different for each batch item. If this is a Numpy array, it would
usually have the shape `(batch_size, num_predictions, 6)`. The last axis
would usually contain the class ID, confidence score, and four bounding
box coordinates for each prediction.
inverse_transforms (list): A nested list of length `batch_size` that contains
for each batch item a list of functions that take one argument (one element
of `y_pred_decoded` if it is a list or one slice along the first axis of
`y_pred_decoded` if it is an array) and return an output of the same shape
and data type.
Returns:
The transformed predictions, which have the same structure as `y_pred_decoded`.
'''
if isinstance(y_pred_decoded, list):
y_pred_decoded_inv = []
for i in range(len(y_pred_decoded)):
y_pred_decoded_inv.append(np.copy(y_pred_decoded[i]))
if y_pred_decoded_inv[i].size > 0: # If there are any predictions for this batch item.
for inverter in inverse_transforms[i]:
if not (inverter is None):
y_pred_decoded_inv[i] = inverter(y_pred_decoded_inv[i])
elif isinstance(y_pred_decoded, np.ndarray):
y_pred_decoded_inv = np.copy(y_pred_decoded)
for i in range(len(y_pred_decoded)):
if y_pred_decoded_inv[i].size > 0: # If there are any predictions for this batch item.
for inverter in inverse_transforms[i]:
if not (inverter is None):
y_pred_decoded_inv[i] = inverter(y_pred_decoded_inv[i])
else:
raise ValueError("`y_pred_decoded` must be either a list or a Numpy array.")
return y_pred_decoded_inv
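if __name__ == '__main__':  # pragma: no cover - illustrative sketch only
    # Hedged example of applying one inverter per batch item. The horizontal
    # un-flip below is hypothetical and assumes decoded rows ordered as
    # (class_id, confidence, xmin, ymin, xmax, ymax) on an image 100 px wide.
    def _unflip_horizontal(labels, img_width=100):
        labels = np.copy(labels)
        labels[:, [2, 4]] = img_width - labels[:, [4, 2]]
        return labels

    y_pred_decoded = [np.array([[1.0, 0.9, 10.0, 20.0, 30.0, 40.0]])]
    inverse_transforms = [[_unflip_horizontal]]
    print(apply_inverse_transforms(y_pred_decoded, inverse_transforms))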
|
the-stack_0_7849 | #! python
"""
Class `CountRunner` to report read status of FL reads,
as well as make abundance report.
"""
import logging
import os.path as op
from .CountingUtils import read_group_file, output_read_count_FL, make_abundance_file
__author__ = '[email protected]'
log = logging.getLogger(__name__)
class CountRunner(object):
"""
Compute read status of FL CCS reads and output abundance report.
"""
def __init__(self, group_filename,
transcript_to_reads_dict, read_to_length_dict,
output_read_stat_filename,
output_abundance_filename):
"""
Parameters:
group_filename -- an input group file associating collapsed isoforms with FL reads.
transcript_to_reads_dict -- {isoform: zmws} dict
output_read_stat_filename -- an output FL read status report
output_abundance_filename -- an output abundance report
"""
# output read status report and abundance report
self.read_stat_fn = output_read_stat_filename
self.abundance_fn = output_abundance_filename
self.group_filename = group_filename # input, group file of collapsed isoforms
self.transcript_to_reads_dict = transcript_to_reads_dict # input: {isoforms: reads}
self.read_to_length_dict = read_to_length_dict # input {read: length}
self.sample_prefix = None
self.validate_inputs()
def __str__(self):
return ("<%s (%s, zmw) to count reads abundance of isoforms>\n" %
(self.__class__.__name__, self.group_filename))
def validate_inputs(self):
"""Validate existence of input files."""
logging.info("Validing inputs.")
if not op.exists(self.group_filename):
raise IOError("Input group file %s does not exist" % self.group_filename)
def run(self, restricted_movies=None):
"""
Compute read status for FL reads, and make abundance report.
Parameters:
restricted_movies -- if is None, process reads from ALL movies; otherwise
only process reads in the list of restricted movies.
"""
# Read cid info from the input group file.
cid_info = read_group_file(group_filename=self.group_filename,
sample_prefixes=self.sample_prefix)
# Output FL read status
logging.debug("Computing read status of FL reads.")
output_read_count_FL(cid_info=cid_info,
output_filename=self.read_stat_fn,
sample_prefix=self.sample_prefix,
transcript_to_reads_dict=self.transcript_to_reads_dict,
read_to_length_dict=self.read_to_length_dict,
output_mode='w', restricted_movies=restricted_movies)
logging.info("Read status of FL reads written to %s", self.read_stat_fn)
logging.info("IsoSeqS does not use NFL reads, don't append read status of nFL reads.")
# Make abundance file
make_abundance_file(read_stat_filename=self.read_stat_fn,
output_filename=self.abundance_fn,
given_total=None,
restricted_movies=restricted_movies,
write_header_comments=True)
logging.info("Abundance file written to %s", self.abundance_fn)
|
the-stack_0_7850 | #!/usr/bin/python
from collections import OrderedDict
from Qt import QtGui, QtCore, QtWidgets
from NodeGraphQt.constants import (IN_PORT, OUT_PORT,
NODE_WIDTH, NODE_HEIGHT,
NODE_ICON_SIZE, ICON_NODE_BASE,
NODE_SEL_COLOR, NODE_SEL_BORDER_COLOR,
PORT_FALLOFF, Z_VAL_NODE,
ITEM_CACHE_MODE)
from NodeGraphQt.errors import NodeWidgetError
from NodeGraphQt.qgraphics.node_abstract import AbstractNodeItem
from NodeGraphQt.qgraphics.node_overlay_disabled import XDisabledItem
from NodeGraphQt.qgraphics.node_text_item import NodeTextItem
from NodeGraphQt.qgraphics.port import PortItem, CustomPortItem
class NodeItem(AbstractNodeItem):
"""
Base Node item.
Args:
name (str): name displayed on the node.
parent (QtWidgets.QGraphicsItem): parent item.
"""
def __init__(self, name='node', parent=None):
super(NodeItem, self).__init__(name, parent)
pixmap = QtGui.QPixmap(ICON_NODE_BASE)
if pixmap.size().height() > NODE_ICON_SIZE:
pixmap = pixmap.scaledToHeight(
NODE_ICON_SIZE, QtCore.Qt.SmoothTransformation
)
self._properties['icon'] = ICON_NODE_BASE
self._icon_item = QtWidgets.QGraphicsPixmapItem(pixmap, self)
self._icon_item.setTransformationMode(QtCore.Qt.SmoothTransformation)
self._text_item = NodeTextItem(self.name, self)
self._x_item = XDisabledItem(self, 'DISABLED')
self._input_items = OrderedDict()
self._output_items = OrderedDict()
self._widgets = OrderedDict()
self._proxy_mode = False
self._proxy_mode_threshold = 70
def paint(self, painter, option, widget):
"""
Draws the node base, not the ports or text.
Args:
painter (QtGui.QPainter): painter used for drawing the item.
option (QtGui.QStyleOptionGraphicsItem):
used to describe the parameters needed to draw.
widget (QtWidgets.QWidget): not used.
"""
self.auto_switch_mode()
painter.save()
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.NoBrush)
# base background.
margin = 1.0
rect = self.boundingRect()
rect = QtCore.QRectF(rect.left() + margin,
rect.top() + margin,
rect.width() - (margin * 2),
rect.height() - (margin * 2))
radius = 4.0
painter.setBrush(QtGui.QColor(*self.color))
painter.drawRoundedRect(rect, radius, radius)
# light overlay on background when selected.
if self.selected:
painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR))
painter.drawRoundedRect(rect, radius, radius)
# node name background.
padding = 3.0, 2.0
text_rect = self._text_item.boundingRect()
text_rect = QtCore.QRectF(text_rect.x() + padding[0],
rect.y() + padding[1],
rect.width() - padding[0] - margin,
text_rect.height() - (padding[1] * 2))
if self.selected:
painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR))
else:
painter.setBrush(QtGui.QColor(0, 0, 0, 80))
painter.drawRoundedRect(text_rect, 3.0, 3.0)
# node border
if self.selected:
border_width = 1.2
border_color = QtGui.QColor(*NODE_SEL_BORDER_COLOR)
else:
border_width = 0.8
border_color = QtGui.QColor(*self.border_color)
border_rect = QtCore.QRectF(rect.left(), rect.top(),
rect.width(), rect.height())
pen = QtGui.QPen(border_color, border_width)
pen.setCosmetic(self.viewer().get_zoom() < 0.0)
path = QtGui.QPainterPath()
path.addRoundedRect(border_rect, radius, radius)
painter.setBrush(QtCore.Qt.NoBrush)
painter.setPen(pen)
painter.drawPath(path)
painter.restore()
def mousePressEvent(self, event):
"""
Re-implemented to ignore event if LMB is over port collision area.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent): mouse event.
"""
if event.button() == QtCore.Qt.LeftButton:
for p in self._input_items.keys():
if p.hovered:
event.ignore()
return
for p in self._output_items.keys():
if p.hovered:
event.ignore()
return
super(NodeItem, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
"""
Re-implemented to ignore event if Alt modifier is pressed.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent): mouse event.
"""
if event.modifiers() == QtCore.Qt.AltModifier:
event.ignore()
return
super(NodeItem, self).mouseReleaseEvent(event)
def mouseDoubleClickEvent(self, event):
"""
Re-implemented to emit "node_double_clicked" signal.
Args:
event (QtWidgets.QGraphicsSceneMouseEvent): mouse event.
"""
if event.button() == QtCore.Qt.LeftButton:
# enable text item edit mode.
items = self.scene().items(event.scenePos())
if self._text_item in items:
self._text_item.set_editable(True)
self._text_item.setFocus()
event.ignore()
return
viewer = self.viewer()
if viewer:
viewer.node_double_clicked.emit(self.id)
super(NodeItem, self).mouseDoubleClickEvent(event)
def itemChange(self, change, value):
"""
Re-implemented to update pipes on selection changed.
Args:
change:
value:
"""
if change == self.ItemSelectedChange and self.scene():
self.reset_pipes()
if value:
self.highlight_pipes()
self.setZValue(Z_VAL_NODE)
if not self.selected:
self.setZValue(Z_VAL_NODE + 1)
return super(NodeItem, self).itemChange(change, value)
def _tooltip_disable(self, state):
"""
Updates the node tooltip when the node is enabled/disabled.
Args:
state (bool): node disable state.
"""
tooltip = '<b>{}</b>'.format(self.name)
if state:
tooltip += ' <font color="red"><b>(DISABLED)</b></font>'
tooltip += '<br/>{}<br/>'.format(self.type_)
self.setToolTip(tooltip)
def _set_base_size(self, add_w=0.0, add_h=0.0):
"""
Sets the initial base size for the node.
Args:
add_w (float): add additional width.
add_h (float): add additional height.
"""
self._width, self._height = self.calc_size(add_w, add_h)
if self._width < NODE_WIDTH:
self._width = NODE_WIDTH
if self._height < NODE_HEIGHT:
self._height = NODE_HEIGHT
def _set_text_color(self, color):
"""
set text color.
Args:
color (tuple): color value in (r, g, b, a).
"""
text_color = QtGui.QColor(*color)
for port, text in self._input_items.items():
text.setDefaultTextColor(text_color)
for port, text in self._output_items.items():
text.setDefaultTextColor(text_color)
self._text_item.setDefaultTextColor(text_color)
def activate_pipes(self):
"""
active pipe color.
"""
ports = self.inputs + self.outputs
for port in ports:
for pipe in port.connected_pipes:
pipe.activate()
def highlight_pipes(self):
"""
Highlight pipe color.
"""
ports = self.inputs + self.outputs
for port in ports:
for pipe in port.connected_pipes:
pipe.highlight()
def reset_pipes(self):
"""
Reset all the pipe colors.
"""
ports = self.inputs + self.outputs
for port in ports:
for pipe in port.connected_pipes:
pipe.reset()
def calc_size(self, add_w=0.0, add_h=0.0):
"""
Calculates the minimum node size.
Args:
add_w (float): additional width.
add_h (float): additional height.
Returns:
tuple(float, float): width, height.
"""
# width, height from node name text.
text_w = self._text_item.boundingRect().width()
text_h = self._text_item.boundingRect().height()
# width, height from node ports.
port_width = 0.0
p_input_text_width = 0.0
p_output_text_width = 0.0
p_input_height = 0.0
p_output_height = 0.0
for port, text in self._input_items.items():
if not port.isVisible():
continue
if not port_width:
port_width = port.boundingRect().width()
t_width = text.boundingRect().width()
if text.isVisible() and t_width > p_input_text_width:
p_input_text_width = text.boundingRect().width()
p_input_height += port.boundingRect().height()
for port, text in self._output_items.items():
if not port.isVisible():
continue
if not port_width:
port_width = port.boundingRect().width()
t_width = text.boundingRect().width()
if text.isVisible() and t_width > p_output_text_width:
p_output_text_width = text.boundingRect().width()
p_output_height += port.boundingRect().height()
port_text_width = p_input_text_width + p_output_text_width
# width, height from node embedded widgets.
widget_width = 0.0
widget_height = 0.0
for widget in self._widgets.values():
w_width = widget.boundingRect().width()
w_height = widget.boundingRect().height()
if w_width > widget_width:
widget_width = w_width
widget_height += w_height
side_padding = 0.0
if all([widget_width, p_input_text_width, p_output_text_width]):
port_text_width = max([p_input_text_width, p_output_text_width])
port_text_width *= 2
elif widget_width:
side_padding = 10
width = port_width + max([text_w, port_text_width]) + side_padding
height = max([text_h, p_input_height, p_output_height, widget_height])
if widget_width:
# add additional width for node widget.
width += widget_width
if widget_height:
# add bottom margin for node widget.
height += 4.0
height *= 1.05
# additional width, height.
width += add_w
height += add_h
return width, height
def align_icon(self, h_offset=0.0, v_offset=0.0):
"""
Align node icon to the default top left of the node.
Args:
v_offset (float): additional vertical offset.
h_offset (float): additional horizontal offset.
"""
icon_rect = self._icon_item.boundingRect()
text_rect = self._text_item.boundingRect()
x = self.boundingRect().left() + 2.0
y = text_rect.center().y() - (icon_rect.height() / 2)
self._icon_item.setPos(x + h_offset, y + v_offset)
def align_label(self, h_offset=0.0, v_offset=0.0):
"""
Center node label text to the top of the node.
Args:
v_offset (float): vertical offset.
h_offset (float): horizontal offset.
"""
rect = self.boundingRect()
text_rect = self._text_item.boundingRect()
x = rect.center().x() - (text_rect.width() / 2)
self._text_item.setPos(x + h_offset, rect.y() + v_offset)
def align_widgets(self, v_offset=0.0):
"""
Align node widgets to the default center of the node.
Args:
v_offset (float): vertical offset.
"""
if not self._widgets:
return
rect = self.boundingRect()
y = rect.y() + v_offset
inputs = [p for p in self.inputs if p.isVisible()]
outputs = [p for p in self.outputs if p.isVisible()]
for widget in self._widgets.values():
widget_rect = widget.boundingRect()
if not inputs:
x = rect.left() + 10
widget.widget().setTitleAlign('left')
elif not outputs:
x = rect.right() - widget_rect.width() - 10
widget.widget().setTitleAlign('right')
else:
x = rect.center().x() - (widget_rect.width() / 2)
widget.widget().setTitleAlign('center')
widget.setPos(x, y)
y += widget_rect.height()
def align_ports(self, v_offset=0.0):
"""
Align input, output ports in the node layout.
Args:
v_offset (float): port vertical offset.
"""
width = self._width
txt_offset = PORT_FALLOFF - 2
spacing = 1
# adjust input position
inputs = [p for p in self.inputs if p.isVisible()]
if inputs:
port_width = inputs[0].boundingRect().width()
port_height = inputs[0].boundingRect().height()
port_x = (port_width / 2) * -1
port_y = v_offset
for port in inputs:
port.setPos(port_x, port_y)
port_y += port_height + spacing
# adjust input text position
for port, text in self._input_items.items():
if port.isVisible():
txt_x = port.boundingRect().width() / 2 - txt_offset
text.setPos(txt_x, port.y() - 1.5)
# adjust output position
outputs = [p for p in self.outputs if p.isVisible()]
if outputs:
port_width = outputs[0].boundingRect().width()
port_height = outputs[0].boundingRect().height()
port_x = width - (port_width / 2)
port_y = v_offset
for port in outputs:
port.setPos(port_x, port_y)
port_y += port_height + spacing
# adjust output text position
for port, text in self._output_items.items():
if port.isVisible():
txt_width = text.boundingRect().width() - txt_offset
txt_x = port.x() - txt_width
text.setPos(txt_x, port.y() - 1.5)
def draw_node(self):
"""
Re-draw the node item in the scene.
(re-implemented for vertical layout design)
"""
height = self._text_item.boundingRect().height() + 4.0
# setup initial base size.
self._set_base_size(add_h=height)
# set text color when node is initialized.
self._set_text_color(self.text_color)
# set the tooltip
self._tooltip_disable(self.disabled)
# --- set the initial node layout ---
# (do all the graphic item layout offsets here)
# align label text
self.align_label()
# align icon
self.align_icon(h_offset=2.0, v_offset=1.0)
# arrange input and output ports.
self.align_ports(v_offset=height)
# arrange node widgets
self.align_widgets(v_offset=height)
self.update()
def post_init(self, viewer=None, pos=None):
"""
Called after node has been added into the scene.
Adjust the node layout and form after the node has been added.
Args:
viewer (NodeGraphQt.widgets.viewer.NodeViewer): not used
pos (tuple): cursor position.
"""
self.draw_node()
# set initial node position.
if pos:
self.xy_pos = pos
def auto_switch_mode(self):
"""
Decide whether to draw the node with proxy mode.
(this is called at the start in the "self.paint()" function.)
"""
if ITEM_CACHE_MODE is QtWidgets.QGraphicsItem.ItemCoordinateCache:
return
rect = self.sceneBoundingRect()
l = self.viewer().mapToGlobal(
self.viewer().mapFromScene(rect.topLeft()))
r = self.viewer().mapToGlobal(
self.viewer().mapFromScene(rect.topRight()))
# width is the node width in screen
width = r.x() - l.x()
self.set_proxy_mode(width < self._proxy_mode_threshold)
def set_proxy_mode(self, mode):
"""
Set whether to draw the node with proxy mode.
(proxy mode toggles visibility for some qgraphic items in the node.)
Args:
mode (bool): true to enable proxy mode.
"""
if mode is self._proxy_mode:
return
self._proxy_mode = mode
visible = not mode
# disable overlay item.
self._x_item.proxy_mode = self._proxy_mode
# node widget visibility.
for w in self._widgets.values():
w.widget().setVisible(visible)
# input port text visibility.
for port, text in self._input_items.items():
if port.display_name:
text.setVisible(visible)
# output port text visibility.
for port, text in self._output_items.items():
if port.display_name:
text.setVisible(visible)
self._text_item.setVisible(visible)
self._icon_item.setVisible(visible)
@property
def icon(self):
return self._properties['icon']
@icon.setter
def icon(self, path=None):
self._properties['icon'] = path
path = path or ICON_NODE_BASE
pixmap = QtGui.QPixmap(path)
if pixmap.size().height() > NODE_ICON_SIZE:
pixmap = pixmap.scaledToHeight(NODE_ICON_SIZE,
QtCore.Qt.SmoothTransformation)
self._icon_item.setPixmap(pixmap)
if self.scene():
self.post_init()
self.update()
@AbstractNodeItem.width.setter
def width(self, width=0.0):
w, h = self.calc_size()
width = width if width > w else w
AbstractNodeItem.width.fset(self, width)
@AbstractNodeItem.height.setter
def height(self, height=0.0):
w, h = self.calc_size()
h = 70 if h < 70 else h
height = height if height > h else h
AbstractNodeItem.height.fset(self, height)
@AbstractNodeItem.disabled.setter
def disabled(self, state=False):
AbstractNodeItem.disabled.fset(self, state)
for n, w in self._widgets.items():
w.widget().setDisabled(state)
self._tooltip_disable(state)
self._x_item.setVisible(state)
@AbstractNodeItem.selected.setter
def selected(self, selected=False):
AbstractNodeItem.selected.fset(self, selected)
if selected:
self.highlight_pipes()
@AbstractNodeItem.name.setter
def name(self, name=''):
AbstractNodeItem.name.fset(self, name)
if name == self._text_item.toPlainText():
return
self._text_item.setPlainText(name)
if self.scene():
self.align_label()
self.update()
@AbstractNodeItem.color.setter
def color(self, color=(100, 100, 100, 255)):
AbstractNodeItem.color.fset(self, color)
if self.scene():
self.scene().update()
self.update()
@AbstractNodeItem.text_color.setter
def text_color(self, color=(100, 100, 100, 255)):
AbstractNodeItem.text_color.fset(self, color)
self._set_text_color(color)
self.update()
@property
def text_item(self):
"""
Get the node name text qgraphics item.
Returns:
NodeTextItem: node text object.
"""
return self._text_item
@property
def inputs(self):
"""
Returns:
list[PortItem]: input port graphic items.
"""
return list(self._input_items.keys())
@property
def outputs(self):
"""
Returns:
list[PortItem]: output port graphic items.
"""
return list(self._output_items.keys())
def _add_port(self, port):
"""
Adds a port qgraphics item into the node.
Args:
port (PortItem): port item.
Returns:
PortItem: port qgraphics item.
"""
text = QtWidgets.QGraphicsTextItem(port.name, self)
text.font().setPointSize(8)
text.setFont(text.font())
text.setVisible(port.display_name)
text.setCacheMode(ITEM_CACHE_MODE)
if port.port_type == IN_PORT:
self._input_items[port] = text
elif port.port_type == OUT_PORT:
self._output_items[port] = text
if self.scene():
self.post_init()
return port
def add_input(self, name='input', multi_port=False, display_name=True,
locked=False, painter_func=None):
"""
Adds a port qgraphics item into the node with the "port_type" set as
IN_PORT.
Args:
name (str): name for the port.
multi_port (bool): allow multiple connections.
display_name (bool): display the port name.
locked (bool): locked state.
painter_func (function): custom paint function.
Returns:
PortItem: input port qgraphics item.
"""
if painter_func:
port = CustomPortItem(self, painter_func)
else:
port = PortItem(self)
port.name = name
port.port_type = IN_PORT
port.multi_connection = multi_port
port.display_name = display_name
port.locked = locked
return self._add_port(port)
def add_output(self, name='output', multi_port=False, display_name=True,
locked=False, painter_func=None):
"""
Adds a port qgraphics item into the node with the "port_type" set as
OUT_PORT.
Args:
name (str): name for the port.
multi_port (bool): allow multiple connections.
display_name (bool): display the port name.
locked (bool): locked state.
painter_func (function): custom paint function.
Returns:
PortItem: output port qgraphics item.
"""
if painter_func:
port = CustomPortItem(self, painter_func)
else:
port = PortItem(self)
port.name = name
port.port_type = OUT_PORT
port.multi_connection = multi_port
port.display_name = display_name
port.locked = locked
return self._add_port(port)
def _delete_port(self, port, text):
"""
Removes port item and port text from node.
Args:
port (PortItem): port object.
text (QtWidgets.QGraphicsTextItem): port text object.
"""
port.setParentItem(None)
text.setParentItem(None)
self.scene().removeItem(port)
self.scene().removeItem(text)
del port
del text
def delete_input(self, port):
"""
Remove input port from node.
Args:
port (PortItem): port object.
"""
self._delete_port(port, self._input_items.pop(port))
def delete_output(self, port):
"""
Remove output port from node.
Args:
port (PortItem): port object.
"""
self._delete_port(port, self._output_items.pop(port))
def get_input_text_item(self, port_item):
"""
Args:
port_item (PortItem): port item.
Returns:
QGraphicsTextItem: graphic item used for the port text.
"""
return self._input_items[port_item]
def get_output_text_item(self, port_item):
"""
Args:
port_item (PortItem): port item.
Returns:
QGraphicsTextItem: graphic item used for the port text.
"""
return self._output_items[port_item]
@property
def widgets(self):
return self._widgets.copy()
def add_widget(self, widget):
self._widgets[widget.get_name()] = widget
def get_widget(self, name):
widget = self._widgets.get(name)
if widget:
return widget
raise NodeWidgetError('node has no widget "{}"'.format(name))
def has_widget(self, name):
return name in self._widgets.keys()
def from_dict(self, node_dict):
super(NodeItem, self).from_dict(node_dict)
widgets = node_dict.pop('widgets', {})
for name, value in widgets.items():
if self._widgets.get(name):
self._widgets[name].value = value
class NodeItemVertical(NodeItem):
"""
Vertical Node item.
Args:
name (str): name displayed on the node.
parent (QtWidgets.QGraphicsItem): parent item.
"""
def __init__(self, name='node', parent=None):
super(NodeItemVertical, self).__init__(name, parent)
font = QtGui.QFont()
font.setPointSize(15)
self.text_item.setFont(font)
def paint(self, painter, option, widget):
"""
Draws the node base, not the ports.
Args:
painter (QtGui.QPainter): painter used for drawing the item.
option (QtGui.QStyleOptionGraphicsItem):
used to describe the parameters needed to draw.
widget (QtWidgets.QWidget): not used.
"""
self.auto_switch_mode()
painter.save()
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.NoBrush)
# base background.
margin = 1.0
rect = self.boundingRect()
rect = QtCore.QRectF(rect.left() + margin,
rect.top() + margin,
rect.width() - (margin * 2),
rect.height() - (margin * 2))
radius = 4.0
painter.setBrush(QtGui.QColor(*self.color))
painter.drawRoundedRect(rect, radius, radius)
# light overlay on background when selected.
if self.selected:
painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR))
painter.drawRoundedRect(rect, radius, radius)
# top & bottom edge background.
padding = 2.0
height = 10
if self.selected:
painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR))
else:
painter.setBrush(QtGui.QColor(0, 0, 0, 80))
for y in [rect.y() + padding, rect.height() - height - 1]:
edge_rect = QtCore.QRectF(rect.x() + padding, y,
rect.width() - (padding * 2), height)
painter.drawRoundedRect(edge_rect, 3.0, 3.0)
# node border
border_width = 0.8
border_color = QtGui.QColor(*self.border_color)
if self.selected:
border_width = 1.2
border_color = QtGui.QColor(*NODE_SEL_BORDER_COLOR)
border_rect = QtCore.QRectF(rect.left(), rect.top(),
rect.width(), rect.height())
pen = QtGui.QPen(border_color, border_width)
pen.setCosmetic(self.viewer().get_zoom() < 0.0)
painter.setBrush(QtCore.Qt.NoBrush)
painter.setPen(pen)
painter.drawRoundedRect(border_rect, radius, radius)
painter.restore()
def align_icon(self, h_offset=0.0, v_offset=0.0):
"""
Align node icon to the right side of the node.
Args:
v_offset (float): vertical offset.
h_offset (float): horizontal offset.
"""
center_y = self.boundingRect().center().y()
icon_rect = self._icon_item.boundingRect()
text_rect = self._text_item.boundingRect()
x = self.boundingRect().right() + h_offset
y = center_y - text_rect.height() - (icon_rect.height() / 2) + v_offset
self._icon_item.setPos(x, y)
def align_label(self, h_offset=0.0, v_offset=0.0):
"""
Align node label to the right side of the node.
Args:
v_offset (float): vertical offset.
h_offset (float): horizontal offset.
"""
rect = self._text_item.boundingRect()
x = self.boundingRect().right() + h_offset
y = self.boundingRect().center().y() - (rect.height() / 2) + v_offset
self.text_item.setPos(x, y)
def align_ports(self, v_offset=0.0):
"""
Align input, output ports in the node layout.
"""
# adjust input position
inputs = [p for p in self.inputs if p.isVisible()]
if inputs:
port_width = inputs[0].boundingRect().width()
port_height = inputs[0].boundingRect().height()
half_width = port_width/2
delta = self._width / (len(inputs)+1)
port_x = delta
port_y = (port_height / 2) * -1
for port in inputs:
port.setPos(port_x - half_width, port_y)
port_x += delta
# adjust output position
outputs = [p for p in self.outputs if p.isVisible()]
if outputs:
port_width = outputs[0].boundingRect().width()
port_height = outputs[0].boundingRect().height()
half_width = port_width / 2
delta = self._width / (len(outputs)+1)
port_x = delta
port_y = self._height - (port_height / 2)
for port in outputs:
port.setPos(port_x-half_width, port_y)
port_x += delta
def align_widgets(self, v_offset=0.0):
"""
Align node widgets to the default center of the node.
Args:
v_offset (float): vertical offset.
"""
if not self._widgets:
return
rect = self.boundingRect()
y = rect.center().y() + v_offset
widget_height = 0.0
for widget in self._widgets.values():
widget_rect = widget.boundingRect()
widget_height += widget_rect.height()
y -= widget_height / 2
for widget in self._widgets.values():
widget_rect = widget.boundingRect()
x = rect.center().x() - (widget_rect.width() / 2)
widget.widget().setTitleAlign('center')
widget.setPos(x, y)
y += widget_rect.height()
def draw_node(self):
"""
Re-draw the node item in the scene.
"""
# setup initial base size.
self._set_base_size()
# set text color when node is initialized.
self._set_text_color(self.text_color)
# set the tooltip
self._tooltip_disable(self.disabled)
# --- setup node layout ---
# (do all the graphic item layout offsets here)
# align label text
self.align_label(h_offset=6)
# align icon
self.align_icon(v_offset=4)
# arrange input and output ports.
self.align_ports()
# arrange node widgets
self.align_widgets()
self.update()
def calc_size(self, add_w=0.0, add_h=0.0):
"""
Calculate minimum node size.
Args:
add_w (float): additional width.
add_h (float): additional height.
"""
p_input_width = 0.0
p_output_width = 0.0
p_input_height = 0.0
p_output_height = 0.0
for port in self._input_items.keys():
if port.isVisible():
p_input_width += port.boundingRect().width()
if not p_input_height:
p_input_height = port.boundingRect().height()
for port in self._output_items.keys():
if port.isVisible():
p_output_width += port.boundingRect().width()
if not p_output_height:
p_output_height = port.boundingRect().height()
widget_width = 0.0
widget_height = 0.0
for widget in self._widgets.values():
if widget.boundingRect().width() > widget_width:
widget_width = widget.boundingRect().width()
widget_height += widget.boundingRect().height()
width = max([p_input_width, p_output_width, widget_width]) + add_w
height = p_input_height + p_output_height + widget_height + add_h
return width, height
def add_input(self, name='input', multi_port=False, display_name=True,
locked=False, painter_func=None):
"""
Adds a port qgraphics item into the node with the "port_type" set as
IN_PORT
Args:
name (str): name for the port.
multi_port (bool): allow multiple connections.
display_name (bool): (not used).
locked (bool): locked state.
painter_func (function): custom paint function.
Returns:
PortItem: port qgraphics item.
"""
return super(NodeItemVertical, self).add_input(
name, multi_port, False, locked, painter_func)
def add_output(self, name='output', multi_port=False, display_name=True,
locked=False, painter_func=None):
"""
Adds a port qgraphics item into the node with the "port_type" set as
OUT_PORT
Args:
name (str): name for the port.
multi_port (bool): allow multiple connections.
display_name (bool): (not used).
locked (bool): locked state.
painter_func (function): custom paint function.
Returns:
PortItem: port qgraphics item.
"""
return super(NodeItemVertical, self).add_output(
name, multi_port, False, locked, painter_func)
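# Hedged usage sketch (a QtWidgets.QApplication must already exist before any
# QGraphicsItem is constructed; node, port and scene names are illustrative):
#
#     node = NodeItem(name='example node')
#     node.add_input('in port')
#     node.add_output('out port', multi_port=True)
#     scene.addItem(node)              # "scene" is an assumed QGraphicsScene
#     node.post_init(pos=(0.0, 0.0))   # lay out ports, label and icon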
|
the-stack_0_7851 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, XEN and Parallels.
"""
import binascii
import collections
from collections import deque
import contextlib
import errno
import functools
import glob
import itertools
import operator
import os
import pwd
import random
import shutil
import tempfile
import time
import uuid
from castellan import key_manager
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick import encryptors
from os_brick.encryptors import luks as luks_encryptor
from os_brick import exception as brick_exception
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.api.metadata import password
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import crypto
from nova import exception
from nova.i18n import _
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
from nova import rc_fields
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import remotefs
from nova.virt import netutils
from nova.volume import cinder
libvirt = None
uefi_logged = False
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
DEFAULT_UEFI_LOADER_PATH = {
"x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
"aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
}
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0 console=hvc0"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
class InjectionInfo(collections.namedtuple(
'InjectionInfo', ['network_info', 'files', 'admin_pass'])):
__slots__ = ()
def __repr__(self):
return ('InjectionInfo(network_info=%r, files=%r, '
'admin_pass=<SANITIZED>)') % (self.network_info, self.files)
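# For illustration, the admin password is never echoed by repr():
#     repr(InjectionInfo(network_info=None, files=[], admin_pass='secret'))
#     -> "InjectionInfo(network_info=None, files=[], admin_pass=<SANITIZED>)"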
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'drbd=nova.virt.libvirt.volume.drbd.LibvirtDRBDVolumeDriver',
'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
'fibre_channel='
'nova.virt.libvirt.volume.fibrechannel.'
'LibvirtFibreChannelVolumeDriver',
'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver',
'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver',
'vzstorage='
'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',
'veritas_hyperscale='
'nova.virt.libvirt.volume.vrtshyperscale.'
'LibvirtHyperScaleVolumeDriver',
'storpool=nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver',
]
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
# For information about when MIN_LIBVIRT_VERSION and
# NEXT_MIN_LIBVIRT_VERSION can be changed, consult
#
# https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix
#
# Currently this is effectively the min version for i686/x86_64
# + KVM/QEMU, as other architectures/hypervisors require newer
# versions. Over time, this will become a common min version
# for all architectures/hypervisors, as this value rises to
# meet them.
MIN_LIBVIRT_VERSION = (1, 2, 9)
MIN_QEMU_VERSION = (2, 1, 0)
# TODO(berrange): Re-evaluate this at start of each release cycle
# to decide if we want to plan a future min version bump.
# MIN_LIBVIRT_VERSION can be updated to match this after
# NEXT_MIN_LIBVIRT_VERSION has been at a higher value for
# one cycle
NEXT_MIN_LIBVIRT_VERSION = (3, 0, 0)
NEXT_MIN_QEMU_VERSION = (2, 8, 0)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
# Libvirt version 1.2.17 is required for successful block live migration
# of vm booted from image with attached devices
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
# PowerPC based hosts that support NUMA using libvirt
MIN_LIBVIRT_NUMA_VERSION_PPC = (1, 2, 19)
# Versions of libvirt with known NUMA topology issues
# See bug #1449028
BAD_LIBVIRT_NUMA_VERSIONS = [(1, 2, 9, 2)]
# Versions of libvirt with broken cpu pinning support. This excludes
# versions of libvirt with broken NUMA support since pinning needs
# NUMA
# See bug #1438226
BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)]
# Virtuozzo driver support
MIN_VIRTUOZZO_VERSION = (7, 0, 0)
MIN_LIBVIRT_VIRTUOZZO_VERSION = (1, 2, 12)
# Ability to set the user guest password with Qemu
MIN_LIBVIRT_SET_ADMIN_PASSWD = (1, 2, 16)
# Ability to set the user guest password with parallels
MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD = (2, 0, 0)
# s/390 & s/390x architectures with KVM
MIN_LIBVIRT_KVM_S390_VERSION = (1, 2, 13)
MIN_QEMU_S390_VERSION = (2, 3, 0)
# libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled.
# libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)
# Use the "logd" backend for handling stdout/stderr from QEMU processes.
MIN_LIBVIRT_VIRTLOGD = (1, 3, 3)
MIN_QEMU_VIRTLOGD = (2, 7, 0)
# ppc64/ppc64le architectures with KVM
# NOTE(rfolco): Same levels for Libvirt/Qemu on Big Endian and Little
# Endian giving the nuance around guest vs host architectures
MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12)
# aarch64 architecture with KVM
# 'chardev' support got sorted out in 3.6.0
MIN_LIBVIRT_KVM_AARCH64_VERSION = (3, 6, 0)
# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)
# number of serial console limit
QEMU_MAX_SERIAL_PORTS = 4
# Qemu supports 4 serial consoles, we remove 1 because of the PTY one defined
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1
# realtime support
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)
# libvirt postcopy support
MIN_LIBVIRT_POSTCOPY_VERSION = (1, 3, 3)
# qemu postcopy support
MIN_QEMU_POSTCOPY_VERSION = (2, 5, 0)
MIN_LIBVIRT_OTHER_ARCH = {
fields.Architecture.S390: MIN_LIBVIRT_KVM_S390_VERSION,
fields.Architecture.S390X: MIN_LIBVIRT_KVM_S390_VERSION,
fields.Architecture.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION,
fields.Architecture.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION,
fields.Architecture.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION,
fields.Architecture.AARCH64: MIN_LIBVIRT_KVM_AARCH64_VERSION,
}
MIN_QEMU_OTHER_ARCH = {
fields.Architecture.S390: MIN_QEMU_S390_VERSION,
fields.Architecture.S390X: MIN_QEMU_S390_VERSION,
}
# perf events support
MIN_LIBVIRT_PERF_VERSION = (2, 0, 0)
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
'mbml': 'mbm_local',
'mbmt': 'mbm_total',
}
# Mediated devices support
MIN_LIBVIRT_MDEV_SUPPORT = (3, 4, 0)
# libvirt>=3.10 is required for volume multiattach if qemu<2.10.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1378242
# for details.
MIN_LIBVIRT_MULTIATTACH = (3, 10, 0)
MIN_LIBVIRT_LUKS_VERSION = (2, 2, 0)
MIN_QEMU_LUKS_VERSION = (2, 6, 0)
VGPU_RESOURCE_SEMAPHORE = "vgpu_resources"
MIN_MIGRATION_SPEED_BW = 1 # 1 MiB/s
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
"supports_migrate_to_same_host": False,
"supports_attach_interface": True,
"supports_device_tagging": True,
"supports_tagged_attach_interface": True,
"supports_tagged_attach_volume": True,
"supports_extend_volume": True,
# Multiattach support is conditional on qemu and libvirt versions
# determined in init_host.
"supports_multiattach": False
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
libvirt_migrate.libvirt = libvirt
self._host = host.Host(self._uri(), read_only,
lifecycle_event_handler=self.emit_event,
conn_event_handler=self._handle_conn_event)
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._caps = None
self._supported_perf_events = []
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
host=self._host)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
# TODO(mriedem): Long-term we should load up the volume drivers on
# demand as needed rather than doing this on startup, as there might
# be unsupported volume drivers in this list based on the underlying
# platform.
self.volume_drivers = self._get_volume_drivers()
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
'qemu')
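        # For illustration (values are hypothetical): a nova.conf entry such
        # as "disk_cachemodes = file=directsync,block=none" is parsed below
        # into {'file': 'directsync', 'block': 'none'}.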
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warning('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.',
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = cinder.API()
self._image_api = image.API()
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
}
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
CONF.libvirt.sysinfo_serial)
self.job_tracker = instancejobtracker.InstanceJobTracker()
self._remotefs = remotefs.RemoteFilesystem()
self._live_migration_flags = self._block_migration_flags = 0
self.active_migrations = {}
# Compute reserved hugepages from conf file at the very
# beginning to ensure any syntax error will be reported and
# avoid any re-calculation when computing resources.
self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()
def _get_volume_drivers(self):
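        """Build a mapping of volume driver type names to instantiated
        driver objects, skipping drivers whose connector protocol is not
        supported on this host.
        """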
driver_registry = dict()
for driver_str in libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
try:
driver_registry[driver_type] = driver_class(self._host)
except brick_exception.InvalidConnectorProtocol:
LOG.debug('Unable to load volume driver %s. It is not '
'supported on this host.', driver)
return driver_registry
@property
def disk_cachemode(self):
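        """Default disk cache mode: 'none' when the instances_path
        filesystem supports O_DIRECT, otherwise 'writethrough'.
        """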
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
            # Some filesystems don't support O_DIRECT though. For those we
            # fall back to 'writethrough' which gives host crash safety, and
# is safe for migration provided the filesystem is cache coherent
# (cluster filesystems typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not utils.supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
# Shareable disks like for a multi-attach volume need to have the
# driver cache disabled.
if getattr(conf, 'shareable', False):
conf.driver_cache = 'none'
else:
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def _do_quality_warnings(self):
"""Warn about potential configuration issues.
This will log a warning message for things such as untested driver or
host arch configurations in order to indicate potential issues to
administrators.
"""
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (fields.Architecture.I686,
fields.Architecture.X86_64)):
LOG.warning('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://docs.openstack.org/'
'nova/latest/user/support-matrix.html',
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
if CONF.vnc.keymap:
LOG.warning('The option "[vnc] keymap" has been deprecated '
'in favor of configuration within the guest. '
'Update nova.conf to address this change and '
'refer to bug #1682020 for more information.')
if CONF.spice.keymap:
LOG.warning('The option "[spice] keymap" has been deprecated '
'in favor of configuration within the guest. '
'Update nova.conf to address this change and '
'refer to bug #1682020 for more information.')
def _handle_conn_event(self, enabled, reason):
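        """Log a libvirt connection state change and enable or disable
        this compute host accordingly.
        """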
LOG.info("Connection event '%(enabled)d' reason '%(reason)s'",
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)
def init_host(self, host):
self._host.initialize()
self._do_quality_warnings()
self._parse_migration_flags()
self._supported_perf_events = self._get_supported_perf_events()
self._set_multiattach_support()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
LOG.warning("Running libvirt-lxc without user namespaces is "
"dangerous. Containers spawned by Nova will be run "
"as the host's root user. It is highly suggested "
"that user namespaces be used in a public or "
"multi-tenant environment.")
        # Stop libguestfs from using KVM unless we're also configured to
        # use it. This solves a problem where people need to stop Nova
        # using KVM because nested-virt is broken
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
raise exception.InternalError(
_('Nova requires libvirt version %s or greater.') %
libvirt_utils.version_to_string(MIN_LIBVIRT_VERSION))
if CONF.libvirt.virt_type in ("qemu", "kvm"):
if self._host.has_min_version(hv_ver=MIN_QEMU_VERSION):
# "qemu-img info" calls are version dependent, so we need to
# store the version in the images module.
images.QEMU_VERSION = self._host.get_connection().getVersion()
else:
raise exception.InternalError(
_('Nova requires QEMU version %s or greater.') %
libvirt_utils.version_to_string(MIN_QEMU_VERSION))
if CONF.libvirt.virt_type == 'parallels':
if not self._host.has_min_version(hv_ver=MIN_VIRTUOZZO_VERSION):
raise exception.InternalError(
_('Nova requires Virtuozzo version %s or greater.') %
libvirt_utils.version_to_string(MIN_VIRTUOZZO_VERSION))
if not self._host.has_min_version(MIN_LIBVIRT_VIRTUOZZO_VERSION):
raise exception.InternalError(
_('Running Nova with parallels virt_type requires '
'libvirt version %s') %
libvirt_utils.version_to_string(
MIN_LIBVIRT_VIRTUOZZO_VERSION))
# Give the cloud admin a heads up if we are intending to
# change the MIN_LIBVIRT_VERSION in the next release.
if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
LOG.warning('Running Nova with a libvirt version less than '
'%(version)s is deprecated. The required minimum '
'version of libvirt will be raised to %(version)s '
'in the next release.',
{'version': libvirt_utils.version_to_string(
NEXT_MIN_LIBVIRT_VERSION)})
if (CONF.libvirt.virt_type in ("qemu", "kvm") and
not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
LOG.warning('Running Nova with a QEMU version less than '
'%(version)s is deprecated. The required minimum '
'version of QEMU will be raised to %(version)s '
'in the next release.',
{'version': libvirt_utils.version_to_string(
NEXT_MIN_QEMU_VERSION)})
kvm_arch = fields.Architecture.from_host()
if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
not self._host.has_min_version(
MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch),
MIN_QEMU_OTHER_ARCH.get(kvm_arch))):
if MIN_QEMU_OTHER_ARCH.get(kvm_arch):
raise exception.InternalError(
_('Running Nova with qemu/kvm virt_type on %(arch)s '
'requires libvirt version %(libvirt_ver)s and '
'qemu version %(qemu_ver)s, or greater') %
{'arch': kvm_arch,
'libvirt_ver': libvirt_utils.version_to_string(
MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)),
'qemu_ver': libvirt_utils.version_to_string(
MIN_QEMU_OTHER_ARCH.get(kvm_arch))})
# no qemu version in the error message
raise exception.InternalError(
_('Running Nova with qemu/kvm virt_type on %(arch)s '
'requires libvirt version %(libvirt_ver)s or greater') %
{'arch': kvm_arch,
'libvirt_ver': libvirt_utils.version_to_string(
MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch))})
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
if self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT):
self._recreate_assigned_mediated_devices()
@staticmethod
def _is_existing_mdev(uuid):
        # FIXME(sbauza): Some kernels can have a uevent race meaning that the
        # libvirt daemon won't know when a mediated device is created unless
        # you restart that daemon. Until all the kernels we support are free
        # of that race, check sysfs directly instead of asking the
        # libvirt API.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1376907 for ref.
return os.path.exists('/sys/bus/mdev/devices/{0}'.format(uuid))
def _recreate_assigned_mediated_devices(self):
"""Recreate assigned mdevs that could have disappeared if we reboot
the host.
"""
mdevs = self._get_all_assigned_mediated_devices()
requested_types = self._get_supported_vgpu_types()
for (mdev_uuid, instance_uuid) in six.iteritems(mdevs):
if not self._is_existing_mdev(mdev_uuid):
self._create_new_mediated_device(requested_types, mdev_uuid)
def _set_multiattach_support(self):
# Check to see if multiattach is supported. Based on bugzilla
# https://bugzilla.redhat.com/show_bug.cgi?id=1378242 and related
# clones, the shareable flag on a disk device will only work with
# qemu<2.10 or libvirt>=3.10. So check those versions here and set
# the capability appropriately.
if (self._host.has_min_version(lv_ver=MIN_LIBVIRT_MULTIATTACH) or
not self._host.has_min_version(hv_ver=(2, 10, 0))):
self.capabilities['supports_multiattach'] = True
else:
LOG.debug('Volume multiattach is not supported based on current '
'versions of QEMU and libvirt. QEMU must be less than '
'2.10 or libvirt must be greater than or equal to 3.10.')
def _prepare_migration_flags(self):
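        """Build the base live/block migration flag sets; block migrations
        additionally get VIR_MIGRATE_NON_SHARED_INC.
        """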
migration_flags = 0
migration_flags |= libvirt.VIR_MIGRATE_LIVE
# Adding p2p flag only if xen is not in use, because xen does not
# support p2p migrations
if CONF.libvirt.virt_type != 'xen':
migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER
        # Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, the
        # migrated instance will remain defined on the source host
migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
# Adding VIR_MIGRATE_PERSIST_DEST to persist the VM on the
# destination host
migration_flags |= libvirt.VIR_MIGRATE_PERSIST_DEST
live_migration_flags = block_migration_flags = migration_flags
# Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
# will be live-migrations instead
block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC
return (live_migration_flags, block_migration_flags)
def _handle_live_migration_tunnelled(self, migration_flags):
if (CONF.libvirt.live_migration_tunnelled is None or
CONF.libvirt.live_migration_tunnelled):
migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
return migration_flags
def _is_post_copy_available(self):
if self._host.has_min_version(lv_ver=MIN_LIBVIRT_POSTCOPY_VERSION,
hv_ver=MIN_QEMU_POSTCOPY_VERSION):
return True
return False
def _is_virtlogd_available(self):
return self._host.has_min_version(MIN_LIBVIRT_VIRTLOGD,
MIN_QEMU_VIRTLOGD)
def _is_native_luks_available(self):
return self._host.has_min_version(MIN_LIBVIRT_LUKS_VERSION,
MIN_QEMU_LUKS_VERSION)
def _handle_live_migration_post_copy(self, migration_flags):
if CONF.libvirt.live_migration_permit_post_copy:
if self._is_post_copy_available():
migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
else:
LOG.info('The live_migration_permit_post_copy is set '
'to True, but it is not supported.')
return migration_flags
def _handle_live_migration_auto_converge(self, migration_flags):
if (self._is_post_copy_available() and
(migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
LOG.info('The live_migration_permit_post_copy is set to '
'True and post copy live migration is available '
'so auto-converge will not be in use.')
elif CONF.libvirt.live_migration_permit_auto_converge:
migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
return migration_flags
def _parse_migration_flags(self):
(live_migration_flags,
block_migration_flags) = self._prepare_migration_flags()
live_migration_flags = self._handle_live_migration_tunnelled(
live_migration_flags)
block_migration_flags = self._handle_live_migration_tunnelled(
block_migration_flags)
live_migration_flags = self._handle_live_migration_post_copy(
live_migration_flags)
block_migration_flags = self._handle_live_migration_post_copy(
block_migration_flags)
live_migration_flags = self._handle_live_migration_auto_converge(
live_migration_flags)
block_migration_flags = self._handle_live_migration_auto_converge(
block_migration_flags)
self._live_migration_flags = live_migration_flags
self._block_migration_flags = block_migration_flags
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
#
# All libvirt API calls on the libvirt.Connect object should be
# encapsulated by methods on the nova.virt.libvirt.host.Host
# object, rather than directly invoking the libvirt APIs. The goal
# is to avoid a direct dependency on the libvirt API from the
# driver.py file.
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
@staticmethod
def _uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _live_migration_uri(dest):
uris = {
'kvm': 'qemu+%s://%s/system',
'qemu': 'qemu+%s://%s/system',
'xen': 'xenmigr://%s/system',
'parallels': 'parallels+tcp://%s/system',
}
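        # For example (illustrative values): virt_type 'kvm' with the default
        # 'tcp' scheme and dest 'dest-host' yields
        # 'qemu+tcp://dest-host/system'.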
virt_type = CONF.libvirt.virt_type
# TODO(pkoniszewski): Remove fetching live_migration_uri in Pike
uri = CONF.libvirt.live_migration_uri
if uri:
return uri % dest
uri = uris.get(virt_type)
if uri is None:
raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
str_format = (dest,)
if virt_type in ('kvm', 'qemu'):
scheme = CONF.libvirt.live_migration_scheme or 'tcp'
str_format = (scheme, dest)
return uris.get(virt_type) % str_format
@staticmethod
def _migrate_uri(dest):
uri = None
        # Only QEMU live migration supports the migrate-uri parameter
virt_type = CONF.libvirt.virt_type
if virt_type in ('qemu', 'kvm'):
            # QEMU accepts two schemes: tcp and rdma. By default
            # libvirt builds the URI using the remote hostname and the
            # tcp scheme.
uri = 'tcp://%s' % dest
# Because dest might be of type unicode, here we might return value of
# type unicode as well which is not acceptable by libvirt python
# binding when Python 2.7 is in use, so let's convert it explicitly
# back to string. When Python 3.x is in use, libvirt python binding
# accepts unicode type so it is completely fine to do a no-op str(uri)
# conversion which will return value of type unicode.
return uri and str(uri)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._host.get_guest(instance)
return True
except (exception.InternalError, exception.InstanceNotFound):
return False
def estimate_instance_overhead(self, instance_info):
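        """Add one vCPU of overhead to the base estimate when the isolate
        emulator-threads policy is requested.
        """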
overhead = super(LibvirtDriver, self).estimate_instance_overhead(
instance_info)
if isinstance(instance_info, objects.Flavor):
            # A flavor object is passed in the migrate case
emu_policy = hardware.get_emulator_thread_policy_constraint(
instance_info)
if emu_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE:
overhead['vcpus'] += 1
else:
            # An instance object is passed in the spawn case, or a dict is
            # passed when computing resources for an instance
numa_topology = hardware.instance_topology_from_instance(
instance_info)
if numa_topology and numa_topology.emulator_threads_isolated:
overhead['vcpus'] += 1
return overhead
def list_instances(self):
names = []
for guest in self._host.list_guests(only_running=False):
names.append(guest.name)
return names
def list_instance_uuids(self):
uuids = []
for guest in self._host.list_guests(only_running=False):
uuids.append(guest.uuid)
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
LOG.debug('Attempting to teardown container at path %(dir)s with '
'root device: %(rootfs_dev)s',
{'dir': container_dir, 'rootfs_dev': rootfs_dev},
instance=instance)
disk_api.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance, attempt=1):
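        """Power off the guest, retrying up to three times on EBUSY
        errors, and wait until libvirt reports the domain shut down.
        """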
try:
guest = self._host.get_guest(instance)
if CONF.serial_console.enabled:
# This method is called for several events: destroy,
# rebuild, hard-reboot, power-off - For all of these
# events we want to release the serial ports acquired
# for the guest before destroying it.
serials = self._get_serial_ports_from_guest(guest)
for hostname, port in serials:
serial_console.release_port(host=hostname, port=port)
except exception.InstanceNotFound:
guest = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if guest is not None:
try:
old_domid = guest.id
guest.poweroff()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
# Domain already gone. This can safely be ignored.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = guest.get_power_state(self._host)
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
errmsg = e.get_error_message()
if (CONF.libvirt.virt_type == 'lxc' and
errmsg == 'internal error: '
'Some processes refused to die'):
# Some processes in the container didn't die
# fast enough for libvirt. The container will
# eventually die. For now, move on and let
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warning("Cannot destroy instance, operation time out",
instance=instance)
                    reason = _("operation timed out")
raise exception.InstancePowerOffFailure(reason=reason)
elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
if e.get_int1() == errno.EBUSY:
# NOTE(danpb): When libvirt kills a process it sends it
# SIGTERM first and waits 10 seconds. If it hasn't gone
# it sends SIGKILL and waits another 5 seconds. If it
# still hasn't gone then you get this EBUSY error.
# Usually when a QEMU process fails to go away upon
# SIGKILL it is because it is stuck in an
# uninterruptible kernel sleep waiting on I/O from
# some non-responsive server.
# Given the CPU load of the gate tests though, it is
# conceivable that the 15 second timeout is too short,
# particularly if the VM running tempest has a high
# steal time from the cloud host. ie 15 wallclock
                        # seconds may have passed, but the VM might have only
                        # had a few seconds of scheduled run time.
LOG.warning('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s; '
'attempt %(attempt)d of 3',
{'errcode': errcode, 'e': e,
'attempt': attempt},
instance=instance)
with excutils.save_and_reraise_exception() as ctxt:
# Try up to 3 times before giving up.
if attempt < 3:
ctxt.reraise = False
self._destroy(instance, attempt + 1)
return
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info.state
new_domid = dom_info.internal_id
except exception.InstanceNotFound:
LOG.debug("During wait destroy, instance disappeared.",
instance=instance)
state = power_state.SHUTDOWN
if state == power_state.SHUTDOWN:
LOG.info("Instance destroyed successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info("Instance may be started again.", instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info("Going to destroy instance again.", instance=instance)
self._destroy(instance)
else:
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks)
def _undefine_domain(self, instance):
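        """Remove the persistent libvirt configuration for the instance,
        ignoring the case where the domain is already gone.
        """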
try:
guest = self._host.get_guest(instance)
try:
support_uefi = self._has_uefi_support()
guest.delete_configuration(support_uefi)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception() as ctxt:
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug("Called undefine, but domain already gone.",
instance=instance)
ctxt.reraise = False
else:
LOG.error('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode,
'e': encodeutils.exception_to_unicode(e)},
instance=instance)
except exception.InstanceNotFound:
pass
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
# Continue attempting to remove firewall filters for the instance
# until it's done or there is a failure to remove the filters. If
# unfilter fails because the instance is not yet shutdown, try to
# destroy the guest again and then retry the unfilter.
while True:
try:
self.unfilter_instance(instance, network_info)
break
except libvirt.libvirtError as e:
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warning("Instance may be still running, destroy "
"it again.", instance=instance)
self._destroy(instance)
else:
errcode = e.get_error_code()
LOG.exception(_('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = _("Error unfiltering instance.")
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
raise
# FIXME(wangpan): if the instance is booted again here, such as the
# soft reboot operation boot it here, it will become
# "running deleted", should we check and destroy it
# at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device']
if disk_dev is not None:
disk_dev = disk_dev.rpartition("/")[2]
try:
self._disconnect_volume(context, connection_info, instance)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
# delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warning(
"Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s",
{'vol_id': vol.get('volume_id'),
'exc': encodeutils.exception_to_unicode(exc)},
instance=instance)
if destroy_disks:
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'lvm':
self._cleanup_lvm(instance, block_device_info)
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
is_shared_block_storage = False
if migrate_data and 'is_shared_block_storage' in migrate_data:
is_shared_block_storage = migrate_data.is_shared_block_storage
if destroy_disks or is_shared_block_storage:
attempts = int(instance.system_metadata.get('clean_attempts',
'0'))
success = self.delete_instance_files(instance)
# NOTE(mriedem): This is used in the _run_pending_deletes periodic
# task in the compute manager. The tight coupling is not great...
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
instance.save()
self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance, block_device_info):
"""Detaches encrypted volumes attached to instance."""
disks = self._get_instance_disk_info(instance, block_device_info)
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
def _get_serial_ports_from_guest(self, guest, mode=None):
"""Returns an iterator over serial port(s) configured on guest.
:param mode: Should be a value in (None, bind, connect)
"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
# The 'serial' device is the base for x86 platforms. Other platforms
# (e.g. kvm on system z = S390X) can only use 'console' devices.
xpath_mode = "[@mode='%s']" % mode if mode else ""
serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
tcp_devices = tree.findall(serial_tcp)
if len(tcp_devices) == 0:
tcp_devices = tree.findall(console_tcp)
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
def _get_scsi_controller_max_unit(self, guest):
"""Returns the max disk unit used by scsi controller"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
addrs = "./devices/disk[@device='disk']/address[@type='drive']"
ret = []
for obj in tree.findall(addrs):
ret.append(int(obj.get('unit', 0)))
return max(ret)
@staticmethod
def _get_rbd_driver():
return rbd_utils.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
filter_fn = lambda disk: disk.startswith(instance.uuid)
LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
def _cleanup_lvm(self, instance, block_device_info):
"""Delete all LVM disks for given instance object."""
if instance.get('ephemeral_key_uuid') is not None:
self._detach_encrypted_volumes(instance, block_device_info)
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance.uuid
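            # Illustrative naming only: for instance uuid 'abc' this matches
            # volumes like '/dev/<vg>/abc_disk' and '/dev/<vg>/abc_disk.local';
            # actual names come from lvm.list_volumes().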
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disks = [fullpath(disk) for disk in logical_volumes
if belongs_to_instance(disk)]
return disks
return []
def get_volume_connector(self, instance):
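        """Return the os-brick connector properties describing this host,
        used when initializing volume connections with Cinder.
        """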
root_helper = utils.get_root_helper()
return connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
def _cleanup_resize(self, context, instance, network_info):
inst_base = libvirt_utils.get_instance_path(instance)
target = inst_base + '_resize'
        # Deletion can fail over NFS, so retry the deletion as required.
        # Allow a maximum of 5 attempts; in most cases the directory is
        # removed on the second attempt.
attempts = 0
while(os.path.exists(target) and attempts < 5):
shutil.rmtree(target, ignore_errors=True)
if os.path.exists(target):
time.sleep(random.randint(20, 200) / 100.0)
attempts += 1
root_disk = self.image_backend.by_name(instance, 'disk')
# TODO(nic): Set ignore_errors=False in a future release.
# It is set to True here to avoid any upgrade issues surrounding
# instances being in pending resize state when the software is updated;
# in that case there will be no snapshot to remove. Once it can be
# reasonably assumed that no such instances exist in the wild
# anymore, it should be set back to False (the default) so it will
# throw errors, like it should.
if root_disk.exists():
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
# NOTE(mjozefcz):
# self.image_backend.image for some backends recreates instance
# directory and image disk.info - remove it here if exists
# Do not remove inst_base for volume-backed instances since that
# could potentially remove the files on the destination host
# if using shared storage.
if (os.path.exists(inst_base) and not root_disk.exists() and
not compute_utils.is_volume_backed_instance(
context, instance)):
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if instance.host != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def _connect_volume(self, context, connection_info, instance,
encryption=None, allow_native_luks=True):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.connect_volume(connection_info, instance)
self._attach_encryptor(context, connection_info, encryption,
allow_native_luks)
def _should_disconnect_target(self, context, connection_info, instance):
connection_count = 0
# NOTE(jdg): Multiattach is a special case (not to be confused
# with shared_targets). With multiattach we may have a single volume
# attached multiple times to *this* compute node (ie Server-1 and
# Server-2). So, if we receive a call to delete the attachment for
# Server-1 we need to take special care to make sure that the Volume
# isn't also attached to another Server on this Node. Otherwise we
        # will indiscriminately delete the connection for all Servers and
        # that's no good. So check if it's attached multiple times on this
        # node; if it is, we skip the call to brick to delete the connection.
if connection_info.get('multiattach', False):
volume = self._volume_api.get(
context,
driver_block_device.get_volume_id(connection_info))
attachments = volume.get('attachments', {})
if len(attachments) > 1:
# First we get a list of all Server UUID's associated with
# this Host (Compute Node). We're going to use this to
# determine if the Volume being detached is also in-use by
# another Server on this Host, ie just check to see if more
# than one attachment.server_id for this volume is in our
# list of Server UUID's for this Host
servers_this_host = objects.InstanceList.get_uuids_by_host(
context, instance.host)
# NOTE(jdg): nova.volume.cinder translates the
# volume['attachments'] response into a dict which includes
# the Server UUID as the key, so we're using that
# here to check against our server_this_host list
for server_id, data in attachments.items():
if server_id in servers_this_host:
connection_count += 1
        return connection_count <= 1
def _disconnect_volume(self, context, connection_info, instance,
encryption=None):
self._detach_encryptor(context, connection_info, encryption=encryption)
if self._should_disconnect_target(context, connection_info, instance):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.disconnect_volume(connection_info, instance)
else:
LOG.info("Detected multiple connections on this host for volume: "
"%s, skipping target disconnect.",
driver_block_device.get_volume_id(connection_info),
instance=instance)
def _extend_volume(self, connection_info, instance):
vol_driver = self._get_volume_driver(connection_info)
return vol_driver.extend_volume(connection_info, instance)
def _use_native_luks(self, encryption=None):
"""Is LUKS the required provider and native QEMU LUKS available
"""
provider = None
if encryption:
provider = encryption.get('provider', None)
if provider in encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP:
provider = encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider]
return provider == encryptors.LUKS and self._is_native_luks_available()
def _get_volume_config(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
conf = vol_driver.get_config(connection_info, disk_info)
self._set_cache_mode(conf)
return conf
def _get_volume_encryptor(self, connection_info, encryption):
root_helper = utils.get_root_helper()
return encryptors.get_volume_encryptor(root_helper=root_helper,
keymgr=key_manager.API(CONF),
connection_info=connection_info,
**encryption)
def _get_volume_encryption(self, context, connection_info):
"""Get the encryption metadata dict if it is not provided
"""
encryption = {}
volume_id = driver_block_device.get_volume_id(connection_info)
if volume_id:
encryption = encryptors.get_encryption_metadata(context,
self._volume_api, volume_id, connection_info)
return encryption
def _attach_encryptor(self, context, connection_info, encryption,
allow_native_luks):
"""Attach the frontend encryptor if one is required by the volume.
The request context is only used when an encryption metadata dict is
not provided. The encryption metadata dict being populated is then used
to determine if an attempt to attach the encryptor should be made.
If native LUKS decryption is enabled then create a Libvirt volume
secret containing the LUKS passphrase for the volume.
"""
if encryption is None:
encryption = self._get_volume_encryption(context, connection_info)
if (encryption and allow_native_luks and
self._use_native_luks(encryption)):
# NOTE(lyarwood): Fetch the associated key for the volume and
# decode the passphrase from the key.
# FIXME(lyarwood): c-vol currently creates symmetric keys for use
# with volumes, leading to the binary to hex to string conversion
# below.
keymgr = key_manager.API(CONF)
key = keymgr.get(context, encryption['encryption_key_id'])
key_encoded = key.get_encoded()
passphrase = binascii.hexlify(key_encoded).decode('utf-8')
# NOTE(lyarwood): Retain the behaviour of the original os-brick
# encryptors and format any volume that does not identify as
# encrypted with LUKS.
# FIXME(lyarwood): Remove this once c-vol correctly formats
# encrypted volumes during their initial creation:
# https://bugs.launchpad.net/cinder/+bug/1739442
device_path = connection_info.get('data').get('device_path')
if device_path:
root_helper = utils.get_root_helper()
if not luks_encryptor.is_luks(root_helper, device_path):
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor._format_volume(passphrase, **encryption)
# NOTE(lyarwood): Store the passphrase as a libvirt secret locally
# on the compute node. This secret is used later when generating
# the volume config.
volume_id = driver_block_device.get_volume_id(connection_info)
self._host.create_secret('volume', volume_id, password=passphrase)
elif encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
def _detach_encryptor(self, context, connection_info, encryption):
"""Detach the frontend encryptor if one is required by the volume.
The request context is only used when an encryption metadata dict is
not provided. The encryption metadata dict being populated is then used
to determine if an attempt to detach the encryptor should be made.
If native LUKS decryption is enabled then delete previously created
Libvirt volume secret from the host.
"""
volume_id = driver_block_device.get_volume_id(connection_info)
if volume_id and self._host.find_secret('volume', volume_id):
return self._host.delete_secret('volume', volume_id)
if encryption is None:
encryption = self._get_volume_encryption(context, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
def _check_discard_for_attach_volume(self, conf, instance):
"""Perform some checks for volumes configured for discard support.
If discard is configured for the volume, and the guest is using a
configuration known to not work, we will log a message explaining
the reason why.
"""
if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
LOG.debug('Attempting to attach volume %(id)s with discard '
'support enabled to an instance using an '
'unsupported configuration. target_bus = '
'%(bus)s. Trim commands will not be issued to '
'the storage device.',
{'bus': conf.target_bus,
'id': conf.serial},
instance=instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
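        """Attach a Cinder volume to the guest at the given mountpoint,
        connecting the backend first and persisting the new device in the
        domain definition.
        """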
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
        # Note(cfb): If the volume has a custom block size, check that we are
        # using QEMU/KVM and libvirt >= 0.10.2. The presence of a block size
        # is considered mandatory by cinder so we fail if we can't honor the
        # request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
            if CONF.libvirt.virt_type not in ("kvm", "qemu"):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
self._connect_volume(context, connection_info, instance,
encryption=encryption)
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
if disk_info['bus'] == 'scsi':
disk_info['unit'] = self._get_scsi_controller_max_unit(guest) + 1
conf = self._get_volume_config(connection_info, disk_info)
self._check_discard_for_attach_volume(conf, instance)
try:
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(conf, persistent=True, live=live)
# NOTE(artom) If we're attaching with a device role tag, we need to
# rebuild device_metadata. If we're attaching without a role
# tag, we're rebuilding it here needlessly anyways. This isn't a
# massive deal, and it helps reduce code complexity by not having
# to indicate to the virt driver that the attach is tagged. The
# really important optimization of not calling the database unless
# device_metadata has actually changed is done for us by
# instance.save().
instance.device_metadata = self._build_device_metadata(
context, instance)
instance.save()
except Exception:
LOG.exception(_('Failed to attach volume at mountpoint: %s'),
mountpoint, instance=instance)
with excutils.save_and_reraise_exception():
self._disconnect_volume(context, connection_info, instance,
encryption=encryption)
def _swap_volume(self, guest, disk_path, conf, resize_to):
"""Swap existing disk with a new block device."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file. We'll use this
# to redefine the domain if anything fails during the volume swap.
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
support_uefi = self._has_uefi_support()
guest.delete_configuration(support_uefi)
try:
# Start copy with VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file. Use
# VIR_DOMAIN_BLOCK_REBASE_COPY_DEV if it's a block device to
# make sure XML is generated correctly (bug 1691195)
copy_dev = conf.source_type == 'block'
dev.rebase(conf.source_path, copy=True, reuse_ext=True,
copy_dev=copy_dev)
while not dev.is_job_complete():
time.sleep(0.5)
dev.abort_job(pivot=True)
except Exception as exc:
LOG.exception("Failure rebasing volume %(new_path)s on "
"%(old_path)s.", {'new_path': conf.source_path,
'old_path': disk_path})
raise exception.VolumeRebaseFailed(reason=six.text_type(exc))
if resize_to:
dev.resize(resize_to * units.Gi / units.Ki)
# Make sure we will redefine the domain using the updated
# configuration after the volume was swapped. The dump_inactive
# keyword arg controls whether we pull the inactive (persistent)
# or active (live) config from the domain. We want to pull the
# live config after the volume was updated to use when we redefine
# the domain.
xml = guest.get_xml_desc(dump_inactive=False, dump_sensitive=True)
finally:
self._host.write_instance_config(xml)
def swap_volume(self, context, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
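        """Replace the disk backing an attached volume with a new one via
        blockRebase, e.g. during a Cinder volume migration or retype.
        """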
# NOTE(lyarwood): https://bugzilla.redhat.com/show_bug.cgi?id=760547
old_encrypt = self._get_volume_encryption(context, old_connection_info)
new_encrypt = self._get_volume_encryption(context, new_connection_info)
if ((old_encrypt and self._use_native_luks(old_encrypt)) or
(new_encrypt and self._use_native_luks(new_encrypt))):
raise NotImplementedError(_("Swap volume is not supported for "
"encrypted volumes when native LUKS decryption is enabled."))
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
if not guest.get_disk(disk_dev):
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
# NOTE (lyarwood): new_connection_info will be modified by the
# following _connect_volume call down into the volume drivers. The
# majority of the volume drivers will add a device_path that is in turn
# used by _get_volume_config to set the source_path of the
# LibvirtConfigGuestDisk object it returns. We do not explicitly save
# this to the BDM here as the upper compute swap_volume method will
# eventually do this for us.
self._connect_volume(context, new_connection_info, instance)
conf = self._get_volume_config(new_connection_info, disk_info)
if not conf.source_path:
self._disconnect_volume(context, new_connection_info, instance)
raise NotImplementedError(_("Swap only supports host devices"))
try:
self._swap_volume(guest, disk_dev, conf, resize_to)
except exception.VolumeRebaseFailed:
with excutils.save_and_reraise_exception():
self._disconnect_volume(context, new_connection_info, instance)
self._disconnect_volume(context, old_connection_info, instance)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
return xml
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
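        """Detach the volume's guest device, waiting for the detach to
        complete, then disconnect the underlying host connection.
        """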
disk_dev = mountpoint.rpartition("/")[2]
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
# NOTE(lyarwood): The volume must be detached from the VM before
# detaching any attached encryptors or disconnecting the underlying
# volume in _disconnect_volume. Otherwise, the encryptor or volume
# driver may report that the volume is still in use.
wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
disk_dev,
live=live)
wait_for_detach()
except exception.InstanceNotFound:
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
LOG.warning("During detach_volume, instance disappeared.",
instance=instance)
except exception.DeviceNotFound:
# We should still try to disconnect logical device from
# host, an error might have happened during a previous
# call.
LOG.info("Device %s not found in instance.",
disk_dev, instance=instance)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning("During detach_volume, instance disappeared.",
instance=instance)
else:
raise
self._disconnect_volume(context, connection_info, instance,
encryption=encryption)
def extend_volume(self, connection_info, instance):
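        """Extend an attached volume and, if the guest is running, resize
        the corresponding block device so the new size is visible without
        a reboot.
        """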
try:
new_size = self._extend_volume(connection_info, instance)
except NotImplementedError:
raise exception.ExtendVolumeNotSupported()
# Resize the device in QEMU so its size is updated and
# detected by the instance without rebooting.
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
active_state = state in (power_state.RUNNING, power_state.PAUSED)
if active_state:
disk_path = connection_info['data']['device_path']
LOG.debug('resizing block device %(dev)s to %(size)u kb',
{'dev': disk_path, 'size': new_size})
dev = guest.get_block_device(disk_path)
dev.resize(new_size // units.Ki)
else:
LOG.debug('Skipping block device resize, guest is not running',
instance=instance)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
LOG.warning('During extend_volume, instance disappeared.',
instance=instance)
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.exception('resizing block device failed.',
instance=instance)
def attach_interface(self, context, instance, image_meta, vif):
guest = self._host.get_guest(instance)
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
instance.flavor,
CONF.libvirt.virt_type,
self._host)
try:
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(cfg, persistent=True, live=live)
except libvirt.libvirtError:
LOG.error('attaching network adapter failed.',
instance=instance, exc_info=True)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
try:
# NOTE(artom) If we're attaching with a device role tag, we need to
# rebuild device_metadata. If we're attaching without a role
# tag, we're rebuilding it here needlessly anyways. This isn't a
# massive deal, and it helps reduce code complexity by not having
# to indicate to the virt driver that the attach is tagged. The
# really important optimization of not calling the database unless
# device_metadata has actually changed is done for us by
# instance.save().
instance.device_metadata = self._build_device_metadata(
context, instance)
instance.save()
except Exception:
# NOTE(artom) If we fail here it means the interface attached
# successfully but building and/or saving the device metadata
# failed. Just unplugging the vif is therefore not enough cleanup,
# we need to detach the interface.
with excutils.save_and_reraise_exception(reraise=False):
LOG.error('Interface attached successfully but building '
'and/or saving device metadata failed.',
instance=instance, exc_info=True)
self.detach_interface(context, instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
def detach_interface(self, context, instance, vif):
guest = self._host.get_guest(instance)
cfg = self.vif_driver.get_config(instance, vif,
instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type, self._host)
interface = guest.get_interface_by_cfg(cfg)
try:
self.vif_driver.unplug(instance, vif)
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
# sending the vif-deleted event which then triggers a call to
# detach the interface, so if the interface is not found then
# we can just log it as a warning.
if not interface:
mac = vif.get('address')
# The interface is gone so just log it as a warning.
LOG.warning('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.',
{'mac': mac}, instance=instance)
return
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
# Now we are going to loop until the interface is detached or we
# timeout.
wait_for_detach = guest.detach_device_with_retry(
guest.get_interface_by_cfg, cfg, live=live,
alternative_device_name=self.vif_driver.get_vif_devname(vif))
wait_for_detach()
except exception.DeviceDetachFailed:
# We failed to detach the device even with the retry loop, so let's
# dump some debug information to the logs before raising back up.
with excutils.save_and_reraise_exception():
devname = self.vif_driver.get_vif_devname(vif)
interface = guest.get_interface_by_cfg(cfg)
if interface:
LOG.warning(
'Failed to detach interface %(devname)s after '
'repeated attempts. Final interface xml:\n'
'%(interface_xml)s\nFinal guest xml:\n%(guest_xml)s',
{'devname': devname,
'interface_xml': interface.to_xml(),
'guest_xml': guest.get_xml_desc()},
instance=instance)
except exception.DeviceNotFound:
# The interface is gone so just log it as a warning.
LOG.warning('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.',
{'mac': vif.get('address')}, instance=instance)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning("During detach_interface, instance disappeared.",
instance=instance)
else:
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
# sending the vif-deleted event which then triggers a call to
# detach the interface, so we might have failed because the
# network device no longer exists. Libvirt will fail with
# "operation failed: no matching network device was found"
# which unfortunately does not have a unique error code so we
# need to look up the interface by config and if it's not found
# then we can just log it as a warning rather than tracing an
# error.
mac = vif.get('address')
interface = guest.get_interface_by_cfg(cfg)
if interface:
LOG.error('detaching network adapter failed.',
instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
# The interface is gone so just log it as a warning.
LOG.warning('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.',
{'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
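        """Build the image metadata dict used when uploading an instance
        snapshot to the image service.
        """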
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
# NOTE(vish): glance forces ami disk format to be ami
if image_meta.disk_format == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
if image_meta.obj_attr_is_set("container_format"):
metadata['container_format'] = image_meta.container_format
else:
metadata['container_format'] = "bare"
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
snapshot = self._image_api.get(context, image_id)
# source_format is an on-disk format
# source_type is a backend type
disk_path, source_format = libvirt_utils.find_disk(guest)
source_type = libvirt_utils.get_disk_type_from_path(disk_path)
# We won't have source_type for raw or qcow2 disks, because we can't
# determine that from the path. We should have it from the libvirt
# xml, though.
if source_type is None:
source_type = source_format
# For lxc instances we won't have it either from libvirt xml
# (because we just gave libvirt the mounted filesystem), or the path,
# so source_type is still going to be None. In this case,
# root_disk is going to default to CONF.libvirt.images_type
# below, which is still safe.
image_format = CONF.libvirt.snapshot_image_format or source_type
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(instance.image_meta,
instance,
image_format,
snapshot['name'])
snapshot_name = uuidutils.generate_uuid(dashed=False)
state = guest.get_power_state(self._host)
# NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
# cold snapshots. Currently, checking for encryption is
# redundant because LVM supports only cold snapshots.
# It is necessary in case this situation changes in the
# future.
if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)
             and source_type not in ('lvm',)
and not CONF.ephemeral_storage_encryption.enabled
and not CONF.workarounds.disable_libvirt_livesnapshot
# NOTE(rmk): We cannot perform live snapshots when a
# managedSave file is present, so we will use the cold/legacy
# method for instances which are shutdown or paused.
# NOTE(mriedem): Live snapshot doesn't work with paused
# instances on older versions of libvirt/qemu. We can likely
# remove the restriction on PAUSED once we require
# libvirt>=3.6.0 and qemu>=2.10 since that works with the
# Pike Ubuntu Cloud Archive testing in Queens.
and state not in (power_state.SHUTDOWN, power_state.PAUSED)):
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
guest.get_block_device(disk_path).abort_job()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                    live_snapshot = False
else:
live_snapshot = False
self._prepare_domain_for_snapshot(context, live_snapshot, state,
instance)
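        # Resolve the backend image object for the root disk so the
        # backend-specific snapshot methods can be used below.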
root_disk = self.image_backend.by_libvirt_path(
instance, disk_path, image_type=source_type)
if live_snapshot:
LOG.info("Beginning live snapshot process", instance=instance)
else:
LOG.info("Beginning cold snapshot process", instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
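        # Try a backend-native direct snapshot first (e.g. an RBD-level
        # snapshot); fall back to the generic extract-and-upload path if
        # the backend does not support it or the direct path fails.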
try:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
metadata['location'] = root_disk.direct_snapshot(
context, snapshot_name, image_format, image_id,
instance.image_ref)
self._snapshot_domain(context, live_snapshot, virt_dom, state,
instance)
self._image_api.update(context, image_id, metadata,
purge_props=False)
except (NotImplementedError, exception.ImageUnacceptable,
exception.Forbidden) as e:
            if not isinstance(e, NotImplementedError):
LOG.warning('Performing standard snapshot because direct '
'snapshot failed: %(error)s',
{'error': encodeutils.exception_to_unicode(e)})
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
root_disk.cleanup_direct_snapshot(failed_snap,
also_destroy_volume=True,
ignore_errors=True)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
expected_state=task_states.IMAGE_UPLOADING)
# TODO(nic): possibly abstract this out to the root_disk
if source_type == 'rbd' and live_snapshot:
# Standard snapshot uses qemu-img convert from RBD which is
# not safe to run with live_snapshot.
live_snapshot = False
# Suspend the guest, so this is no longer a live snapshot
self._prepare_domain_for_snapshot(context, live_snapshot,
state, instance)
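            # Generic fallback: extract the root disk into a temporary file
            # (live or cold, depending on live_snapshot) and upload it to
            # the image service.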
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the tempdir
os.chmod(tmpdir, 0o701)
self._live_snapshot(context, instance, guest,
disk_path, out_path, source_format,
image_format, instance.image_meta)
else:
root_disk.snapshot_extract(out_path, image_format)
LOG.info("Snapshot extracted, beginning image upload",
instance=instance)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.info('Instance %(instance_name)s disappeared '
'while taking snapshot of it: [Error Code '
'%(error_code)s] %(ex)s',
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex},
instance=instance)
raise exception.InstanceNotFound(
instance_id=instance.uuid)
else:
raise
finally:
self._snapshot_domain(context, live_snapshot, virt_dom,
state, instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path, 'rb') as image_file:
self._image_api.update(context,
image_id,
metadata,
image_file)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to snapshot image"))
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
root_disk.cleanup_direct_snapshot(
failed_snap, also_destroy_volume=True,
ignore_errors=True)
LOG.info("Snapshot image upload complete", instance=instance)
def _prepare_domain_for_snapshot(self, context, live_snapshot, state,
instance):
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self.suspend(context, instance)
def _snapshot_domain(self, context, live_snapshot, virt_dom, state,
instance):
guest = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
guest = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
guest = self._create_domain(domain=virt_dom, pause=True)
if guest is not None:
self._attach_pci_devices(
guest, pci_manager.get_instance_pci_devs(instance))
self._attach_direct_passthrough_ports(
context, instance, guest)
def _can_set_admin_password(self, image_meta):
if CONF.libvirt.virt_type == 'parallels':
if not self._host.has_min_version(
MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD):
raise exception.SetAdminPasswdNotSupported()
elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
if not self._host.has_min_version(
MIN_LIBVIRT_SET_ADMIN_PASSWD):
raise exception.SetAdminPasswdNotSupported()
if not image_meta.properties.get('hw_qemu_guest_agent', False):
raise exception.QemuGuestAgentNotEnabled()
else:
raise exception.SetAdminPasswdNotSupported()
# TODO(melwitt): Combine this with the similar xenapi code at some point.
def _save_instance_password_if_sshkey_present(self, instance, new_pass):
sshkey = instance.key_data if 'key_data' in instance else None
if sshkey and sshkey.startswith("ssh-rsa"):
enc = crypto.ssh_encrypt_text(sshkey, new_pass)
# NOTE(melwitt): The convert_password method doesn't actually do
# anything with the context argument, so we can pass None.
instance.system_metadata.update(
password.convert_password(None, base64.encode_as_text(enc)))
instance.save()
def set_admin_password(self, instance, new_pass):
self._can_set_admin_password(instance.image_meta)
guest = self._host.get_guest(instance)
user = instance.image_meta.properties.get("os_admin_user")
if not user:
if instance.os_type == "windows":
user = "Administrator"
else:
user = "root"
try:
guest.set_user_password(user, new_pass)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
LOG.debug('Failed to set password: QEMU agent unresponsive',
instance_uuid=instance.uuid)
raise NotImplementedError()
err_msg = encodeutils.exception_to_unicode(ex)
            msg = (_('Error from libvirt while setting password for username '
'"%(user)s": [Error Code %(error_code)s] %(ex)s')
% {'user': user, 'error_code': error_code, 'ex': err_msg})
raise exception.InternalError(msg)
else:
# Save the password in sysmeta so it may be retrieved from the
# metadata service.
self._save_instance_password_if_sshkey_present(instance, new_pass)
def _can_quiesce(self, instance, image_meta):
if CONF.libvirt.virt_type not in ('kvm', 'qemu'):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance.uuid)
if not image_meta.properties.get('hw_qemu_guest_agent', False):
raise exception.QemuGuestAgentNotEnabled()
def _requires_quiesce(self, image_meta):
return image_meta.properties.get('os_require_quiesce', False)
def _set_quiesced(self, context, instance, image_meta, quiesced):
self._can_quiesce(instance, image_meta)
try:
guest = self._host.get_guest(instance)
if quiesced:
guest.freeze_filesystems()
else:
guest.thaw_filesystems()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
err_msg = encodeutils.exception_to_unicode(ex)
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': err_msg})
raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
        The qemu-guest-agent must be set up to execute fsfreeze.
"""
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
"""Thaw the guest filesystems after snapshot."""
self._set_quiesced(context, instance, image_meta, False)
def _live_snapshot(self, context, instance, guest, disk_path, out_path,
source_format, image_format, image_meta):
"""Snapshot an instance without downtime."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path,
format=source_format)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
format=source_format,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
quiesced = False
try:
self._set_quiesced(context, instance, image_meta, True)
quiesced = True
except exception.NovaException as err:
if self._requires_quiesce(image_meta):
raise
LOG.info('Skipping quiescing instance: %(reason)s.',
{'reason': err}, instance=instance)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
support_uefi = self._has_uefi_support()
guest.delete_configuration(support_uefi)
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True)
while not dev.is_job_complete():
time.sleep(0.5)
dev.abort_job()
nova.privsep.path.chown(disk_delta, uid=os.getuid())
finally:
self._host.write_instance_config(xml)
if quiesced:
self._set_quiesced(context, instance, image_meta, False)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
        This method captures and logs exceptions that occur, since
        callers cannot do anything useful with them. Operations on the
        Cinder side waiting for this update will time out if sending
        it fails.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_('Failed to send updated snapshot status '
'to volume service.'))
def _volume_snapshot_create(self, context, instance, guest,
volume_id, new_file):
"""Perform volume snapshot.
:param guest: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param new_file: relative path to new qcow2 file present on share
"""
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if (guest_disk.serial is None or guest_disk.serial != volume_id):
disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
if disk_info['current_file'] is not None:
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
# NOTE(mriedem): This used to include a check for gluster in
# addition to netfs since they were added together. Support for
# gluster was removed in the 16.0.0 Pike release. It is unclear,
# however, if other volume drivers rely on the netfs disk source
# protocol.
elif disk_info['source_protocol'] == 'netfs':
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.InternalError(msg)
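        # Build the external snapshot XML: take an external snapshot of the
        # disks backing the target volume and explicitly mark every other
        # disk with snapshot='no' so it is left untouched.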
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for disk_info, new_filename in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug("snap xml: %s", snapshot_xml, instance=instance)
image_meta = instance.image_meta
try:
# Check to see if we can quiesce the guest before taking the
# snapshot.
self._can_quiesce(instance, image_meta)
try:
guest.snapshot(snapshot, no_metadata=True, disk_only=True,
reuse_ext=True, quiesce=True)
return
except libvirt.libvirtError:
# If the image says that quiesce is required then we fail.
if self._requires_quiesce(image_meta):
raise
LOG.exception(_('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.'),
instance=instance)
except (exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled) as err:
# If the image says that quiesce is required then we need to fail.
if self._requires_quiesce(image_meta):
raise
LOG.info('Skipping quiescing instance: %(reason)s.',
{'reason': err}, instance=instance)
try:
guest.snapshot(snapshot, no_metadata=True, disk_only=True,
reuse_ext=True, quiesce=False)
except libvirt.libvirtError:
LOG.exception(_('Unable to create VM snapshot, '
'failing volume_snapshot operation.'),
instance=instance)
raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
driver_bdm = driver_block_device.convert_volume(bdm)
if driver_bdm:
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
msg = _('Unknown type: %s') % create_info['type']
raise exception.InternalError(msg)
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
msg = _('snapshot_id required in create_info')
raise exception.InternalError(msg)
try:
self._volume_snapshot_create(context, instance, guest,
volume_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
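        # Poll Cinder until the snapshot leaves the 'creating' state, then
        # refresh the volume's block device mapping connection info.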
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
@staticmethod
def _rebase_with_qemu_img(guest, device, active_disk_object,
rebase_base):
"""Rebase a device tied to a guest using qemu-img.
        :param guest: the Guest which owns the device being rebased
:type guest: nova.virt.libvirt.guest.Guest
:param device: the guest block device to rebase
:type device: nova.virt.libvirt.guest.BlockDevice
        :param active_disk_object: the disk config of the device being rebased
:type active_disk_object: nova.virt.libvirt.config.\
LibvirtConfigGuestDisk
:param rebase_base: the new parent in the backing chain
:type rebase_base: None or string
"""
        # It's unclear how well qemu-img handles network disks for
        # every protocol, so let's be safe.
active_protocol = active_disk_object.source_protocol
if active_protocol is not None:
msg = _("Something went wrong when deleting a volume snapshot: "
"rebasing a %(protocol)s network disk using qemu-img "
"has not been fully tested") % {'protocol':
active_protocol}
LOG.error(msg)
raise exception.InternalError(msg)
if rebase_base is None:
# If backing_file is specified as "" (the empty string), then
# the image is rebased onto no backing file (i.e. it will exist
# independently of any backing file).
backing_file = ""
qemu_img_extra_arg = []
else:
# If the rebased image is going to have a backing file then
# explicitly set the backing file format to avoid any security
# concerns related to file format auto detection.
backing_file = rebase_base
b_file_fmt = images.qemu_img_info(backing_file).file_format
qemu_img_extra_arg = ['-F', b_file_fmt]
qemu_img_extra_arg.append(active_disk_object.source_path)
utils.execute("qemu-img", "rebase", "-b", backing_file,
*qemu_img_extra_arg)
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
        Files must be adjacent in the snapshot chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
"""
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info,
instance=instance)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.InternalError(msg)
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
# Find dev name
my_dev = None
active_disk = None
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
if guest_disk.serial == volume_id:
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if my_dev is None or (active_disk is None and active_protocol is None):
LOG.debug('Domain XML: %s', xml, instance=instance)
msg = (_('Disk with id: %s not found attached to instance.')
% volume_id)
raise exception.InternalError(msg)
LOG.debug("found device at %s", my_dev, instance=instance)
def _get_snap_dev(filename, backing_store):
if filename is None:
msg = _('filename cannot be None')
raise exception.InternalError(msg)
# libgfapi delete
LOG.debug("XML: %s", xml)
LOG.debug("active disk object: %s", active_disk_object)
# determine reference within backing store for desired image
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if current_filename == filename_to_merge:
return my_dev + '[0]'
while b is not None:
source_filename = b.source_name.split('/')[1]
if source_filename == filename_to_merge:
LOG.debug('found match: %s', b.source_name)
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if matched_name is None:
msg = _('no match found for %s') % (filename_to_merge)
raise exception.InternalError(msg)
LOG.debug('index of match (%s) is %s', b.source_name, index)
my_snap_dev = '%s[%s]' % (my_dev, index)
return my_snap_dev
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_base = delete_info['file_to_merge'] # often None
if (active_protocol is not None) and (rebase_base is not None):
rebase_base = _get_snap_dev(rebase_base,
active_disk_object.backing_store)
# NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
# and when available this flag _must_ be used to ensure backing
# paths are maintained relative by qemu.
#
# If _RELATIVE flag not found, continue with old behaviour
# (relative backing path seems to work for this case)
try:
libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
relative = rebase_base is not None
except AttributeError:
LOG.warning(
"Relative blockrebase support was not detected. "
"Continuing with old behaviour.")
relative = False
LOG.debug(
'disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, relative: %(relative)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': libvirt_guest.BlockDevice.REBASE_DEFAULT_BANDWIDTH,
'relative': str(relative)}, instance=instance)
dev = guest.get_block_device(rebase_disk)
if guest.is_active():
result = dev.rebase(rebase_base, relative=relative)
if result == 0:
LOG.debug('blockRebase started successfully',
instance=instance)
while not dev.is_job_complete():
LOG.debug('waiting for blockRebase job completion',
instance=instance)
time.sleep(0.5)
            # If the guest is not running, libvirt won't do a blockRebase.
# In that case, let's ask qemu-img to rebase the disk.
else:
LOG.debug('Guest is not running so doing a block rebase '
'using "qemu-img rebase"', instance=instance)
self._rebase_with_qemu_img(guest, dev, active_disk_object,
rebase_base)
else:
# commit with blockCommit()
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
if active_protocol is not None:
my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
commit_base = my_snap_base or delete_info['merge_target_file']
commit_top = my_snap_top or delete_info['file_to_merge']
LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
'commit_base=%(commit_base)s '
'commit_top=%(commit_top)s ',
{'commit_disk': commit_disk,
'commit_base': commit_base,
'commit_top': commit_top}, instance=instance)
dev = guest.get_block_device(commit_disk)
result = dev.commit(commit_base, commit_top, relative=True)
if result == 0:
LOG.debug('blockCommit started successfully',
instance=instance)
while not dev.is_job_complete():
LOG.debug('waiting for blockCommit job completion',
instance=instance)
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s",
encodeutils.exception_to_unicode(e),
instance=instance)
soft_reboot_success = False
if soft_reboot_success:
LOG.info("Instance soft rebooted successfully.",
instance=instance)
return
else:
LOG.warning("Failed to soft reboot instance. "
"Trying hard reboot.",
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
        succeeds if the guest responds to ACPI. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
old_domid = guest.id
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
guest.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
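        # Poll for up to wait_soft_reboot_seconds; a changed domain ID means
        # the guest really shut down (and is re-created here) or already
        # rebooted on its own.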
for x in range(CONF.libvirt.wait_soft_reboot_seconds):
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
new_domid = guest.id
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info("Instance shutdown successfully.",
instance=instance)
self._create_domain(domain=guest._domain)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info("Instance may have been rebooted during soft "
"reboot, so return now.", instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
# NOTE(sbauza): Since we undefine the guest XML when destroying, we
# need to remember the existing mdevs for reusing them.
mdevs = self._get_all_assigned_mediated_devices(instance)
mdevs = list(mdevs.keys())
# NOTE(mdbooth): In addition to performing a hard reboot of the domain,
# the hard reboot operation is relied upon by operators to be an
# automated attempt to fix as many things as possible about a
# non-functioning instance before resorting to manual intervention.
# With this goal in mind, we tear down all the aspects of an instance
# we can here without losing data. This allows us to re-initialise from
# scratch, and hopefully fix, most aspects of a non-functioning guest.
self.destroy(context, instance, network_info, destroy_disks=False,
block_device_info=block_device_info)
# Convert the system metadata to image metadata
# NOTE(mdbooth): This is a workaround for stateless Nova compute
# https://bugs.launchpad.net/nova/+bug/1349978
instance_dir = libvirt_utils.get_instance_path(instance)
fileutils.ensure_tree(instance_dir)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info,
mdevs=mdevs)
# NOTE(mdbooth): context.auth_token will not be set when we call
# _hard_reboot from resume_state_on_host_boot()
if context.auth_token is not None:
# NOTE (rmk): Re-populate any missing backing files.
config = vconfig.LibvirtConfigGuest()
config.parse_str(xml)
backing_disk_info = self._get_instance_disk_info_from_config(
config, block_device_info)
self._create_images_and_backing(context, instance, instance_dir,
backing_disk_info)
# Initialize all the necessary networking, block devices and
# start the instance.
# NOTE(melwitt): Pass vifs_already_plugged=True here even though we've
# unplugged vifs earlier. The behavior of neutron plug events depends
# on which vif type we're using and we are working with a stale network
# info cache here, so won't rely on waiting for neutron plug events.
# vifs_already_plugged=True means "do not wait for neutron plug events"
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info=block_device_info,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info("Instance rebooted successfully.",
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
self._host.get_guest(instance).pause()
def unpause(self, instance):
"""Unpause paused VM instance."""
guest = self._host.get_guest(instance)
guest.resume()
guest.sync_guest_time()
def _clean_shutdown(self, instance, timeout, retry_interval):
"""Attempt to shutdown the instance gracefully.
:param instance: The instance to be shutdown
:param timeout: How long to wait in seconds for the instance to
shutdown
:param retry_interval: How often in seconds to signal the instance
to shutdown while waiting
:returns: True if the shutdown succeeded
"""
# List of states that represent a shutdown instance
SHUTDOWN_STATES = [power_state.SHUTDOWN,
power_state.CRASHED]
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
# If the instance has gone then we don't need to
# wait for it to shutdown
return True
state = guest.get_power_state(self._host)
if state in SHUTDOWN_STATES:
LOG.info("Instance already shutdown.", instance=instance)
return True
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
guest.shutdown()
retry_countdown = retry_interval
for sec in range(timeout):
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
if state in SHUTDOWN_STATES:
LOG.info("Instance shutdown successfully after %d seconds.",
sec, instance=instance)
return True
            # Note(PhilD): We can't assume that the Guest was able to process
            #              any previous shutdown signal (for example it may
            #              have still been starting up), so within the
            #              overall timeout we re-trigger the shutdown every
            #              retry_interval.
if retry_countdown == 0:
retry_countdown = retry_interval
                # Instance could shut down at any time, in which case we
                # will get an exception when we call shutdown
try:
LOG.debug("Instance in state %s after %d seconds - "
"resending shutdown", state, sec,
instance=instance)
guest.shutdown()
except libvirt.libvirtError:
                    # Assume this is because it's now shut down, so loop
                    # one more time to clean up.
LOG.debug("Ignoring libvirt exception from shutdown "
"request.", instance=instance)
continue
else:
retry_countdown -= 1
time.sleep(1)
LOG.info("Instance failed to shutdown in %d seconds.",
timeout, instance=instance)
return False
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def trigger_crash_dump(self, instance):
"""Trigger crash dump by injecting an NMI to the specified instance."""
try:
self._host.get_guest(instance).inject_nmi()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
raise exception.TriggerCrashDumpNotSupported()
elif error_code == libvirt.VIR_ERR_OPERATION_INVALID:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
LOG.exception(_('Error from libvirt while injecting an NMI to '
'%(instance_uuid)s: '
'[Error Code %(error_code)s] %(ex)s'),
{'instance_uuid': instance.uuid,
'error_code': error_code, 'ex': ex})
raise
def suspend(self, context, instance):
"""Suspend the specified instance."""
guest = self._host.get_guest(instance)
self._detach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._detach_direct_passthrough_ports(context, instance, guest)
self._detach_mediated_devices(guest)
guest.save_memory_state()
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
guest = self._create_domain_and_network(context, xml, instance,
network_info, block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._attach_direct_passthrough_ports(
context, instance, guest, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
guest.sync_guest_time()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except (exception.InternalError, exception.InstanceNotFound):
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_image_id = None
if image_meta.obj_attr_is_set("id"):
rescue_image_id = image_meta.id
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
rescue=True)
injection_info = InjectionInfo(network_info=network_info,
admin_pass=rescue_password,
files=None)
gen_confdrive = functools.partial(self._create_configdrive,
context, instance, injection_info,
rescue=True)
self._create_image(context, instance, disk_info['mapping'],
injection_info=injection_info, suffix='.rescue',
disk_images=rescue_images)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images)
self._destroy(instance)
self._create_domain(xml, post_xml_callback=gen_confdrive)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
self._destroy(instance)
self._create_domain(xml, virt_dom)
os.unlink(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
if os.path.isdir(rescue_file):
shutil.rmtree(rescue_file)
else:
os.unlink(rescue_file)
# cleanup rescue volume
lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance)
if lvmdisk.endswith('.rescue')])
if CONF.libvirt.images_type == 'rbd':
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('.rescue'))
LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
def poll_rebooting_instances(self, timeout, instances):
pass
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
injection_info = InjectionInfo(network_info=network_info,
files=injected_files,
admin_pass=admin_password)
gen_confdrive = functools.partial(self._create_configdrive,
context, instance,
injection_info)
self._create_image(context, instance, disk_info['mapping'],
injection_info=injection_info,
block_device_info=block_device_info)
# Required by Quobyte CI
self._ensure_console_log_for_instance(instance)
        # Does the guest need to be assigned some vGPU mediated devices?
mdevs = self._allocate_mdevs(allocations)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
mdevs=mdevs)
self._create_domain_and_network(
context, xml, instance, network_info,
block_device_info=block_device_info,
post_xml_callback=gen_confdrive,
destroy_disks_on_failure=True)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info("Instance spawned successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _get_console_output_file(self, instance, console_log):
bytes_to_read = MAX_CONSOLE_BYTES
log_data = b"" # The last N read bytes
i = 0 # in case there is a log rotation (like "virtlogd")
path = console_log
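        # Walk the console log and its rotated siblings (console.log,
        # console.log.0, ...) until MAX_CONSOLE_BYTES of the most recent
        # output have been collected.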
while bytes_to_read > 0 and os.path.exists(path):
read_log_data, remaining = nova.privsep.path.last_bytes(
path, bytes_to_read)
# We need the log file content in chronological order,
# that's why we *prepend* the log data.
log_data = read_log_data + log_data
# Prep to read the next file in the chain
bytes_to_read -= len(read_log_data)
path = console_log + "." + str(i)
i += 1
if remaining > 0:
LOG.info('Truncated console log returned, '
'%d bytes ignored', remaining, instance=instance)
return log_data
def get_console_output(self, context, instance):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
        # If the guest has a console logging to a file, prefer to use that
file_consoles = tree.findall("./devices/console[@type='file']")
if file_consoles:
for file_console in file_consoles:
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
if not os.path.exists(path):
LOG.info('Instance is configured with a file console, '
'but the backing file is not (yet?) present',
instance=instance)
return ""
return self._get_console_output_file(instance, path)
# Try 'pty' types
pty_consoles = tree.findall("./devices/console[@type='pty']")
if pty_consoles:
for pty_console in pty_consoles:
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
raise exception.ConsoleNotAvailable()
else:
raise exception.ConsoleNotAvailable()
console_log = self._get_console_log_path(instance)
data = nova.privsep.libvirt.readpty(pty)
# NOTE(markus_z): The virt_types kvm and qemu are the only ones
# which create a dedicated file device for the console logging.
# Other virt_types like xen, lxc, uml, parallels depend on the
# flush of that pty device into the "console.log" file to ensure
# that a series of "get_console_output" calls return the complete
# content even after rebooting a guest.
nova.privsep.path.writefile(console_log, 'a+', data)
return self._get_console_output_file(instance, console_log)
def get_host_ip_addr(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warning('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s',
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='vnc']")
if graphic is not None:
return graphic.get('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vnc.server_proxyclient_address
return ctype.ConsoleVNC(host=host, port=port)
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='spice']")
if graphic is not None:
return (graphic.get('port'), graphic.get('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance.name)
host = CONF.spice.server_proxyclient_address
return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])
def get_serial_console(self, context, instance):
guest = self._host.get_guest(instance)
for hostname, port in self._get_serial_ports_from_guest(
guest, mode='bind'):
return ctype.ConsoleSerial(host=hostname, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
@staticmethod
def _create_ephemeral(target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
context=None, specified_fs=None,
vm_mode=None):
if not is_block_dev:
if (CONF.libvirt.virt_type == "parallels" and
vm_mode == fields.VMMode.EXE):
libvirt_utils.create_ploop_image('expanded', target,
'%dG' % ephemeral_size,
specified_fs)
return
libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
# Run as root only for block devices.
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, context=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
nova.privsep.fs.unprivileged_mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _ensure_console_log_for_instance(self, instance):
# NOTE(mdbooth): Although libvirt will create this file for us
# automatically when it starts, it will initially create it with
# root ownership and then chown it depending on the configuration of
# the domain it is launching. Quobyte CI explicitly disables the
# chown by setting dynamic_ownership=0 in libvirt's config.
# Consequently when the domain starts it is unable to write to its
# console.log. See bug https://bugs.launchpad.net/nova/+bug/1597644
#
# To work around this, we create the file manually before starting
# the domain so it has the same ownership as Nova. This works
# for Quobyte CI because it is also configured to run qemu as the same
# user as the Nova service. Installations which don't set
# dynamic_ownership=0 are not affected because libvirt will always
# correctly configure permissions regardless of initial ownership.
#
# Setting dynamic_ownership=0 is dubious and potentially broken in
# more ways than console.log (see comment #22 on the above bug), so
# Future Maintainer who finds this code problematic should check to see
# if we still support it.
console_file = self._get_console_log_path(instance)
LOG.debug('Ensure instance console log exists: %s', console_file,
instance=instance)
try:
libvirt_utils.file_open(console_file, 'a').close()
# NOTE(sfinucan): We can safely ignore permission issues here and
# assume that it is libvirt that has taken ownership of this file.
except IOError as ex:
if ex.errno != errno.EACCES:
raise
LOG.debug('Console file already exists: %s.', console_file)
@staticmethod
def _get_disk_config_image_type():
# TODO(mikal): there is a bug here if images_type has
# changed since creation of the instance, but I am pretty
# sure that this bug already exists.
return 'rbd' if CONF.libvirt.images_type == 'rbd' else 'raw'
@staticmethod
def _is_booted_from_volume(block_device_info):
"""Determines whether the VM is booting from volume
Determines whether the block device info indicates that the VM
is booting from a volume.
"""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
return bool(block_device.get_root_bdm(block_device_mapping))
def _inject_data(self, disk, instance, injection_info):
"""Injects data in a disk image
Helper used for injecting data in a disk image file system.
:param disk: The disk we're injecting into (an Image object)
:param instance: The instance we're injecting into
:param injection_info: Injection info
"""
# Handles the partition need to be used.
LOG.debug('Checking root disk injection %s',
str(injection_info), instance=instance)
target_partition = None
if not instance.kernel_id:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance.key_data)
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
else:
admin_pass = injection_info.admin_pass
# Handles the network injection.
net = netutils.get_injected_network_template(
injection_info.network_info,
libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
if any((key, net, metadata, admin_pass, injection_info.files)):
LOG.debug('Injecting %s', str(injection_info),
instance=instance)
img_id = instance.image_ref
try:
disk_api.inject_data(disk.get_model(self._conn),
key, net, metadata, admin_pass,
injection_info.files,
partition=target_partition,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error('Error injecting data into image '
'%(img_id)s (%(e)s)',
{'img_id': img_id, 'e': e},
instance=instance)
# NOTE(sileht): many callers of this method assume that this
# method doesn't fail if an image already exists but instead
# think that it will be reused (ie: (live)-migration/resize)
def _create_image(self, context, instance,
disk_mapping, injection_info=None, suffix='',
disk_images=None, block_device_info=None,
fallback_from_host=None,
ignore_bdi_for_swap=False):
booted_from_volume = self._is_booted_from_volume(block_device_info)
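        # Helpers resolving per-instance disk names against the configured
        # image backend; raw() forces the 'raw' backend, used below for
        # kernel and ramdisk images.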
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.by_name(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info('Creating image', instance=instance)
inst_type = instance.get_flavor()
swap_mb = 0
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
if ignore_bdi_for_swap:
# This is a workaround to support legacy swap resizing,
# which does not touch swap size specified in bdm,
# but works with flavor specified size only.
# In this case we follow the legacy logic and ignore block
# device info completely.
# NOTE(ft): This workaround must be removed when a correct
# implementation of resize operation changing sizes in bdms is
# developed. Also at that stage we probably may get rid of
# the direct usage of flavor swap size here,
# leaving the work with bdm only.
swap_mb = inst_type['swap']
else:
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
if (CONF.libvirt.virt_type == "parallels" and
instance.vm_mode == fields.VMMode.EXE):
msg = _("Swap disk is not supported "
"for Virtuozzo container")
raise exception.Invalid(msg)
if not disk_images:
disk_images = {'image_id': instance.image_ref,
'kernel_id': instance.kernel_id,
'ramdisk_id': instance.ramdisk_id}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images['kernel_id'])
raw('kernel').cache(fetch_func=libvirt_utils.fetch_raw_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images['ramdisk_id'])
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_raw_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'])
if CONF.libvirt.virt_type == 'uml':
# PONDERING(mikal): can I assume that root is UID zero in every
# OS? Probably not.
uid = pwd.getpwnam('root').pw_uid
nova.privsep.path.chown(image('disk').path, uid=uid)
self._create_and_inject_local_root(context, instance,
booted_from_volume, suffix,
disk_images, injection_info,
fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = nova.privsep.fs.get_fs_type_for_os_type(
instance.os_type)
# Generate a file extension based on the file system
# type and the mkfs commands configured if any
file_extension = nova.privsep.fs.get_file_extension_for_os_type(
os_type_with_default, CONF.default_ephemeral_format)
vm_mode = fields.VMMode.get_from_instance(instance)
ephemeral_gb = instance.flavor.ephemeral_gb
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev,
vm_mode=vm_mode)
fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
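        # Create any additional ephemeral disks requested via block device
        # mappings, validating the requested guest filesystem format first.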
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev,
vm_mode=vm_mode)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
context=context,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
def _create_and_inject_local_root(self, context, instance,
booted_from_volume, suffix, disk_images,
injection_info, fallback_from_host):
# File injection only if needed
need_inject = (not configdrive.required_by(instance) and
injection_info is not None and
CONF.libvirt.inject_partition != -2)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images['image_id'])
size = instance.flavor.root_gb * units.Gi
if size == 0 or suffix == '.rescue':
size = None
backend = self.image_backend.by_name(instance, 'disk' + suffix,
CONF.libvirt.images_type)
if instance.task_state == task_states.RESIZE_FINISH:
backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
fetch_func = libvirt_utils.fetch_image
self._try_fetch_image_cache(backend, fetch_func, context,
root_fname, disk_images['image_id'],
instance, size, fallback_from_host)
if need_inject:
self._inject_data(backend, instance, injection_info)
elif need_inject:
LOG.warning('File injection into a boot from volume '
'instance is not supported', instance=instance)
def _create_configdrive(self, context, instance, injection_info,
rescue=False):
        # As this method is called right after the domain is defined but
        # before it is actually launched, device metadata is built and
        # saved in the instance so it can be used by the config drive and
        # the metadata service.
instance.device_metadata = self._build_device_metadata(context,
instance)
if configdrive.required_by(instance):
LOG.info('Using config drive', instance=instance)
name = 'disk.config'
if rescue:
name += '.rescue'
config_disk = self.image_backend.by_name(
instance, name, self._get_disk_config_image_type())
# Don't overwrite an existing config drive
if not config_disk.exists():
extra_md = {}
if injection_info.admin_pass:
extra_md['admin_pass'] = injection_info.admin_pass
inst_md = instance_metadata.InstanceMetadata(
instance, content=injection_info.files, extra_md=extra_md,
network_info=injection_info.network_info,
request_context=context)
cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
with cdb:
# NOTE(mdbooth): We're hardcoding here the path of the
# config disk when using the flat backend. This isn't
# good, but it's required because we need a local path we
# know we can write to in case we're subsequently
# importing into rbd. This will be cleaned up when we
# replace this with a call to create_from_func, but that
# can't happen until we've updated the backends and we
# teach them not to cache config disks. This isn't
# possible while we're still using cache() under the hood.
config_disk_local_path = os.path.join(
libvirt_utils.get_instance_path(instance), name)
LOG.info('Creating config drive at %(path)s',
{'path': config_disk_local_path},
instance=instance)
try:
cdb.make_drive(config_disk_local_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error('Creating config drive failed with '
'error: %s', e, instance=instance)
try:
config_disk.import_file(
instance, config_disk_local_path, name)
finally:
# NOTE(mikal): if the config drive was imported into RBD,
# then we no longer need the local copy
if CONF.libvirt.images_type == 'rbd':
LOG.info('Deleting local config drive %(path)s '
'because it was imported into RBD.',
{'path': config_disk_local_path},
instance=instance)
os.unlink(config_disk_local_path)
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._host.device_lookup_by_name(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separate loops
            # to detach and then reset them.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._host.device_lookup_by_name(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=six.text_type(exc))
def _detach_pci_devices(self, guest, pci_devs):
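        # After asking libvirt to live-detach each device, re-read the
        # domain XML and raise PciDeviceDetachFailed if the device's PCI
        # address is still present among the guest's hostdevs.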
try:
for dev in pci_devs:
guest.detach_device(self._get_guest_pci_device(dev), live=True)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev.address)
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning("Instance disappeared while detaching "
"a PCI device from it.")
else:
raise
def _attach_pci_devices(self, guest, pci_devs):
try:
for dev in pci_devs:
guest.attach_device(self._get_guest_pci_device(dev))
except libvirt.libvirtError:
LOG.error('Attaching PCI devices %(dev)s to %(dom)s failed.',
{'dev': pci_devs, 'dom': guest.id})
raise
@staticmethod
def _has_direct_passthrough_port(network_info):
for vif in network_info:
if (vif['vnic_type'] in
network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
return True
return False
def _attach_direct_passthrough_ports(
self, context, instance, guest, network_info=None):
if network_info is None:
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
for vif in network_info:
if (vif['vnic_type'] in
network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
cfg = self.vif_driver.get_config(instance,
vif,
instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type,
self._host)
LOG.debug('Attaching direct passthrough port %(port)s '
'to %(dom)s', {'port': vif, 'dom': guest.id},
instance=instance)
guest.attach_device(cfg)
def _detach_direct_passthrough_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
# In case of VNIC_TYPES_DIRECT_PASSTHROUGH ports we create
# pci request per direct passthrough port. Therefore we can trust
# that pci_slot value in the vif is correct.
direct_passthrough_pci_addresses = [
vif['profile']['pci_slot']
for vif in network_info
if (vif['vnic_type'] in
network_model.VNIC_TYPES_DIRECT_PASSTHROUGH and
vif['profile'].get('pci_slot') is not None)
]
# use detach_pci_devices to avoid failure in case of
# multiple guest direct passthrough ports with the same MAC
# (protection use-case, ports are on different physical
# interfaces)
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
direct_passthrough_pci_addresses = (
[pci_dev for pci_dev in pci_devs
if pci_dev.address in direct_passthrough_pci_addresses])
self._detach_pci_devices(guest, direct_passthrough_pci_addresses)
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
setting; thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service and disable_reason else
DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warning('Cannot update service status on host "%s" '
'since it is not registered.', CONF.host)
except Exception:
LOG.warning('Cannot update service status on host "%s" '
'due to an unexpected exception.', CONF.host,
exc_info=True)
if enabled:
mount.get_manager().host_up(self._host)
else:
mount.get_manager().host_down()
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
extra_flags = CONF.libvirt.cpu_model_extra_flags
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
caps = self._host.get_capabilities()
# AArch64 lacks 'host-model' support because neither libvirt
                # nor QEMU is able to tell exactly what the host CPU model is.
# And there is no CPU description code for ARM(64) at this
# point.
# Also worth noting: 'host-passthrough' mode will completely
# break live migration, *unless* all the Compute nodes (running
# libvirtd) have *identical* CPUs.
if caps.host.cpu.arch == fields.Architecture.AARCH64:
mode = "host-passthrough"
LOG.info('CPU mode "host-passthrough" was chosen. Live '
'migration can break unless all compute nodes '
'have identical cpus. AArch64 does not support '
'other modes.')
else:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
# FIXME (kchamart): We're intentionally restricting the choices
# (in the conf/libvirt.py) for 'extra_flags` to just 'PCID', to
# address the immediate guest performance degradation caused by
# "Meltdown" CVE fixes on certain Intel CPU models. In a future
# patch, we will:
# (a) Remove the restriction of choices for 'extra_flags',
# allowing to add / remove additional CPU flags, as it will
# make way for other useful features.
# (b) Remove the below check for "host-model", as it is a
# valid configuration to supply additional CPU flags to it.
# (c) Revisit and fix the warnings / exception handling for
# different combinations of CPU modes and 'extra_flags'.
elif ((mode == "host-model" or mode == "host-passthrough") and
extra_flags):
extra_flags = []
LOG.warning("Setting extra CPU flags is only valid in "
"combination with a custom CPU model. Refer "
"to the 'nova.conf' documentation for "
"'[libvirt]/cpu_model_extra_flags'")
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen, "
"with extra flags: '%(extra_flags)s'",
{'mode': mode,
'model': (model or ""),
'extra_flags': (extra_flags or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
# NOTE (kchamart): Currently there's no existing way to ask if a
# given CPU model + CPU flags combination is supported by KVM &
# a specific QEMU binary. However, libvirt runs the 'CPUID'
# command upfront -- before even a Nova instance (a QEMU
# process) is launched -- to construct CPU models and check
# their validity; so we are good there. In the long-term,
# upstream libvirt intends to add an additional new API that can
# do fine-grained validation of a certain CPU model + CPU flags
# against a specific QEMU binary (the libvirt RFE bug for that:
# https://bugzilla.redhat.com/show_bug.cgi?id=1559832).
for flag in extra_flags:
cpu.add_feature(vconfig.LibvirtConfigGuestCPUFeature(flag))
return cpu
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.get_best_cpu_topology(
flavor, image_meta, numa_topology=instance_numa_topology)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
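        # Build the libvirt disk config for the named image-backed disk,
        # falling back to a 'flat' config drive if an rbd-backed one does
        # not exist yet, and assigning a SCSI unit number when the bus
        # requires it.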
disk_unit = None
disk = self.image_backend.by_name(instance, name, image_type)
if (name == 'disk.config' and image_type == 'rbd' and
not disk.exists()):
# This is likely an older config drive that has not been migrated
# to rbd yet. Try to fall back on 'flat' image type.
# TODO(melwitt): Add online migration of some sort so we can
# remove this fall back once we know all config drives are in rbd.
# NOTE(vladikr): make sure that the flat image exist, otherwise
# the image will be created after the domain definition.
flat_disk = self.image_backend.by_name(instance, name, 'flat')
if flat_disk.exists():
disk = flat_disk
LOG.debug('Config drive not found in RBD, falling back to the '
'instance directory', instance=instance)
disk_info = disk_mapping[name]
if 'unit' in disk_mapping and disk_info['bus'] == 'scsi':
disk_unit = disk_mapping['unit']
disk_mapping['unit'] += 1 # Increments for the next disk added
conf = disk.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._host.get_version(),
disk_unit=disk_unit)
return conf
def _get_guest_fs_config(self, instance, name, image_type=None):
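        # Return a filesystem config ('ploop' driver, mounted at "/");
        # used below for parallels exe-mode guests and their rescue disks.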
disk = self.image_backend.by_name(instance, name, image_type)
return disk.libvirt_fs_info("/", "ploop")
def _get_guest_storage_config(self, context, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type, os_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
mount_rootfs = CONF.libvirt.virt_type == "lxc"
scsi_controller = self._get_scsi_controller(image_meta)
if scsi_controller and scsi_controller.model == 'virtio-scsi':
# The virtio-scsi can handle up to 256 devices but the
# optional element "address" must be defined to describe
# where the device is placed on the controller (see:
# LibvirtConfigGuestDeviceAddressDrive).
#
# Note about why it's added in disk_mapping: It's not
# possible to pass an 'int' by reference in Python, so we
# use disk_mapping as container to keep reference of the
# unit added and be able to increment it for each disk
# added.
#
# NOTE(jaypipes,melwitt): If this is a boot-from-volume instance,
# we need to start the disk mapping unit at 1 since we set the
# bootable volume's unit to 0 for the bootable volume.
disk_mapping['unit'] = 0
if self._is_booted_from_volume(block_device_info):
disk_mapping['unit'] = 1
def _get_ephemeral_devices():
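            # Build one guest disk config per ephemeral disk defined in
            # block_device_info.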
eph_devices = []
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self._get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
eph_devices.append(diskeph)
return eph_devices
if mount_rootfs:
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
elif (os_type == fields.VMMode.EXE and
CONF.libvirt.virt_type == "parallels"):
if rescue:
fsrescue = self._get_guest_fs_config(instance, "disk.rescue")
devices.append(fsrescue)
fsos = self._get_guest_fs_config(instance, "disk")
fsos.target_dir = "/mnt/rescue"
devices.append(fsos)
else:
if 'disk' in disk_mapping:
fs = self._get_guest_fs_config(instance, "disk")
devices.append(fs)
devices = devices + _get_ephemeral_devices()
else:
if rescue:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self._get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
instance.default_ephemeral_device = (
block_device.prepend_dev(disklocal.target_dev))
devices = devices + _get_ephemeral_devices()
if 'disk.swap' in disk_mapping:
diskswap = self._get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
config_name = 'disk.config.rescue' if rescue else 'disk.config'
if config_name in disk_mapping:
diskconfig = self._get_guest_disk_config(
instance, config_name, disk_mapping, inst_type,
self._get_disk_config_image_type())
devices.append(diskconfig)
for vol in block_device.get_bdms_to_connect(block_device_mapping,
mount_rootfs):
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
self._connect_volume(context, connection_info, instance)
if scsi_controller and scsi_controller.model == 'virtio-scsi':
# Check if this is the bootable volume when in a
# boot-from-volume instance, and if so, ensure the unit
# attribute is 0.
if vol.get('boot_index') == 0:
info['unit'] = 0
else:
info['unit'] = disk_mapping['unit']
disk_mapping['unit'] += 1
cfg = self._get_volume_config(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
vol.save()
for d in devices:
self._set_cache_mode(d)
if scsi_controller:
devices.append(scsi_controller)
return devices
@staticmethod
def _get_scsi_controller(image_meta):
"""Return scsi controller or None based on image meta"""
if image_meta.properties.get('hw_scsi_model'):
hw_scsi_model = image_meta.properties.hw_scsi_model
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
scsi_controller.index = 0
return scsi_controller
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
caps = self._host.get_capabilities()
return caps.host.uuid
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
if not os.path.exists("/etc/machine-id"):
msg = _("Unable to get host UUID: /etc/machine-id does not exist")
raise exception.InternalError(msg)
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
lines = f.read().split()
if not lines:
msg = _("Unable to get host UUID: /etc/machine-id is empty")
raise exception.InternalError(msg)
return str(uuid.UUID(lines[0]))
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
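        # Populate the guest SMBIOS sysinfo with the Nova vendor, product
        # and version strings and use the instance UUID as the system UUID.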
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance.uuid
sysinfo.system_family = "Virtual Machine"
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device.address)
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
if CONF.libvirt.virt_type in ('xen', 'parallels',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, instance):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance.display_name
meta.creationTime = time.time()
if instance.image_ref not in ("", None):
meta.roottype = "image"
meta.rootid = instance.image_ref
system_meta = instance.system_metadata
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = instance.user_id
ometa.username = system_meta.get('owner_user_name', 'N/A')
ometa.projectid = instance.project_id
ometa.projectname = system_meta.get('owner_project_name', 'N/A')
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
flavor = instance.flavor
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
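        # CONF.libvirt.hw_machine_type entries are of the form
        # "<arch>=<machine_type>", e.g. "x86_64=q35" (illustrative value
        # only); return them as an {arch: machine_type} dict.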
mappings = {}
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if image_meta.properties.get('hw_machine_type') is not None:
mach_type = image_meta.properties.hw_machine_type
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == fields.Architecture.ARMV7:
mach_type = "vexpress-a15"
if caps.host.cpu.arch == fields.Architecture.AARCH64:
mach_type = "virt"
if caps.host.cpu.arch in (fields.Architecture.S390,
fields.Architecture.S390X):
mach_type = 's390-ccw-virtio'
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
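        # Each map string is "<start>:<target>:<count>", e.g. "0:1000:65536"
        # (illustrative value); at most five mappings are honoured.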
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warning("Too many id maps, only included first five.")
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warning("Invalid value for id mapping %s", map_string)
return idmaps
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
def _update_guest_cputune(self, guest, flavor, virt_type):
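        # Map the flavor extra specs 'quota:cpu_shares', 'quota:cpu_period'
        # and 'quota:cpu_quota' onto the guest <cputune> element; only lxc,
        # kvm and qemu guests on hosts with CPU control support get this.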
is_able = self._host.is_cpu_control_policy_capable()
cputuning = ['shares', 'period', 'quota']
wants_cputune = any([k for k in cputuning
if "quota:cpu_" + k in flavor.extra_specs.keys()])
if wants_cputune and not is_able:
raise exception.UnsupportedHostCPUControlPolicy()
if not is_able or virt_type not in ('lxc', 'kvm', 'qemu'):
return
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
        # Set the default cpu.shares value proportionally to the number
        # of vcpus.
guest.cputune.shares = 1024 * guest.vcpus
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
def _get_cpu_numa_config_from_instance(self, instance_numa_topology,
wants_hugepages):
if instance_numa_topology:
guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
for instance_cell in instance_numa_topology.cells:
guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
guest_cell.id = instance_cell.id
guest_cell.cpus = instance_cell.cpuset
guest_cell.memory = instance_cell.memory * units.Ki
# The vhost-user network backend requires file backed
# guest memory (ie huge pages) to be marked as shared
# access, not private, so an external process can read
# and write the pages.
#
# You can't change the shared vs private flag for an
# already running guest, and since we can't predict what
# types of NIC may be hotplugged, we have no choice but
# to unconditionally turn on the shared flag. This has
                # no real negative functional effect on the guest, so it
                # is a reasonable approach to take.
if wants_hugepages:
guest_cell.memAccess = "shared"
guest_cpu_numa.cells.append(guest_cell)
return guest_cpu_numa
def _has_cpu_policy_support(self):
for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS:
if self._host.has_version(ver):
ver_ = libvirt_utils.version_to_string(ver)
raise exception.CPUPinningNotSupported(reason=_(
'Invalid libvirt version %(version)s') % {'version': ver_})
return True
def _wants_hugepages(self, host_topology, instance_topology):
"""Determine if the guest / host topology implies the
use of huge pages for guest RAM backing
"""
if host_topology is None or instance_topology is None:
return False
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
# Remove smallest page size as that's not classed as a largepage
avail_pagesize = avail_pagesize[1:]
# See if we have page size set
for cell in instance_topology.cells:
if (cell.pagesize is not None and
cell.pagesize in avail_pagesize):
return True
return False
def _get_cell_pairs(self, guest_cpu_numa_config, host_topology):
"""Returns the lists of pairs(tuple) of an instance cell and
corresponding host cell:
[(LibvirtConfigGuestCPUNUMACell, NUMACell), ...]
"""
cell_pairs = []
for guest_config_cell in guest_cpu_numa_config.cells:
for host_cell in host_topology.cells:
if guest_config_cell.id == host_cell.id:
cell_pairs.append((guest_config_cell, host_cell))
return cell_pairs
def _get_pin_cpuset(self, vcpu, object_numa_cell, host_cell):
"""Returns the config object of LibvirtConfigGuestCPUTuneVCPUPin.
Prepares vcpupin config for the guest with the following caveats:
a) If there is pinning information in the cell, we pin vcpus to
individual CPUs
b) Otherwise we float over the whole host NUMA node
"""
pin_cpuset = vconfig.LibvirtConfigGuestCPUTuneVCPUPin()
pin_cpuset.id = vcpu
if object_numa_cell.cpu_pinning and self._has_cpu_policy_support():
pin_cpuset.cpuset = set([object_numa_cell.cpu_pinning[vcpu]])
else:
pin_cpuset.cpuset = host_cell.cpuset
return pin_cpuset
def _get_emulatorpin_cpuset(self, vcpu, object_numa_cell, vcpus_rt,
emulator_threads_isolated, wants_realtime,
pin_cpuset):
"""Returns a set of cpu_ids to add to the cpuset for emulator threads
with the following caveats:
a) If emulator threads policy is isolated, we pin emulator threads
to one cpu we have reserved for it.
b) Otherwise;
b1) If realtime IS NOT enabled, the emulator threads are
allowed to float cross all the pCPUs associated with
the guest vCPUs.
b2) If realtime IS enabled, at least 1 vCPU is required
to be set aside for non-realtime usage. The emulator
threads are allowed to float across the pCPUs that
are associated with the non-realtime VCPUs.
"""
emulatorpin_cpuset = set([])
if emulator_threads_isolated:
if object_numa_cell.cpuset_reserved:
emulatorpin_cpuset = object_numa_cell.cpuset_reserved
elif not wants_realtime or vcpu not in vcpus_rt:
emulatorpin_cpuset = pin_cpuset.cpuset
return emulatorpin_cpuset
def _get_guest_numa_config(self, instance_numa_topology, flavor,
allowed_cpus=None, image_meta=None):
"""Returns the config objects for the guest NUMA specs.
Determines the CPUs that the guest can be pinned to if the guest
specifies a cell topology and the host supports it. Constructs the
libvirt XML config object representing the NUMA topology selected
for the guest. Returns a tuple of:
(cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
With the following caveats:
a) If there is no specified guest NUMA topology, then
all tuple elements except cpu_set shall be None. cpu_set
will be populated with the chosen CPUs that the guest
allowed CPUs fit within, which could be the supplied
allowed_cpus value if the host doesn't support NUMA
topologies.
b) If there is a specified guest NUMA topology, then
cpu_set will be None and guest_cpu_numa will be the
LibvirtConfigGuestCPUNUMA object representing the guest's
NUMA topology. If the host supports NUMA, then guest_cpu_tune
will contain a LibvirtConfigGuestCPUTune object representing
the optimized chosen cells that match the host capabilities
with the instance's requested topology. If the host does
not support NUMA, then guest_cpu_tune and guest_numa_tune
will be None.
"""
if (not self._has_numa_support() and
instance_numa_topology is not None):
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.NUMATopologyUnsupported()
topology = self._get_host_numa_topology()
# We have instance NUMA so translate it to the config class
guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
instance_numa_topology,
self._wants_hugepages(topology, instance_numa_topology))
if not guest_cpu_numa_config:
# No NUMA topology defined for instance - let the host kernel deal
# with the NUMA effects.
# TODO(ndipanov): Attempt to spread the instance
# across NUMA nodes and expose the topology to the
# instance as an optimisation
return GuestNumaConfig(allowed_cpus, None, None, None)
if not topology:
# No NUMA topology defined for host - This will only happen with
# some libvirt versions and certain platforms.
return GuestNumaConfig(allowed_cpus, None,
guest_cpu_numa_config, None)
# Now get configuration from the numa_topology
# Init CPUTune configuration
guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
guest_cpu_tune.emulatorpin = (
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin())
guest_cpu_tune.emulatorpin.cpuset = set([])
# Init NUMATune configuration
guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
guest_numa_tune.memory = vconfig.LibvirtConfigGuestNUMATuneMemory()
guest_numa_tune.memnodes = []
emulator_threads_isolated = (
instance_numa_topology.emulator_threads_isolated)
# Set realtime scheduler for CPUTune
vcpus_rt = set([])
wants_realtime = hardware.is_realtime_enabled(flavor)
if wants_realtime:
if not self._host.has_min_version(MIN_LIBVIRT_REALTIME_VERSION):
raise exception.RealtimePolicyNotSupported()
vcpus_rt = hardware.vcpus_realtime_topology(flavor, image_meta)
vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
designer.set_vcpu_realtime_scheduler(
vcpusched, vcpus_rt, CONF.libvirt.realtime_scheduler_priority)
guest_cpu_tune.vcpusched.append(vcpusched)
cell_pairs = self._get_cell_pairs(guest_cpu_numa_config, topology)
for guest_node_id, (guest_config_cell, host_cell) in enumerate(
cell_pairs):
# set NUMATune for the cell
tnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
designer.set_numa_memnode(tnode, guest_node_id, host_cell.id)
guest_numa_tune.memnodes.append(tnode)
guest_numa_tune.memory.nodeset.append(host_cell.id)
# set CPUTune for the cell
object_numa_cell = instance_numa_topology.cells[guest_node_id]
for cpu in guest_config_cell.cpus:
pin_cpuset = self._get_pin_cpuset(cpu, object_numa_cell,
host_cell)
guest_cpu_tune.vcpupin.append(pin_cpuset)
emu_pin_cpuset = self._get_emulatorpin_cpuset(
cpu, object_numa_cell, vcpus_rt,
emulator_threads_isolated, wants_realtime, pin_cpuset)
guest_cpu_tune.emulatorpin.cpuset.update(emu_pin_cpuset)
# TODO(berrange) When the guest has >1 NUMA node, it will
# span multiple host NUMA nodes. By pinning emulator threads
# to the union of all nodes, we guarantee there will be
# cross-node memory access by the emulator threads when
# responding to guest I/O operations. The only way to avoid
# this would be to pin emulator threads to a single node and
# tell the guest OS to only do I/O from one of its virtual
# NUMA nodes. This is not even remotely practical.
#
# The long term solution is to make use of a new QEMU feature
# called "I/O Threads" which will let us configure an explicit
# I/O thread for each guest vCPU or guest NUMA node. It is
# still TBD how to make use of this feature though, especially
# how to associate IO threads with guest devices to eliminate
# cross NUMA node traffic. This is an area of investigation
# for QEMU community devs.
# Sort the vcpupin list per vCPU id for human-friendlier XML
guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
# normalize cell.id
for i, (cell, memnode) in enumerate(zip(guest_cpu_numa_config.cells,
guest_numa_tune.memnodes)):
cell.id = i
memnode.cellid = i
return GuestNumaConfig(None, guest_cpu_tune, guest_cpu_numa_config,
guest_numa_tune)
def _get_guest_os_type(self, virt_type):
"""Returns the guest OS type based on virt type."""
if virt_type == "lxc":
ret = fields.VMMode.EXE
elif virt_type == "uml":
ret = fields.VMMode.UML
elif virt_type == "xen":
ret = fields.VMMode.XEN
else:
ret = fields.VMMode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance.ramdisk_id:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta.properties.get("os_command_line"):
guest.os_cmdline = image_meta.properties.os_command_line
def _set_clock(self, guest, os_type, image_meta, virt_type):
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
LOG.info('Configuring timezone for windows instance to localtime')
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if virt_type == "kvm":
self._set_kvm_timers(clk, os_type, image_meta)
def _set_kvm_timers(self, clk, os_type, image_meta):
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (fields.Architecture.I686,
fields.Architecture.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
# Provide Windows guests with the paravirtualized hyperv timer source.
# This is the windows equiv of kvm-clock, allowing Windows
# guests to accurately keep time.
if os_type == 'windows':
tmhyperv = vconfig.LibvirtConfigGuestTimer()
tmhyperv.name = "hypervclock"
tmhyperv.present = True
clk.add_timer(tmhyperv)
def _set_features(self, guest, os_type, caps, virt_type, image_meta):
if virt_type == "xen":
# PAE only makes sense in X86
if caps.host.cpu.arch in (fields.Architecture.I686,
fields.Architecture.X86_64):
guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == fields.VMMode.HVM)):
guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
os_type == 'windows'):
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.relaxed = True
hv.spinlocks = True
# Increase spinlock retries - value recommended by
# KVM maintainers who certify Windows guests
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
guest.features.append(hv)
if (virt_type in ("qemu", "kvm") and
image_meta.properties.get('img_hide_hypervisor_id')):
guest.features.append(vconfig.LibvirtConfigGuestFeatureKvmHidden())
def _check_number_of_serial_console(self, num_ports):
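        # Reject the request if it asks for more serial ports than
        # QEMU/KVM allows (ALLOWED_QEMU_SERIAL_PORTS).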
virt_type = CONF.libvirt.virt_type
if (virt_type in ("kvm", "qemu") and
num_ports > ALLOWED_QEMU_SERIAL_PORTS):
raise exception.SerialPortNumberLimitExceeded(
allowed=ALLOWED_QEMU_SERIAL_PORTS, virt_type=virt_type)
def _add_video_driver(self, guest, image_meta, flavor):
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga",
"xen", "qxl", "virtio")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta.properties, which
# is carried out in the next if statement below this one.
guestarch = libvirt_utils.get_arch(image_meta)
if guest.os_type == fields.VMMode.XEN:
video.type = 'xen'
elif CONF.libvirt.virt_type == 'parallels':
video.type = 'vga'
elif guestarch in (fields.Architecture.PPC,
fields.Architecture.PPC64,
fields.Architecture.PPC64LE):
            # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default,
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
        elif guestarch == fields.Architecture.AARCH64:
# NOTE(kevinz): Only virtio device type is supported by AARCH64
# so use 'virtio' instead when running on AArch64 hardware.
video.type = 'virtio'
elif CONF.spice.enabled:
video.type = 'qxl'
if image_meta.properties.get('hw_video_model'):
video.type = image_meta.properties.hw_video_model
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = image_meta.properties.get('hw_video_ram', 0)
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram * units.Mi / units.Ki
guest.add_device(video)
def _add_qga_device(self, guest, instance):
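        # Attach a unix-socket channel named org.qemu.guest_agent.0 so the
        # QEMU guest agent running inside the guest can reach the host.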
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance.name))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
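        # Attach a virtio RNG device, optionally rate-limited through the
        # flavor extra specs hw_rng:rate_bytes / hw_rng:rate_period and
        # backed by CONF.libvirt.rng_dev_path when that path is configured.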
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, image_meta):
        # Enable the guest agent only if the image property
        # 'hw_qemu_guest_agent' is set to yes.
if image_meta.properties.get('hw_qemu_guest_agent', False):
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
self._add_qga_device(guest, instance)
rng_is_virtio = image_meta.properties.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = strutils.bool_from_string(rng_allowed_str)
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(
self, inst_topology, numatune, flavor):
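        # Build the <memoryBacking> config: hugepages per guest NUMA cell
        # when any cell requests a page size, plus locked, non-shared pages
        # for realtime guests.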
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
if cell.pagesize:
wantsmempages = True
break
wantsrealtime = hardware.is_realtime_enabled(flavor)
membacking = None
if wantsmempages:
pages = self._get_memory_backing_hugepages_support(
inst_topology, numatune)
if pages:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.hugepages = pages
if wantsrealtime:
if not membacking:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
membacking.sharedpages = False
return membacking
def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
if not self._has_numa_support():
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.MemoryPagesUnsupported()
host_topology = self._get_host_numa_topology()
if host_topology is None:
# As above, we should not get here but just in case...
raise exception.MemoryPagesUnsupported()
        # Currently libvirt does not support using the smallest
        # page size as the backing memory page size.
# https://bugzilla.redhat.com/show_bug.cgi?id=1173507
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
smallest = avail_pagesize[0]
pages = []
for guest_cellid, inst_cell in enumerate(inst_topology.cells):
if inst_cell.pagesize and inst_cell.pagesize > smallest:
for memnode in numatune.memnodes:
if guest_cellid == memnode.cellid:
page = (
vconfig.LibvirtConfigGuestMemoryBackingPage())
page.nodeset = [guest_cellid]
page.size_kb = inst_cell.pagesize
pages.append(page)
break # Quit early...
return pages
def _get_flavor(self, ctxt, instance, flavor):
if flavor is not None:
return flavor
return instance.flavor
def _has_uefi_support(self):
# This means that the host can support uefi booting for guests
supported_archs = [fields.Architecture.X86_64,
fields.Architecture.AARCH64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
os.path.exists(DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch]))
def _get_supported_perf_events(self):
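        # Filter CONF.libvirt.enabled_perf_events down to the events that
        # both this libvirt build and the host CPU flags actually support.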
if (len(CONF.libvirt.enabled_perf_events) == 0 or
not self._host.has_min_version(MIN_LIBVIRT_PERF_VERSION)):
return []
supported_events = []
host_cpu_info = self._get_cpu_info()
for event in CONF.libvirt.enabled_perf_events:
if self._supported_perf_event(event, host_cpu_info['features']):
supported_events.append(event)
return supported_events
def _supported_perf_event(self, event, cpu_features):
libvirt_perf_event_name = LIBVIRT_PERF_EVENT_PREFIX + event.upper()
if not hasattr(libvirt, libvirt_perf_event_name):
LOG.warning("Libvirt doesn't support event type %s.", event)
return False
if (event in PERF_EVENTS_CPU_FLAG_MAPPING
and PERF_EVENTS_CPU_FLAG_MAPPING[event] not in cpu_features):
LOG.warning("Host does not support event type %s.", event)
return False
return True
def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
image_meta, flavor, root_device_name):
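        # Apply per-hypervisor OS and boot settings: HVM loader vs PV
        # cmdline for xen, SMBIOS/UEFI/machine type/boot menu for kvm and
        # qemu, an init path for lxc and parallels, and kernel/root device
        # for uml.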
if virt_type == "xen":
if guest.os_type == fields.VMMode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
else:
guest.os_cmdline = CONSOLE
elif virt_type in ("kvm", "qemu"):
if caps.host.cpu.arch in (fields.Architecture.I686,
fields.Architecture.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
hw_firmware_type = image_meta.properties.get('hw_firmware_type')
if caps.host.cpu.arch == fields.Architecture.AARCH64:
if not hw_firmware_type:
hw_firmware_type = fields.FirmwareType.UEFI
if hw_firmware_type == fields.FirmwareType.UEFI:
if self._has_uefi_support():
global uefi_logged
if not uefi_logged:
LOG.warning("uefi support is without some kind of "
"functional testing and therefore "
"considered experimental.")
uefi_logged = True
guest.os_loader = DEFAULT_UEFI_LOADER_PATH[
caps.host.cpu.arch]
guest.os_loader_type = "pflash"
else:
raise exception.UEFINotSupported()
guest.os_mach_type = self._get_machine_type(image_meta, caps)
if image_meta.properties.get('hw_boot_menu') is None:
guest.os_bootmenu = strutils.bool_from_string(
flavor.extra_specs.get('hw:boot_menu', 'no'))
else:
guest.os_bootmenu = image_meta.properties.hw_boot_menu
elif virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
elif virt_type == "parallels":
if guest.os_type == fields.VMMode.EXE:
guest.os_init_path = "/sbin/init"
def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info):
if rescue:
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
root_device_name)
elif instance.kernel_id:
self._set_guest_for_inst_kernel(instance, guest, inst_path,
virt_type, root_device_name,
image_meta)
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
def _create_consoles(self, virt_type, guest_cfg, instance, flavor,
image_meta):
# NOTE(markus_z): Beware! Below are so many conditionals that it is
# easy to lose track. Use this chart to figure out your case:
#
# case | is serial | has | is qemu | resulting
# | enabled? | virtlogd? | or kvm? | devices
# --------------------------------------------------
# 1 | no | no | no | pty*
# 2 | no | no | yes | file + pty
# 3 | no | yes | no | see case 1
# 4 | no | yes | yes | pty with logd
# 5 | yes | no | no | see case 1
# 6 | yes | no | yes | tcp + pty
# 7 | yes | yes | no | see case 1
# 8 | yes | yes | yes | tcp with logd
# * exception: virt_type "parallels" doesn't create a device
if virt_type == 'parallels':
pass
elif virt_type not in ("qemu", "kvm"):
log_path = self._get_console_log_path(instance)
self._create_pty_device(guest_cfg,
vconfig.LibvirtConfigGuestConsole,
log_path=log_path)
elif (virt_type in ("qemu", "kvm") and
self._is_s390x_guest(image_meta)):
self._create_consoles_s390x(guest_cfg, instance,
flavor, image_meta)
elif virt_type in ("qemu", "kvm"):
self._create_consoles_qemu_kvm(guest_cfg, instance,
flavor, image_meta)
def _is_s390x_guest(self, image_meta):
s390x_archs = (fields.Architecture.S390, fields.Architecture.S390X)
return libvirt_utils.get_arch(image_meta) in s390x_archs
def _create_consoles_qemu_kvm(self, guest_cfg, instance, flavor,
image_meta):
char_dev_cls = vconfig.LibvirtConfigGuestSerial
log_path = self._get_console_log_path(instance)
if CONF.serial_console.enabled:
if not self._serial_ports_already_defined(instance):
num_ports = hardware.get_number_of_serial_ports(flavor,
image_meta)
self._check_number_of_serial_console(num_ports)
self._create_serial_consoles(guest_cfg, num_ports,
char_dev_cls, log_path)
else:
self._create_file_device(guest_cfg, instance, char_dev_cls)
self._create_pty_device(guest_cfg, char_dev_cls, log_path=log_path)
def _create_consoles_s390x(self, guest_cfg, instance, flavor, image_meta):
char_dev_cls = vconfig.LibvirtConfigGuestConsole
log_path = self._get_console_log_path(instance)
if CONF.serial_console.enabled:
if not self._serial_ports_already_defined(instance):
num_ports = hardware.get_number_of_serial_ports(flavor,
image_meta)
self._create_serial_consoles(guest_cfg, num_ports,
char_dev_cls, log_path)
else:
self._create_file_device(guest_cfg, instance, char_dev_cls,
"sclplm")
self._create_pty_device(guest_cfg, char_dev_cls, "sclp", log_path)
def _create_pty_device(self, guest_cfg, char_dev_cls, target_type=None,
log_path=None):
def _create_base_dev():
consolepty = char_dev_cls()
consolepty.target_type = target_type
consolepty.type = "pty"
return consolepty
def _create_logd_dev():
consolepty = _create_base_dev()
log = vconfig.LibvirtConfigGuestCharDeviceLog()
log.file = log_path
consolepty.log = log
return consolepty
if CONF.serial_console.enabled:
if self._is_virtlogd_available():
return
else:
                # NOTE(markus_z): You may wonder why this is necessary and
                # so do I. I'm certain that this is *not* needed in any
                # real use case. It is, however, useful if you want to
                # bypass the Nova API and use "virsh console <guest>" on
                # a hypervisor, as this CLI command doesn't work with TCP
                # devices (like the serial console is).
                # https://bugzilla.redhat.com/show_bug.cgi?id=781467
                # Bypassing the Nova API, however, is something we don't
                # want. Future changes should remove this and fix the unit
                # tests which expect this device to exist.
guest_cfg.add_device(_create_base_dev())
else:
if self._is_virtlogd_available():
guest_cfg.add_device(_create_logd_dev())
else:
guest_cfg.add_device(_create_base_dev())
def _create_file_device(self, guest_cfg, instance, char_dev_cls,
target_type=None):
if self._is_virtlogd_available():
return
consolelog = char_dev_cls()
consolelog.target_type = target_type
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest_cfg.add_device(consolelog)
def _serial_ports_already_defined(self, instance):
try:
guest = self._host.get_guest(instance)
if list(self._get_serial_ports_from_guest(guest)):
                # Serial ports are already configured for the instance,
                # which means we are in the context of a migration.
return True
except exception.InstanceNotFound:
LOG.debug(
"Instance does not exist yet on libvirt, we can "
"safely pass on looking for already defined serial "
"ports in its domain XML", instance=instance)
return False
def _create_serial_consoles(self, guest_cfg, num_ports, char_dev_cls,
log_path):
for port in six.moves.range(num_ports):
console = char_dev_cls()
console.port = port
console.type = "tcp"
console.listen_host = CONF.serial_console.proxyclient_address
listen_port = serial_console.acquire_port(console.listen_host)
console.listen_port = listen_port
# NOTE: only the first serial console gets the boot messages,
# that's why we attach the logd subdevice only to that.
if port == 0 and self._is_virtlogd_available():
log = vconfig.LibvirtConfigGuestCharDeviceLog()
log.file = log_path
console.log = log
guest_cfg.add_device(console)
def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
"""Update VirtCPUModel object according to libvirt CPU config.
:param:cpu_config: vconfig.LibvirtConfigGuestCPU presenting the
instance's virtual cpu configuration.
:param:vcpu_model: VirtCPUModel object. A new object will be created
if None.
:return: Updated VirtCPUModel object, or None if cpu_config is None
"""
if not cpu_config:
return
if not vcpu_model:
vcpu_model = objects.VirtCPUModel()
vcpu_model.arch = cpu_config.arch
vcpu_model.vendor = cpu_config.vendor
vcpu_model.model = cpu_config.model
vcpu_model.mode = cpu_config.mode
vcpu_model.match = cpu_config.match
if cpu_config.sockets:
vcpu_model.topology = objects.VirtCPUTopology(
sockets=cpu_config.sockets,
cores=cpu_config.cores,
threads=cpu_config.threads)
else:
vcpu_model.topology = None
features = [objects.VirtCPUFeature(
name=f.name,
policy=f.policy) for f in cpu_config.features]
vcpu_model.features = features
return vcpu_model
def _vcpu_model_to_cpu_config(self, vcpu_model):
"""Create libvirt CPU config according to VirtCPUModel object.
:param:vcpu_model: VirtCPUModel object.
:return: vconfig.LibvirtConfigGuestCPU.
"""
cpu_config = vconfig.LibvirtConfigGuestCPU()
cpu_config.arch = vcpu_model.arch
cpu_config.model = vcpu_model.model
cpu_config.mode = vcpu_model.mode
cpu_config.match = vcpu_model.match
cpu_config.vendor = vcpu_model.vendor
if vcpu_model.topology:
cpu_config.sockets = vcpu_model.topology.sockets
cpu_config.cores = vcpu_model.topology.cores
cpu_config.threads = vcpu_model.topology.threads
if vcpu_model.features:
for f in vcpu_model.features:
xf = vconfig.LibvirtConfigGuestCPUFeature()
xf.name = f.name
xf.policy = f.policy
cpu_config.features.add(xf)
return cpu_config
def _guest_add_pcie_root_ports(self, guest):
"""Add PCI Express root ports.
        PCI Express machines can have as many PCIe devices as they have
        pcie-root-port controllers (slots on the virtual motherboard).
        If we want more PCIe slots available for hotplug, we need to create
        the whole PCIe port structure ourselves (a libvirt limitation).
"""
pcieroot = vconfig.LibvirtConfigGuestPCIeRootController()
guest.add_device(pcieroot)
for x in range(0, CONF.libvirt.num_pcie_ports):
pcierootport = vconfig.LibvirtConfigGuestPCIeRootPortController()
guest.add_device(pcierootport)
def _guest_add_usb_host_keyboard(self, guest):
"""Add USB Host controller and keyboard for graphical console use.
Add USB keyboard as PS/2 support may not be present on non-x86
architectures.
"""
keyboard = vconfig.LibvirtConfigGuestInput()
keyboard.type = "keyboard"
keyboard.bus = "usb"
guest.add_device(keyboard)
usbhost = vconfig.LibvirtConfigGuestUSBHostController()
usbhost.index = 0
guest.add_device(usbhost)
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None, mdevs=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
:param mdevs: optional list of mediated devices to assign to the guest.
"""
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
virt_type = CONF.libvirt.virt_type
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = virt_type
guest.name = instance.name
guest.uuid = instance.uuid
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
allowed_cpus = hardware.get_vcpu_pin_set()
guest_numa_config = self._get_guest_numa_config(
instance.numa_topology, flavor, allowed_cpus, image_meta)
guest.cpuset = guest_numa_config.cpuset
guest.cputune = guest_numa_config.cputune
guest.numatune = guest_numa_config.numatune
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology,
guest_numa_config.numatune,
flavor)
guest.metadata.append(self._get_guest_config_meta(instance))
guest.idmaps = self._get_guest_idmaps()
for event in self._supported_perf_events:
guest.add_perf_event(event)
self._update_guest_cputune(guest, flavor, virt_type)
guest.cpu = self._get_guest_cpu_config(
flavor, image_meta, guest_numa_config.numaconfig,
instance.numa_topology)
# Notes(yjiang5): we always sync the instance's vcpu model with
# the corresponding config file.
instance.vcpu_model = self._cpu_config_to_vcpu_model(
guest.cpu, instance.vcpu_model)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
instance.root_device_name = root_device_name
guest.os_type = (fields.VMMode.get_from_instance(instance) or
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type,
image_meta)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
storage_configs = self._get_guest_storage_config(context,
instance, image_meta, disk_info, rescue, block_device_info,
flavor, guest.os_type)
for config in storage_configs:
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, virt_type, self._host)
guest.add_device(config)
self._create_consoles(virt_type, guest, instance, flavor, image_meta)
pointer = self._get_guest_pointer_model(guest.os_type, image_meta)
if pointer:
guest.add_device(pointer)
self._guest_add_spice_channel(guest)
if self._guest_add_video_device(guest):
self._add_video_driver(guest, image_meta, flavor)
        # We want video == we want graphical console. Some architectures
        # do not have input devices attached in their default configuration,
        # so add a USB host controller and USB keyboard for them.
        # x86(-64) and ppc64 already have a USB host controller and keyboard;
        # s390x does not support USB at all.
if caps.host.cpu.arch == fields.Architecture.AARCH64:
self._guest_add_usb_host_keyboard(guest)
        # The QEMU guest agent is only supported on the 'qemu' and 'kvm'
        # hypervisors.
if virt_type in ('qemu', 'kvm'):
self._set_qemu_guest_agent(guest, flavor, instance, image_meta)
# Add PCIe root port controllers for PCI Express machines
# but only if their amount is configured
if (CONF.libvirt.num_pcie_ports and
((caps.host.cpu.arch == fields.Architecture.AARCH64 and
guest.os_mach_type.startswith('virt')) or
(caps.host.cpu.arch == fields.Architecture.X86_64 and
guest.os_mach_type is not None and
'q35' in guest.os_mach_type))):
self._guest_add_pcie_root_ports(guest)
self._guest_add_pci_devices(guest, instance)
self._guest_add_watchdog_action(guest, flavor, image_meta)
self._guest_add_memory_balloon(guest)
if mdevs:
self._guest_add_mdevs(guest, mdevs)
return guest
def _guest_add_mdevs(self, guest, chosen_mdevs):
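        # Attach each chosen mediated device to the guest by its UUID.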
for chosen_mdev in chosen_mdevs:
mdev = vconfig.LibvirtConfigGuestHostdevMDEV()
mdev.uuid = chosen_mdev
guest.add_device(mdev)
@staticmethod
def _guest_add_spice_channel(guest):
if (CONF.spice.enabled and CONF.spice.agent_enabled
and guest.virt_type not in ('lxc', 'uml', 'xen')):
channel = vconfig.LibvirtConfigGuestChannel()
channel.type = 'spicevmc'
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
@staticmethod
def _guest_add_memory_balloon(guest):
virt_type = guest.virt_type
        # The memory balloon device is only supported on the 'qemu'/'kvm'
        # and 'xen' hypervisors.
if (virt_type in ('xen', 'qemu', 'kvm') and
CONF.libvirt.mem_stats_period_seconds > 0):
balloon = vconfig.LibvirtConfigMemoryBalloon()
if virt_type in ('qemu', 'kvm'):
balloon.model = 'virtio'
else:
balloon.model = 'xen'
balloon.period = CONF.libvirt.mem_stats_period_seconds
guest.add_device(balloon)
@staticmethod
def _guest_add_watchdog_action(guest, flavor, image_meta):
# image meta takes precedence over flavor extra specs; disable the
# watchdog action by default
watchdog_action = (flavor.extra_specs.get('hw:watchdog_action')
or 'disabled')
watchdog_action = image_meta.properties.get('hw_watchdog_action',
watchdog_action)
        # NB(sross): currently only actually supported by KVM/QEMU
if watchdog_action != 'disabled':
if watchdog_action in fields.WatchdogAction.ALL:
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
def _guest_add_pci_devices(self, guest, instance):
virt_type = guest.virt_type
if virt_type in ('xen', 'qemu', 'kvm'):
# Get all generic PCI devices (non-SR-IOV).
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
            # PCI devices are only supported for the 'xen', 'qemu'
            # and 'kvm' hypervisors.
if pci_manager.get_instance_pci_devs(instance, 'all'):
raise exception.PciDeviceUnsupportedHypervisor(type=virt_type)
@staticmethod
def _guest_add_video_device(guest):
# NB some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second-guess which
        # versions those are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if CONF.vnc.enabled and guest.virt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
if CONF.vnc.keymap:
graphics.keymap = CONF.vnc.keymap
graphics.listen = CONF.vnc.server_listen
guest.add_device(graphics)
add_video_driver = True
if CONF.spice.enabled and guest.virt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
if CONF.spice.keymap:
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
return add_video_driver
def _get_guest_pointer_model(self, os_type, image_meta):
pointer_model = image_meta.properties.get(
'hw_pointer_model', CONF.pointer_model)
if pointer_model is None and CONF.libvirt.use_usb_tablet:
# TODO(sahid): We set pointer_model to keep compatibility
# until the next release O*. It means operators can continue
# to use the deprecated option "use_usb_tablet" or set a
# specific device to use
pointer_model = "usbtablet"
LOG.warning('The option "use_usb_tablet" has been '
'deprecated for Newton in favor of the more '
'generic "pointer_model". Please update '
'nova.conf to address this change.')
if pointer_model == "usbtablet":
# We want a tablet if VNC is enabled, or SPICE is enabled and
# the SPICE agent is disabled. If the SPICE agent is enabled
# it provides a paravirt mouse which drastically reduces
# overhead (by eliminating USB polling).
if CONF.vnc.enabled or (
CONF.spice.enabled and not CONF.spice.agent_enabled):
return self._get_guest_usb_tablet(os_type)
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
                    # For backward compatibility we don't want to break the
                    # boot process when the host requests a USB tablet but
                    # neither VNC nor SPICE-with-agent-disabled is available.
LOG.warning('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request VNC should be enabled or SPICE '
'and SPICE agent disabled on host.')
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
def _get_guest_usb_tablet(self, os_type):
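        # A USB tablet input device provides absolute pointer positioning
        # and is only added for fully virtualized (HVM) guests.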
tablet = None
if os_type == fields.VMMode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
                # For backward compatibility we don't want to break the
                # process of booting an instance if the virtual machine mode
                # is not configured as HVM.
LOG.warning('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request the machine mode should be '
'configured as HVM.')
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
return tablet
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None,
mdevs=None):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context, mdevs)
xml = conf.to_xml()
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
:param instance: nova.objects.instance.Instance object
:returns: An InstanceInfo object
"""
guest = self._host.get_guest(instance)
# Kind of ugly but we need to pass host to get_info as for a
# workaround, see libvirt/compat.py
return guest.get_info(self._host)
def _create_domain_setup_lxc(self, context, instance, image_meta,
block_device_info):
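        # Prepare the LXC rootfs: the root disk (a Cinder volume or the
        # local image backend) is set up and mounted under
        # <instance_path>/rootfs before the container is started.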
inst_path = libvirt_utils.get_instance_path(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
root_disk = block_device.get_root_bdm(block_device_mapping)
if root_disk:
self._connect_volume(context, root_disk['connection_info'],
instance)
disk_path = root_disk['connection_info']['data']['device_path']
# NOTE(apmelton) - Even though the instance is being booted from a
# cinder volume, it is still presented as a local block device.
# LocalBlockImage is used here to indicate that the instance's
# disk is backed by a local block device.
image_model = imgmodel.LocalBlockImage(disk_path)
else:
root_disk = self.image_backend.by_name(instance, 'disk')
image_model = root_disk.get_model(self._conn)
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
rootfs_dev = disk_api.setup_container(image_model,
container_dir=container_dir)
try:
# Save rootfs device to disconnect it when deleting the instance
if rootfs_dev:
instance.system_metadata['rootfs_device_name'] = rootfs_dev
if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
id_maps = self._get_guest_idmaps()
libvirt_utils.chown_for_id_maps(container_dir, id_maps)
except Exception:
with excutils.save_and_reraise_exception():
self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
LOG.debug('Attempting to unmount container filesystem: %s',
container_dir, instance=instance)
disk_api.clean_lxc_namespace(container_dir=container_dir)
else:
disk_api.teardown_container(container_dir=container_dir)
@contextlib.contextmanager
def _lxc_disk_handler(self, context, instance, image_meta,
block_device_info):
"""Context manager to handle the pre and post instance boot,
LXC specific disk operations.
An image or a volume path will be prepared and setup to be
used by the container, prior to starting it.
The disk will be disconnected and unmounted if a container has
failed to start.
"""
if CONF.libvirt.virt_type != 'lxc':
yield
return
self._create_domain_setup_lxc(context, instance, image_meta,
block_device_info)
try:
yield
finally:
self._create_domain_cleanup_lxc(instance)
# TODO(sahid): Consider renaming this to _create_guest.
def _create_domain(self, xml=None, domain=None,
power_on=True, pause=False, post_xml_callback=None):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
:returns guest.Guest: Guest just created
"""
if xml:
guest = libvirt_guest.Guest.create(xml, self._host)
if post_xml_callback is not None:
post_xml_callback()
else:
guest = libvirt_guest.Guest(domain)
if power_on or pause:
guest.launch(pause=pause)
if not utils.is_neutron():
guest.enable_hairpin()
return guest
def _neutron_failed_callback(self, event_name, instance):
LOG.error('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s',
{'event': event_name, 'uuid': instance.uuid},
instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _neutron_failed_live_migration_callback(self, event_name, instance):
msg = ('Neutron reported failure during live migration '
'with %(event)s for instance %(uuid)s' %
{'event': event_name, 'uuid': instance.uuid})
raise exception.MigrationError(reason=msg)
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _get_neutron_events_for_live_migration(self, network_info):
# Neutron should send events to Nova indicating that the VIFs
# are successfully plugged on destination host.
# TODO(sahid): Currently we only use the mechanism of waiting
# for neutron events during live-migration for linux-bridge.
return [('network-vif-plugged', vif['id'])
for vif in network_info if (
vif.get('type') == network_model.VIF_TYPE_BRIDGE)]
def _cleanup_failed_start(self, context, instance, network_info,
block_device_info, guest, destroy_disks):
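        # Power off the guest if it was created and is still running, then
        # clean up networking and storage; disks are destroyed only when
        # destroy_disks is True.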
try:
if guest and guest.is_active():
guest.poweroff()
finally:
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info,
destroy_disks=destroy_disks)
def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
vifs_already_plugged=False,
post_xml_callback=None,
destroy_disks_on_failure=False):
"""Do required network setup and create domain."""
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
pause = bool(events)
guest = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
with self._lxc_disk_handler(context, instance,
instance.image_meta,
block_device_info):
guest = self._create_domain(
xml, pause=pause, power_on=power_on,
post_xml_callback=post_xml_callback)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
destroy_disks_on_failure)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warning('Timeout waiting for %(events)s for '
'instance with vm_state %(vm_state)s and '
'task_state %(task_state)s.',
{'events': events,
'vm_state': instance.vm_state,
'task_state': instance.task_state},
instance=instance)
if CONF.vif_plugging_is_fatal:
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
destroy_disks_on_failure)
raise exception.VirtualInterfaceCreateException()
except Exception:
# Any other error, be sure to clean up
LOG.error('Failed to start libvirt guest', instance=instance)
with excutils.save_and_reraise_exception():
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
destroy_disks_on_failure)
# Resume only if domain has been paused
if pause:
guest.resume()
return guest
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
try:
total_pcpus = self._host.get_cpu_count()
except libvirt.libvirtError:
LOG.warning("Cannot get the number of cpu, because this "
"function is not implemented for this platform. ")
return 0
if not CONF.vcpu_pin_set:
return total_pcpus
available_ids = hardware.get_vcpu_pin_set()
# We get the list of online CPUs on the host and see if the requested
# set falls under these. If not, we retain the old behavior.
online_pcpus = None
try:
online_pcpus = self._host.get_online_cpus()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
err_msg = encodeutils.exception_to_unicode(ex)
LOG.warning(
"Couldn't retrieve the online CPUs due to a Libvirt "
"error: %(error)s with error code: %(error_code)s",
{'error': err_msg, 'error_code': error_code})
if online_pcpus:
if not (available_ids <= online_pcpus):
msg = (_("Invalid vcpu_pin_set config, one or more of the "
"specified cpuset is not online. Online cpuset(s): "
"%(online)s, requested cpuset(s): %(req)s"),
{'online': sorted(online_pcpus),
'req': sorted(available_ids)})
raise exception.Invalid(msg)
elif sorted(available_ids)[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
return len(available_ids)
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.items():
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
# Not all libvirt drivers will support the get_vcpus_info()
#
# For example, LXC does not have a concept of vCPUs, while
# QEMU (TCG) traditionally handles all vCPUs in a single
# thread. So both will report an exception when the vcpus()
# API call is made. In such a case we should report the
# guest as having 1 vCPU, since that lets us still do
# CPU over commit calculations that apply as the total
# guest count scales.
#
# It is also possible that we might see an exception if
        # the guest is just in the middle of shutting down. Technically
        # we should report 0 for vCPU usage in this case, but we
        # can't reliably distinguish the vcpu not supported
# case from the just shutting down case. Thus we don't know
# whether to report 1 or 0 for vCPU count.
#
# Under-reporting vCPUs is bad because it could conceivably
# let the scheduler place too many guests on the host. Over-
# reporting vCPUs is not a problem as it'll auto-correct on
# the next refresh of usage data.
#
# Thus when getting an exception we always report 1 as the
# vCPU count, as the least worst value.
for guest in self._host.list_guests():
try:
vcpus = guest.get_vcpus_info()
total += len(list(vcpus))
except libvirt.libvirtError:
total += 1
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_supported_vgpu_types(self):
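        # Return the vGPU types enabled via [devices]/enabled_vgpu_types;
        # only the first configured type is honoured for now.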
if not CONF.devices.enabled_vgpu_types:
return []
# TODO(sbauza): Move this check up to compute_manager.init_host
if len(CONF.devices.enabled_vgpu_types) > 1:
LOG.warning('libvirt only supports one GPU type per compute node,'
                        ' only the first type will be used.')
requested_types = CONF.devices.enabled_vgpu_types[:1]
return requested_types
def _get_vgpu_total(self):
"""Returns the number of total available vGPUs for any GPU type that is
enabled with the enabled_vgpu_types CONF option.
"""
requested_types = self._get_supported_vgpu_types()
# Bail out early if operator doesn't care about providing vGPUs
if not requested_types:
return 0
# Filter how many available mdevs we can create for all the supported
# types.
mdev_capable_devices = self._get_mdev_capable_devices(requested_types)
vgpus = 0
for dev in mdev_capable_devices:
for _type in dev['types']:
vgpus += dev['types'][_type]['availableInstances']
# Count the already created (but possibly not assigned to a guest)
# mdevs for all the supported types
mediated_devices = self._get_mediated_devices(requested_types)
vgpus += len(mediated_devices)
return vgpus
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
Supported hypervisor_type is filtered by virt_type,
a parameter set by operators via `nova.conf`.
:returns: List of tuples describing instance capabilities
"""
caps = self._host.get_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
if dt != CONF.libvirt.virt_type:
continue
instance_cap = (
fields.Architecture.canonicalize(g.arch),
fields.HVType.canonicalize(dt),
fields.VMMode.canonicalize(g.ostype))
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities.
:return: see above description
"""
caps = self._host.get_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['cells'] = len(getattr(caps.host.topology, 'cells', [1]))
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = set()
for f in caps.host.cpu.features:
features.add(f.name)
cpu_info['features'] = features
return cpu_info
def _get_pcinet_info(self, vf_address):
"""Returns a dict of NET device."""
devname = pci_utils.get_net_name_by_vf_pci_address(vf_address)
if not devname:
return
virtdev = self._host.device_lookup_by_name(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
return {'name': cfgdev.name,
'capabilities': cfgdev.pci_capability.features}
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev, pci_address):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
hypervisor.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if fun_cap.type == 'virt_functions':
return {
'dev_type': fields.PciDeviceType.SRIOV_PF,
}
if (fun_cap.type == 'phys_function' and
len(fun_cap.device_addrs) != 0):
phys_address = "%04x:%02x:%02x.%01x" % (
fun_cap.device_addrs[0][0],
fun_cap.device_addrs[0][1],
fun_cap.device_addrs[0][2],
fun_cap.device_addrs[0][3])
return {
'dev_type': fields.PciDeviceType.SRIOV_VF,
'parent_addr': phys_address,
}
# Note(moshele): libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled. The check below is a workaround
# to get the correct report regardless of whether or not any
# VFs are enabled for the device.
if not self._host.has_min_version(
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION):
is_physical_function = pci_utils.is_physical_function(
*pci_utils.get_pci_address_fields(pci_address))
if is_physical_function:
return {'dev_type': fields.PciDeviceType.SRIOV_PF}
return {'dev_type': fields.PciDeviceType.STANDARD}
def _get_device_capabilities(device, address):
"""Get PCI VF device's additional capabilities.
If a PCI device is a virtual function, this function reads the PCI
parent's network capabilities (must be always a NIC device) and
appends this information to the device's dictionary.
"""
if device.get('dev_type') == fields.PciDeviceType.SRIOV_VF:
pcinet_info = self._get_pcinet_info(address)
if pcinet_info:
return {'capabilities':
{'network': pcinet_info.get('capabilities')}}
return {}
virtdev = self._host.device_lookup_by_name(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
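        # Build the canonical PCI address string ('dddd:bb:ss.f',
        # e.g. '0000:04:10.7') from the fields reported by libvirt.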
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": "%04x" % cfgdev.pci_capability.product_id,
"vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
}
device["numa_node"] = cfgdev.pci_capability.numa_node
# requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev, address))
device.update(_get_device_capabilities(device, address))
return device
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
        Refer to objects/pci_device.py for more details about these keys.
:returns: a JSON string containing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._host.list_pci_devices() or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warning("URI %(uri)s does not support "
"listDevices: %(error)s",
{'uri': self._uri(),
'error': encodeutils.exception_to_unicode(ex)})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_info.append(self._get_pcidev_info(name))
return jsonutils.dumps(pci_info)
def _get_mdev_capabilities_for_dev(self, devname, types=None):
"""Returns a dict of MDEV capable device with the ID as first key
and then a list of supported types, each of them being a dict.
:param types: Only return those specific types.
"""
virtdev = self._host.device_lookup_by_name(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
device = {
"dev_id": cfgdev.name,
"types": {},
}
for mdev_cap in cfgdev.pci_capability.mdev_capability:
for cap in mdev_cap.mdev_types:
if not types or cap['type'] in types:
device["types"].update({cap['type']: {
'availableInstances': cap['availableInstances'],
'name': cap['name'],
'deviceAPI': cap['deviceAPI']}})
return device
def _get_mdev_capable_devices(self, types=None):
"""Get host devices supporting mdev types.
Obtain devices information from libvirt and returns a list of
dictionaries.
:param types: Filter only devices supporting those types.
"""
if not self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT):
return []
dev_names = self._host.list_mdev_capable_devices() or []
mdev_capable_devices = []
for name in dev_names:
device = self._get_mdev_capabilities_for_dev(name, types)
if not device["types"]:
continue
mdev_capable_devices.append(device)
return mdev_capable_devices
def _get_mediated_device_information(self, devname):
"""Returns a dict of a mediated device."""
virtdev = self._host.device_lookup_by_name(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
device = {
"dev_id": cfgdev.name,
# name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4
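            # e.g. 'mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4' becomes
            # '00ead764-fdc0-46b6-8db9-2963f5c815b4'.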
"uuid": str(uuid.UUID(cfgdev.name[5:].replace('_', '-'))),
"type": cfgdev.mdev_information.type,
"iommu_group": cfgdev.mdev_information.iommu_group,
}
return device
def _get_mediated_devices(self, types=None):
"""Get host mediated devices.
Obtain devices information from libvirt and returns a list of
dictionaries.
:param types: Filter only devices supporting those types.
"""
if not self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT):
return []
dev_names = self._host.list_mediated_devices() or []
mediated_devices = []
for name in dev_names:
device = self._get_mediated_device_information(name)
if not types or device["type"] in types:
mediated_devices.append(device)
return mediated_devices
def _get_all_assigned_mediated_devices(self, instance=None):
"""Lookup all instances from the host and return all the mediated
devices that are assigned to a guest.
:param instance: Only return mediated devices for that instance.
:returns: A dictionary of keys being mediated device UUIDs and their
respective values the instance UUID of the guest using it.
"""
allocated_mdevs = {}
if instance:
guest = self._host.get_guest(instance)
guests = [guest]
else:
guests = self._host.list_guests(only_running=False)
for guest in guests:
cfg = guest.get_config()
for device in cfg.devices:
if isinstance(device, vconfig.LibvirtConfigGuestHostdevMDEV):
allocated_mdevs[device.uuid] = guest.uuid
return allocated_mdevs
@staticmethod
def _vgpu_allocations(allocations):
"""Filtering only the VGPU allocations from a list of allocations.
:param allocations: Information about resources allocated to the
instance via placement, of the form returned by
SchedulerReportClient.get_allocations_for_consumer.
"""
if not allocations:
# If no allocations, there is no vGPU request.
return {}
RC_VGPU = rc_fields.ResourceClass.VGPU
vgpu_allocations = {}
for rp in allocations:
res = allocations[rp]['resources']
if RC_VGPU in res and res[RC_VGPU] > 0:
vgpu_allocations[rp] = {'resources': {RC_VGPU: res[RC_VGPU]}}
return vgpu_allocations
def _get_existing_mdevs_not_assigned(self, requested_types=None):
"""Returns the already created mediated devices that are not assigned
to a guest yet.
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
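        # Available mdevs are those discovered on the host minus the ones
        # already assigned to a guest.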
available_mdevs = set([mdev["uuid"]
for mdev in mdevs]) - set(allocated_mdevs)
return available_mdevs
def _create_new_mediated_device(self, requested_types, uuid=None):
"""Find a physical device that can support a new mediated device and
create it.
:param requested_types: Filter only capable devices supporting those
types.
:param uuid: The possible mdev UUID we want to create again
:returns: the newly created mdev UUID or None if not possible
"""
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(requested_types)
for device in devices:
# For the moment, the libvirt driver only supports one
# type per host
# TODO(sbauza): Once we support more than one type, make
# sure we look at the flavor/trait for the asked type.
asked_type = requested_types[0]
if device['types'][asked_type]['availableInstances'] > 0:
# That physical GPU has enough room for a new mdev
dev_name = device['dev_id']
# We need the PCI address, not the libvirt name
# The libvirt name is like 'pci_0000_84_00_0'
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
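                # e.g. 'pci_0000_84_00_0' becomes '0000:84:00.0'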
chosen_mdev = nova.privsep.libvirt.create_mdev(pci_addr,
asked_type,
uuid=uuid)
return chosen_mdev
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
"""Returns a list of mediated device UUIDs corresponding to available
resources we can assign to the guest(s) corresponding to the allocation
requests passed as argument.
That method can either find an existing but unassigned mediated device
it can allocate, or create a new mediated device from a capable
physical device if the latter has enough left capacity.
:param allocations: Information about resources allocated to the
instance via placement, of the form returned by
SchedulerReportClient.get_allocations_for_consumer.
That code is supporting Placement API version 1.12
"""
vgpu_allocations = self._vgpu_allocations(allocations)
if not vgpu_allocations:
return
# TODO(sbauza): Once we have nested resource providers, find which one
# is having the related allocation for the specific VGPU type.
# For the moment, we should only have one allocation for
# ResourceProvider.
# TODO(sbauza): Iterate over all the allocations once we have
# nested Resource Providers. For the moment, just take the first.
if len(vgpu_allocations) > 1:
LOG.warning('More than one allocation was passed over to libvirt '
'while at the moment libvirt only supports one. Only '
'the first allocation will be looked up.')
alloc = six.next(six.itervalues(vgpu_allocations))
vgpus_asked = alloc['resources'][rc_fields.ResourceClass.VGPU]
requested_types = self._get_supported_vgpu_types()
# Which mediated devices are created but not assigned to a guest ?
mdevs_available = self._get_existing_mdevs_not_assigned(
requested_types)
chosen_mdevs = []
for c in six.moves.range(vgpus_asked):
chosen_mdev = None
if mdevs_available:
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
chosen_mdev = self._create_new_mediated_device(requested_types)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
raise exception.ComputeResourcesUnavailable(
reason='vGPU resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
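        # Hot-unplug every mediated device (vGPU) currently attached to
        # the guest.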
mdevs = guest.get_all_devices(
devtype=vconfig.LibvirtConfigGuestHostdevMDEV)
for mdev_cfg in mdevs:
try:
guest.detach_device(mdev_cfg, live=True)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
# NOTE(sbauza): There is a pending issue with libvirt that
# doesn't allow to hot-unplug mediated devices. Let's
# short-circuit the suspend action and set the instance back
# to ACTIVE.
# TODO(sbauza): Once libvirt supports this, amend the resume()
# operation to support reallocating mediated devices.
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
reason = _("Suspend is not supported for instances having "
"attached vGPUs.")
raise exception.InstanceFaultRollback(
exception.InstanceSuspendFailure(reason=reason))
else:
raise
def _has_numa_support(self):
# This means that the host can support LibvirtConfigGuestNUMATune
# and the nodeset field in LibvirtConfigGuestMemoryBackingPage
for ver in BAD_LIBVIRT_NUMA_VERSIONS:
if self._host.has_version(ver):
if not getattr(self, '_bad_libvirt_numa_version_warn', False):
LOG.warning('You are running with libvirt version %s '
'which is known to have broken NUMA support. '
'Consider patching or updating libvirt on '
'this host if you need NUMA support.',
libvirt_utils.version_to_string(ver))
self._bad_libvirt_numa_version_warn = True
return False
caps = self._host.get_capabilities()
if (caps.host.cpu.arch in (fields.Architecture.I686,
fields.Architecture.X86_64,
fields.Architecture.AARCH64) and
self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)):
return True
elif (caps.host.cpu.arch in (fields.Architecture.PPC64,
fields.Architecture.PPC64LE) and
self._host.has_min_version(MIN_LIBVIRT_NUMA_VERSION_PPC,
hv_type=host.HV_DRIVER_QEMU)):
return True
return False
def _get_host_numa_topology(self):
if not self._has_numa_support():
return
caps = self._host.get_capabilities()
topology = caps.host.topology
if topology is None or not topology.cells:
return
cells = []
allowed_cpus = hardware.get_vcpu_pin_set()
online_cpus = self._host.get_online_cpus()
if allowed_cpus:
allowed_cpus &= online_cpus
else:
allowed_cpus = online_cpus
def _get_reserved_memory_for_cell(self, cell_id, page_size):
cell = self._reserved_hugepages.get(cell_id, {})
return cell.get(page_size, 0)
for cell in topology.cells:
cpuset = set(cpu.id for cpu in cell.cpus)
siblings = sorted(map(set,
set(tuple(cpu.siblings)
if cpu.siblings else ()
for cpu in cell.cpus)
))
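            # Each entry in 'siblings' is the set of host CPU ids that are
            # hardware thread siblings of one another (one set per core).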
cpuset &= allowed_cpus
siblings = [sib & allowed_cpus for sib in siblings]
# Filter out empty sibling sets that may be left
siblings = [sib for sib in siblings if len(sib) > 0]
mempages = [
objects.NUMAPagesTopology(
size_kb=pages.size,
total=pages.total,
used=0,
reserved=_get_reserved_memory_for_cell(
self, cell.id, pages.size))
for pages in cell.mempages]
cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
memory=cell.memory / units.Ki,
cpu_usage=0, memory_usage=0,
siblings=siblings,
pinned_cpus=set([]),
mempages=mempages)
cells.append(cell)
return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id, instance=instance)
vol_stats = self.block_stats(instance, mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance, disk_id):
"""Note that this function takes an instance name."""
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s',
{'instance_name': instance.name, 'disk': disk_id,
'errcode': errcode, 'e': e},
instance=instance)
except exception.InstanceNotFound:
LOG.info('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device', instance.name,
instance=instance)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def get_inventory(self, nodename):
"""Return a dict, keyed by resource class, of inventory information for
the supplied node.
"""
disk_gb = int(self._get_local_gb_info()['total'])
memory_mb = int(self._host.get_memory_mb_total())
vcpus = self._get_vcpu_total()
# NOTE(sbauza): For the moment, the libvirt driver only supports
# providing the total number of virtual GPUs for a single GPU type. If
# you have multiple physical GPUs, each of them providing multiple GPU
# types, libvirt will return the total sum of virtual GPUs
# corresponding to the single type passed in enabled_vgpu_types
# configuration option. Eg. if you have 2 pGPUs supporting 'nvidia-35',
# each of them having 16 available instances, the total here will be
# 32.
# If one of the 2 pGPUs doesn't support 'nvidia-35', it won't be used.
# TODO(sbauza): Use ProviderTree and traits to make a better world.
vgpus = self._get_vgpu_total()
# NOTE(jaypipes): We leave some fields like allocation_ratio and
# reserved out of the returned dicts here because, for now at least,
# the RT injects those values into the inventory dict based on the
# compute_nodes record values.
result = {
rc_fields.ResourceClass.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
},
rc_fields.ResourceClass.MEMORY_MB: {
'total': memory_mb,
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
},
rc_fields.ResourceClass.DISK_GB: {
'total': disk_gb,
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
},
}
if vgpus > 0:
# Only provide VGPU resource classes if the driver supports it.
result[rc_fields.ResourceClass.VGPU] = {
'total': vgpus,
'min_unit': 1,
'max_unit': vgpus,
'step_size': 1,
}
return result
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: unused in this driver
:returns: dictionary containing resource info
"""
disk_info_dict = self._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = self._get_instance_capabilities()
data["vcpus"] = self._get_vcpu_total()
data["memory_mb"] = self._host.get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self._get_vcpu_used()
data["memory_mb_used"] = self._host.get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
data["cpu_info"] = jsonutils.dumps(self._get_cpu_info())
disk_free_gb = disk_info_dict['free']
disk_over_committed = self._get_disk_over_committed_size_total()
available_least = disk_free_gb * units.Gi - disk_over_committed
data['disk_available_least'] = available_least / units.Gi
data['pci_passthrough_devices'] = \
self._get_pci_passthrough_devices()
numa_topology = self._get_host_numa_topology()
if numa_topology:
data['numa_topology'] = numa_topology._to_json()
else:
data['numa_topology'] = None
return data
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.objects.instance.Instance object
:returns:
- tempfile: A dict containing the tempfile info on the destination
host
- None:
            1. If the instance path does not exist.
            2. If the image backend is a shared block storage type.
"""
if self.image_backend.backend().is_shared_block_storage():
return None
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a LibvirtLiveMigrateData object
"""
if disk_over_commit:
disk_available_gb = dst_compute_info['local_gb']
else:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = (
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
# Compare CPU
if not instance.vcpu_model or not instance.vcpu_model.model:
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(None, source_cpu_info, instance)
else:
self._compare_cpu(instance.vcpu_model, None, instance)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file(instance)
data = objects.LibvirtLiveMigrateData()
data.filename = filename
data.image_type = CONF.libvirt.images_type
data.graphics_listen_addr_vnc = CONF.vnc.server_listen
data.graphics_listen_addr_spice = CONF.spice.server_listen
if CONF.serial_console.enabled:
data.serial_listen_addr = CONF.serial_console.proxyclient_address
else:
data.serial_listen_addr = None
# Notes(eliqiao): block_migration and disk_over_commit are not
# nullable, so just don't set them if they are None
if block_migration is not None:
data.block_migration = block_migration
if disk_over_commit is not None:
data.disk_over_commit = disk_over_commit
data.disk_available_mb = disk_available_mb
return data
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data.filename
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
:returns: a LibvirtLiveMigrateData object
"""
if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
md_obj = objects.LibvirtLiveMigrateData()
md_obj.from_legacy_dict(dest_check_data)
dest_check_data = md_obj
# Checking shared storage connectivity
# if block migration, instances_path should not be on shared storage.
source = CONF.host
dest_check_data.is_shared_instance_path = (
self._check_shared_storage_test_file(
dest_check_data.filename, instance))
dest_check_data.is_shared_block_storage = (
self._is_shared_block_storage(instance, dest_check_data,
block_device_info))
if 'block_migration' not in dest_check_data:
dest_check_data.block_migration = (
not dest_check_data.is_on_shared_storage())
if dest_check_data.block_migration:
# TODO(eliqiao): Once block_migration flag is removed from the API
# we can safely remove the if condition
if dest_check_data.is_on_shared_storage():
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
if 'disk_over_commit' in dest_check_data:
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data.disk_available_mb,
dest_check_data.disk_over_commit,
block_device_info)
if block_device_info:
bdm = block_device_info.get('block_device_mapping')
# NOTE(pkoniszewski): libvirt from version 1.2.17 upwards
# supports selective block device migration. It means that it
# is possible to define subset of block devices to be copied
# during migration. If they are not specified - block devices
# won't be migrated. However, it does not work when live
# migration is tunnelled through libvirt.
if bdm and not self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
# NOTE(stpierre): if this instance has mapped volumes,
# we can't do a block migration, since that will result
# in volumes being copied from themselves to themselves,
# which is a recipe for disaster.
ver = ".".join([str(x) for x in
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION])
msg = (_('Cannot block migrate instance %(uuid)s with'
' mapped volumes. Selective block device'
' migration feature requires libvirt version'
' %(libvirt_ver)s') %
{'uuid': instance.uuid, 'libvirt_ver': ver})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
# NOTE(eliqiao): Selective disk migrations are not supported
# with tunnelled block migrations so we can block them early.
if (bdm and
(self._block_migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED != 0)):
msg = (_('Cannot block migrate instance %(uuid)s with'
' mapped volumes. Selective block device'
' migration is not supported with tunnelled'
' block migrations.') % {'uuid': instance.uuid})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
elif not (dest_check_data.is_shared_block_storage or
dest_check_data.is_shared_instance_path):
reason = _("Shared storage live-migration requires either shared "
"storage or boot-from-volume with no local disks.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data.instance_relative_path = instance_path
# NOTE(lyarwood): Used to indicate to the dest that the src is capable
# of wiring up the encrypted disk configuration for the domain.
# Note that this does not require the QEMU and Libvirt versions to
# decrypt LUKS to be installed on the source node. Only the Nova
# utility code to generate the correct XML is required, so we can
# default to True here for all computes >= Queens.
dest_check_data.src_supports_native_luks = True
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data,
block_device_info=None):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
:param instance: nova.objects.instance.Instance object
:param dest_check_data: dict with boolean fields image_type,
is_shared_instance_path, and is_volume_backed
"""
if (dest_check_data.obj_attr_is_set('image_type') and
CONF.libvirt.images_type == dest_check_data.image_type and
self.image_backend.backend().is_shared_block_storage()):
# NOTE(dgenin): currently true only for RBD image backend
return True
if (dest_check_data.is_shared_instance_path and
self.image_backend.backend().is_file_in_instance_path()):
# NOTE(angdraug): file based image backends (Flat, Qcow2)
# place block device files under the instance path
return True
if (dest_check_data.is_volume_backed and
not bool(self._get_instance_disk_info(instance,
block_device_info))):
return True
return False
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit,
block_device_info):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually compressed
        # on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual disk
        # size", which is specified as the maximum disk size.
        # (See `qemu-img info path-to-disk`.)
        # The scheduler recognizes that the destination host still has enough
        # disk space if real disk size < available disk size when
        # disk_over_commit is True, otherwise if
        # virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
disk_infos = self._get_instance_disk_info(instance, block_device_info)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance.uuid,
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, guest_cpu, host_cpu_str, instance):
"""Check the host is compatible with the requested CPU
:param guest_cpu: nova.objects.VirtCPUModel or None
:param host_cpu_str: JSON from _get_cpu_info() method
If the 'guest_cpu' parameter is not None, this will be
validated for migration compatibility with the host.
Otherwise the 'host_cpu_str' JSON string will be used for
validation.
:returns:
            None. If the given CPU info is not compatible with this host,
            an exception is raised.
"""
# NOTE(kchamart): Comparing host to guest CPU model for emulated
# guests (<domain type='qemu'>) should not matter -- in this
# mode (QEMU "TCG") the CPU is fully emulated in software and no
# hardware acceleration, like KVM, is involved. So, skip the CPU
# compatibility check for the QEMU domain type, and retain it for
# KVM guests.
if CONF.libvirt.virt_type not in ['kvm']:
return
if guest_cpu is None:
info = jsonutils.loads(host_cpu_str)
LOG.info('Instance launched has CPU info: %s', host_cpu_str)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
else:
cpu = self._vcpu_model_to_cpu_config(guest_cpu)
u = ("http://libvirt.org/html/libvirt-libvirt-host.html#"
"virCPUCompareResult")
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# unknown character exists in xml, then libvirt complains
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
ret = self._host.compare_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.debug("URI %(uri)s does not support cpu comparison. "
"It will be proceeded though. Error: %(error)s",
{'uri': self._uri(), 'error': e})
return
else:
LOG.error(m, {'ret': e, 'u': u})
raise exception.MigrationPreCheckError(
reason=m % {'ret': e, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self, instance):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file, instance=instance)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename, instance):
"""Confirms existence of the tmpfile under CONF.instances_path.
        Returns False if the tmpfile cannot be confirmed.
"""
# NOTE(tpatzig): if instances_path is a shared volume that is
# under heavy IO (many instances on many compute nodes),
# then checking the existence of the testfile fails,
# just because it takes longer until the client refreshes and new
# content gets visible.
# os.utime (like touch) on the directory forces the client to refresh.
os.utime(CONF.instances_path, None)
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
exists = False
else:
exists = True
LOG.debug('Check if temp file %s exists to indicate shared storage '
'is being used for migration. Exists? %s', tmp_file, exists,
instance=instance)
return exists
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = list(range(CONF.live_migration_retry_count))
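        # Poll once per second for the filter to appear, giving up after
        # live_migration_retry_count attempts.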
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.InternalError(msg % instance.name)
greenthread.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
"""
# 'dest' will be substituted into 'migration_uri' so ensure
        # it doesn't contain any characters that could be used to
        # exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
self._live_migration(context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def live_migration_abort(self, instance):
"""Aborting a running live-migration.
:param instance: instance object that is in migration
"""
guest = self._host.get_guest(instance)
dom = guest._domain
try:
dom.abortJob()
except libvirt.libvirtError as e:
LOG.error("Failed to cancel migration %s",
encodeutils.exception_to_unicode(e), instance=instance)
raise
def _verify_serial_console_is_disabled(self):
if CONF.serial_console.enabled:
msg = _('Your destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly you'
' must disable serial console.')
raise exception.MigrationError(reason=msg)
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, guest,
device_names, bandwidth):
"""Invoke the live migration operation
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
:param guest: the guest domain object
:param device_names: list of device names that are being migrated with
instance
:param bandwidth: MiB/s of bandwidth allowed for the migration at start
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
"""
try:
if migrate_data.block_migration:
migration_flags = self._block_migration_flags
else:
migration_flags = self._live_migration_flags
serial_listen_addr = libvirt_migrate.serial_listen_addr(
migrate_data)
if not serial_listen_addr:
# In this context we want to ensure that serial console is
# disabled on source node. This is because nova couldn't
# retrieve serial listen address from destination node, so we
# consider that destination node might have serial console
# disabled as well.
self._verify_serial_console_is_disabled()
# NOTE(aplanas) migrate_uri will have a value only in the
# case that `live_migration_inbound_addr` parameter is
# set, and we propose a non tunneled migration.
migrate_uri = None
if ('target_connect_addr' in migrate_data and
migrate_data.target_connect_addr is not None):
dest = migrate_data.target_connect_addr
if (migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED == 0):
migrate_uri = self._migrate_uri(dest)
params = None
new_xml_str = None
if CONF.libvirt.virt_type != "parallels":
new_xml_str = libvirt_migrate.get_updated_guest_xml(
# TODO(sahid): It's not a really good idea to pass
# the method _get_volume_config and we should to find
# a way to avoid this in future.
guest, migrate_data, self._get_volume_config)
if self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
params = {
'destination_xml': new_xml_str,
'migrate_disks': device_names,
}
# NOTE(pkoniszewski): Because of precheck which blocks
# tunnelled block live migration with mapped volumes we
# can safely remove migrate_disks when tunnelling is on.
# Otherwise we will block all tunnelled block migrations,
# even when an instance does not have volumes mapped.
# This is because selective disk migration is not
# supported in tunnelled block live migration. Also we
# cannot fallback to migrateToURI2 in this case because of
# bug #1398999
if (migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED != 0):
params.pop('migrate_disks')
# TODO(sahid): This should be in
# post_live_migration_at_source but no way to retrieve
# ports acquired on the host for the guest at this
# step. Since the domain is going to be removed from
# libvird on source host after migration, we backup the
# serial ports to release them if all went well.
serial_ports = []
if CONF.serial_console.enabled:
serial_ports = list(self._get_serial_ports_from_guest(guest))
guest.migrate(self._live_migration_uri(dest),
migrate_uri=migrate_uri,
flags=migration_flags,
params=params,
domain_xml=new_xml_str,
bandwidth=bandwidth)
for hostname, port in serial_ports:
serial_console.release_port(host=hostname, port=port)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Live Migration failure: %s", e, instance=instance)
# If 'migrateToURI' fails we don't know what state the
# VM instances on each host are in. Possibilities include
#
# 1. src==running, dst==none
#
# Migration failed & rolled back, or never started
#
# 2. src==running, dst==paused
#
# Migration started but is still ongoing
#
# 3. src==paused, dst==paused
#
# Migration data transfer completed, but switchover
# is still ongoing, or failed
#
# 4. src==paused, dst==running
#
# Migration data transfer completed, switchover
# happened but cleanup on source failed
#
# 5. src==none, dst==running
#
# Migration fully succeeded.
#
# Libvirt will aim to complete any migration operation
# or roll it back. So even if the migrateToURI call has
# returned an error, if the migration was not finished
# libvirt should clean up.
#
# So we take the error raise here with a pinch of salt
# and rely on the domain job info status to figure out
# what really happened to the VM, which is a much more
# reliable indicator.
#
# In particular we need to try very hard to ensure that
# Nova does not "forget" about the guest. ie leaving it
# running on a different host to the one recorded in
# the database, as that would be a serious resource leak
LOG.debug("Migration operation thread has finished",
instance=instance)
def _live_migration_copy_disk_paths(self, context, instance, guest):
'''Get list of disks to copy during migration
:param context: security context
:param instance: the instance being migrated
:param guest: the Guest instance being migrated
        Read-only devices, shareable devices, disks that are not file or
        block backed, and mapped volumes are excluded from the copy.
:returns: a list of local source paths and a list of device names to
copy
'''
disk_paths = []
device_names = []
block_devices = []
# TODO(pkoniszewski): Remove version check when we bump min libvirt
# version to >= 1.2.17.
if (self._block_migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED == 0 and
self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION)):
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = driver.get_block_device_info(instance,
bdm_list)
block_device_mappings = driver.block_device_info_get_mapping(
block_device_info)
for bdm in block_device_mappings:
device_name = str(bdm['mount_device'].rsplit('/', 1)[1])
block_devices.append(device_name)
for dev in guest.get_all_disks():
if dev.readonly or dev.shareable:
continue
if dev.source_type not in ["file", "block"]:
continue
if dev.target_dev in block_devices:
continue
disk_paths.append(dev.source_path)
device_names.append(dev.target_dev)
return (disk_paths, device_names)
def _live_migration_data_gb(self, instance, disk_paths):
'''Calculate total amount of data to be transferred
:param instance: the nova.objects.Instance being migrated
:param disk_paths: list of disk paths that are being migrated
with instance
Calculates the total amount of data that needs to be
transferred during the live migration. The actual
amount copied will be larger than this, due to the
guest OS continuing to dirty RAM while the migration
is taking place. So this value represents the minimal
data size possible.
:returns: data size to be copied in GB
'''
ram_gb = instance.flavor.memory_mb * units.Mi / units.Gi
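        # Floor RAM (and each disk below) at 2 GB so the returned value,
        # which later scales live_migration_completion_timeout, is never
        # unreasonably small for tiny instances.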
if ram_gb < 2:
ram_gb = 2
disk_gb = 0
for path in disk_paths:
try:
size = os.stat(path).st_size
size_gb = (size / units.Gi)
if size_gb < 2:
size_gb = 2
disk_gb += size_gb
except OSError as e:
LOG.warning("Unable to stat %(disk)s: %(ex)s",
{'disk': path, 'ex': e})
# Ignore error since we don't want to break
# the migration monitoring thread operation
return ram_gb + disk_gb
def _get_migration_flags(self, is_block_migration):
if is_block_migration:
return self._block_migration_flags
return self._live_migration_flags
def _live_migration_monitor(self, context, instance, guest,
dest, post_method,
recover_method, block_migration,
migrate_data, finish_event,
disk_paths):
on_migration_failure = deque()
data_gb = self._live_migration_data_gb(instance, disk_paths)
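        # downtime_steps() builds the schedule of allowed-downtime values
        # that update_downtime() steps through further down as the
        # migration runs.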
downtime_steps = list(libvirt_migrate.downtime_steps(data_gb))
migration = migrate_data.migration
curdowntime = None
migration_flags = self._get_migration_flags(
migrate_data.block_migration)
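        # 'n' counts monitoring passes; the loop below sleeps 0.5s per
        # pass, so n / 2 approximates elapsed seconds in the log output.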
n = 0
start = time.time()
progress_time = start
progress_watermark = None
previous_data_remaining = -1
is_post_copy_enabled = self._is_post_copy_enabled(migration_flags)
while True:
info = guest.get_job_info()
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Either still running, or failed or completed,
# lets untangle the mess
if not finish_event.ready():
LOG.debug("Operation thread is still running",
instance=instance)
else:
info.type = libvirt_migrate.find_job_type(guest, instance)
LOG.debug("Fixed incorrect job type to be %d",
info.type, instance=instance)
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Migration is not yet started
LOG.debug("Migration not running yet",
instance=instance)
elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
# Migration is still running
#
# This is where we wire up calls to change live
# migration status. eg change max downtime, cancel
# the operation, change max bandwidth
libvirt_migrate.run_tasks(guest, instance,
self.active_migrations,
on_migration_failure,
migration,
is_post_copy_enabled)
now = time.time()
elapsed = now - start
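                # Track the low watermark of data remaining; progress_time
                # is only refreshed while that watermark keeps dropping,
                # which is what should_abort() below uses to detect a
                # stalled migration.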
if ((progress_watermark is None) or
(progress_watermark == 0) or
(progress_watermark > info.data_remaining)):
progress_watermark = info.data_remaining
progress_time = now
progress_timeout = CONF.libvirt.live_migration_progress_timeout
completion_timeout = int(
CONF.libvirt.live_migration_completion_timeout * data_gb)
if libvirt_migrate.should_abort(instance, now, progress_time,
progress_timeout, elapsed,
completion_timeout,
migration.status):
try:
guest.abort_job()
except libvirt.libvirtError as e:
LOG.warning("Failed to abort migration %s",
encodeutils.exception_to_unicode(e),
instance=instance)
self._clear_empty_migration(instance)
raise
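                # When post-copy is enabled, should_switch_to_postcopy()
                # decides, from the current and previous data-remaining
                # counters, whether to flip the migration into post-copy
                # mode.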
if (is_post_copy_enabled and
libvirt_migrate.should_switch_to_postcopy(
info.memory_iteration, info.data_remaining,
previous_data_remaining, migration.status)):
libvirt_migrate.trigger_postcopy_switch(guest,
instance,
migration)
previous_data_remaining = info.data_remaining
curdowntime = libvirt_migrate.update_downtime(
guest, instance, curdowntime,
downtime_steps, elapsed)
# We loop every 500ms, so don't log on every
# iteration to avoid spamming logs for long
# running migrations. Just once every 5 secs
# is sufficient for developers to debug problems.
# We log once every 30 seconds at info to help
# admins see slow running migration operations
# when debug logs are off.
if (n % 10) == 0:
# Ignoring memory_processed, as due to repeated
# dirtying of data, this can be way larger than
# memory_total. Best to just look at what's
# remaining to copy and ignore what's done already
#
# TODO(berrange) perhaps we could include disk
# transfer stats in the progress too, but it
# might make memory info more obscure as large
# disk sizes might dwarf memory size
remaining = 100
if info.memory_total != 0:
remaining = round(info.memory_remaining *
100 / info.memory_total)
libvirt_migrate.save_stats(instance, migration,
info, remaining)
lg = LOG.debug
if (n % 60) == 0:
lg = LOG.info
lg("Migration running for %(secs)d secs, "
"memory %(remaining)d%% remaining; "
"(bytes processed=%(processed_memory)d, "
"remaining=%(remaining_memory)d, "
"total=%(total_memory)d)",
{"secs": n / 2, "remaining": remaining,
"processed_memory": info.memory_processed,
"remaining_memory": info.memory_remaining,
"total_memory": info.memory_total}, instance=instance)
if info.data_remaining > progress_watermark:
lg("Data remaining %(remaining)d bytes, "
"low watermark %(watermark)d bytes "
"%(last)d seconds ago",
{"remaining": info.data_remaining,
"watermark": progress_watermark,
"last": (now - progress_time)}, instance=instance)
n = n + 1
elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
# Migration is all done
LOG.info("Migration operation has completed",
instance=instance)
post_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
# Migration did not succeed
LOG.error("Migration operation has aborted", instance=instance)
libvirt_migrate.run_recover_tasks(self._host, guest, instance,
on_migration_failure)
recover_method(context, instance, dest, migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
# Migration was stopped by admin
LOG.warning("Migration operation was cancelled",
instance=instance)
libvirt_migrate.run_recover_tasks(self._host, guest, instance,
on_migration_failure)
recover_method(context, instance, dest, migrate_data,
migration_status='cancelled')
break
else:
LOG.warning("Unexpected migration job type: %d",
info.type, instance=instance)
time.sleep(0.5)
self._clear_empty_migration(instance)
def _clear_empty_migration(self, instance):
try:
del self.active_migrations[instance.uuid]
except KeyError:
LOG.warning("There are no records in active migrations "
"for instance", instance=instance)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration,
migrate_data):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
This fires off a new thread to run the blocking migration
operation, and then this thread monitors the progress of
migration and controls its operation
"""
guest = self._host.get_guest(instance)
disk_paths = []
device_names = []
if (migrate_data.block_migration and
CONF.libvirt.virt_type != "parallels"):
disk_paths, device_names = self._live_migration_copy_disk_paths(
context, instance, guest)
deadline = CONF.vif_plugging_timeout
if utils.is_neutron() and deadline:
# We don't generate events if CONF.vif_plugging_timeout=0
# meaning that the operator disabled using them.
# In case of Linux Bridge, the agent is waiting for new
# TAP devices on destination node. They are going to be
# created by libvirt at the very beginning of the
# live-migration process. Then receiving the events from
# Neutron will ensure that everything is configured
# correctly.
events = self._get_neutron_events_for_live_migration(
instance.get_network_info())
else:
            # TODO(sahid): This 'is_neutron()' condition should be
            # removed once nova-network is erased from the tree
            # (Rocky).
events = []
if events:
# We start migration with the minimum bandwidth
# speed. Depending on the VIF type (see:
# _get_neutron_events_for_live_migration) we will wait for
# Neutron to send events that confirm network is setup or
            # directly configure QEMU to use the maximum BW allowed.
bandwidth = MIN_MIGRATION_SPEED_BW
else:
bandwidth = CONF.libvirt.live_migration_bandwidth
try:
error_cb = self._neutron_failed_live_migration_callback
with self.virtapi.wait_for_instance_event(instance, events,
deadline=deadline,
error_callback=error_cb):
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, guest,
device_names, bandwidth)
except eventlet.timeout.Timeout:
msg = ('Timeout waiting for VIF plugging events, '
'canceling migration')
raise exception.MigrationError(reason=msg)
else:
if utils.is_neutron() and events:
LOG.debug('VIF events received, continuing migration '
'with max bandwidth configured: %d',
CONF.libvirt.live_migration_bandwidth,
instance=instance)
# Configure QEMU to use the maximum bandwidth allowed.
guest.migrate_configure_max_speed(
CONF.libvirt.live_migration_bandwidth)
finish_event = eventlet.event.Event()
self.active_migrations[instance.uuid] = deque()
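        # The operation thread signals completion through finish_event; the
        # monitor loop uses it to tell "not started yet" apart from "done".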
def thread_finished(thread, event):
LOG.debug("Migration operation thread notification",
instance=instance)
event.send()
opthread.link(thread_finished, finish_event)
# Let eventlet schedule the new thread right away
time.sleep(0)
try:
LOG.debug("Starting monitoring of live migration",
instance=instance)
self._live_migration_monitor(context, instance, guest, dest,
post_method, recover_method,
block_migration, migrate_data,
finish_event, disk_paths)
except Exception as ex:
LOG.warning("Error monitoring migration: %(ex)s",
{"ex": ex}, instance=instance, exc_info=True)
raise
finally:
LOG.debug("Live migration monitoring is all done",
instance=instance)
def _is_post_copy_enabled(self, migration_flags):
if self._is_post_copy_available():
if (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0:
return True
return False
def live_migration_force_complete(self, instance):
try:
self.active_migrations[instance.uuid].append('force-complete')
except KeyError:
raise exception.NoActiveMigrationForInstance(
instance_id=instance.uuid)
def _try_fetch_image(self, context, path, image_id, instance,
fallback_from_host=None):
try:
libvirt_utils.fetch_image(context, path, image_id)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore on "
"image service, attempting to copy image "
"from %(host)s",
{'image_id': image_id, 'host': fallback_from_host})
libvirt_utils.copy_image(src=path, dest=path,
host=fallback_from_host,
receive=True)
def _fetch_instance_kernel_ramdisk(self, context, instance,
fallback_from_host=None):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance.kernel_id:
kernel_path = os.path.join(instance_dir, 'kernel')
# NOTE(dsanders): only fetch image if it's not available at
# kernel_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(kernel_path):
self._try_fetch_image(context,
kernel_path,
instance.kernel_id,
instance, fallback_from_host)
if instance.ramdisk_id:
ramdisk_path = os.path.join(instance_dir, 'ramdisk')
# NOTE(dsanders): only fetch image if it's not available at
# ramdisk_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(ramdisk_path):
self._try_fetch_image(context,
ramdisk_path,
instance.ramdisk_id,
instance, fallback_from_host)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
try:
self.destroy(context, instance, network_info, block_device_info,
destroy_disks)
finally:
# NOTE(gcb): Failed block live migration may leave instance
# directory at destination node, ensure it is always deleted.
is_shared_instance_path = True
if migrate_data:
is_shared_instance_path = migrate_data.is_shared_instance_path
if (migrate_data.obj_attr_is_set("serial_listen_ports")
and migrate_data.serial_listen_ports):
# Releases serial ports reserved.
for port in migrate_data.serial_listen_ports:
serial_console.release_port(
host=migrate_data.serial_listen_addr, port=port)
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
shutil.rmtree(instance_dir)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
"""Preparation live migration."""
if disk_info is not None:
disk_info = jsonutils.loads(disk_info)
LOG.debug('migrate_data in pre_live_migration: %s', migrate_data,
instance=instance)
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
is_block_migration = migrate_data.block_migration
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
LOG.debug('Creating instance directory: %s', instance_dir,
instance=instance)
os.mkdir(instance_dir)
# Recreate the disk.info file and in doing so stop the
# imagebackend from recreating it incorrectly by inspecting the
# contents of each file when using the Raw backend.
if disk_info:
image_disk_info = {}
for info in disk_info:
image_file = os.path.basename(info['path'])
image_path = os.path.join(instance_dir, image_file)
image_disk_info[image_path] = info['type']
LOG.debug('Creating disk.info with the contents: %s',
image_disk_info, instance=instance)
image_disk_info_path = os.path.join(instance_dir,
'disk.info')
libvirt_utils.write_to_file(image_disk_info_path,
jsonutils.dumps(image_disk_info))
if not is_shared_block_storage:
# Ensure images and backing files are present.
LOG.debug('Checking to make sure images and backing files are '
'present before live migration.', instance=instance)
self._create_images_and_backing(
context, instance, instance_dir, disk_info,
fallback_from_host=instance.host)
if (configdrive.required_by(instance) and
CONF.config_drive_format == 'iso9660'):
# NOTE(pkoniszewski): Due to a bug in libvirt iso config
# drive needs to be copied to destination prior to
# migration when instance path is not shared and block
# storage is not shared. Files that are already present
# on destination are excluded from a list of files that
# need to be copied to destination. If we don't do that
# live migration will fail on copying iso config drive to
# destination and writing to read-only device.
# Please see bug/1246201 for more details.
src = "%s:%s/disk.config" % (instance.host, instance_dir)
self._remotefs.copy_file(src, instance_dir)
if not is_block_migration:
# NOTE(angdraug): when block storage is shared between source
# and destination and instance path isn't (e.g. volume backed
# or rbd backed instance), instance path on destination has to
# be prepared
# Required by Quobyte CI
self._ensure_console_log_for_instance(instance)
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if len(block_device_mapping):
LOG.debug('Connecting volumes before live migration.',
instance=instance)
for bdm in block_device_mapping:
connection_info = bdm['connection_info']
# NOTE(lyarwood): Handle the P to Q LM during upgrade use case
# where an instance has encrypted volumes attached using the
# os-brick encryptors. Do not attempt to attach the encrypted
                # volume using native LUKS decryption on the destination.
src_native_luks = False
if migrate_data.obj_attr_is_set('src_supports_native_luks'):
src_native_luks = migrate_data.src_supports_native_luks
dest_native_luks = self._is_native_luks_available()
allow_native_luks = src_native_luks and dest_native_luks
self._connect_volume(context, connection_info, instance,
allow_native_luks=allow_native_luks)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously and
        # concurrent iptables updates can conflict and fail.
LOG.debug('Plugging VIFs before live migration.', instance=instance)
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
                    LOG.warning('plug_vifs() failed %(cnt)d time(s). '
                                'Retrying up to %(max_retry)d times.',
{'cnt': cnt, 'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
# Store server_listen and latest disk device info
if not migrate_data:
migrate_data = objects.LibvirtLiveMigrateData(bdms=[])
else:
migrate_data.bdms = []
# Store live_migration_inbound_addr
migrate_data.target_connect_addr = \
CONF.libvirt.live_migration_inbound_addr
migrate_data.supported_perf_events = self._supported_perf_events
migrate_data.serial_listen_ports = []
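        # Reserve one serial console port per configured guest serial
        # device so they can be wired up on this (destination) host.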
if CONF.serial_console.enabled:
num_ports = hardware.get_number_of_serial_ports(
instance.flavor, instance.image_meta)
for port in six.moves.range(num_ports):
migrate_data.serial_listen_ports.append(
serial_console.acquire_port(
migrate_data.serial_listen_addr))
for vol in block_device_mapping:
connection_info = vol['connection_info']
if connection_info.get('serial'):
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type,
instance.image_meta, vol)
bdmi = objects.LibvirtLiveMigrateBDMInfo()
bdmi.serial = connection_info['serial']
bdmi.connection_info = connection_info
bdmi.bus = disk_info['bus']
bdmi.dev = disk_info['dev']
bdmi.type = disk_info['type']
bdmi.format = disk_info.get('format')
bdmi.boot_index = disk_info.get('boot_index')
volume_secret = self._host.find_secret('volume', vol.volume_id)
if volume_secret:
bdmi.encryption_secret_uuid = volume_secret.UUIDString()
migrate_data.bdms.append(bdmi)
return migrate_data
def _try_fetch_image_cache(self, image, fetch_func, context, filename,
image_id, instance, size,
fallback_from_host=None):
try:
image.cache(fetch_func=fetch_func,
context=context,
filename=filename,
image_id=image_id,
size=size)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore "
"on image service, attempting to copy "
"image from %(host)s",
{'image_id': image_id, 'host': fallback_from_host},
instance=instance)
def copy_from_host(target):
libvirt_utils.copy_image(src=target,
dest=target,
host=fallback_from_host,
receive=True)
image.cache(fetch_func=copy_from_host,
filename=filename)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info, fallback_from_host=None):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info:
disk info specified in _get_instance_disk_info_from_config
(list of dicts)
:param fallback_from_host:
host where we can retrieve images if the glance images are
not available.
"""
# Virtuozzo containers don't use backing file
if (CONF.libvirt.virt_type == "parallels" and
instance.vm_mode == fields.VMMode.EXE):
return
if not disk_info:
disk_info = []
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
disk = self.image_backend.by_name(instance, instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
# The argument 'size' is used by image.cache to
# validate disk size retrieved from cache against
# the instance disk size (should always return OK)
# and ephemeral_size is used by _create_ephemeral
# to build the image if the disk is not already
# cached.
disk.cache(
fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance.os_type,
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=info['virt_disk_size'] / units.Gi)
elif cache_name.startswith('swap'):
inst_type = instance.get_flavor()
swap_mb = inst_type.swap
disk.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
else:
self._try_fetch_image_cache(disk,
libvirt_utils.fetch_image,
context, cache_name,
instance.image_ref,
instance,
info['virt_disk_size'],
fallback_from_host)
# if disk has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(
context, instance, fallback_from_host=fallback_from_host)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_api = self._volume_api
for vol in block_device_mapping:
volume_id = vol['connection_info']['serial']
if vol['attachment_id'] is None:
# Cinder v2 api flow: Retrieve connection info from Cinder's
# initialize_connection API. The info returned will be
# accurate for the source server.
connector = self.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(
context, volume_id, connector)
else:
# cinder v3.44 api flow: Retrieve the connection_info for
# the old attachment from cinder.
old_attachment_id = \
migrate_data.old_vol_attachment_ids[volume_id]
old_attachment = volume_api.attachment_get(
context, old_attachment_id)
connection_info = old_attachment['connection_info']
# TODO(leeantho) The following multipath_id logic is temporary
# and will be removed in the future once os-brick is updated
# to handle multipath for drivers in a more efficient way.
# For now this logic is needed to ensure the connection info
# data is correct.
# Pull out multipath_id from the bdm information. The
# multipath_id can be placed into the connection info
# because it is based off of the volume and will be the
# same on the source and destination hosts.
if 'multipath_id' in vol['connection_info']['data']:
multipath_id = vol['connection_info']['data']['multipath_id']
connection_info['data']['multipath_id'] = multipath_id
self._disconnect_volume(context, connection_info, instance)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# The source node set the VIR_MIGRATE_PERSIST_DEST flag when live
# migrating so the guest xml should already be persisted on the
# destination host, so just perform a sanity check to make sure it
# made it as expected.
self._host.get_guest(instance)
def _get_instance_disk_info_from_config(self, guest_config,
block_device_info):
"""Get the non-volume disk information from the domain xml
:param LibvirtConfigGuest guest_config: the libvirt domain config
for the instance
:param dict block_device_info: block device info for BDMs
:returns disk_info: list of dicts with keys:
* 'type': the disk type (str)
* 'path': the disk path (str)
* 'virt_disk_size': the virtual disk size (int)
* 'backing_file': backing file of a disk image (str)
* 'disk_size': physical disk size (int)
* 'over_committed_disk_size': virt_disk_size - disk_size or 0
"""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
if (guest_config.virt_type == 'parallels' and
guest_config.os_type == fields.VMMode.EXE):
node_type = 'filesystem'
else:
node_type = 'disk'
for device in guest_config.devices:
if device.root_name != node_type:
continue
disk_type = device.source_type
if device.root_name == 'filesystem':
target = device.target_dir
if device.source_type == 'file':
path = device.source_file
elif device.source_type == 'block':
path = device.source_dev
else:
path = None
else:
target = device.target_dev
path = device.source_path
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
guest_config.name)
continue
if disk_type not in ['file', 'block']:
                LOG.debug('skipping disk %s because it looks like a volume',
                          path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
if device.root_name == 'filesystem':
driver_type = device.driver_type
else:
driver_type = device.driver_format
# get the real disk size or
# raise a localized error if image is unavailable
if disk_type == 'file':
if driver_type == 'ploop':
dk_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
dk_size += os.path.getsize(fp)
else:
dk_size = disk_api.get_allocated_disk_size(path)
elif disk_type == 'block' and block_device_info:
dk_size = lvm.get_volume_size(path)
else:
LOG.debug('skipping disk %(path)s (%(target)s) - unable to '
'determine if volume',
{'path': path, 'target': target})
continue
if driver_type in ("qcow2", "ploop"):
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk_api.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': driver_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return disk_info
def _get_instance_disk_info(self, instance, block_device_info):
try:
guest = self._host.get_guest(instance)
config = guest.get_config()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s',
{'instance_name': instance.name,
'error_code': error_code,
'ex': encodeutils.exception_to_unicode(ex)},
instance=instance)
raise exception.InstanceNotFound(instance_id=instance.uuid)
return self._get_instance_disk_info_from_config(config,
block_device_info)
def get_instance_disk_info(self, instance,
block_device_info=None):
return jsonutils.dumps(
self._get_instance_disk_info(instance, block_device_info))
def _get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
# Disk size that all instance uses : virtual_size - disk_size
disk_over_committed_size = 0
instance_domains = self._host.list_instance_domains(only_running=False)
if not instance_domains:
return disk_over_committed_size
# Get all instance uuids
instance_uuids = [dom.UUIDString() for dom in instance_domains]
ctx = nova_context.get_admin_context()
# Get instance object list by uuid filter
filters = {'uuid': instance_uuids}
# NOTE(ankit): objects.InstanceList.get_by_filters method is
# getting called twice one is here and another in the
# _update_available_resource method of resource_tracker. Since
# _update_available_resource method is synchronized, there is a
# possibility the instances list retrieved here to calculate
# disk_over_committed_size would differ to the list you would get
# in _update_available_resource method for calculating usages based
# on instance utilization.
local_instance_list = objects.InstanceList.get_by_filters(
ctx, filters, use_slave=True)
# Convert instance list to dictionary with instance uuid as key.
local_instances = {inst.uuid: inst for inst in local_instance_list}
# Get bdms by instance uuids
bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
ctx, instance_uuids)
for dom in instance_domains:
try:
guest = libvirt_guest.Guest(dom)
config = guest.get_config()
block_device_info = None
if guest.uuid in local_instances \
and (bdms and guest.uuid in bdms):
# Get block device info for instance
block_device_info = driver.get_block_device_info(
local_instances[guest.uuid], bdms[guest.uuid])
disk_infos = self._get_instance_disk_info_from_config(
config, block_device_info)
if not disk_infos:
continue
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning(
'Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s',
{'instance_name': guest.name,
'error_code': error_code,
'ex': encodeutils.exception_to_unicode(ex)})
except OSError as e:
if e.errno in (errno.ENOENT, errno.ESTALE):
LOG.warning('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.',
{'i_name': guest.name})
elif e.errno == errno.EACCES:
LOG.warning('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
'due to a VM that exists on the compute '
'node but is not managed by Nova.',
{'i_name': guest.name})
else:
raise
except exception.VolumeBDMPathNotFound as e:
LOG.warning('Periodic task is updating the host stats, '
'it is trying to get disk info for %(i_name)s, '
'but the backing volume block device was removed '
'by concurrent operations such as resize. '
'Error: %(error)s',
{'i_name': guest.name, 'error': e})
except exception.DiskNotFound:
with excutils.save_and_reraise_exception() as err_ctxt:
# If the instance is undergoing a task state transition,
# like moving to another host or is being deleted, we
# should ignore this instance and move on.
if guest.uuid in local_instances:
inst = local_instances[guest.uuid]
if inst.task_state is not None:
LOG.info('Periodic task is updating the host '
'stats; it is trying to get disk info '
'for %(i_name)s, but the backing disk '
'was removed by a concurrent operation '
'(task_state=%(task_state)s)',
{'i_name': guest.name,
'task_state': inst.task_state},
instance=inst)
err_ctxt.reraise = False
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
def get_host_uptime(self):
"""Returns the result of calling "uptime"."""
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
shutil.rmtree(inst_base, ignore_errors=True)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
self._remotefs.remove_dir(dest, inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
# NOTE (drwahl): Actually, there is a 3rd way: if images_type is rbd,
# it will always be shared storage
if CONF.libvirt.images_type == 'rbd':
return True
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuidutils.generate_uuid(dashed=False) + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
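            # Create a file on the destination over the remote FS; if it
            # shows up locally the instance path is on shared storage,
            # otherwise remove the remote temp file again.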
try:
self._remotefs.create_file(dest, tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
self._remotefs.remove_file(dest, tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
# get_bdm_ephemeral_disk_size() will return 0 if the new
# instance's requested block device mapping contain no
# ephemeral devices. However, we still want to check if
# the original instance's ephemeral_gb property was set and
# ensure that the new requested flavor ephemeral size is greater
eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
instance.flavor.ephemeral_gb)
# Checks if the migration needs a disk resize down.
root_down = flavor.root_gb < instance.flavor.root_gb
ephemeral_down = flavor.ephemeral_gb < eph_size
booted_from_volume = self._is_booted_from_volume(block_device_info)
if (root_down and not booted_from_volume) or ephemeral_down:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
reason = _("Migration is not supported for LVM backed instances")
raise exception.InstanceFaultRollback(
exception.MigrationPreCheckError(reason=reason))
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
try:
self._remotefs.create_dir(dest, inst_base)
except processutils.ProcessExecutionError as e:
reason = _("not able to execute ssh command: %s") % e
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
self.power_off(instance, timeout, retry_interval)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
self._disconnect_volume(context, connection_info, instance)
disk_info = self._get_instance_disk_info(instance, block_device_info)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
fileutils.ensure_tree(inst_base)
on_execute = lambda process: \
self.job_tracker.add_job(instance, process.pid)
on_completion = lambda process: \
self.job_tracker.remove_job(instance, process.pid)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
# We will not copy over the swap disk here, and rely on
# finish_migration to re-create it for us. This is ok because
# the OS is shut down, and as recreating a swap disk is very
# cheap it is more efficient than copying either locally or
# over the network. This also means we don't have to resize it.
if fname == 'disk.swap':
continue
compression = info['type'] not in NO_COMPRESSION_TYPES
libvirt_utils.copy_image(from_path, img_path, host=dest,
on_execute=on_execute,
on_completion=on_completion,
compression=compression)
# Ensure disk.info is written to the new path to avoid disks being
# reinspected and potentially changing format.
src_disk_info_path = os.path.join(inst_base_resize, 'disk.info')
if os.path.exists(src_disk_info_path):
dst_disk_info_path = os.path.join(inst_base, 'disk.info')
libvirt_utils.copy_image(src_disk_info_path,
dst_disk_info_path,
host=dest, on_execute=on_execute,
on_completion=on_completion)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return jsonutils.dumps(disk_info)
def _wait_for_running(self, instance):
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info("Instance running successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
# assume _create_image does nothing if a target file exists.
# NOTE: This has the intended side-effect of fetching a missing
# backing file.
self._create_image(context, instance, block_disk_info['mapping'],
block_device_info=block_device_info,
ignore_bdi_for_swap=True,
fallback_from_host=migration.source_compute)
# Required by Quobyte CI
self._ensure_console_log_for_instance(instance)
gen_confdrive = functools.partial(
self._create_configdrive, context, instance,
InjectionInfo(admin_pass=None, network_info=network_info,
files=None))
# Convert raw disks to qcow2 if migrating to host which uses
# qcow2 from host which uses raw.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
path = info['path']
disk_name = os.path.basename(path)
# NOTE(mdbooth): The code below looks wrong, but is actually
# required to prevent a security hole when migrating from a host
# with use_cow_images=False to one with use_cow_images=True.
# Imagebackend uses use_cow_images to select between the
# atrociously-named-Raw and Qcow2 backends. The Qcow2 backend
# writes to disk.info, but does not read it as it assumes qcow2.
# Therefore if we don't convert raw to qcow2 here, a raw disk will
# be incorrectly assumed to be qcow2, which is a severe security
# flaw. The reverse is not true, because the atrociously-named-Raw
# backend supports both qcow2 and raw disks, and will choose
# appropriately between them as long as disk.info exists and is
# correctly populated, which it is because Qcow2 writes to
# disk.info.
#
# In general, we do not yet support format conversion during
# migration. For example:
# * Converting from use_cow_images=True to use_cow_images=False
# isn't handled. This isn't a security bug, but is almost
# certainly buggy in other cases, as the 'Raw' backend doesn't
# expect a backing file.
# * Converting to/from lvm and rbd backends is not supported.
#
# This behaviour is inconsistent, and therefore undesirable for
# users. It is tightly-coupled to implementation quirks of 2
# out of 5 backends in imagebackend and defends against a severe
# security flaw which is not at all obvious without deep analysis,
# and is therefore undesirable to developers. We should aim to
# remove it. This will not be possible, though, until we can
# represent the storage layout of a specific instance
# independent of the default configuration of the local compute
# host.
# Config disks are hard-coded to be raw even when
# use_cow_images=True (see _get_disk_config_image_type),so don't
# need to be converted.
if (disk_name != 'disk.config' and
info['type'] == 'raw' and CONF.use_cow_images):
self._disk_raw_to_qcow2(info['path'])
xml = self._get_guest_xml(context, instance, network_info,
block_disk_info, image_meta,
block_device_info=block_device_info)
# NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
# or not we've migrated to another host, because we unplug VIFs locally
# and the status change in the port might go undetected by the neutron
# L2 agent (or neutron server) so neutron may not know that the VIF was
# unplugged in the first place and never send an event.
guest = self._create_domain_and_network(context, xml, instance,
network_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True,
post_xml_callback=gen_confdrive)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
# Sync guest time after migration.
guest.sync_guest_time()
LOG.debug("finish_migration finished successfully.", instance=instance)
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
root_disk = self.image_backend.by_name(instance, 'disk')
# Once we rollback, the snapshot is no longer needed, so remove it
# TODO(nic): Remove the try/except/finally in a future release
# To avoid any upgrade issues surrounding instances being in pending
# resize state when the software is updated, this portion of the
# method logs exceptions rather than failing on them. Once it can be
# reasonably assumed that no such instances exist in the wild
# anymore, the try/except/finally should be removed,
# and ignore_errors should be set back to False (the default) so
# that problems throw errors, like they should.
if root_disk.exists():
try:
root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
except exception.SnapshotNotFound:
LOG.warning("Failed to rollback snapshot (%s)",
libvirt_utils.RESIZE_SNAPSHOT_NAME)
finally:
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
LOG.debug("finish_revert_migration finished successfully.",
instance=instance)
def confirm_migration(self, context, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(context, instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
def get_diagnostics(self, instance):
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
for vcpu in guest.get_vcpus_info():
output["cpu" + str(vcpu.id) + "_time"] = vcpu.time
except libvirt.libvirtError:
pass
# get io status
xml = guest.get_xml_desc()
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
output[guest_disk + "_read_req"] = stats[0]
output[guest_disk + "_read"] = stats[1]
output[guest_disk + "_write_req"] = stats[2]
output[guest_disk + "_write"] = stats[3]
output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def get_instance_diagnostics(self, instance):
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
# TODO(sahid): Needs to use get_info but more changes have to
# be done since a mapping STATE_MAP LIBVIRT_POWER_STATE is
# needed.
(state, max_mem, mem, num_cpu, cpu_time) = \
guest._get_domain_info(self._host)
config_drive = configdrive.required_by(instance)
launched_at = timeutils.normalize_time(instance.launched_at)
uptime = timeutils.delta_seconds(launched_at,
timeutils.utcnow())
diags = diagnostics_obj.Diagnostics(state=power_state.STATE_MAP[state],
driver='libvirt',
config_drive=config_drive,
hypervisor=CONF.libvirt.virt_type,
hypervisor_os='linux',
uptime=uptime)
diags.memory_details = diagnostics_obj.MemoryDiagnostics(
maximum=max_mem / units.Mi,
used=mem / units.Mi)
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
for vcpu in guest.get_vcpus_info():
diags.add_cpu(id=vcpu.id, time=vcpu.time)
except libvirt.libvirtError:
pass
# get io status
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
diags.add_disk(read_bytes=stats[1],
read_requests=stats[0],
write_bytes=stats[3],
write_requests=stats[2],
errors_count=stats[4])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if diags.nic_details:
nodes = xml_doc.findall('./devices/interface/mac')
for index, node in enumerate(nodes):
diags.nic_details[index].mac_address = node.get('address')
return diags
@staticmethod
def _prepare_device_bus(dev):
"""Determines the device bus and its hypervisor assigned address
"""
bus = None
address = (dev.device_addr.format_address() if
dev.device_addr else None)
if isinstance(dev.device_addr,
vconfig.LibvirtConfigGuestDeviceAddressPCI):
bus = objects.PCIDeviceBus()
elif isinstance(dev, vconfig.LibvirtConfigGuestDisk):
if dev.target_bus == 'scsi':
bus = objects.SCSIDeviceBus()
elif dev.target_bus == 'ide':
bus = objects.IDEDeviceBus()
elif dev.target_bus == 'usb':
bus = objects.USBDeviceBus()
if address is not None and bus is not None:
bus.address = address
return bus
def _build_device_metadata(self, context, instance):
"""Builds a metadata object for instance devices, that maps the user
provided tag to the hypervisor assigned device address.
"""
def _get_device_name(bdm):
return block_device.strip_dev(bdm.device_name)
network_info = instance.info_cache.network_info
vlans_by_mac = netutils.get_cached_vifs_with_vlan(network_info)
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
vifs_to_expose = {vif.address: vif for vif in vifs
if ('tag' in vif and vif.tag) or
vlans_by_mac.get(vif.address)}
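        # Only expose interfaces that carry a user-supplied tag or have a
        # VLAN recorded in the cached network info; they are matched to
        # guest devices by MAC address below.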
# TODO(mriedem): We should be able to avoid the DB query here by using
# block_device_info['block_device_mapping'] which is passed into most
# methods that call this function.
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
tagged_bdms = {_get_device_name(bdm): bdm for bdm in bdms if bdm.tag}
devices = []
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_dom)
for dev in guest_config.devices:
# Build network interfaces related metadata
if isinstance(dev, vconfig.LibvirtConfigGuestInterface):
vif = vifs_to_expose.get(dev.mac_addr)
if not vif:
continue
bus = self._prepare_device_bus(dev)
device = objects.NetworkInterfaceMetadata(mac=vif.address)
if 'tag' in vif and vif.tag:
device.tags = [vif.tag]
if bus:
device.bus = bus
vlan = vlans_by_mac.get(vif.address)
if vlan:
device.vlan = int(vlan)
devices.append(device)
# Build disks related metadata
if isinstance(dev, vconfig.LibvirtConfigGuestDisk):
bdm = tagged_bdms.get(dev.target_dev)
if not bdm:
continue
bus = self._prepare_device_bus(dev)
device = objects.DiskMetadata(tags=[bdm.tag])
# NOTE(artom) Setting the serial (which corresponds to
# volume_id in BlockDeviceMapping) in DiskMetadata allows us to
# find the disks's BlockDeviceMapping object when we detach the
# volume and want to clean up its metadata.
device.serial = bdm.volume_id
if bus:
device.bus = bus
devices.append(device)
if devices:
dev_meta = objects.InstanceDeviceMetadata(devices=devices)
return dev_meta
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path,
instance=instance)
shared_instance_path = os.access(instance_path, os.W_OK)
# NOTE(flwang): For shared block storage scenario, the file system is
# not really shared by the two hosts, but the volume of evacuated
# instance is reachable.
shared_block_storage = (self.image_backend.backend().
is_shared_block_storage())
return shared_instance_path or shared_block_storage
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
# A resize may be in progress
target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so we rename the path
        # to target + '_del' (because rename is atomic) and iterate twice in
        # the unlikely event that a concurrent rename occurs between the two
        # rename attempts in this method. In general this method
# should be fairly thread-safe without these additional checks, since
# other operations involving renames are not permitted when the task
# state is not None and the task state should be set to something
# other than None by the time this method is invoked.
target_del = target + '_del'
for i in range(2):
try:
utils.execute('mv', target, target_del)
break
except Exception:
pass
try:
utils.execute('mv', target_resize, target_del)
break
except Exception:
pass
# Either the target or target_resize path may still exist if all
# rename attempts failed.
remaining_path = None
for p in (target, target_resize):
if os.path.exists(p):
remaining_path = p
break
# A previous delete attempt may have been interrupted, so target_del
# may exist even if all rename attempts during the present method
# invocation failed due to the absence of both target and
# target_resize.
if not remaining_path and os.path.exists(target_del):
self.job_tracker.terminate_jobs(instance)
LOG.info('Deleting instance files %s', target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
LOG.error('Failed to cleanup directory %(target)s: %(e)s',
{'target': target_del, 'e': e}, instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if remaining_path and os.path.exists(remaining_path):
LOG.info('Deletion of %s failed', remaining_path,
instance=instance)
return False
LOG.info('Deletion of %s complete', target_del, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(
instance, CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
instance, CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
instance, CONF.libvirt.virt_type, image_meta,
root_bdm, disk_bus, cdrom_bus)
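        # NOTE: root_info['dev'] is a bare device name such as 'vda' (the
        # exact name depends on the buses computed above); prepend_dev
        # normalizes it to a '/dev/...' path, e.g. 'vda' -> '/dev/vda'.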
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
block_device_mapping = list(itertools.chain(*block_device_lists))
# NOTE(ndipanov): Null out the device names so that blockinfo code
# will assign them
for bdm in block_device_mapping:
if bdm.device_name is not None:
LOG.warning(
"Ignoring supplied device name: %(device_name)s. "
"Libvirt can't honour user-supplied dev names",
{'device_name': bdm.device_name}, instance=instance)
bdm.device_name = None
block_device_info = driver.get_block_device_info(instance,
block_device_mapping)
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance,
block_device_info,
instance.image_meta)
def get_device_name_for_instance(self, instance, bdms, block_device_obj):
block_device_info = driver.get_block_device_info(instance, bdms)
instance_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
instance.image_meta, block_device_info=block_device_info)
suggested_dev_name = block_device_obj.device_name
if suggested_dev_name is not None:
LOG.warning(
'Ignoring supplied device name: %(suggested_dev)s',
{'suggested_dev': suggested_dev_name}, instance=instance)
# NOTE(ndipanov): get_info_from_bdm will generate the new device name
# only when it's actually not set on the bd object
block_device_obj.device_name = None
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, instance.image_meta,
block_device_obj, mapping=instance_info['mapping'])
return block_device.prepend_dev(disk_info['dev'])
def is_supported_fs_format(self, fs_type):
return fs_type in [nova.privsep.fs.FS_FORMAT_EXT2,
nova.privsep.fs.FS_FORMAT_EXT3,
nova.privsep.fs.FS_FORMAT_EXT4,
nova.privsep.fs.FS_FORMAT_XFS]
|
the-stack_0_7852 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
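# Minimal usage sketch (illustrative only; nothing in this module runs it):
#
#   rpc = AuthServiceProxyWrapper(raw_proxy, coverage_logfile='/tmp/rpc_cov.txt')
#   rpc.getblockcount()   # the call is delegated to raw_proxy and the name
#                         # "getblockcount" is appended to the logfile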
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `bitwin24-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
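    # node.help() output consists of section headers such as "== Blockchain =="
    # followed by usage lines like "getblockcount"; the first whitespace-
    # separated token of each non-header line is taken as the command name.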
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
|
the-stack_0_7855 | """
Test message and address parsing/formatting functions.
"""
from email.header import Header
from email.headerregistry import Address
from email.message import EmailMessage, Message
import pytest
from hypothesis import example, given
from hypothesis.strategies import emails
from aiosmtplib.email import (
extract_recipients,
extract_sender,
flatten_message,
parse_address,
quote_address,
)
@pytest.mark.parametrize(
"address, expected_address",
(
('"A.Smith" <[email protected]>', "[email protected]"),
("Pepé Le Pew <pé[email protected]>", "pé[email protected]"),
("<[email protected]>", "[email protected]"),
("B. Smith <[email protected]", "[email protected]"),
),
ids=("quotes", "nonascii", "newtld", "missing_end_<"),
)
def test_parse_address_with_display_names(address, expected_address):
parsed_address = parse_address(address)
assert parsed_address == expected_address
@given(emails())
@example("email@[123.123.123.123]")
@example("[email protected]")
def test_parse_address(email):
assert parse_address(email) == email
@pytest.mark.parametrize(
"address, expected_address",
(
('"A.Smith" <[email protected]>', "<[email protected]>"),
("Pepé Le Pew <pé[email protected]>", "<pé[email protected]>"),
("<[email protected]>", "<[email protected]>"),
("email@[123.123.123.123]", "<email@[123.123.123.123]>"),
("[email protected]", "<[email protected]>"),
("B. Smith <[email protected]", "<[email protected]>"),
),
ids=("quotes", "nonascii", "newtld", "ipaddr", "underscores", "missing_end_quote"),
)
def test_quote_address_with_display_names(address, expected_address):
quoted_address = quote_address(address)
assert quoted_address == expected_address
@given(emails())
@example("email@[123.123.123.123]")
@example("[email protected]")
def test_quote_address(email):
assert quote_address(email) == "<{}>".format(email)
def test_flatten_message():
message = EmailMessage()
message["To"] = "[email protected]"
message["Subject"] = "Hello, World."
message["From"] = "[email protected]"
message.set_content("This is a test")
flat_message = flatten_message(message)
expected_message = b"""To: [email protected]\r
Subject: Hello, World.\r
From: [email protected]\r
Content-Type: text/plain; charset="utf-8"\r
Content-Transfer-Encoding: 7bit\r
MIME-Version: 1.0\r
\r
This is a test\r
"""
assert flat_message == expected_message
@pytest.mark.parametrize(
"utf8, cte_type, expected_chunk",
(
(False, "7bit", b"=?utf-8?q?=C3=A5lice?="),
(True, "7bit", b"From: \xc3\[email protected]"),
(False, "8bit", b"=?utf-8?q?=C3=A5lice?="),
(True, "8bit", b"\xc3\[email protected]"),
),
ids=("ascii-7bit", "utf8-7bit", "ascii-8bit", "utf8-8bit"),
)
def test_flatten_message_utf8_options(utf8, cte_type, expected_chunk):
message = EmailMessage()
message["From"] = "å[email protected]"
flat_message = flatten_message(message, utf8=utf8, cte_type=cte_type)
assert expected_chunk in flat_message
def test_flatten_message_removes_bcc_from_message_text():
message = EmailMessage()
message["Bcc"] = "[email protected]"
flat_message = flatten_message(message)
assert flat_message == b"\r\n" # empty message
def test_flatten_resent_message():
message = EmailMessage()
message["To"] = "[email protected]"
message["Cc"] = "[email protected]"
message["Bcc"] = "[email protected]"
message["Subject"] = "Hello, World."
message["From"] = "[email protected]"
message.set_content("This is a test")
message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000"
message["Resent-To"] = "[email protected]"
message["Resent-Cc"] = "[email protected]"
message["Resent-Bcc"] = "[email protected]"
message["Resent-Subject"] = "Fwd: Hello, World."
message["Resent-From"] = "[email protected]"
flat_message = flatten_message(message)
expected_message = b"""To: [email protected]\r
Cc: [email protected]\r
Subject: Hello, World.\r
From: [email protected]\r
Content-Type: text/plain; charset="utf-8"\r
Content-Transfer-Encoding: 7bit\r
MIME-Version: 1.0\r
Resent-Date: Mon, 20 Nov 2017 21:04:27 -0000\r
Resent-To: [email protected]\r
Resent-Cc: [email protected]\r
Resent-Subject: Fwd: Hello, World.\r
Resent-From: [email protected]\r
\r
This is a test\r
"""
assert flat_message == expected_message
@pytest.mark.parametrize(
"mime_to_header,mime_cc_header,compat32_to_header,"
"compat32_cc_header,expected_recipients",
(
(
"Alice Smith <[email protected]>, [email protected]",
"Bob <[email protected]>",
"Alice Smith <[email protected]>, [email protected]",
"Bob <[email protected]>",
["[email protected]", "[email protected]", "[email protected]"],
),
(
Address(display_name="Alice Smith", username="alice", domain="example.com"),
Address(display_name="Bob", username="Bob", domain="example.com"),
Header("Alice Smith <[email protected]>"),
Header("Bob <[email protected]>"),
["[email protected]", "[email protected]"],
),
(
Address(display_name="ålice Smith", username="ålice", domain="example.com"),
Address(display_name="Bøb", username="Bøb", domain="example.com"),
Header("ålice Smith <å[email protected]>"),
Header("Bøb <Bø[email protected]>"),
["å[email protected]", "Bø[email protected]"],
),
(
Address(display_name="ålice Smith", username="alice", domain="example.com"),
Address(display_name="Bøb", username="Bob", domain="example.com"),
Header("ålice Smith <[email protected]>"),
Header("Bøb <[email protected]>"),
["[email protected]", "[email protected]"],
),
),
ids=("str", "ascii", "utf8_address", "utf8_display_name"),
)
def test_extract_recipients(
mime_to_header,
mime_cc_header,
compat32_to_header,
compat32_cc_header,
expected_recipients,
):
mime_message = EmailMessage()
mime_message["To"] = mime_to_header
mime_message["Cc"] = mime_cc_header
mime_recipients = extract_recipients(mime_message)
assert mime_recipients == expected_recipients
compat32_message = Message()
compat32_message["To"] = compat32_to_header
compat32_message["Cc"] = compat32_cc_header
compat32_recipients = extract_recipients(compat32_message)
assert compat32_recipients == expected_recipients
def test_extract_recipients_includes_bcc():
message = EmailMessage()
message["Bcc"] = "[email protected]"
recipients = extract_recipients(message)
assert recipients == [message["Bcc"]]
def test_extract_recipients_invalid_email():
message = EmailMessage()
message["Cc"] = "me"
recipients = extract_recipients(message)
assert recipients == ["me"]
def test_extract_recipients_with_iterable_of_strings():
message = EmailMessage()
message["To"] = ("[email protected]", "you")
recipients = extract_recipients(message)
assert recipients == ["[email protected]", "you"]
def test_extract_recipients_resent_message():
message = EmailMessage()
message["To"] = "[email protected]"
message["Cc"] = "[email protected]"
message["Bcc"] = "[email protected]"
message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000"
message["Resent-To"] = "[email protected]"
message["Resent-Cc"] = "[email protected]"
message["Resent-Bcc"] = "[email protected]"
recipients = extract_recipients(message)
assert message["Resent-To"] in recipients
assert message["Resent-Cc"] in recipients
assert message["Resent-Bcc"] in recipients
assert message["To"] not in recipients
assert message["Cc"] not in recipients
assert message["Bcc"] not in recipients
def test_extract_recipients_valueerror_on_multiple_resent_message():
message = EmailMessage()
message["Resent-Date"] = "Mon, 20 Nov 2016 21:04:27 -0000"
message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000"
with pytest.raises(ValueError):
extract_recipients(message)
@pytest.mark.parametrize(
"mime_header,compat32_header,expected_sender",
(
(
"Alice Smith <[email protected]>",
"Alice Smith <[email protected]>",
"[email protected]",
),
(
Address(display_name="Alice Smith", username="alice", domain="example.com"),
Header("Alice Smith <[email protected]>"),
"[email protected]",
),
(
Address(display_name="ålice Smith", username="ålice", domain="example.com"),
Header("ålice Smith <å[email protected]>", "utf-8"),
"å[email protected]",
),
(
Address(display_name="ålice Smith", username="alice", domain="example.com"),
Header("ålice Smith <[email protected]>", "utf-8"),
"[email protected]",
),
),
ids=("str", "ascii", "utf8_address", "utf8_display_name"),
)
def test_extract_sender(mime_header, compat32_header, expected_sender):
mime_message = EmailMessage()
mime_message["From"] = mime_header
mime_sender = extract_sender(mime_message)
assert mime_sender == expected_sender
compat32_message = Message()
compat32_message["From"] = compat32_header
compat32_sender = extract_sender(compat32_message)
assert compat32_sender == expected_sender
def test_extract_sender_prefers_sender_header():
message = EmailMessage()
message["From"] = "[email protected]"
message["Sender"] = "[email protected]"
sender = extract_sender(message)
assert sender != message["From"]
assert sender == message["Sender"]
def test_extract_sender_resent_message():
message = EmailMessage()
message["From"] = "[email protected]"
message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000"
message["Resent-From"] = "[email protected]"
sender = extract_sender(message)
assert sender == message["Resent-From"]
assert sender != message["From"]
def test_extract_sender_valueerror_on_multiple_resent_message():
message = EmailMessage()
message["Resent-Date"] = "Mon, 20 Nov 2016 21:04:27 -0000"
message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000"
with pytest.raises(ValueError):
extract_sender(message)
|
the-stack_0_7857 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Syscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import P2PInterface
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxuploadtarget=800"]]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
# p2p_conns[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
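        # For reference: 800 * 1024 * 1024 = 838,860,800 bytes per day, minus
        # the 144 * 4,000,000 = 576,000,000 byte buffer reserved for new
        # blocks, leaves roughly 262.9 MB per day for serving old blocks.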
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].sync_with_ping()
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].sync_with_ping()
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_message(getdata_request)
p2p_conns[2].sync_with_ping()
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
self.log.info("Restarting nodes with -whitelist=127.0.0.1")
self.stop_node(0)
self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
self.nodes[0].p2p.send_message(getdata_request)
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
self.log.info("Peer still connected after trying to download old block (whitelisted)")
if __name__ == '__main__':
MaxUploadTest().main()
|
the-stack_0_7859 | import discord
import discord.utils
import discord.ext
import re
import emoji
from redbot.core import commands, Config, bank, checks
class April(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.guild is None:
return
valid = True
valid2 = True
msg = message
author = message.author
valid_user = isinstance(author, discord.Member) and not author.bot
if not valid_user:
return
channels = [926112975813750796, 927783621794877460, 926113551968526376, 926113675419471972,
927518938919735326, 927518938919735326, 927539973459169302, 928689945080627201, 930531314363424808]
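        # The hard-coded IDs above appear to be emoji-only channels: the checks
        # below try to allow messages made up solely of custom or unicode emoji
        # (or attachments) and delete everything else.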
if message.channel.id in channels:
if len(message.attachments) == 0:
x = re.search(r'^<a.*:|<:.*>$', msg.content)
if not x:
valid = False
else:
valid = True
x = re.search(r'>*\s[^\s]*\s<', msg.content)
if x:
valid = False
if valid == False:
for symbol in msg.content:
if symbol not in emoji.UNICODE_EMOJI['en']:
valid2 = False
else:
i = msg.content.replace(symbol, '')
x = re.search(r'^\s*<:.*>\s*$', str(i))
if x:
valid = True
valid2 = True
if valid == False and valid2 == False:
try:
await message.delete()
except discord.HTTPException:
pass
@commands.Cog.listener()
async def on_message_edit(self, _prior, message):
await self.on_message(message)
|
the-stack_0_7862 | # -*- coding: utf-8 -*-
'''
Control the state system on the minion.
State Caching
-------------
When a highstate is called, the minion automatically caches a copy of the last
high data. If you then run a highstate with cache=True it will use that cached
highdata and won't hit the fileserver except for ``salt://`` links in the
states themselves.
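For example:
.. code-block:: bash
    salt '*' state.highstate cache=True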
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tarfile
import tempfile
import time
# Import salt libs
import salt.config
import salt.payload
import salt.state
import salt.utils.args
import salt.utils.data
import salt.utils.event
import salt.utils.files
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.jid
import salt.utils.json
import salt.utils.platform
import salt.utils.state
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
import salt.defaults.exitcodes
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.runners.state import orchestrate as _orchestrate
from salt.utils.odict import OrderedDict
# Import 3rd-party libs
from salt.ext import six
import msgpack
__proxyenabled__ = ['*']
__outputter__ = {
'sls': 'highstate',
'sls_id': 'highstate',
'pkg': 'highstate',
'top': 'highstate',
'single': 'highstate',
'highstate': 'highstate',
'template': 'highstate',
'template_str': 'highstate',
'apply_': 'highstate',
'request': 'highstate',
'check_request': 'highstate',
'run_request': 'highstate',
}
__func_alias__ = {
'apply_': 'apply'
}
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'state'
def __virtual__():
'''
Set the virtualname
'''
# Update global namespace with functions that are cloned in this module
global _orchestrate
_orchestrate = salt.utils.functools.namespaced_function(_orchestrate, globals())
return __virtualname__
def _filter_running(runnings):
'''
Filter out the result: True + no changes data
'''
ret = dict((tag, value) for tag, value in six.iteritems(runnings)
if not value['result'] or value['changes'])
return ret
def _set_retcode(ret, highstate=None):
'''
Set the return code based on the data back from the state system
'''
# Set default retcode to 0
__context__['retcode'] = salt.defaults.exitcodes.EX_OK
if isinstance(ret, list):
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return
if not __utils__['state.check_result'](ret, highstate=highstate):
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_FAILURE
def _get_pillar_errors(kwargs, pillar=None):
'''
Checks all pillars (external and internal) for errors.
Return an error message, if anywhere or None.
:param kwargs: dictionary of options
:param pillar: external pillar
:return: None or an error message
'''
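    # kwargs['force'] skips the check entirely; otherwise errors from the
    # explicitly passed (external) pillar are preferred, falling back to the
    # in-memory __pillar__.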
return None if kwargs.get('force') else (pillar or {}).get('_errors', __pillar__.get('_errors')) or None
def _wait(jid):
'''
Wait for all previously started state jobs to finish running
'''
if jid is None:
jid = salt.utils.jid.gen_jid(__opts__)
states = _prior_running_states(jid)
while states:
time.sleep(1)
states = _prior_running_states(jid)
def _snapper_pre(opts, jid):
'''
Create a snapper pre snapshot
'''
snapper_pre = None
try:
if not opts['test'] and __opts__.get('snapper_states'):
# Run the snapper pre snapshot
snapper_pre = __salt__['snapper.create_snapshot'](
config=__opts__.get('snapper_states_config', 'root'),
snapshot_type='pre',
description='Salt State run for jid {0}'.format(jid),
__pub_jid=jid)
except Exception:
log.error('Failed to create snapper pre snapshot for jid: %s', jid)
return snapper_pre
def _snapper_post(opts, jid, pre_num):
'''
    Create the snapper post snapshot
'''
try:
if not opts['test'] and __opts__.get('snapper_states') and pre_num:
            # Run the snapper post snapshot
__salt__['snapper.create_snapshot'](
config=__opts__.get('snapper_states_config', 'root'),
snapshot_type='post',
pre_number=pre_num,
description='Salt State run for jid {0}'.format(jid),
__pub_jid=jid)
except Exception:
        log.error('Failed to create snapper post snapshot for jid: %s', jid)
def _get_pause(jid, state_id=None):
'''
Return the pause information for a given jid
'''
pause_dir = os.path.join(__opts__['cachedir'], 'state_pause')
pause_path = os.path.join(pause_dir, jid)
if not os.path.exists(pause_dir):
try:
os.makedirs(pause_dir)
except OSError:
# File created in the gap
pass
data = {}
if state_id is not None:
if state_id not in data:
data[state_id] = {}
if os.path.exists(pause_path):
with salt.utils.files.fopen(pause_path, 'rb') as fp_:
data = msgpack.loads(fp_.read())
return data, pause_path
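# The pause file holds a msgpack-encoded dict keyed by state id (or the
# special '__all__' key), for example {'vim': {'duration': 20}} for a timed
# pause or {'__all__': {'kill': True}} after a soft_kill; see pause() and
# soft_kill() below.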
def get_pauses(jid=None):
'''
Get a report on all of the currently paused state runs and pause
run settings.
Optionally send in a jid if you only desire to see a single pause
data set.
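    CLI Examples:
    .. code-block:: bash
        salt '*' state.get_pauses
        salt '*' state.get_pauses 20171130110407769519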
'''
ret = {}
active = __salt__['saltutil.is_running']('state.*')
pause_dir = os.path.join(__opts__['cachedir'], 'state_pause')
if not os.path.exists(pause_dir):
return ret
if jid is None:
jids = os.listdir(pause_dir)
elif isinstance(jid, list):
jids = salt.utils.data.stringify(jid)
else:
jids = [six.text_type(jid)]
for scan_jid in jids:
is_active = False
for active_data in active:
if active_data['jid'] == scan_jid:
is_active = True
if not is_active:
try:
pause_path = os.path.join(pause_dir, scan_jid)
os.remove(pause_path)
except OSError:
# Already gone
pass
continue
data, pause_path = _get_pause(scan_jid)
ret[scan_jid] = data
return ret
def soft_kill(jid, state_id=None):
'''
Set up a state run to die before executing the given state id,
this instructs a running state to safely exit at a given
state id. This needs to pass in the jid of the running state.
If a state_id is not passed then the jid referenced will be safely exited
at the beginning of the next state run.
    The given state id is the id for a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `soft_kill` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.soft_kill 20171130110407769519
salt '*' state.soft_kill 20171130110407769519 vim
'''
jid = six.text_type(jid)
if state_id is None:
state_id = '__all__'
data, pause_path = _get_pause(jid, state_id)
data[state_id]['kill'] = True
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(msgpack.dumps(data))
def pause(jid, state_id=None, duration=None):
'''
Set up a state id pause, this instructs a running state to pause at a given
state id. This needs to pass in the jid of the running state and can
optionally pass in a duration in seconds. If a state_id is not passed then
the jid referenced will be paused at the beginning of the next state run.
    The given state id is the id for a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `pause` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.pause 20171130110407769519
salt '*' state.pause 20171130110407769519 vim
salt '*' state.pause 20171130110407769519 vim 20
'''
jid = six.text_type(jid)
if state_id is None:
state_id = '__all__'
data, pause_path = _get_pause(jid, state_id)
if duration:
data[state_id]['duration'] = int(duration)
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(msgpack.dumps(data))
def resume(jid, state_id=None):
'''
Remove a pause from a jid, allowing it to continue. If the state_id is
    not specified then the general pause will be resumed.
    The given state_id is the id for a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
    The state_id to pass to `resume` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.resume 20171130110407769519
salt '*' state.resume 20171130110407769519 vim
'''
jid = six.text_type(jid)
if state_id is None:
state_id = '__all__'
data, pause_path = _get_pause(jid, state_id)
if state_id in data:
data.pop(state_id)
if state_id == '__all__':
data = {}
with salt.utils.files.fopen(pause_path, 'wb') as fp_:
fp_.write(msgpack.dumps(data))
def orchestrate(mods,
saltenv='base',
test=None,
exclude=None,
pillar=None,
pillarenv=None):
'''
.. versionadded:: 2016.11.0
Execute the orchestrate runner from a masterless minion.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:mod:`Docs for the ``salt`` state module <salt.states.saltmod>`
CLI Examples:
.. code-block:: bash
salt-call --local state.orchestrate webserver
salt-call --local state.orchestrate webserver saltenv=dev test=True
salt-call --local state.orchestrate webserver saltenv=dev pillarenv=aws
'''
return _orchestrate(mods=mods,
saltenv=saltenv,
test=test,
exclude=exclude,
pillar=pillar,
pillarenv=pillarenv)
def running(concurrent=False):
'''
Return a list of strings that contain state return data if a state function
is already running. This function is used to prevent multiple state calls
from being run at the same time.
CLI Example:
.. code-block:: bash
salt '*' state.running
'''
ret = []
if concurrent:
return ret
active = __salt__['saltutil.is_running']('state.*')
for data in active:
err = (
'The function "{0}" is running as PID {1} and was started at '
'{2} with jid {3}'
).format(
data['fun'],
data['pid'],
salt.utils.jid.jid_to_time(data['jid']),
data['jid'],
)
ret.append(err)
return ret
def _prior_running_states(jid):
'''
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
'''
ret = []
active = __salt__['saltutil.is_running']('state.*')
for data in active:
try:
data_jid = int(data['jid'])
except ValueError:
continue
if data_jid < int(jid):
ret.append(data)
return ret
def _check_queue(queue, kwargs):
'''
Utility function to queue the state run if requested
and to check for conflicts in currently running states
'''
if queue:
_wait(kwargs.get('__pub_jid'))
else:
conflict = running(concurrent=kwargs.get('concurrent', False))
if conflict:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return conflict
def _get_initial_pillar(opts):
    '''
    Return the in-memory ``__pillar__`` as the initial pillar when running via
    salt-call with an unchanged pillarenv; otherwise return None.
    '''
    return __pillar__ if __opts__.get('__cli', None) == 'salt-call' \
        and opts['pillarenv'] == __opts__['pillarenv'] \
        else None
def low(data, queue=False, **kwargs):
'''
Execute a single low data call
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
try:
st_ = salt.state.State(__opts__, proxy=__proxy__)
except NameError:
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return err
ret = st_.call(data)
if isinstance(ret, list):
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
if __utils__['state.check_result'](ret):
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_FAILURE
return ret
def _get_test_value(test=None, **kwargs):
'''
Determine the correct value for the test flag.
'''
ret = True
if test is None:
if salt.utils.args.test_mode(test=test, **kwargs):
ret = True
elif __salt__['config.get']('test', omit_opts=True) is True:
ret = True
else:
ret = __opts__.get('test', None)
else:
ret = test
return ret
def high(data, test=None, queue=False, **kwargs):
'''
Execute the compound calls stored in a single set of high data
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
ret = st_.call_high(data)
_set_retcode(ret, highstate=data)
return ret
def template(tem, queue=False, **kwargs):
'''
Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>'
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem,
kwargs.get('saltenv', ''),
'',
None,
local=True)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.call_high(high_state)
_set_retcode(ret, highstate=high_state)
return ret
def template_str(tem, queue=False, **kwargs):
'''
Execute the information stored in a string from an sls template
CLI Example:
.. code-block:: bash
salt '*' state.template_str '<Template String>'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.State(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts))
ret = st_.call_template_str(tem)
_set_retcode(ret)
return ret
def apply_(mods=None, **kwargs):
'''
.. versionadded:: 2015.5.0
This function will call :mod:`state.highstate
<salt.modules.state.highstate>` or :mod:`state.sls
<salt.modules.state.sls>` based on the arguments passed to this function.
It exists as a more intuitive way of applying states.
.. rubric:: APPLYING ALL STATES CONFIGURED IN TOP.SLS (A.K.A. :ref:`HIGHSTATE <running-highstate>`)
To apply all configured states, simply run ``state.apply``:
.. code-block:: bash
salt '*' state.apply
The following additional arguments are also accepted when applying all
states configured in top.sls:
test
Run states in test-only (dry-run) mode
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.apply exclude=bar,baz
salt '*' state.apply exclude=foo*
salt '*' state.apply exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
.. code-block:: bash
salt '*' state.apply localconfig=/path/to/minion.yml
.. rubric:: APPLYING INDIVIDUAL SLS FILES (A.K.A. :py:func:`STATE.SLS <salt.modules.state.sls>`)
To apply individual SLS files, pass them as a comma-separated list:
.. code-block:: bash
# Run the states configured in salt://stuff.sls (or salt://stuff/init.sls)
salt '*' state.apply stuff
# Run the states configured in salt://stuff.sls (or salt://stuff/init.sls)
# and salt://pkgs.sls (or salt://pkgs/init.sls).
salt '*' state.apply stuff,pkgs
# Run the states configured in a more deeply nested directory such as salt://my/organized/stuff.sls (or salt://my/organized/stuff/init.sls)
salt '*' state.apply my.organized.stuff
The following additional arguments are also accepted when applying
individual SLS files:
test
Run states in test-only (dry-run) mode
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
concurrent : False
Execute state runs concurrently instead of serially
.. warning::
This flag is potentially dangerous. It is designed for use when
multiple state runs can safely be run at the same time. Do *not*
use this flag for performance optimization.
saltenv
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for an
``environment`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
.. code-block:: bash
salt '*' state.apply stuff localconfig=/path/to/minion.yml
sync_mods
If specified, the desired custom module types will be synced prior to
running the SLS files:
.. code-block:: bash
salt '*' state.apply stuff sync_mods=states,modules
salt '*' state.apply stuff sync_mods=all
.. note::
This option is ignored when no SLS files are specified, as a
:ref:`highstate <running-highstate>` automatically syncs all custom
module types.
.. versionadded:: 2017.7.8,2018.3.3,2019.2.0
'''
if mods:
return sls(mods, **kwargs)
return highstate(**kwargs)
def request(mods=None,
**kwargs):
'''
.. versionadded:: 2015.5.0
Request that the local admin execute a state run via
`salt-call state.run_request`.
All arguments match those of state.apply.
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request stuff
salt '*' state.request stuff,pkgs
'''
kwargs['test'] = True
ret = apply_(mods, **kwargs)
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
req = check_request()
req.update({kwargs.get('name', 'default'): {
'test_run': ret,
'mods': mods,
'kwargs': kwargs
}
})
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return ret
def check_request(name=None):
'''
.. versionadded:: 2015.5.0
Return the state request information, if any
CLI Example:
.. code-block:: bash
salt '*' state.check_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if os.path.isfile(notify_path):
with salt.utils.files.fopen(notify_path, 'rb') as fp_:
req = serial.load(fp_)
if name:
return req[name]
return req
return {}
def clear_request(name=None):
'''
.. versionadded:: 2015.5.0
Clear out the state execution request without executing it
CLI Example:
.. code-block:: bash
salt '*' state.clear_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if not os.path.isfile(notify_path):
return True
if not name:
try:
os.remove(notify_path)
except (IOError, OSError):
pass
else:
req = check_request()
if name in req:
req.pop(name)
else:
return False
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return True
def run_request(name='default', **kwargs):
'''
.. versionadded:: 2015.5.0
Execute the pending state request
CLI Example:
.. code-block:: bash
salt '*' state.run_request
'''
req = check_request()
if name not in req:
return {}
n_req = req[name]
if 'mods' not in n_req or 'kwargs' not in n_req:
return {}
req[name]['kwargs'].update(kwargs)
if 'test' in n_req['kwargs']:
n_req['kwargs'].pop('test')
if req:
ret = apply_(n_req['mods'], **n_req['kwargs'])
try:
os.remove(os.path.join(__opts__['cachedir'], 'req_state.p'))
except (IOError, OSError):
pass
return ret
return {}
def highstate(test=None, queue=False, **kwargs):
'''
Retrieve the state data from the salt master for this minion and execute it
test
Run states in test-only (dry-run) mode
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.highstate stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG
renderer. See :ref:`here <encrypted-cli-pillar-data>` for details.
pillar_enc
Specify which renderer to use to decrypt encrypted data located within
the ``pillar`` value. Currently, only ``gpg`` is supported.
.. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``.
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for a
``saltenv`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
CLI Examples:
.. code-block:: bash
salt '*' state.highstate
salt '*' state.highstate whitelist=sls1_to_run,sls2_to_run
salt '*' state.highstate exclude=sls_to_exclude
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
salt '*' state.highstate pillar="{foo: 'Foo!', bar: 'Bar!'}"
'''
if _disabled(['highstate']):
log.debug('Salt highstate run is disabled. To re-enable, run state.enable highstate')
ret = {
'name': 'Salt highstate run is disabled. To re-enable, run state.enable highstate',
'result': 'False',
'comment': 'Disabled'
}
return ret
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
if 'saltenv' in kwargs:
opts['saltenv'] = kwargs['saltenv']
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.HighState(opts,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ['Pillar failed to render with the following messages:'] + errors
st_.push_active()
orchestration_jid = kwargs.get('orchestration_jid')
    snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally'))
try:
ret = st_.call_highstate(
exclude=kwargs.get('exclude', []),
cache=kwargs.get('cache', None),
cache_name=kwargs.get('cache_name', 'highstate'),
force=kwargs.get('force', False),
whitelist=kwargs.get('whitelist'),
orchestration_jid=orchestration_jid)
finally:
st_.pop_active()
if isinstance(ret, dict) and (__salt__['config.option']('state_data', '') == 'terse' or
kwargs.get('terse')):
ret = _filter_running(ret)
_set_retcode(ret, highstate=st_.building_highstate)
    _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
return ret
def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
'''
Execute the states in one or more SLS files
test
Run states in test-only (dry-run) mode
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.sls stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override existing Pillar values set via
``pillar_roots`` or an external Pillar source. Pillar values that
are not included in the kwarg will not be overwritten.
.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG
renderer. See :ref:`here <encrypted-cli-pillar-data>` for details.
pillar_enc
Specify which renderer to use to decrypt encrypted data located within
the ``pillar`` value. Currently, only ``gpg`` is supported.
.. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.sls foo,bar,baz exclude=bar,baz
salt '*' state.sls foo,bar,baz exclude=ba*
salt '*' state.sls foo,bar,baz exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
concurrent : False
Execute state runs concurrently instead of serially
.. warning::
This flag is potentially dangerous. It is designed for use when
multiple state runs can safely be run at the same time. Do *not*
use this flag for performance optimization.
saltenv
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``.
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for an
``environment`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
sync_mods
If specified, the desired custom module types will be synced prior to
running the SLS files:
.. code-block:: bash
salt '*' state.sls stuff sync_mods=states,modules
salt '*' state.sls stuff sync_mods=all
.. versionadded:: 2017.7.8,2018.3.3,2019.2.0
CLI Example:
.. code-block:: bash
# Run the states configured in salt://example.sls (or salt://example/init.sls)
salt '*' state.apply example
# Run the states configured in salt://core.sls (or salt://core/init.sls)
# and salt://edit/vim.sls (or salt://edit/vim/init.sls)
salt '*' state.sls core,edit.vim
# Run the states configured in a more deeply nested directory such as salt://my/nested/state.sls (or salt://my/nested/state/init.sls)
salt '*' state.sls my.nested.state
salt '*' state.sls core exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
salt '*' state.sls myslsfile pillar="{foo: 'Foo!', bar: 'Bar!'}"
'''
concurrent = kwargs.get('concurrent', False)
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
# Modification to __opts__ lost after this if-else
if queue:
_wait(kwargs.get('__pub_jid'))
else:
conflict = running(concurrent)
if conflict:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return conflict
if isinstance(mods, list):
disabled = _disabled(mods)
else:
disabled = _disabled([mods])
if disabled:
for state in disabled:
log.debug(
'Salt state %s is disabled. To re-enable, run '
'state.enable %s', state, state
)
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return disabled
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
# Since this is running a specific SLS file (or files), fall back to the
# 'base' saltenv if none is configured and none was passed.
if opts['saltenv'] is None:
opts['saltenv'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
serial = salt.payload.Serial(__opts__)
cfn = os.path.join(
__opts__['cachedir'],
'{0}.cache.p'.format(kwargs.get('cache_name', 'highstate'))
)
if sync_mods is True:
sync_mods = ['all']
if sync_mods is not None:
sync_mods = salt.utils.args.split_input(sync_mods)
else:
sync_mods = []
if 'all' in sync_mods and sync_mods != ['all']:
# Prevent unnecessary extra syncing
sync_mods = ['all']
for module_type in sync_mods:
try:
__salt__['saltutil.sync_{0}'.format(module_type)](
saltenv=opts['saltenv']
)
except KeyError:
log.warning(
'Invalid custom module type \'%s\', ignoring',
module_type
)
try:
st_ = salt.state.HighState(opts,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ['Pillar failed to render with the following messages:'] + errors
orchestration_jid = kwargs.get('orchestration_jid')
with salt.utils.files.set_umask(0o077):
if kwargs.get('cache'):
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, 'rb') as fp_:
high_ = serial.load(fp_)
return st_.state.call_high(high_, orchestration_jid)
# If the state file is an integer, convert to a string then to unicode
if isinstance(mods, six.integer_types):
mods = salt.utils.stringutils.to_unicode(str(mods)) # future lint: disable=blacklisted-function
mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts['saltenv']: mods})
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
if exclude:
exclude = salt.utils.args.split_input(exclude)
if '__exclude__' in high_:
high_['__exclude__'].extend(exclude)
else:
high_['__exclude__'] = exclude
            snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally'))
ret = st_.state.call_high(high_, orchestration_jid)
finally:
st_.pop_active()
if __salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse'):
ret = _filter_running(ret)
cache_file = os.path.join(__opts__['cachedir'], 'sls.p')
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False)
with salt.utils.files.fopen(cache_file, 'w+b') as fp_:
serial.dump(ret, fp_)
except (IOError, OSError):
log.error(
'Unable to write to SLS cache file %s. Check permission.',
cache_file
)
_set_retcode(ret, high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
try:
with salt.utils.files.fopen(cfn, 'w+b') as fp_:
try:
serial.dump(high_, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
log.error(
'Unable to write to highstate cache file %s. Do you have permissions?',
cfn
)
    _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre)
return ret
def top(topfn, test=None, queue=False, **kwargs):
'''
Execute a specific top file instead of the default. This is useful to apply
configurations from a different environment (for example, dev or prod), without
modifying the default top file.
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
saltenv
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' state.top reverse_top.sls
salt '*' state.top prod_top.sls exclude=sls_to_exclude
salt '*' state.top dev_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
context=__context__,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ['Pillar failed to render with the following messages:'] + errors
st_.push_active()
st_.opts['state_top'] = salt.utils.url.create(topfn)
ret = {}
orchestration_jid = kwargs.get('orchestration_jid')
if 'saltenv' in kwargs:
st_.opts['state_top_saltenv'] = kwargs['saltenv']
try:
        snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally'))
ret = st_.call_highstate(
exclude=kwargs.get('exclude', []),
cache=kwargs.get('cache', None),
cache_name=kwargs.get('cache_name', 'highstate'),
orchestration_jid=orchestration_jid)
finally:
st_.pop_active()
_set_retcode(ret, highstate=st_.building_highstate)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
    _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre)
__opts__['test'] = orig_test
return ret
def show_highstate(queue=False, **kwargs):
'''
Retrieve the highstate data from the salt master and display it
Custom Pillar data can be passed with the ``pillar`` kwarg.
CLI Example:
.. code-block:: bash
salt '*' state.show_highstate
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
try:
ret = st_.compile_highstate()
finally:
st_.pop_active()
_set_retcode(ret)
return ret
def show_lowstate(queue=False, **kwargs):
'''
List out the low data that will be applied to this minion
CLI Example:
.. code-block:: bash
salt '*' state.show_lowstate
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
try:
ret = st_.compile_low_chunks()
finally:
st_.pop_active()
return ret
def show_state_usage(queue=False, **kwargs):
'''
Retrieve the highstate data from the salt master to analyse used and unused states
Custom Pillar data can be passed with the ``pillar`` kwarg.
CLI Example:
.. code-block:: bash
salt '*' state.show_state_usage
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
st_.push_active()
try:
ret = st_.compile_state_usage()
finally:
st_.pop_active()
_set_retcode(ret)
return ret
def show_states(queue=False, **kwargs):
'''
Returns the list of states that will be applied on highstate.
CLI Example:
.. code-block:: bash
salt '*' state.show_states
.. versionadded:: 2019.2.0
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
states = OrderedDict()
try:
result = st_.compile_low_chunks()
if not isinstance(result, list):
raise Exception(result)
for s in result:
if not isinstance(s, dict):
_set_retcode(result)
return result
states[s['__sls__']] = True
finally:
st_.pop_active()
return list(states.keys())
def sls_id(id_, mods, test=None, queue=False, **kwargs):
'''
Call a single ID from the named module(s) and handle all requisites
The state ID comes *before* the module ID(s) on the command line.
id
ID to call
mods
Comma-delimited list of modules to search for given id and its requisites
.. versionadded:: 2014.7.0
saltenv : base
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}'
.. note::
Values passed this way will override existing Pillar values set via
``pillar_roots`` or an external Pillar source. Pillar values that
are not included in the kwarg will not be overwritten.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' state.sls_id my_state my_module
salt '*' state.sls_id my_state my_module,a_common_module
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
# Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts['saltenv'] is None:
opts['saltenv'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ['Pillar failed to render with the following messages:'] + errors
split_mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts['saltenv']: split_mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Apply requisites to high data
high_, req_in_errors = st_.state.requisite_in(high_)
if req_in_errors:
# This if statement should not be necessary if there were no errors,
# but it is required to get the unit tests to pass.
errors.extend(req_in_errors)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
chunks = st_.state.compile_high_data(high_)
ret = {}
for chunk in chunks:
if chunk.get('__id__', '') == id_:
ret.update(st_.state.call_chunk(chunk, {}, chunks))
    _set_retcode(ret, highstate=high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
if not ret:
raise SaltInvocationError(
'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
'\'{2}\''.format(id_, mods, opts['saltenv'])
)
return ret
def show_low_sls(mods, test=None, queue=False, **kwargs):
'''
Display the low data from a specific sls. The default environment is
``base``, use ``saltenv`` to specify a different environment.
saltenv
Specify a salt fileserver environment to be used when applying states
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.show_low_sls stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
CLI Example:
.. code-block:: bash
salt '*' state.show_low_sls foo
salt '*' state.show_low_sls foo saltenv=dev
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['saltenv'] is None:
opts['saltenv'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.HighState(opts,
pillar_override,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts['saltenv']: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.compile_high_data(high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
return ret
def show_sls(mods, test=None, queue=False, **kwargs):
'''
Display the state data from a specific sls or list of sls files on the
master. The default environment is ``base``, use ``saltenv`` to specify a
different environment.
This function does not support topfiles. For ``top.sls`` please use
``show_top`` instead.
Custom Pillar data can be passed with the ``pillar`` kwarg.
saltenv
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
CLI Example:
.. code-block:: bash
salt '*' state.show_sls core,edit.vim saltenv=dev
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['saltenv'] is None:
opts['saltenv'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts['saltenv']: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
return high_
def sls_exists(mods, test=None, queue=False, **kwargs):
'''
    Tests for the existence of a specific SLS or list of SLS files on the
    master. Similar to :py:func:`state.show_sls <salt.modules.state.show_sls>`,
    but rather than returning state details, returns True or False. The default
environment is ``base``, use ``saltenv`` to specify a different environment.
.. versionadded:: 2019.2.0
saltenv
Specify a salt fileserver environment from which to look for the SLS files
specified in the ``mods`` argument
CLI Example:
.. code-block:: bash
salt '*' state.sls_exists core,edit.vim saltenv=dev
'''
return isinstance(
show_sls(mods, test=test, queue=queue, **kwargs),
dict
)
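# Illustrative cross-call (an assumed usage pattern, not part of the upstream
# file): other execution modules or Jinja-rendered SLS files can gate logic on
# whether an SLS renders, e.g.
#   if __salt__['state.sls_exists']('core.edit.vim', saltenv='dev'):
#       ...
# sls_exists() simply wraps show_sls() and reports whether the result is a dict.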
def id_exists(ids, mods, test=None, queue=False, **kwargs):
'''
Tests for the existence of a specific ID or list of IDs within the
specified SLS file(s). Similar to :py:func:`state.sls_exists
<salt.modules.state.sls_exists>`, returns True or False. The default
    environment is ``base``, use ``saltenv`` to specify a different environment.
.. versionadded:: 2019.2.0
saltenv
Specify a salt fileserver environment from which to look for the SLS files
specified in the ``mods`` argument
CLI Example:
.. code-block:: bash
salt '*' state.id_exists create_myfile,update_template filestate saltenv=dev
'''
ids = salt.utils.args.split_input(ids)
ids = set(ids)
sls_ids = set(x['__id__'] for x in show_low_sls(mods, test=test, queue=queue, **kwargs))
return ids.issubset(sls_ids)
def show_top(queue=False, **kwargs):
'''
Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
errors = []
top_ = st_.get_top()
errors += st_.verify_tops(top_)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
matches = st_.top_matches(top_)
return matches
def single(fun, name, test=None, queue=False, **kwargs):
'''
    Execute a single state function with the named kwargs; returns False if
    insufficient data is sent to the command.
    By default, the values of the kwargs will be parsed as YAML. So, you can
    specify list values, or lists of single-entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
comps = fun.split('.')
if len(comps) < 2:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return 'Invalid function passed'
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
orig_test = __opts__.get('test', None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts['test'] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return err
st_._mod_init(kwargs)
    snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally'))
ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs):
st_.call(kwargs)}
_set_retcode(ret)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
    _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre)
__opts__['test'] = orig_test
return ret
def clear_cache():
'''
Clear out cached state files, forcing even cache runs to refresh the cache
on the next state execution.
    Remember that the state cache is completely disabled by default; this
    function only has an effect if cache=True is used in states.
CLI Example:
.. code-block:: bash
salt '*' state.clear_cache
'''
ret = []
for fn_ in os.listdir(__opts__['cachedir']):
if fn_.endswith('.cache.p'):
path = os.path.join(__opts__['cachedir'], fn_)
if not os.path.isfile(path):
continue
os.remove(path)
ret.append(fn_)
return ret
def pkg(pkg_path,
pkg_sum,
hash_type,
test=None,
**kwargs):
'''
    Execute a packaged state run. The packaged state run will exist in a
    tarball available locally. This packaged state can be generated using
    salt-ssh.
CLI Example:
.. code-block:: bash
salt '*' state.pkg /tmp/salt_state.tgz 760a9353810e36f6d81416366fc426dc md5
'''
# TODO - Add ability to download from salt master or other source
popts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
if not os.path.isfile(pkg_path):
return {}
if not salt.utils.hashutils.get_hash(pkg_path, hash_type) == pkg_sum:
return {}
root = tempfile.mkdtemp()
s_pkg = tarfile.open(pkg_path, 'r:gz')
# Verify that the tarball does not extract outside of the intended root
members = s_pkg.getmembers()
for member in members:
if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, '..{0}'.format(os.sep))):
return {}
elif '..{0}'.format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
return {}
s_pkg.extractall(root)
s_pkg.close()
lowstate_json = os.path.join(root, 'lowstate.json')
with salt.utils.files.fopen(lowstate_json, 'r') as fp_:
lowstate = salt.utils.json.load(fp_)
# Check for errors in the lowstate
for chunk in lowstate:
if not isinstance(chunk, dict):
return lowstate
pillar_json = os.path.join(root, 'pillar.json')
if os.path.isfile(pillar_json):
with salt.utils.files.fopen(pillar_json, 'r') as fp_:
pillar_override = salt.utils.json.load(fp_)
else:
pillar_override = None
roster_grains_json = os.path.join(root, 'roster_grains.json')
if os.path.isfile(roster_grains_json):
with salt.utils.files.fopen(roster_grains_json, 'r') as fp_:
roster_grains = salt.utils.json.load(fp_)
if os.path.isfile(roster_grains_json):
popts['grains'] = roster_grains
popts['fileclient'] = 'local'
popts['file_roots'] = {}
popts['test'] = _get_test_value(test, **kwargs)
envs = os.listdir(root)
for fn_ in envs:
full = os.path.join(root, fn_)
if not os.path.isdir(full):
continue
popts['file_roots'][fn_] = [full]
st_ = salt.state.State(popts, pillar_override=pillar_override)
    snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called locally'))
ret = st_.call_chunks(lowstate)
ret = st_.call_listen(lowstate, ret)
try:
shutil.rmtree(root)
except (IOError, OSError):
pass
_set_retcode(ret)
    _snapper_post(popts, kwargs.get('__pub_jid', 'called locally'), snapper_pre)
return ret
def disable(states):
'''
Disable state runs.
CLI Example:
.. code-block:: bash
salt '*' state.disable highstate
salt '*' state.disable highstate,test.succeed_without_changes
.. note::
        To disable a state file from running, provide the same name that would
        be passed in a state.sls call.
salt '*' state.disable bind.config
'''
ret = {
'res': True,
'msg': ''
}
states = salt.utils.args.split_input(states)
msg = []
_disabled = __salt__['grains.get']('state_runs_disabled')
if not isinstance(_disabled, list):
_disabled = []
_changed = False
for _state in states:
if _state in _disabled:
msg.append('Info: {0} state already disabled.'.format(_state))
else:
msg.append('Info: {0} state disabled.'.format(_state))
_disabled.append(_state)
_changed = True
if _changed:
__salt__['grains.setval']('state_runs_disabled', _disabled)
ret['msg'] = '\n'.join(msg)
# refresh the grains
__salt__['saltutil.refresh_modules']()
return ret
def enable(states):
'''
Enable state function or sls run
CLI Example:
.. code-block:: bash
salt '*' state.enable highstate
salt '*' state.enable test.succeed_without_changes
.. note::
        To re-enable a state file that has been disabled, provide the same name
        that would be passed in a state.sls call.
        salt '*' state.enable bind.config
'''
ret = {
'res': True,
'msg': ''
}
states = salt.utils.args.split_input(states)
log.debug('states %s', states)
msg = []
_disabled = __salt__['grains.get']('state_runs_disabled')
if not isinstance(_disabled, list):
_disabled = []
_changed = False
for _state in states:
log.debug('_state %s', _state)
if _state not in _disabled:
msg.append('Info: {0} state already enabled.'.format(_state))
else:
msg.append('Info: {0} state enabled.'.format(_state))
_disabled.remove(_state)
_changed = True
if _changed:
__salt__['grains.setval']('state_runs_disabled', _disabled)
ret['msg'] = '\n'.join(msg)
# refresh the grains
__salt__['saltutil.refresh_modules']()
return ret
def list_disabled():
'''
List the states which are currently disabled
CLI Example:
.. code-block:: bash
salt '*' state.list_disabled
'''
return __salt__['grains.get']('state_runs_disabled')
def _disabled(funs):
'''
Return messages for disabled states
that match state functions in funs.
'''
ret = []
_disabled = __salt__['grains.get']('state_runs_disabled')
for state in funs:
for _state in _disabled:
if '.*' in _state:
target_state = _state.split('.')[0]
target_state = target_state + '.' if not target_state.endswith('.') else target_state
if state.startswith(target_state):
err = (
'The state file "{0}" is currently disabled by "{1}", '
'to re-enable, run state.enable {1}.'
).format(
state,
_state,
)
ret.append(err)
continue
else:
if _state == state:
err = (
'The state file "{0}" is currently disabled, '
'to re-enable, run state.enable {0}.'
).format(
_state,
)
ret.append(err)
continue
return ret
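# Behaviour sketch (illustrative, not part of the upstream file): with
# state_runs_disabled = ['bind.*'], _disabled(['bind.config']) returns a message
# because 'bind.config' starts with 'bind.'; an exact entry such as
# 'bind.config' only blocks that one SLS name.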
def event(tagmatch='*',
count=-1,
quiet=False,
sock_dir=None,
pretty=False,
node='minion'):
r'''
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2016.3.0
.. versionchanged:: 2019.2.0
``tagmatch`` can now be either a glob or regular expression.
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this glob or regular expression.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
:param node: Watch the minion-side or master-side event bus.
CLI Example:
.. code-block:: bash
salt-call --local state.event pretty=True
'''
with salt.utils.event.get_event(
node,
sock_dir or __opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True) as sevent:
while True:
ret = sevent.get_event(full=True, auto_reconnect=True)
if ret is None:
continue
if salt.utils.stringutils.expr_match(ret['tag'], tagmatch):
if not quiet:
salt.utils.stringutils.print_cli(
str('{0}\t{1}').format( # future lint: blacklisted-function
salt.utils.stringutils.to_str(ret['tag']),
salt.utils.json.dumps(
ret['data'],
sort_keys=pretty,
indent=None if not pretty else 4)
)
)
sys.stdout.flush()
if count > 0:
count -= 1
log.debug('Remaining event matches: %s', count)
if count == 0:
break
else:
log.debug('Skipping event tag: %s', ret['tag'])
continue
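# Illustrative shell usage (an assumed pattern, not copied from the upstream
# docs): block until one matching job-return event arrives, then act on it:
#   salt-call --local state.event 'salt/job/*/ret/*' count=1 quiet=True \
#       && echo 'a job returned'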
|
the-stack_0_7864 | #!/usr/bin/env python3
# Author: C.K
# Email: [email protected]
# DateTime:2021-08-10 21:45:28
# Description:
import os, sys
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
mapping = {
'2': 'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz'
}
if len(digits) == 0:
return []
if len(digits) == 1:
return list(mapping[digits[0]])
prev = self.letterCombinations(digits[:-1])
additional = mapping[digits[-1]]
return [s + c for s in prev for c in additional]
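# Quick sanity check (illustrative only, not part of the original file):
#   Solution().letterCombinations("23")
#   -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']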
if __name__ == "__main__":
pass
|
the-stack_0_7866 | import unittest
import atheris
import atheris_libprotobuf_mutator
from atheris import fuzz_test_lib
from google.protobuf import wrappers_pb2
@atheris.instrument_func
def simple_proto_comparison(msg):
if msg.value == "abc":
raise RuntimeError("Solved")
class AtherisLibprotobufMutatorTests(unittest.TestCase):
def testSimpleProtoComparison(self):
fuzz_test_lib.run_fuzztest(
simple_proto_comparison,
custom_setup=atheris_libprotobuf_mutator.Setup,
setup_kwargs={"proto": wrappers_pb2.StringValue},
expected_output=b"Solved",
timeout=60)
if __name__ == "__main__":
unittest.main()
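# Running this module directly (illustrative note): the unittest drives the
# fuzz target via atheris/libFuzzer until an input of "abc" triggers
# RuntimeError("Solved"), which run_fuzztest matches against expected_output,
# or the 60-second timeout elapses.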
|
the-stack_0_7867 | import logging.handlers
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)s][%(process)d][%(thread)d][%(levelname)-5s][%(filename)s:%(lineno)d][%(funcName)s]: %(message)s',
'datefmt': '%Y/%m/%d %H:%M:%S',
},
'simple': {
'format': '[%(asctime)s][%(levelname)s] %(message)s',
'datefmt': '%Y/%m/%d %H:%M:%S',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'socket': {
'level': 'INFO',
'class': 'logging.handlers.SocketHandler',
'formatter': 'verbose',
'host': 'localhost',
'port': logging.handlers.DEFAULT_TCP_LOGGING_PORT,
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'debug.log',
'formatter': 'verbose',
},
},
'loggers': {
'jsea_blog': {
'handlers': ['console', 'socket'],
'level': 'DEBUG',
'propagate': True,
},
'django.request': {
'handlers': ['console', 'socket'],
'level': 'DEBUG',
},
},
}
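# Example of how application code would pick up this config (an assumed usage
# sketch, not part of the original settings module):
#   import logging
#   logger = logging.getLogger('jsea_blog')
#   logger.info('post saved')     # emitted by both the console and socket handlers
#   logger.debug('verbose note')  # accepted by the logger but dropped by the
#                                 # INFO-level console/socket handlers
# Django applies this dict via the LOGGING setting; outside Django it could be
# applied with logging.config.dictConfig(LOGGING).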
|
the-stack_0_7868 | from django.urls import path
from . import views
app_name = 'events'
urlpatterns = [
path('', views.EventListView.as_view(), name='all'),
path('<int:pk>/', views.EventView.as_view(), name='details'),
path('<int:pk>/edit/', views.EventUpdateView.as_view(), name='edit'),
path('<int:pk>/attended/', views.EventAttendeeEditView.as_view(), name='attended'),
path('new', views.EventCreateView.as_view(), name='new'),
path('<int:pk>/delete/', views.EventDeleteView.as_view(), name='delete'),
path('<int:event_id>/register/', views.register_on_event, name="register")
]
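# Illustrative reverse lookups using the 'events' namespace (assumed usage, not
# part of the original module); resulting paths are relative to wherever
# events.urls is included in the project URLconf:
#   from django.urls import reverse
#   reverse('events:all')                 # -> '.../'
#   reverse('events:details', args=[42])  # -> '.../42/'
#   reverse('events:register', args=[42]) # -> '.../42/register/'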
|
the-stack_0_7869 | import argparse
import os
from tqdm import tqdm
import pandas as pd
import logging
from src.utils.common import read_yaml, create_directories
from src.stage_01_get_data import main as loader_main
from sklearn.metrics import confusion_matrix, f1_score
import numpy as np
import warnings
import torch
STAGE = "STAGE_NAME" ## <<< change stage name
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s: %(levelname)s: %(module)s]: %(message)s")
file_handler = logging.FileHandler(os.path.join("logs", "running_logs.log"))
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
warnings.filterwarnings('ignore')
def main(config_path):
## read config files
config = read_yaml(config_path)
train_data_loader, test_data_loader, labels_dict = loader_main(config_path)
pred = np.array([])
target = np.array([])
prediction_data_dir = config['data']['PRED_DATA_DIR']
create_directories([prediction_data_dir])
prediction_data_file_name = config['data']['PRED_DATA_FILE_NAME']
prediction_data_file_path = os.path.join(prediction_data_dir, prediction_data_file_name)
model_dir = config['artifacts']['model_dir']
trained_model_name = config['artifacts']['trained_model_name']
trained_model_path = os.path.join(model_dir, trained_model_name)
model = torch.load(trained_model_path)
logger.info(f"trained model loaded")
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model.to(DEVICE)
logger.info(f"trained model loaded into {DEVICE}")
with torch.no_grad():
for batch, data in enumerate(test_data_loader):
images = data[0].to(DEVICE)
labels = data[1].to(DEVICE)
y_pred = model(images)
pred = np.concatenate((pred, torch.argmax(y_pred, 1).cpu().numpy()))
target = np.concatenate((target, labels.cpu().numpy()))
logger.info("prediction for test data finished")
df = pd.DataFrame({"Actual":target, "Prediction":pred})
df.to_csv(prediction_data_file_path)
logger.info(f"saved prediction results into {prediction_data_file_path}")
cm = confusion_matrix(target, pred)
print(cm)
fs = f1_score(target, pred, average=None)
print(fs)
logger.info(fs)
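# Typical invocation of this stage (illustrative; the script name below is a
# placeholder, only the default config path comes from the argparse setup):
#   python <this_stage_script>.py --config configs/config.yaml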
if __name__ == '__main__':
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/config.yaml")
parsed_args = args.parse_args()
try:
logger.info("\n********************")
logger.info(f">>>>> stage {STAGE} started <<<<<")
main(config_path=parsed_args.config)
logger.info(f">>>>> stage {STAGE} completed!<<<<<\n")
except Exception as e:
logger.exception(e)
raise e |
the-stack_0_7870 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Replication(Resource):
"""An object that represents a replication for a container registry.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: Required. The location of the resource. This cannot be
changed after the resource is created.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:ivar provisioning_state: The provisioning state of the replication at the
time the operation was called. Possible values include: 'Creating',
'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2019_04_01.models.ProvisioningState
:ivar status: The status of the replication at the time the operation was
called.
:vartype status: ~azure.mgmt.containerregistry.v2019_04_01.models.Status
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'Status'},
}
def __init__(self, **kwargs):
super(Replication, self).__init__(**kwargs)
self.provisioning_state = None
self.status = None
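# Illustrative construction (an assumed usage sketch, not part of the generated
# SDK file): only 'location' is required; read-only fields such as
# provisioning_state and status are populated by the service on responses.
#   replication = Replication(location='westus', tags={'env': 'prod'})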
|
the-stack_0_7871 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain,
UserCancelled, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
decimal_point_to_base_unit_name,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs)
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.util import PR_PAID, PR_FAILED
from electrum.util import pr_expiration_values
from electrum.lnutil import ln_dummy_address
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
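    # The upstream file defines a top_level_window_recurse() helper here, which
    # top_level_window() below relies on. A minimal sketch with the assumed
    # behaviour (descend into visible modal child dialogs) is included so the
    # excerpt stays self-consistent:
    def top_level_window_recurse(self, window=None, test_func=None):
        window = window or self
        classes = (WindowModalDialog, QMessageBox)
        if test_func is None:
            test_func = lambda x: True
        for child in window.children():
            # only recurse into visible modal dialogs/message boxes that pass test_func
            if isinstance(child, classes) and child.isVisible() and test_func(child):
                return self.top_level_window_recurse(child, test_func)
        return window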
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker and wallet.network:
wallet.network.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_lightning_backup(self):
if self.wallet.is_lightning_backup():
msg = '\n\n'.join([
_("This file is a backup of a lightning wallet."),
_("You will not be able to perform lightning payments using this file, and the lightning balance displayed in this wallet might be outdated.") + ' ' + \
_("If you have lost the original wallet file, you can use this file to trigger a forced closure of your channels."),
_("Do you want to have your channels force-closed?")
])
if self.question(msg, title=_('Lightning Backup')):
self.network.maybe_init_lightning()
self.wallet.lnworker.start_network(self.network)
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
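        # Sanity-check the stored value: it must be a sortable list of paths
        # (e.g. not corrupted config data); otherwise fall back to an empty history.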
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
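        # Drain the queue of incoming transactions and show a system-tray
        # notification, rate-limited to one message per `rate_limit` seconds;
        # three or more transactions are summarized into a single message.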
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
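        # Keep the BTC and fiat amount edits synchronized in both directions
        # using the current exchange rate. The 'follows' flag prevents the two
        # textChanged handlers from re-triggering each other in a loop.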
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
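            # "_fork" selects the alternative status icons shown while more than
            # one blockchain (i.e. a fork) is being followed.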
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
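        # Rebuilding the tab views is comparatively expensive, so it is deferred
        # until the wallet is fully synchronized (or we are offline anyway).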
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
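        # note: amount_e and fiat_send_e belong to the send tab; this assumes the
        # send tab has already been created at this point.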
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
        if title is None:
            tooltip_text = _("Text copied to clipboard")
        else:
            tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
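        # Manually editing either amount field cancels "Max" (send-all) mode.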
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
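    # The @protected decorator prompts for the wallet password (when one is set)
    # and passes it to the wrapped method as its 'password' argument.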
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice, amount_sat=None):
attempts = LN_NUM_PAYMENT_ATTEMPTS
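        # Run the payment on the wallet's background task thread so the GUI is not blocked.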
def task():
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key):
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, key, description=None):
self.show_message(_('Payment succeeded'))
self.need_update.set()
def on_payment_failed(self, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
invoice_dict = self.wallet.lnworker.parse_bech32_invoice(invoice)
if invoice_dict.get('amount') is None:
amount = self.amount_e.get_amount()
if amount:
invoice_dict['amount'] = amount
else:
self.show_error(_('No amount'))
return
return invoice_dict
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'], amount_sat=invoice['amount'])
elif invoice['type'] == PR_TYPE_ONCHAIN:
outputs = invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
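        # '!' as an output value means "spend max"; allowing it on more than one
        # output would be ambiguous, so reject that case.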
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
self.show_message(_('Not Enough Funds'))
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
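        # Fund the channel only from coins the network already knows about:
        # nonlocal_only excludes outputs of local (not yet broadcast) transactions.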
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
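        # If the URI carries a payment request ('r' per BIP72, or signed request
        # data), it is processed asynchronously and delivered via the on_pr
        # callback passed to parse_URI above.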
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error('Cannot find payment request in wallet.')
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr: 'paymentrequest.PaymentRequest'):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x.value)+ self.base_unit() + ' @ ' + x.address, pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
# note: "delete" disabled as invoice is saved with a different key in wallet.invoices that we do not have here
# def do_delete():
# if self.question(_('Delete invoice?')):
# self.wallet.delete_invoice(key)
# self.history_list.update()
# self.invoice_list.update()
# d.close()
# deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, CloseButton(d)))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
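            # Bind the command name and inject the wallet plus an interactive
            # password prompt, so console commands can ask for the password when needed.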
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if not self.network.is_lightning_running():
return
cur, total = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_percent = 0
progress_str = "??%"
if cur is not None and total is not None and total > 0:
# note: Progress is rescaled such that 95% is considered "done".
# "Real" progress can stay around 98-99% for a long time, which
# might needlessly worry users.
progress_percent = (1.0 / 0.95 * cur / total) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
progress_str = f"{progress_percent}%"
if progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
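        # XPUB_PASSWORD: the storage encryption key is derived from the hardware
        # device (see get_password_for_storage_encryption), so there is no
        # user-chosen password to change, only the encrypt-file toggle.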
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
        warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must back up your wallet file every time you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.can_have_lightning():
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(QLabel(_('Lightning')), 5, 0)
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
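    # Editor's note on the export format produced above (illustrative, no real
    # keys shown): the CSV branch writes a header row "address,private_key"
    # followed by one row per address, with the address right-aligned to 34
    # characters; the JSON branch instead dumps a single
    # {address: private_key} mapping with indent=4.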
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
scriptpubkey = bfh(bitcoin.address_to_script(addr))
outputs = [PartialTxOutput(scriptpubkey=scriptpubkey, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
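        # Editor's worked example for the helper above (all numbers are
        # assumptions, not Electrum defaults): with fee_per_kb = 10000 sat/kvB,
        # total_size = 300 vbytes and parent_fee = 1500 sat, the suggested child
        # fee is 10000 * 300 / 1000 - 1500 = 1500 sat, so parent and child
        # together pay 3000 sat over 300 vbytes, i.e. 10 sat/vbyte. The min()
        # clamp keeps the fee spendable from the child's input and the max()
        # clamp enforces at least 1 sat/byte of the combined size.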
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat is as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
the-stack_0_7875 | # cython: auto_cpdef=True
"""Python code for writing AVRO files"""
# This code is a modified version of the code at
# http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under
# Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0)
import json
from io import BytesIO
from os import urandom, SEEK_SET
import bz2
import lzma
import zlib
from .io.binary_encoder import BinaryEncoder
from .io.json_encoder import AvroJSONEncoder
from .validation import _validate
from .read import HEADER_SCHEMA, SYNC_SIZE, MAGIC, reader
from .logical_writers import LOGICAL_WRITERS
from .schema import extract_record_type, extract_logical_type, parse_schema
from ._write_common import _is_appendable
def write_null(encoder, datum, schema, named_schemas, fname):
"""null is written as zero bytes"""
encoder.write_null()
def write_boolean(encoder, datum, schema, named_schemas, fname):
"""A boolean is written as a single byte whose value is either 0 (false) or
1 (true)."""
encoder.write_boolean(datum)
def write_int(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_int(datum)
def write_long(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_long(datum)
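# Editor's illustration of the zig-zag coding mentioned above (per the Avro
# spec, not a fastavro API): a signed value n is mapped to (n << 1) ^ (n >> 63),
# so 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and the mapped value is then
# written as a little-endian base-128 varint. The long 1 therefore becomes the
# single byte 0x02 and the long -64 becomes the single byte 0x7f.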
def write_float(encoder, datum, schema, named_schemas, fname):
"""A float is written as 4 bytes. The float is converted into a 32-bit
integer using a method equivalent to Java's floatToIntBits and then encoded
in little-endian format."""
encoder.write_float(datum)
def write_double(encoder, datum, schema, named_schemas, fname):
"""A double is written as 8 bytes. The double is converted into a 64-bit
integer using a method equivalent to Java's doubleToLongBits and then
encoded in little-endian format."""
encoder.write_double(datum)
def write_bytes(encoder, datum, schema, named_schemas, fname):
"""Bytes are encoded as a long followed by that many bytes of data."""
encoder.write_bytes(datum)
def write_utf8(encoder, datum, schema, named_schemas, fname):
"""A string is encoded as a long followed by that many bytes of UTF-8
encoded character data."""
encoder.write_utf8(datum)
def write_crc32(encoder, datum):
"""A 4-byte, big-endian CRC32 checksum"""
encoder.write_crc32(datum)
def write_fixed(encoder, datum, schema, named_schemas, fname):
"""Fixed instances are encoded using the number of bytes declared in the
schema."""
if len(datum) != schema["size"]:
raise ValueError(
f"data of length {len(datum)} does not match schema size: {schema}"
)
encoder.write_fixed(datum)
def write_enum(encoder, datum, schema, named_schemas, fname):
"""An enum is encoded by a int, representing the zero-based position of
the symbol in the schema."""
index = schema["symbols"].index(datum)
encoder.write_enum(index)
def write_array(encoder, datum, schema, named_schemas, fname):
"""Arrays are encoded as a series of blocks.
Each block consists of a long count value, followed by that many array
items. A block with count zero indicates the end of the array. Each item
is encoded per the array's item schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written."""
encoder.write_array_start()
if len(datum) > 0:
encoder.write_item_count(len(datum))
dtype = schema["items"]
for item in datum:
write_data(encoder, item, dtype, named_schemas, fname)
encoder.end_item()
encoder.write_array_end()
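# Editor's illustration of the binary array encoding described above (taken
# from the Avro spec's own example, not generated by this file): the array
# [3, 27] with item schema "long" is written as a single block, namely the
# count 2 (byte 0x04), the items 3 (0x06) and 27 (0x36), and the terminating
# count 0 (0x00).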
def write_map(encoder, datum, schema, named_schemas, fname):
"""Maps are encoded as a series of blocks.
Each block consists of a long count value, followed by that many key/value
pairs. A block with count zero indicates the end of the map. Each item is
encoded per the map's value schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written."""
encoder.write_map_start()
if len(datum) > 0:
encoder.write_item_count(len(datum))
vtype = schema["values"]
for key, val in datum.items():
encoder.write_utf8(key)
write_data(encoder, val, vtype, named_schemas, fname)
encoder.write_map_end()
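# Editor's illustration of the binary map encoding described above (an
# editor's example following the Avro spec): the map {"a": 1} with value schema
# "long" is written as the count 1 (0x02), the key "a" (length 0x02, byte
# 0x61), the value 1 (0x02), and the terminating count 0 (0x00).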
def write_union(encoder, datum, schema, named_schemas, fname):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value. The value
is then encoded per the indicated schema within the union."""
best_match_index = -1
if isinstance(datum, tuple):
(name, datum) = datum
for index, candidate in enumerate(schema):
extracted_type = extract_record_type(candidate)
if extracted_type == "record":
schema_name = candidate["name"]
else:
schema_name = extracted_type
if name == schema_name:
best_match_index = index
break
if best_match_index == -1:
field = f"on field {fname}" if fname else ""
msg = (
f"provided union type name {name} not found in schema "
+ f"{schema} {field}"
)
raise ValueError(msg)
index = best_match_index
else:
pytype = type(datum)
most_fields = -1
# All of Python's floating point values are doubles, so to
# avoid loss of precision, we should always prefer 'double'
# if we are forced to choose between float and double.
#
# If 'double' comes before 'float' in the union, then we'll immediately
# choose it, and don't need to worry. But if 'float' comes before
# 'double', we don't want to pick it.
#
# So, if we ever see 'float', we skim through the rest of the options,
# just to see if 'double' is a possibility, because we'd prefer it.
could_be_float = False
for index, candidate in enumerate(schema):
if could_be_float:
if extract_record_type(candidate) == "double":
best_match_index = index
break
else:
# Nothing except "double" is even worth considering.
continue
if _validate(datum, candidate, named_schemas, raise_errors=False):
record_type = extract_record_type(candidate)
if record_type == "record":
logical_type = extract_logical_type(candidate)
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, candidate)
candidate_fields = set(f["name"] for f in candidate["fields"])
datum_fields = set(datum)
fields = len(candidate_fields.intersection(datum_fields))
if fields > most_fields:
best_match_index = index
most_fields = fields
elif record_type == "float":
best_match_index = index
# Continue in the loop, because it's possible that there's
# another candidate which has record type 'double'
could_be_float = True
else:
best_match_index = index
break
if best_match_index == -1:
field = f"on field {fname}" if fname else ""
raise ValueError(
f"{repr(datum)} (type {pytype}) do not match {schema} {field}"
)
index = best_match_index
# write data
# TODO: There should be a way to give just the index
encoder.write_index(index, schema[index])
write_data(encoder, datum, schema[index], named_schemas, fname)
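# Editor's illustration of the union encoding implemented above: for the union
# schema ["null", "string"], the datum "foo" matches index 1, so the long 1 is
# written followed by the string "foo"; the datum None matches index 0 and is
# written as just the long 0, since null itself contributes no bytes.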
def write_record(encoder, datum, schema, named_schemas, fname):
"""A record is encoded by encoding the values of its fields in the order
that they are declared. In other words, a record is encoded as just the
concatenation of the encodings of its fields. Field values are encoded per
their schema."""
for field in schema["fields"]:
name = field["name"]
if name not in datum and "default" not in field and "null" not in field["type"]:
raise ValueError(f"no value and no default for {name}")
write_data(
encoder,
datum.get(name, field.get("default")),
field["type"],
named_schemas,
name,
)
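# Editor's illustration of the record encoding described above: for a record
# with fields [{"name": "station", "type": "string"}, {"name": "temp",
# "type": "int"}], the datum {"station": "X", "temp": -3} is encoded as the
# string "X" immediately followed by the int -3. No field names, lengths or
# delimiters are written, which is why decoding always requires the writer's
# schema.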
WRITERS = {
"null": write_null,
"boolean": write_boolean,
"string": write_utf8,
"int": write_int,
"long": write_long,
"float": write_float,
"double": write_double,
"bytes": write_bytes,
"fixed": write_fixed,
"enum": write_enum,
"array": write_array,
"map": write_map,
"union": write_union,
"error_union": write_union,
"record": write_record,
"error": write_record,
}
def write_data(encoder, datum, schema, named_schemas, fname):
"""Write a datum of data to output stream.
Paramaters
----------
encoder: encoder
Type of encoder (e.g. binary or json)
datum: object
Data to write
schema: dict
Schemda to use
named_schemas: dict
Mapping of fullname to schema definition
"""
record_type = extract_record_type(schema)
logical_type = extract_logical_type(schema)
fn = WRITERS.get(record_type)
if fn:
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
try:
return fn(encoder, datum, schema, named_schemas, fname)
except TypeError as ex:
if fname:
raise TypeError(f"{ex} on field {fname}")
raise
else:
return write_data(encoder, datum, named_schemas[record_type], named_schemas, "")
def write_header(encoder, metadata, sync_marker):
header = {
"magic": MAGIC,
"meta": {key: value.encode() for key, value in metadata.items()},
"sync": sync_marker,
}
write_data(encoder, header, HEADER_SCHEMA, {}, "")
def null_write_block(encoder, block_bytes, compression_level):
"""Write block in "null" codec."""
encoder.write_long(len(block_bytes))
encoder._fo.write(block_bytes)
def deflate_write_block(encoder, block_bytes, compression_level):
"""Write block in "deflate" codec."""
# The first two characters and last character are zlib
# wrappers around deflate data.
if compression_level is not None:
data = zlib.compress(block_bytes, compression_level)[2:-1]
else:
data = zlib.compress(block_bytes)[2:-1]
encoder.write_long(len(data))
encoder._fo.write(data)
def bzip2_write_block(encoder, block_bytes, compression_level):
"""Write block in "bzip2" codec."""
data = bz2.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
def xz_write_block(encoder, block_bytes, compression_level):
"""Write block in "xz" codec."""
data = lzma.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
BLOCK_WRITERS = {
"null": null_write_block,
"deflate": deflate_write_block,
"bzip2": bzip2_write_block,
"xz": xz_write_block,
}
def _missing_codec_lib(codec, library):
def missing(encoder, block_bytes, compression_level):
raise ValueError(
f"{codec} codec is supported but you need to install {library}"
)
return missing
def snappy_write_block(encoder, block_bytes, compression_level):
"""Write block in "snappy" codec."""
data = snappy.compress(block_bytes)
encoder.write_long(len(data) + 4) # for CRC
encoder._fo.write(data)
encoder.write_crc32(block_bytes)
try:
import snappy
except ImportError:
BLOCK_WRITERS["snappy"] = _missing_codec_lib("snappy", "python-snappy")
else:
BLOCK_WRITERS["snappy"] = snappy_write_block
def zstandard_write_block(encoder, block_bytes, compression_level):
"""Write block in "zstandard" codec."""
data = zstd.ZstdCompressor().compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
try:
import zstandard as zstd
except ImportError:
BLOCK_WRITERS["zstandard"] = _missing_codec_lib("zstandard", "zstandard")
else:
BLOCK_WRITERS["zstandard"] = zstandard_write_block
def lz4_write_block(encoder, block_bytes, compression_level):
"""Write block in "lz4" codec."""
data = lz4.block.compress(block_bytes)
encoder.write_long(len(data))
encoder._fo.write(data)
try:
import lz4.block
except ImportError:
BLOCK_WRITERS["lz4"] = _missing_codec_lib("lz4", "lz4")
else:
BLOCK_WRITERS["lz4"] = lz4_write_block
class GenericWriter:
def __init__(self, schema, metadata=None, validator=None):
self._named_schemas = {}
self.schema = parse_schema(schema, self._named_schemas)
self.validate_fn = _validate if validator is True else validator
self.metadata = metadata or {}
if isinstance(schema, dict):
schema = {
key: value
for key, value in schema.items()
if key not in ("__fastavro_parsed", "__named_schemas")
}
elif isinstance(schema, list):
schemas = []
for s in schema:
if isinstance(s, dict):
schemas.append(
{
key: value
for key, value in s.items()
if key
not in (
"__fastavro_parsed",
"__named_schemas",
)
}
)
else:
schemas.append(s)
schema = schemas
self.metadata["avro.schema"] = json.dumps(schema)
class Writer(GenericWriter):
def __init__(
self,
fo,
schema,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
compression_level=None,
):
GenericWriter.__init__(self, schema, metadata, validator)
self.metadata["avro.codec"] = codec
if isinstance(fo, BinaryEncoder):
self.encoder = fo
else:
self.encoder = BinaryEncoder(fo)
self.io = BinaryEncoder(BytesIO())
self.block_count = 0
self.sync_interval = sync_interval
self.compression_level = compression_level
if _is_appendable(self.encoder._fo):
# Seed to the beginning to read the header
self.encoder._fo.seek(0)
avro_reader = reader(self.encoder._fo)
header = avro_reader._header
file_writer_schema = parse_schema(avro_reader.writer_schema)
if self.schema != file_writer_schema:
raise ValueError(
f"Provided schema {self.schema} does not match "
+ f"file writer_schema {file_writer_schema}"
)
codec = avro_reader.metadata.get("avro.codec", "null")
self.sync_marker = header["sync"]
# Seek to the end of the file
self.encoder._fo.seek(0, 2)
self.block_writer = BLOCK_WRITERS[codec]
else:
self.sync_marker = sync_marker or urandom(SYNC_SIZE)
try:
self.block_writer = BLOCK_WRITERS[codec]
except KeyError:
raise ValueError(f"unrecognized codec: {codec}")
write_header(self.encoder, self.metadata, self.sync_marker)
def dump(self):
self.encoder.write_long(self.block_count)
self.block_writer(self.encoder, self.io._fo.getvalue(), self.compression_level)
self.encoder._fo.write(self.sync_marker)
self.io._fo.truncate(0)
self.io._fo.seek(0, SEEK_SET)
self.block_count = 0
def write(self, record):
if self.validate_fn:
self.validate_fn(record, self.schema, self._named_schemas)
write_data(self.io, record, self.schema, self._named_schemas, "")
self.block_count += 1
if self.io._fo.tell() >= self.sync_interval:
self.dump()
def write_block(self, block):
# Clear existing block if there are any records pending
if self.io._fo.tell() or self.block_count > 0:
self.dump()
self.encoder.write_long(block.num_records)
self.block_writer(self.encoder, block.bytes_.getvalue(), self.compression_level)
self.encoder._fo.write(self.sync_marker)
def flush(self):
if self.io._fo.tell() or self.block_count > 0:
self.dump()
self.encoder._fo.flush()
class JSONWriter(GenericWriter):
def __init__(
self,
fo,
schema,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
codec_compression_level=None,
):
GenericWriter.__init__(self, schema, metadata, validator)
self.encoder = fo
self.encoder.configure(self.schema, self._named_schemas)
def write(self, record):
if self.validate_fn:
self.validate_fn(record, self.schema, self._named_schemas)
write_data(self.encoder, record, self.schema, self._named_schemas, "")
def flush(self):
self.encoder.flush()
def writer(
fo,
schema,
records,
codec="null",
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None,
codec_compression_level=None,
):
"""Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
schema: dict
Writer schema
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
        Compression codec: 'null', 'deflate', 'bzip2', 'xz', or (if the
        corresponding library is installed) 'snappy', 'zstandard' or 'lz4'
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
        Validator function. If None (the default) - no validation. If True
        then fastavro.validation.validate will be used. If it's a function, it
        should have the same signature as fastavro.writer.validate and raise an
        exception on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
codec_compression_level: int, optional
Compression level to use with the specified codec (if the codec
supports it)
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
The `fo` argument is a file-like object so another common example usage
would use an `io.BytesIO` object like so::
from io import BytesIO
from fastavro import writer
fo = BytesIO()
writer(fo, schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
"""
# Sanity check that records is not a single dictionary (as that is a common
# mistake and the exception that gets raised is not helpful)
if isinstance(records, dict):
raise ValueError('"records" argument should be an iterable, not dict')
if isinstance(fo, AvroJSONEncoder):
writer_class = JSONWriter
else:
# Assume a binary IO if an encoder isn't given
writer_class = Writer
fo = BinaryEncoder(fo)
output = writer_class(
fo,
schema,
codec,
sync_interval,
metadata,
validator,
sync_marker,
codec_compression_level,
)
for record in records:
output.write(record)
output.flush()
def schemaless_writer(fo, schema, record):
"""Write a single record without the schema or header information
Parameters
----------
fo: file-like
Output file
schema: dict
Schema
record: dict
Record to write
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file', 'rb') as fp:
fastavro.schemaless_writer(fp, parsed_schema, record)
Note: The ``schemaless_writer`` can only write a single record.
"""
named_schemas = {}
schema = parse_schema(schema, named_schemas)
encoder = BinaryEncoder(fo)
write_data(encoder, record, schema, named_schemas, "")
encoder.flush()
|
the-stack_0_7876 | """
Given the row and column sizes of a grid,
print every shortest grid path from the top-left corner to the bottom-right corner, using d (down) and r (right).
Input
The grid's row (vertical) and column (horizontal) sizes are given on a single line.
Output
Print every shortest path from the top-left to the bottom-right of the grid, expressed with d and r.
The output order must match the example below.
Sample Input 1
3 2
Sample Output 1
dddrr
ddrdr
ddrrd
drddr
drdrd
drrdd
rdddr
rddrd
rdrdd
rrddd
"""
def getGridPath(r, c, p=''):
if (len(p) >= row+col):
if (r==0 and c==0):
print(p)
return
getGridPath(r-1, c, p+'d')
getGridPath(r, c-1, p+'r')
a = input().split()
row, col = int(a[0]), int(a[1])
getGridPath(row, col)
|
the-stack_0_7877 | """
_SummaryHistogram_
Histogram module, to be used by the TaskArchiver
to store histograms in the summary.
Created on Nov 16, 2012
@author: dballest
"""
from builtins import str
from WMCore.DataStructs.WMObject import WMObject
class SummaryHistogram(WMObject):
"""
_SummaryHistogram_
Histogram object, provides familiar CRUD methods
which take care of most of the statistical
calculations when adding points, this object
can also be converted into a dictionary
for JSON documents. It knows how to combine
with other histograms and create itself from
a dictionary provided it has matching structure.
This is an interface, the real work is done
by the ContinuousSummaryHistogram and
DiscreteSummaryHistogram objects
"""
def __init__(self, title = None, xLabel = None):
"""
__init__
Initialize the elements in the object.
"""
# Meta-information about the histogram, it can be changed at any point
self.title = title
self.xLabel = xLabel
# These shouldn't be touched from anything outside the SummaryHistogram object and children classes
self.continuous = None
self.jsonInternal = None
self.data = {}
self.average = None
self.stdDev = None
return
def setTitle(self, newTitle):
"""
_setTitle_
Set the title
"""
self.title = newTitle
return
def setHorizontalLabel(self, xLabel):
"""
_setHorizontalLabel_
Set the label on the x axis
"""
self.xLabel = xLabel
return
def addPoint(self, xValue, yLabel):
"""
_addPoint_
Add a point to the histogram data, a histogram
can have many types of y values for the same x if
x is continuous otherwise it is only one yLabel.
They should be in a similar scale for best results.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def toJSON(self):
"""
_toJSON_
Return a dictionary which is compatible
with a JSON object
"""
if self.continuous is None:
raise TypeError("toJSON can't be called on a bare SummaryHistogram object")
# Get what the children classes did
jsonDict = {}
jsonDict['internalData'] = self.jsonInternal or {}
# Add the common things
jsonDict['title'] = self.title
jsonDict['xLabel'] = self.xLabel
jsonDict['continuous'] = self.continuous
jsonDict['data'] = self.data
jsonDict['stdDev'] = self.stdDev
jsonDict['average'] = self.average
return jsonDict
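    # Editor's illustration of the toJSON() output shape for a concrete
    # subclass (the field values here are hypothetical; the exact 'data' and
    # 'internalData' payloads are defined by ContinuousSummaryHistogram and
    # DiscreteSummaryHistogram):
    # {'title': 'Job runtime', 'xLabel': 'seconds', 'continuous': True,
    #  'data': {...}, 'average': ..., 'stdDev': ..., 'internalData': {...}}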
def __add__(self, other):
"""
__add__
Add two histograms, combine statistics.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def __str__(self):
"""
__str__
Return the str object of the JSON
"""
return str(self.toJSON())
|
the-stack_0_7879 | import cv2
import numpy as np
from preparation.augmentor import Augmentor
from preparation.utils import get_snake_case, get_class_from_path
import pandas as pd
import os
from multiprocessing.pool import ThreadPool as Pool
from glob import glob
class Processor:
def __init__(self, batch_size, width, height):
self.batch_size = batch_size
self.width = width
self.height = height
columns = pd.read_csv('data/train_labels/train01.csv').columns[1:]
columns = list(map(lambda x: get_snake_case(x), columns))
columns_to_index = {column_name: index for (index, column_name) in enumerate(columns)}
columns_to_index.update({'no_tools': 21})
self.columns = columns_to_index
def process(self, imgs_paths, augment=True):
new_imgs = np.zeros((self.batch_size, self.height, self.width, 3), dtype=np.float32)
new_labels = np.zeros((self.batch_size, 22), dtype=np.float32)
if not len(imgs_paths):
return new_imgs, new_labels
for i in range(0, len(imgs_paths)):
img = cv2.imread(imgs_paths[i], 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Augmentor().augment(img) if augment else img
new_imgs[i] = img
current_class = get_class_from_path(imgs_paths[i])
new_labels[i][self.columns[current_class]] = 1.
new_imgs /= 255
return new_imgs, new_labels
def delete_empty_files(self, imgs_paths, folder_path):
def delete_empty_file(path):
img = cv2.imread(path)
shape = img.shape[:2]
if not shape[0] or not shape[1]:
os.remove(path)
with Pool(processes=12) as pool:
pool.map(delete_empty_file, imgs_paths)
return np.array(glob(folder_path))
|
the-stack_0_7881 | from django.test import TestCase
# Create your tests here.
import datetime
from django.utils import timezone
from catalog.forms import RenewBookForm
class RenewBookFormTest(TestCase):
def test_renew_form_date_in_past(self):
"""
Test form is invalid if renewal_date is before today
"""
date = datetime.date.today() - datetime.timedelta(days=1)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertFalse(form.is_valid())
def test_renew_form_date_too_far_in_future(self):
"""
        Test form is invalid if renewal_date is more than 4 weeks from today
"""
date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertFalse(form.is_valid())
def test_renew_form_date_today(self):
"""
        Test form is valid if renewal_date is today
"""
date = datetime.date.today()
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertTrue(form.is_valid())
def test_renew_form_date_max(self):
"""
Test form is valid if renewal_date is within 4 weeks
"""
date = timezone.now() + datetime.timedelta(weeks=4)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertTrue(form.is_valid())
def test_renew_form_date_field_label(self):
"""
Test renewal_date label is "renewal date"
"""
form = RenewBookForm()
self.assertTrue(form.fields['renewal_date'].label == None or form.fields['renewal_date'].label == 'renewal date')
def test_renew_form_date_field_help_text(self):
"""
Test renewal_date help_text is as expected
"""
form = RenewBookForm()
        self.assertEqual(form.fields['renewal_date'].help_text, 'Enter a date between now and 4 weeks (default 3).')
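# Editor's sketch (an assumption, not part of this repository): these tests are
# written against a form along the lines of the MDN "LocalLibrary" tutorial's
# RenewBookForm, whose clean_renewal_date() rejects dates in the past or more
# than 4 weeks ahead, roughly:
#
#     from django import forms
#     from django.core.exceptions import ValidationError
#
#     class RenewBookForm(forms.Form):
#         renewal_date = forms.DateField(
#             help_text='Enter a date between now and 4 weeks (default 3).')
#
#         def clean_renewal_date(self):
#             data = self.cleaned_data['renewal_date']
#             if data < datetime.date.today():
#                 raise ValidationError('Invalid date - renewal in past')
#             if data > datetime.date.today() + datetime.timedelta(weeks=4):
#                 raise ValidationError(
#                     'Invalid date - renewal more than 4 weeks ahead')
#             return data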
|
the-stack_0_7882 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \
get_patch_size, default_3D_augmentation_params
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
from torch.nn.utils import clip_grad_norm_
from nnunet.training.learning_rate.poly_lr import poly_lr
from batchgenerators.utilities.file_and_folder_operations import *
try:
from apex import amp
except ImportError:
amp = None
class nnUNetTrainerV2(nnUNetTrainer):
"""
Info for Fabian: same as internal nnUNetTrainerV2_2
"""
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.max_num_epochs = 1000
self.initial_lr = 1e-2
self.deep_supervision_scales = None
self.ds_loss_weights = None
self.pin_memory = True
def initialize(self, training=True, force_load_plans=False):
"""
- replaced get_default_augmentation with get_moreDA_augmentation
- enforce to only run this code once
- loss function wrapper for deep supervision
:param training:
:param force_load_plans:
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = len(self.net_num_pool_op_kernel_sizes)
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest resolution output. Normalize weights so that they sum to 1
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
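            # Editor's worked example of the weighting above (illustration only):
            # with net_numpool = 5 the raw weights are [1, 0.5, 0.25, 0.125,
            # 0.0625]; masking out the lowest resolution and normalising gives
            # approximately [0.533, 0.267, 0.133, 0.067, 0.0], so the
            # full-resolution output dominates the deep supervision loss.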
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(
self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory
)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
"""
- momentum 0.99
- SGD instead of Adam
- self.lr_scheduler = None because we do poly_lr
- deep supervision = True
- i am sure I forgot something here
Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
:return:
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
# finetune
# finetune = True
# print('-'*30)
# if finetune==True:
# saved_model = torch.load(join(self.output_folder, "finetune.model"), map_location=torch.device('cpu'))
# exit()
# new_state_dict = OrderedDict()
# curr_state_dict_keys = list(self.network.state_dict().keys())
# print('-'*30)
# print('curr_state_dict_keys: ', curr_state_dict_keys)
        # print("saved_model['state_dict'].keys(): ", saved_model['state_dict'].keys())
# print('-'*30)
# for k, value in saved_model['state_dict'].items():
# key = k
# if key not in curr_state_dict_keys:
# print(key, "duh***********")
# key = key[7:]
# new_state_dict[key] = value
# self.network.load_state_dict(new_state_dict)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = None
def run_online_evaluation(self, output, target):
"""
due to deep supervision the return value and the reference are now lists of tensors. We only need the full
resolution output because this is what we are interested in in the end. The others are ignored
:param output:
:param target:
:return:
"""
target = target[0]
output = output[0]
return super().run_online_evaluation(output, target)
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0):
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().validate(do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian,
overwrite, validation_folder_name, debug, all_in_gpu,
force_separate_z=force_separate_z, interpolation_order=interpolation_order,
interpolation_order_z=interpolation_order_z)
self.network.do_ds = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True,
step_size: float = 0.5, use_gaussian: bool = True,
pad_border_mode: str = 'constant', pad_kwargs: dict = None,
all_in_gpu: bool = True,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().predict_preprocessed_data_return_seg_and_softmax(data, do_mirroring, mirror_axes,
use_sliding_window, step_size, use_gaussian,
pad_border_mode, pad_kwargs, all_in_gpu, verbose)
self.network.do_ds = ds
return ret
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
"""
gradient clipping improves training stability
:param data_generator:
:param do_backprop:
:param run_online_evaluation:
:return:
"""
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
self.optimizer.zero_grad()
output = self.network(data)
del data
loss = self.loss(output, target)
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
if do_backprop:
if not self.fp16 or amp is None or not torch.cuda.is_available():
loss.backward()
else:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
_ = clip_grad_norm_(self.network.parameters(), 12)
self.optimizer.step()
return loss.detach().cpu().numpy()
def do_split(self):
"""
        we now allow more than 5 splits. IMPORTANT: any fold > 4 will not be a real split but just another random
80:20 split of the data. You cannot run X-fold cross-validation with this code. It will always be a 5-fold CV.
Folds > 4 will be independent from each other
:return:
"""
if self.fold == 'all' or self.fold < 5:
return super().do_split()
else:
rnd = np.random.RandomState(seed=12345 + self.fold)
keys = np.sort(list(self.dataset.keys()))
idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)
idx_val = [i for i in range(len(keys)) if i not in idx_tr]
self.dataset_tr = OrderedDict()
for i in idx_tr:
self.dataset_tr[keys[i]] = self.dataset[keys[i]]
self.dataset_val = OrderedDict()
for i in idx_val:
self.dataset_val[keys[i]] = self.dataset[keys[i]]
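    # Illustrative note on the branch above (numbers are hypothetical, not from the code): with 100
    # training cases and fold = 7, RandomState(12345 + 7) draws 80 cases for self.dataset_tr and the
    # remaining 20 become self.dataset_val; the same fold always reproduces the same split, while
    # fold = 8 yields a different, independent 80:20 split.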
def setup_DA_params(self):
"""
        - we increase the rotation angle from [-15, 15] to [-30, 30]
- scale range is now (0.7, 1.4), was (0.85, 1.25)
- we don't do elastic deformation anymore
:return:
"""
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
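        # Worked example, added for illustration (not in the original code): with
        # net_num_pool_op_kernel_sizes = [[2, 2, 2], [2, 2, 2], [2, 2, 2]] the cumulative products are
        # [[2, 2, 2], [4, 4, 4], [8, 8, 8]], so deep_supervision_scales becomes
        # [[1, 1, 1], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]] (the coarsest scale is dropped by the [:-1]).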
if self.threeD:
self.data_aug_params = default_3D_augmentation_params
self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
if self.do_dummy_2D_aug:
self.data_aug_params["dummy_2D"] = True
self.print_to_log_file("Using dummy2d data augmentation")
self.data_aug_params["elastic_deform_alpha"] = \
default_2D_augmentation_params["elastic_deform_alpha"]
self.data_aug_params["elastic_deform_sigma"] = \
default_2D_augmentation_params["elastic_deform_sigma"]
self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
else:
self.do_dummy_2D_aug = False
if max(self.patch_size) / min(self.patch_size) > 1.5:
default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
self.data_aug_params = default_2D_augmentation_params
self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
if self.do_dummy_2D_aug:
self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
patch_size_for_spatialtransform = self.patch_size[1:]
else:
self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
patch_size_for_spatialtransform = self.patch_size
self.data_aug_params["scale_range"] = (0.7, 1.4)
self.data_aug_params["do_elastic"] = False
self.data_aug_params['selected_seg_channels'] = [0]
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
self.data_aug_params["num_cached_per_thread"] = 2
def maybe_update_lr(self, epoch=None):
"""
if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1
        (maybe_update_lr is called in on_epoch_end, which is called before epoch is incremented.
        Therefore we need to do +1 here)
:param epoch:
:return:
"""
if epoch is None:
ep = self.epoch + 1
else:
ep = epoch
self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))
def on_epoch_end(self):
"""
        overwrite patience-based early stopping. Always run to 1000 epochs
:return:
"""
super().on_epoch_end()
continue_training = self.epoch < self.max_num_epochs
# it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the
# estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95
if self.epoch == 100:
if self.all_val_eval_metrics[-1] == 0:
self.optimizer.param_groups[0]["momentum"] = 0.95
self.network.apply(InitWeights_He(1e-2))
self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too "
"high momentum. High momentum (0.99) is good for datasets where it works, but "
"sometimes causes issues such as this one. Momentum has now been reduced to "
"0.95 and network weights have been reinitialized")
return continue_training
def run_training(self):
"""
if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first
continued epoch with self.initial_lr
we also need to make sure deep supervision in the network is enabled for training, thus the wrapper
:return:
"""
self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we
# want at the start of the training
ds = self.network.do_ds
self.network.do_ds = True
ret = super().run_training()
self.network.do_ds = ds
return ret
|
the-stack_0_7883 | import logging
import os
from datetime import datetime
from unittest.mock import Mock
import psycopg2
import pytest
from scrapy.exceptions import NotConfigured
from kingfisher_scrapy.extensions import DatabaseStore, FilesStore
from tests import spider_with_crawler
database_url = os.getenv('KINGFISHER_COLLECT_DATABASE_URL')
skip_test_if = not database_url and ('CI' not in os.environ or 'CI_SKIP' in os.environ)
def test_from_crawler_missing_arguments():
spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00')
with pytest.raises(NotConfigured) as excinfo:
DatabaseStore.from_crawler(spider.crawler)
assert str(excinfo.value) == 'DATABASE_URL is not set.'
spider.crawler.settings = {'DATABASE_URL': 'test', 'FILES_STORE': None}
with pytest.raises(NotConfigured) as excinfo:
DatabaseStore.from_crawler(spider.crawler)
assert str(excinfo.value) == 'FILES_STORE is not set.'
@pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set')
@pytest.mark.parametrize('from_date,default_from_date,date_format', [
(None, None, None),
('2020-01-01', None, 'date'),
('2020-01-01', '2020-01-01', 'date'),
])
def test_spider_opened_first_time(caplog, tmpdir, from_date, default_from_date, date_format):
spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00',
settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir})
spider.from_date = from_date
spider.default_from_date = default_from_date
if date_format:
spider.date_format = spider.VALID_DATE_FORMATS[date_format]
extension = DatabaseStore.from_crawler(spider.crawler)
with caplog.at_level(logging.INFO):
extension.spider_opened(spider)
if not from_date:
assert [record.message for record in caplog.records][-5:] == [
'Getting the date from which to resume the crawl from the test table']
connection = psycopg2.connect(database_url)
cursor = connection.cursor()
try:
cursor.execute("SELECT to_regclass('test')")
table_exists = cursor.fetchone()[0]
assert table_exists == 'test'
assert spider.from_date == from_date
finally:
cursor.close()
connection.close()
@pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set')
def test_spider_closed_error(caplog, tmpdir):
spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00',
settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir})
extension = DatabaseStore.from_crawler(spider.crawler)
with caplog.at_level(logging.INFO):
extension.spider_closed(spider, 'closed')
assert not caplog.records
@pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set')
@pytest.mark.parametrize('data,data_type,sample,compile_releases', [
(b'{"releases": [{"date": "2021-05-26T10:00:00Z"}]}', 'release_package', None, False),
(b'{"releases": [{"date": "2021-05-26T10:00:00Z"}]}', 'release_package', 1, False),
(b'{"releases": [{"ocid":"1", "date": "2021-05-26T10:00:00Z"}]}', 'release_package', None, True),
(b'{"records": [{"compiledRelease": {"date": "2021-05-26T10:00:00Z"}}]}', 'record_package', None, False),
(b'{"records": [{"releases": [{"ocid":"1", "date": "2021-05-26T10:00:00Z"}]}]}', 'record_package', None, True),
])
def test_spider_closed(caplog, tmpdir, data, data_type, sample, compile_releases):
caplog.set_level(logging.INFO)
expected_date = '2021-05-26T10:00:00Z'
spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00',
settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir})
spider.data_type = data_type
spider.sample = sample
spider.compile_releases = compile_releases
extension = DatabaseStore.from_crawler(spider.crawler)
files_store_extension = FilesStore.from_crawler(spider.crawler)
response = Mock()
response.body = data
response.request = Mock()
response.request.url = 'https://example.com/remote.json'
response.request.meta = {'file_name': 'file.json'}
item = spider.build_file_from_response(response, file_name='file.json', data_type=data_type)
files_store_extension.item_scraped(item, spider)
extension.spider_opened(spider)
caplog.clear()
extension.spider_closed(spider, 'finished')
connection = psycopg2.connect(database_url)
cursor = connection.cursor()
try:
cursor.execute("SELECT max(data->>'date') FROM test")
max_date = cursor.fetchone()[0]
assert max_date == expected_date
if compile_releases:
if data_type == 'release_package':
prefix = 'empty'
else:
prefix = 'records.item.releases.item'
elif data_type == 'release_package':
prefix = 'releases.item'
else:
prefix = 'records.item.compiledRelease'
if sample:
suffix = '_sample'
else:
suffix = ''
expected_messages = [
f'Reading the {tmpdir}/test{suffix}/20210525_000000 crawl directory with the {prefix} prefix',
f'Writing the JSON data to the {tmpdir}/test{suffix}/20210525_000000/data.csv CSV file',
'Replacing the JSON data in the test table',
]
if compile_releases:
expected_messages.insert(1, 'Creating compiled releases')
assert [record.message for record in caplog.records][-5:] == expected_messages
finally:
cursor.close()
connection.close()
@pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set')
def test_spider_opened_with_data(caplog, tmpdir):
spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00',
settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir})
extension = DatabaseStore.from_crawler(spider.crawler)
connection = psycopg2.connect(database_url)
cursor = connection.cursor()
try:
with caplog.at_level(logging.INFO):
extension.spider_opened(spider)
assert spider.from_date == datetime(2021, 5, 26, 0, 0)
assert [record.message for record in caplog.records][-5:] == [
'Getting the date from which to resume the crawl from the test table',
'Resuming the crawl from 2021-05-26']
finally:
cursor.execute('DROP TABLE test')
connection.commit()
cursor.close()
connection.close()
|
the-stack_0_7884 |
import json
f = open("../../config/add_action.txt")
processors = []
action_map = {}
actions = []
primitive_list = []
primitive_num = 0
parameter_num = 0
primitive_idx = 0
cur_idx = 0
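# The layout of add_action.txt is not included in this snapshot; judging from the parser below it is
# assumed to look roughly like this (hypothetical example):
#
# e ingress_processor
# a add_tunnel 2 3
# p add_header tunnel.valid
# p modify_field tunnel.id
# p modify_field ipv4.ttl
# l 16 8
#
# 'e' names the processor, 'a' declares an action with its parameter count and primitive count,
# each 'p' line gives a primitive and its header.field arguments, and 'l' lists the bit length of
# every action parameter.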
while True:
line = f.readline()
print(line)
if not line:
break
if line == "\n":
continue
l = line.split()
if l[0] == 'e':
action_map["processor_name"] = l[1]
# processors.append(l[1])
# action_map[processors[-1]] = []
# action_map[processors[-1]] = {}
if l[0] == 'a':
actions.append(l[1])
# action_map[processors[-1]].append({})
action_map["action_name"] = l[1]
action_map["parameter_num"] = int(l[2])
parameter_num = int(l[2])
action_map["primitives"] = []
action_map["parameter_length"] = []
primitive_num = int(l[3])
# primitive_list = []
cur_idx = 0
elif l[0] == 'p':
primitive_map = {}
primitive_name = l[1]
primitive_map["primitive_name"] = l[1]
parameters = []
for i in range(len(l)-2):
header_name = l[2+i].split('.')[0]
field_name = l[2+i].split('.')[1]
parameter_map = {}
parameter_map["type"] = header_name
parameter_map["value"] = field_name
parameters.append(parameter_map)
primitive_map["parameters"] = parameters
action_map["primitives"].append(primitive_map)
elif l[0] == 'l':
for i in range(1, parameter_num + 1):
action_map["parameter_length"].append(int(l[i]))
f.close()
print(action_map)
print(json.dumps(action_map, indent=3))
filename = "../../config/add_action.json"
with open(filename, 'w') as file_obj:
json.dump(action_map, file_obj, indent=3) |
the-stack_0_7885 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from monai.engines import SupervisedEvaluator
from monai.handlers import DecollateBatch, PostProcessing
from monai.transforms import Activationsd, AsDiscreted, Compose, CopyItemsd
class TestHandlerDecollateBatch(unittest.TestCase):
def test_compute(self):
data = [
{"image": torch.tensor([[[[2.0], [3.0]]]]), "filename": ["test1"]},
{"image": torch.tensor([[[[6.0], [8.0]]]]), "filename": ["test2"]},
]
handlers = [
DecollateBatch(event="MODEL_COMPLETED"),
PostProcessing(
transform=Compose(
[
Activationsd(keys="pred", sigmoid=True),
CopyItemsd(keys="filename", times=1, names="filename_bak"),
AsDiscreted(keys="pred", threshold=0.5, to_onehot=2),
]
)
),
]
# set up engine, PostProcessing handler works together with postprocessing transforms of engine
engine = SupervisedEvaluator(
device=torch.device("cpu:0"),
val_data_loader=data,
epoch_length=2,
network=torch.nn.PReLU(),
# set decollate=False and execute some postprocessing first, then decollate in handlers
postprocessing=lambda x: dict(pred=x["pred"] + 1.0),
decollate=False,
val_handlers=handlers,
)
engine.run()
expected = torch.tensor([[[[1.0], [1.0]], [[0.0], [0.0]]]])
for o, e in zip(engine.state.output, expected):
torch.testing.assert_allclose(o["pred"], e)
filename = o.get("filename_bak")
if filename is not None:
self.assertEqual(filename, "test2")
if __name__ == "__main__":
unittest.main()
|
the-stack_0_7888 | from __future__ import absolute_import, division, print_function, unicode_literals
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
typname = "exchange_routes"
def __bool__(self):
return self.error != 'invalidated'
__nonzero__ = __bool__
@classmethod
def from_id(cls, id):
return cls.db.one("""
SELECT r.*::exchange_routes
FROM exchange_routes r
WHERE id = %(id)s
""", locals())
@classmethod
def from_network(cls, participant, network):
participant_id = participant.id
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM current_exchange_routes r
WHERE participant = %(participant_id)s
AND network = %(network)s
""", locals())
if r:
r.__dict__['participant'] = participant
return r
@classmethod
def from_address(cls, participant, network, address):
participant_id = participant.id
r = cls.db.one("""
SELECT r.*::exchange_routes
FROM exchange_routes r
WHERE participant = %(participant_id)s
AND network = %(network)s
AND address = %(address)s
""", locals())
if r:
r.__dict__['participant'] = participant
return r
@classmethod
def insert(cls, participant, network, address, error='', fee_cap=None):
participant_id = participant.id
r = cls.db.one("""
INSERT INTO exchange_routes
(participant, network, address, error, fee_cap)
VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
RETURNING exchange_routes.*::exchange_routes
""", locals())
if network == 'balanced-cc':
participant.update_giving_and_tippees()
r.__dict__['participant'] = participant
return r
def invalidate(self):
if self.network == 'braintree-cc':
braintree.PaymentMethod.delete(self.address)
# For Paypal, we remove the record entirely to prevent
# an integrity error if the user tries to add the route again
if self.network == 'paypal':
self.db.run("DELETE FROM exchange_routes WHERE id=%s", (self.id,))
else:
self.update_error('invalidated')
def update_error(self, new_error, propagate=True):
id = self.id
old_error = self.error
if old_error == 'invalidated':
return
self.db.run("""
UPDATE exchange_routes
SET error = %(new_error)s
WHERE id = %(id)s
""", locals())
self.set_attributes(error=new_error)
# Update the receiving amounts of tippees if requested and necessary
if not propagate or self.network != 'balanced-cc':
return
if self.participant.is_suspicious or bool(new_error) == bool(old_error):
return
self.participant.update_giving_and_tippees()
|
the-stack_0_7889 | import os
TARGET = os.path.abspath(os.getcwd())
for root, dirs, files in os.walk(TARGET):
for filename in files:
# read file content
with open(os.path.join(root, filename)) as f:
content = f.read()
# replace tag by install path
content = content.replace('$((INSTALDIR))', TARGET)
# replace file content
with open(os.path.join(root, filename), 'w') as f:
f.write(content)
|
the-stack_0_7890 | # -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.15.45'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
version = None
# rate limiter settings
enableRateLimit = False
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
session = None # Session () by default
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
fees = {
'trading': {
'fee_loaded': False,
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'fee_loaded': False,
'withdraw': {},
'deposit': {},
},
}
ids = None
tickers = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
apiKey = ''
secret = ''
password = ''
uid = ''
twofa = False
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
limits = None
exceptions = None
headers = None
balance = None
orderbooks = None
orders = None
trades = None
currencies = None
options = None # Python does not allow to define properties in run-time with setattr
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
}
# API method metainfo
has = {
'publicAPI': True,
'privateAPI': True,
'CORS': False,
'cancelOrder': True,
'cancelOrders': False,
'createDepositAddress': False,
'createOrder': True,
'createMarketOrder': True,
'createLimitOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchFundingFees': False,
'fetchL2OrderBook': True,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
'fetchTradingFees': False,
'fetchTradingLimits': False,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
last_http_response = None
last_json_response = None
last_response_headers = None
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
}
def __init__(self, config={}):
self.precision = {} if self.precision is None else self.precision
self.limits = {} if self.limits is None else self.limits
self.exceptions = {} if self.exceptions is None else self.exceptions
self.headers = {} if self.headers is None else self.headers
self.balance = {} if self.balance is None else self.balance
self.orderbooks = {} if self.orderbooks is None else self.orderbooks
self.orders = {} if self.orders is None else self.orders
self.trades = {} if self.trades is None else self.trades
self.currencies = {} if self.currencies is None else self.currencies
self.options = {} if self.options is None else self.options # Python does not allow to define properties in run-time with setattr
self.decimalToPrecision = self.decimal_to_precision = decimal_to_precision
# version = '.'.join(map(str, sys.version_info[:3]))
# self.userAgent = {
# 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
# }
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
# format camel case
for attr in dir(self):
            if attr[0] != '_' and attr[-1] != '_' and '_' in attr:
conv = attr.split('_')
camel_case = conv[0] + ''.join(i[0].upper() + i[1:] for i in conv[1:])
setattr(self, camel_case, getattr(self, attr))
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit,
'delay': 1.0,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {})
self.session = self.session if self.session else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
def __del__(self):
if self.session:
self.session.close()
def describe(self):
return {}
def define_rest_api(self, api, method_name, options={}):
delimiters = re.compile('[^a-zA-Z0-9]')
for api_type, methods in api.items():
for http_method, urls in methods.items():
for url in urls:
url = url.strip()
split_path = delimiters.split(url)
uppercase_method = http_method.upper()
lowercase_method = http_method.lower()
camelcase_method = lowercase_method.capitalize()
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
lowercase_path = [x.strip().lower() for x in split_path]
underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])
camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()
if 'suffixes' in options:
if 'camelcase' in options['suffixes']:
camelcase += options['suffixes']['camelcase']
if 'underscore' in options['suffixes']:
underscore += options['suffixes']['underscore']
partial = functools.partial(getattr(self, method_name), url, api_type, uppercase_method)
setattr(self, camelcase, partial)
setattr(self, underscore, partial)
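    # Illustrative sketch of what define_rest_api produces (the example endpoints are hypothetical):
    # given api = {'public': {'get': ['ticker/{pair}', 'trades']}}, both exchange.publicGetTickerPair(...)
    # and exchange.public_get_ticker_pair(...) are bound to self.request('ticker/{pair}', 'public', 'GET', ...),
    # and likewise publicGetTrades / public_get_trades for the second entry.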
def raise_error(self, exception_type, url=None, method=None, error=None, details=None):
if error:
error = str(error)
output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None])
raise exception_type(output)
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def handle_errors(self, code, reason, url, method, headers, body):
pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
if body:
body = body.encode()
self.session.cookies.clear()
response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies
)
self.last_http_response = response.text
self.last_response_headers = response.headers
if self.verbose:
print("\nResponse:", method, url, str(response.status_code), str(response.headers), self.last_http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, response.headers, self.last_http_response)
response.raise_for_status()
except Timeout as e:
self.raise_error(RequestTimeout, method, url, e)
except TooManyRedirects as e:
self.raise_error(ExchangeError, url, method, e)
except SSLError as e:
self.raise_error(ExchangeError, url, method, e)
except HTTPError as e:
self.handle_errors(response.status_code, response.reason, url, method, self.last_response_headers, self.last_http_response)
self.handle_rest_errors(e, response.status_code, self.last_http_response, url, method)
self.raise_error(ExchangeError, url, method, e, self.last_http_response)
except RequestException as e: # base exception class
self.raise_error(ExchangeError, url, method, e, self.last_http_response)
self.handle_errors(response.status_code, response.reason, url, method, None, self.last_http_response)
return self.handle_rest_response(self.last_http_response, url, method, headers, body)
def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'):
error = None
if http_status_code in [418, 429]:
error = DDoSProtection
elif http_status_code in [404, 409, 500, 501, 502, 520, 521, 522, 525]:
error = ExchangeNotAvailable
elif http_status_code in [422]:
error = ExchangeError
elif http_status_code in [400, 403, 405, 503, 530]:
# special case to detect ddos protection
error = ExchangeNotAvailable
if response:
ddos_protection = re.search('(cloudflare|incapsula)', response, flags=re.IGNORECASE)
if ddos_protection:
error = DDoSProtection
elif http_status_code in [408, 504]:
error = RequestTimeout
elif http_status_code in [401, 511]:
error = AuthenticationError
if error:
self.raise_error(error, url, method, exception if exception else http_status_code, response)
def handle_rest_response(self, response, url, method='GET', headers=None, body=None):
try:
if self.parseJsonResponse:
last_json_response = json.loads(response) if len(response) > 1 else None
self.last_json_response = last_json_response
return last_json_response
else:
return response
except ValueError as e: # ValueError == JsonDecodeError
ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
if ddos_protection:
self.raise_error(DDoSProtection, method, url, None, response)
if exchange_not_available:
message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
self.raise_error(ExchangeNotAvailable, method, url, None, message)
self.raise_error(ExchangeError, method, url, e, response)
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key:
value = float(dictionary[key])
else:
value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value
except ValueError as e:
value = default_value
return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if key is None or (key not in dictionary):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def truncate(num, precision=0):
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
if precision > 0:
parts = ('%f' % Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
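    # Usage examples, added for illustration: Exchange.truncate(1.23456789, 4) returns 1.2345,
    # Exchange.truncate(1.9, 0) returns 1, and Exchange.truncate_to_string(1.23, 6) returns '1.23'
    # because trailing zeros of the decimal part are stripped.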
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
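    # Example, added for illustration: Exchange.deep_extend({'fees': {'taker': 0.002}}, {'fees': {'maker': 0.001}})
    # returns {'fees': {'taker': 0.002, 'maker': 0.001}}, whereas the shallow extend() above would replace
    # the whole nested 'fees' dict with the last one supplied.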
@staticmethod
def filter_by(array, key, value=None):
if value:
grouped = Exchange.group_by(array, key)
if value in grouped:
return grouped[value]
return []
return array
    @staticmethod
    def filterBy(array, key, value=None):
        return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
for element in array:
if (key in element) and (element[key] is not None):
k = element[key]
result[k] = element
return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
for key in params:
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def url(path, params={}):
result = Exchange.implode_params(path, params)
query = Exchange.omit(params, Exchange.extract_params(path))
if query:
result += '?' + _urlencode.urlencode(query)
return result
@staticmethod
def urlencode(params={}):
if (type(params) is dict) or isinstance(params, collections.OrderedDict):
return _urlencode.urlencode(params)
return params
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri):
return _urlencode.quote(uri, safe="~()*!.'")
@staticmethod
def omit(d, *args):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
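    # Example, added for illustration: Exchange.aggregate([[100.0, 1.0], [100.0, 2.0], [99.5, 3.0], [98.0, 0.0]])
    # returns [[100.0, 3.0], [99.5, 3.0]]: volumes at the same price level are summed and zero-volume
    # levels are dropped.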
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, int):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
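    # Example, added for illustration: Exchange.iso8601(1525177845123) returns '2018-05-01T12:30:45.123Z';
    # negative or non-integer inputs return None by design.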
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1')
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
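    # Example, added for illustration: Exchange.parse8601('2018-05-01T12:30:45.123Z') returns
    # 1525177845123 (milliseconds since the Unix epoch); input that does not match the pattern returns None.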
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
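    # Usage sketch, added for illustration (the payload/secret names are hypothetical): request signing in
    # subclasses typically looks like
    #     signature = Exchange.hmac(Exchange.encode(payload), Exchange.encode(self.secret), hashlib.sha256)
    # which returns a hex digest by default; pass digest='base64' to get a base64-encoded MAC instead.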
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_to_string(s):
return s.decode('ascii')
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'):
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encodedHeader = Exchange.base64urlencode(header)
encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encodedHeader + '.' + encodedData
hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary')
signature = Exchange.base64urlencode(hmac)
return token + '.' + signature
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def encode(string):
return string.encode()
@staticmethod
def decode(string):
return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
self.raise_error(AuthenticationError, details='requires `' + key + '`')
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
self.raise_error(InvalidAddress, details='address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': 0.0,
'used': 0.0,
'total': 0.0,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
def cost_to_precision(self, symbol, cost):
return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(cost))
def price_to_precision(self, symbol, price):
return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(price))
def amount_to_precision(self, symbol, amount):
return self.truncate(amount, self.markets[symbol]['precision']['amount'])
def amount_to_string(self, symbol, amount):
return self.truncate_to_string(amount, self.markets[symbol]['precision']['amount'])
def amount_to_lots(self, symbol, amount):
lot = self.markets[symbol]['lot']
return self.amount_to_precision(symbol, math.floor(amount / lot) * lot)
def fee_to_precision(self, symbol, fee):
return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(fee))
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(list(self.markets.keys()))
self.ids = sorted(list(self.markets_by_id.keys()))
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if 'baseId' in market else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if 'quoteId' in market else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
markets = self.fetch_markets()
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
return self.set_markets(markets, currencies)
def populate_fees(self):
if not (hasattr(self, 'markets') or hasattr(self, 'currencies')):
return
for currency, data in self.currencies.items(): # try load withdrawal fees from currencies
if 'fee' in data and data['fee'] is not None:
self.fees['funding']['withdraw'][currency] = data['fee']
self.fees['funding']['fee_loaded'] = True
# find a way to populate trading fees from markets
def load_fees(self):
self.load_markets()
self.populate_fees()
if not (self.has['fetchTradingFees'] or self.has['fetchFundingFees']):
return self.fees
fetched_fees = self.fetch_fees()
if fetched_fees['funding']:
self.fees['funding']['fee_loaded'] = True
if fetched_fees['trading']:
self.fees['trading']['fee_loaded'] = True
self.fees = self.deep_extend(self.fees, fetched_fees)
return self.fees
def fetch_markets(self):
return self.to_array(self.markets)
def fetch_fees(self):
trading = {}
funding = {}
try:
trading = self.fetch_trading_fees()
except AuthenticationError:
pass
except AttributeError:
pass
try:
funding = self.fetch_funding_fees()
except AuthenticationError:
pass
except AttributeError:
pass
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.raise_error(NotSupported, details='create_order() not implemented yet')
def cancel_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='cancel_order() not implemented yet')
def fetch_bids_asks(self, symbols=None, params={}):
self.raise_error(NotSupported, details='API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now')
def fetch_tickers(self, symbols=None, params={}):
self.raise_error(NotSupported, details='API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, market=None):
order = self.fetch_order(id)
return order['status']
def purge_cached_orders(self, before):
orders = self.to_array(self.orders)
orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
self.orders = self.index_by(orders, 'id')
return self.orders
def fetch_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order() is not implemented yet')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_orders() is not implemented yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_open_orders() is not implemented yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_closed_orders() is not implemented yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_my_trades() is not implemented yet')
def fetch_order_trades(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order_trades() is not implemented yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return result
    def parse_bid_ask(self, bidask, price_key=0, amount_key=1):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
def parse_balance(self, balance):
currencies = self.omit(balance, 'info').keys()
for account in ['free', 'used', 'total']:
balance[account] = {}
for currency in currencies:
balance[account][currency] = balance[currency][account]
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = self.fetch_trading_limits(symbols)
limits = response['limits']
keys = list(limits.keys())
for i in range(0, len(keys)):
symbol = keys[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], {
'limits': limits[symbol],
})
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs):
result = []
for i in range(0, len(ohlcvs['t'])):
result.append([
ohlcvs['t'][i] * 1000,
ohlcvs['o'][i],
ohlcvs['h'][i],
ohlcvs['l'][i],
ohlcvs['c'][i],
ohlcvs['v'][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs):
result = {
't': [],
'o': [],
'h': [],
'l': [],
'c': [],
'v': [],
}
for i in range(0, len(ohlcvs)):
result['t'].append(int(ohlcvs[i][0] / 1000))
result['o'].append(ohlcvs[i][1])
result['h'].append(ohlcvs[i][2])
result['l'].append(ohlcvs[i][3])
result['c'].append(ohlcvs[i][4])
result['v'].append(ohlcvs[i][5])
return result
def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(high, low, close, volume) = (2, 3, 4, 5)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M)
j = len(ohlcvs)
if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
# moved to a new timeframe -> create a new candle from opening trade
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
])
else:
# still processing the same timeframe -> update opening trade
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
ohlcvs[j - 1][close] = trade['price']
ohlcvs[j - 1][volume] += trade['amount']
return ohlcvs
def parse_timeframe(self, timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' in unit:
scale = 60 * 60 * 24 * 365
elif 'M' in unit:
scale = 60 * 60 * 24 * 30
elif 'w' in unit:
scale = 60 * 60 * 24 * 7
elif 'd' in unit:
scale = 60 * 60 * 24
elif 'h' in unit:
scale = 60 * 60
else:
scale = 60 # 1m by default
return amount * scale
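    # Worked examples, added for clarity: parse_timeframe('1m') == 60, parse_timeframe('15m') == 900,
    # parse_timeframe('4h') == 14400, parse_timeframe('1d') == 86400, parse_timeframe('1w') == 604800.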
def parse_trades(self, trades, market=None, since=None, limit=None):
array = self.to_array(trades)
array = [self.parse_trade(trade, market) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None):
array = self.to_array(orders)
array = [self.parse_order(order, market) for order in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
array = self.to_array(array)
if symbol:
array = [entry for entry in array if entry['symbol'] == symbol]
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_since_limit(self, array, since=None, limit=None):
array = self.to_array(array)
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
self.raise_error(ExchangeError, details='Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
self.raise_error(ExchangeError, details='Does not have currency code ' + str(code))
def find_market(self, string):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(string, basestring):
if string in self.markets_by_id:
return self.markets_by_id[string]
if string in self.markets:
return self.markets[string]
return string
def find_symbol(self, string, market=None):
if market is None:
market = self.find_market(string)
if isinstance(market, dict):
return market['symbol']
return string
def market(self, symbol):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
self.raise_error(ExchangeError, details='No market symbol ' + str(symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
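    # Editorial worked example (values are illustrative, not from the library): for a taker
    # order of 2 units at price 100 on a market with market['taker'] == 0.001, the cost is
    # 2 * 100 = 200 and the returned fee cost is 0.001 * 200 = 0.2 (before precision rounding),
    # denominated in the market's quote currency.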
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, *args):
return self.create_order(symbol, 'limit', *args)
def create_market_order(self, symbol, *args):
return self.create_order(symbol, 'market', *args)
def create_limit_buy_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'buy', *args)
def create_limit_sell_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'sell', *args)
def create_market_buy_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
|
the-stack_0_7891 | import itertools
import collections
import numpy as np
class Solver:
'''
    Approximate solver: selects a subset of M vectors from Y whose sum of distances
    to the subset mean is (approximately) minimal, within relative error epsilon.
'''
def __init__(self, Y, M, epsilon, distance):
'''
Parameters
----------
Y : list<vector>
Finite set of vectors
M : int
            Positive integer less than the size of Y
epsilon : float
Relative error
distance : callable
Distance metric
'''
self.Y = Y
if isinstance(Y[0], int):
self.q = 1
else:
self.q = len(Y[0])
self.M = M
self.epsilon = epsilon
self.distance = distance
def solve(self):
min_obj_fun_val = 1e10
opt_subset = []
for y in self.Y:
ZMyY = self.computeZMvY(y) # M elements of Y closest to y
rMyY = ZMyY[-1][-1] # maximal distance between y and elements in ZMyY
h = self.epsilon*1.0 / (self.q*self.M)**0.5 * rMyY
H = self.M**0.5 * rMyY
            if rMyY == 0:
                # all M nearest points coincide with y, so the objective value is 0
                return [i[0] for i in ZMyY], 0.0
ByhH = self.generateByhH(y,h,H)
for b in ByhH:
ZMbY = self.computeZMvY(b)
subset = [i[0] for i in ZMbY]
obj_fun_val = self.computeObj(subset)
if obj_fun_val < min_obj_fun_val:
min_obj_fun_val = obj_fun_val
opt_subset = subset
return opt_subset, min_obj_fun_val
def generateByhH(self, y, h, H):
if self.q==1:
return np.hstack((
                np.arange(y, -1*H, -1*h), np.arange(y, H, h)  # step down towards -H, mirroring the q > 1 branch below
))
arr = [[] for i in range(self.q)]
for i in range(self.q):
arr[i] = np.hstack((
np.arange(y[i], -1*H, -1*h), np.arange(y[i], H, h)
))
out = []
n = self.q
indices = [0 for i in range(n)]
while (1):
out.append([])
for i in range(n):
out[-1].append(arr[i][indices[i]])
next = n - 1
while (next >= 0 and
(indices[next] + 1 >= len(arr[next]))):
next-=1
if (next < 0):
return out
indices[next] += 1
for i in range(next + 1, n):
indices[i] = 0
return out
def computeObj(self, Y):
Y = np.array(Y)
y_mean = sum(Y)/len(Y)
val = 0
for y in Y:
val += self.distance(y, y_mean)
return val
def computeZMvY(self, y):
dist = collections.OrderedDict()
for v in self.Y:
dist[v] = self.distance(y, v)
dist = sorted(dist.items(), key=lambda kv: kv[1])
return dist[:self.M]
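# Editorial usage sketch (not part of the original module; the vectors, M, epsilon and the
# Euclidean distance below are assumptions for demonstration only):
#
#     import math
#     Y = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (10.0, 10.0)]
#     solver = Solver(Y, M=2, epsilon=0.5, distance=math.dist)
#     subset, value = solver.solve()  # expected to select two of the points clustered near the origin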
|
the-stack_0_7893 | import random
import nltk
from nltk.tokenize.treebank import TreebankWordDetokenizer
import random
import base64
import binascii
import cipheydists
import string
import cipheycore
import cipheydists
import base58
import base62
import re
class encipher:
"""Generates encrypted text. Used for the NN and test_generator"""
def __init__(self): # pragma: no cover
"""Inits the encipher object """
self.text = self.read_text()
self.MAX_SENTENCE_LENGTH = 5
        # nltk.download("punkt")
self.crypto = encipher_crypto()
def read_text(self): # pragma: no cover
f = open("hansard.txt", encoding="ISO-8859-1")
x = f.read()
splits = nltk.tokenize.sent_tokenize(x)
return splits
def getRandomSentence(self, size): # pragma: no cover
return TreebankWordDetokenizer().detokenize(
random.sample(self.text, random.randint(1, size))
)
def getRandomEncryptedSentence(self, size): # pragma: no cover
sents = self.getRandomSentence(size)
sentsEncrypted = self.crypto.randomEncrypt(sents)
return {"PlainText Sentences": sents, "Encrypted Texts": sentsEncrypted}
class encipher_crypto: # pragma: no cover
"""Holds the encryption functions
can randomly select an encryption function use on text
returns:
{"text": t, "plaintext": c, "cipher": p, "suceeds": False}
where suceeds is whether or not the text is really encrypted or falsely decrypted
Uses Cyclic3's module generate psuedo random text"""
def __init__(self): # pragma: no cover
self.methods = [
self.Base64,
self.Ascii,
self.Base16,
self.Base32,
self.Binary,
self.Hex,
self.MorseCode,
self.Reverse,
self.Vigenere,
self.base58_bitcoin,
self.base58_ripple,
self.b62,
]
self.morse_dict = dict(cipheydists.get_charset("morse"))
self.letters = string.ascii_lowercase
self.group = cipheydists.get_charset("english")["lcase"]
# pragma: no cover
def random_key(self, text) -> str: # pragma: no cover
if len(text) < 8:
length = 3
else:
length = 8
return self.random_string(length)
def random_string(self, length) -> str: # pragma: no cover
return "".join(random.sample(self.letters, length))
def randomEncrypt(self, text: str) -> str: # pragma: no cover
"""Randomly encrypts string with an encryption"""
func__use = random.choice(self.methods)
encryptedText = func__use(text)
name = func__use.__name__
return {"PlainText": text, "EncryptedText": encryptedText, "CipherUsed": name}
def Base64(self, text: str) -> str: # pragma: no cover
"""Turns text in base64 using Python libray
args:
text -> text convert
returns:
text -> as base 64"""
return base64.b64encode(bytes(text, "utf-8")).decode("utf-8")
def Caesar(self, s, k): # pragma: no cover
"""Iterates through each letter and constructs the cipher text"""
new_message = ""
facr = k % 26
for c in s:
new_message += self.apply_rotation(c, facr)
return new_message
def apply_rotation(self, c, facr): # pragma: no cover
"""Applies a shift of facr the letter denoted by c"""
if c.isalpha():
lower = ord("A") if c.isupper() else ord("a")
c = chr(lower + ((ord(c) - lower + facr) % 26))
return c
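    # Editorial example (not part of the original module): Caesar("Attack at dawn", 3)
    # shifts every letter by 3 positions, giving "Dwwdfn dw gdzq"; apply_rotation()
    # leaves non-alphabetic characters such as spaces unchanged.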
def Base32(self, text: str) -> str: # pragma: no cover
"""Turns text in base64 using Python libray
args:
text -> text convert
returns:
text -> as base 64"""
return base64.b32encode(bytes(text, "utf-8")).decode("utf-8")
def Base16(self, text: str) -> str: # pragma: no cover
"""Turns text in base64 using Python libray
args:
text -> text convert
returns:
text -> as base 64"""
return base64.b16encode(bytes(text, "utf-8")).decode("utf-8")
def Binary(self, text: str) -> str: # pragma: no cover
return " ".join(format(ord(x), "b") for x in text)
# pragma: no cover
def Ascii(self, text: str) -> str: # pragma: no cover
res = [ord(c) for c in text]
return " ".join([str(x) for x in res])
def Hex(self, text: str) -> str: # pragma: no cover
return binascii.hexlify(text.encode()).decode("utf-8")
    def MorseCode(self, text: str) -> str: # pragma: no cover
morse = []
for i in text:
m = self.morse_dict.get(i.upper())
            if m is None:
m = ""
morse.append(m)
output = morse
# output = " ".join(MORSE_CODE_DICT.get(i.upper()) for i in text)
return " ".join(output)
def Reverse(self, text: str) -> str:
return text[::-1]
def Vigenere(self, plaintext):
key = self.vig_key(plaintext, self.random_key(plaintext))
        return cipheycore.vigenere_encrypt(plaintext, key, self.group)
def vig_key(self, msg, key):
tab = dict()
for counter, i in enumerate(self.group):
tab[self.group[counter]] = counter
real_key = []
for i in key:
real_key.append(tab[i])
return real_key
# vigenere_encrypt(msg, real_key, group)
def base58_bitcoin(self, text: str):
return base58.b58encode(bytes(text, "utf-8")).decode("utf-8")
def base58_ripple(self, text: str):
return base58.b58encode(
bytes(text, "utf-8"), alphabet=base58.RIPPLE_ALPHABET
).decode("utf-8")
def b62(self, text: str):
return base62.decode(str(re.sub(r"[^A-Za-z1-9]+", "", text)))
# obj = encipher()
# print(obj.getRandomEncryptedSentence())
|
the-stack_0_7894 | # Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mathutils
from . import gltf2_blender_export_keys
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.io.exp import gltf2_io_binary_data
from io_scene_gltf2.io.com import gltf2_io_constants
from io_scene_gltf2.blender.exp import gltf2_blender_gather_joints
from io_scene_gltf2.blender.com import gltf2_blender_math
@cached
def gather_skin(blender_object, mesh_object, export_settings):
"""
Gather armatures, bones etc into a glTF2 skin object.
:param blender_object: the object which may contain a skin
:param mesh_object: the mesh object to be deformed
:param export_settings:
:return: a glTF2 skin object
"""
if not __filter_skin(blender_object, export_settings):
return None
return gltf2_io.Skin(
extensions=__gather_extensions(blender_object, export_settings),
extras=__gather_extras(blender_object, export_settings),
inverse_bind_matrices=__gather_inverse_bind_matrices(blender_object, mesh_object, export_settings),
joints=__gather_joints(blender_object, export_settings),
name=__gather_name(blender_object, export_settings),
skeleton=__gather_skeleton(blender_object, export_settings)
)
def __filter_skin(blender_object, export_settings):
if not export_settings[gltf2_blender_export_keys.SKINS]:
return False
if blender_object.type != 'ARMATURE' or len(blender_object.pose.bones) == 0:
return False
return True
def __gather_extensions(blender_object, export_settings):
return None
def __gather_extras(blender_object, export_settings):
return None
def __gather_inverse_bind_matrices(blender_object, mesh_object, export_settings):
inverse_matrices = []
axis_basis_change = mathutils.Matrix.Identity(4)
if export_settings[gltf2_blender_export_keys.YUP]:
axis_basis_change = mathutils.Matrix(
((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))
# # artificial torso, as needed by glTF
# inverse_bind_matrix = blender_object.matrix_world.inverted() * axis_basis_change.inverted()
# for column in range(0, 4):
# for row in range(0, 4):
# inverse_matrices.append(inverse_bind_matrix[row][column])
#
for blender_bone in blender_object.pose.bones:
matrix_world = gltf2_blender_math.multiply(blender_object.matrix_world, mesh_object.matrix_world.inverted())
inverse_bind_matrix = gltf2_blender_math.multiply(
axis_basis_change,
gltf2_blender_math.multiply(
matrix_world,
blender_bone.bone.matrix_local
)
).inverted()
for column in range(0, 4):
for row in range(0, 4):
inverse_matrices.append(inverse_bind_matrix[row][column])
binary_data = gltf2_io_binary_data.BinaryData.from_list(inverse_matrices, gltf2_io_constants.ComponentType.Float)
return gltf2_io.Accessor(
buffer_view=binary_data,
byte_offset=None,
component_type=gltf2_io_constants.ComponentType.Float,
count=len(inverse_matrices) // gltf2_io_constants.DataType.num_elements(gltf2_io_constants.DataType.Mat4),
extensions=None,
extras=None,
max=None,
min=None,
name=None,
normalized=None,
sparse=None,
type=gltf2_io_constants.DataType.Mat4
)
def __gather_joints(blender_object, export_settings):
# # the skeletal hierarchy groups below a 'root' joint
# # TODO: add transform?
# torso = gltf2_io.Node(
# camera=None,
# children=[],
# extensions={},
# extras=None,
# matrix=[],
# mesh=None,
# name="Skeleton_" + blender_object.name,
# rotation=None,
# scale=None,
# skin=None,
# translation=None,
# weights=None
# )
root_joints = []
# build the hierarchy of nodes out of the bones
for blender_bone in blender_object.pose.bones:
if not blender_bone.parent:
root_joints.append(gltf2_blender_gather_joints.gather_joint(blender_bone, export_settings))
# joints is a flat list containing all nodes belonging to the skin
joints = []
def __collect_joints(node):
joints.append(node)
for child in node.children:
__collect_joints(child)
for joint in root_joints:
__collect_joints(joint)
return joints
def __gather_name(blender_object, export_settings):
return blender_object.name
def __gather_skeleton(blender_object, export_settings):
# In the future support the result of https://github.com/KhronosGroup/glTF/pull/1195
return None # gltf2_blender_gather_nodes.gather_node(blender_object, export_settings)
|
the-stack_0_7896 | import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class CTCLabelConverter(object):
""" Convert between text-label and text-index """
def __init__(self, character):
# character (str): set of the possible characters.
dict_character = list(character)
self.dict = {}
for i, char in enumerate(dict_character):
# NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss
self.dict[char] = i + 1
self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)
def encode(self, text, batch_max_length=60):
"""convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
            batch_max_length: max length of text label in the batch. 60 by default
output:
text: text index for CTCLoss. [batch_size, batch_max_length]
length: length of each text. [batch_size]
"""
length = [len(s) for s in text]
# The index used for padding (=0) would not affect the CTC loss calculation.
batch_text = torch.LongTensor(len(text), batch_max_length).fill_(0)
        for i, t in enumerate(text):
            # avoid shadowing the input list: work on a per-sample copy
            chars = list(t)
            indices = [self.dict[char] for char in chars]
            batch_text[i][:len(indices)] = torch.LongTensor(indices)
return (batch_text.to(device), torch.IntTensor(length).to(device))
def decode(self, text_index, length):
""" convert text-index into text-label. """
texts = []
for index, l in enumerate(length):
t = text_index[index, :]
char_list = []
for i in range(l):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.
char_list.append(self.character[t[i]])
text = ''.join(char_list)
texts.append(text)
return texts
class CTCLabelConverterForBaiduWarpctc(object):
""" Convert between text-label and text-index for baidu warpctc """
def __init__(self, character):
# character (str): set of the possible characters.
dict_character = list(character)
self.dict = {}
for i, char in enumerate(dict_character):
# NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss
self.dict[char] = i + 1
self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)
def encode(self, text, batch_max_length=25):
"""convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
output:
text: concatenated text index for CTCLoss.
[sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
length: length of each text. [batch_size]
"""
length = [len(s) for s in text]
text = ''.join(text)
text = [self.dict[char] for char in text]
return (torch.IntTensor(text), torch.IntTensor(length))
def decode(self, text_index, length):
""" convert text-index into text-label. """
texts = []
index = 0
for l in length:
t = text_index[index:index + l]
char_list = []
for i in range(l):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.
char_list.append(self.character[t[i]])
text = ''.join(char_list)
texts.append(text)
index += l
return texts
class AttnLabelConverter(object):
""" Convert between text-label and text-index """
def __init__(self, character):
# character (str): set of the possible characters.
# [GO] for the start token of the attention decoder. [s] for end-of-sentence token.
list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]']
list_character = list(character)
self.character = list_token + list_character
self.dict = {}
for i, char in enumerate(self.character):
# print(i, char)
self.dict[char] = i
def encode(self, text, batch_max_length=25):
""" convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
batch_max_length: max length of text label in the batch. 25 by default
output:
text : the input of attention decoder. [batch_size x (max_length+2)] +1 for [GO] token and +1 for [s] token.
text[:, 0] is [GO] token and text is padded with [GO] token after [s] token.
length : the length of output of attention decoder, which count [s] token also. [3, 7, ....] [batch_size]
"""
length = [len(s) + 1 for s in text] # +1 for [s] at end of sentence.
# batch_max_length = max(length) # this is not allowed for multi-gpu setting
batch_max_length += 1
# additional +1 for [GO] at first step. batch_text is padded with [GO] token after [s] token.
batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0)
        for i, t in enumerate(text):
            # avoid shadowing the input list: work on a per-sample copy
            chars = list(t)
            chars.append('[s]')
            indices = [self.dict[char] for char in chars]
            batch_text[i][1:1 + len(indices)] = torch.LongTensor(indices)  # batch_text[:, 0] = [GO] token
return (batch_text.to(device), torch.IntTensor(length).to(device))
def decode(self, text_index, length):
""" convert text-index into text-label. """
texts = []
for index, l in enumerate(length):
text = ''.join([self.character[i] for i in text_index[index, :]])
texts.append(text)
return texts
class Averager(object):
"""Compute average for torch.Tensor, used for loss average."""
def __init__(self):
self.reset()
def add(self, v):
count = v.data.numel()
v = v.data.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
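# Editorial usage sketch (not part of the original module; the character set and labels below
# are assumptions for demonstration only):
#
#     converter = CTCLabelConverter('abcdefghijklmnopqrstuvwxyz')
#     text, length = converter.encode(['cat', 'dog'], batch_max_length=60)
#     # text has shape [2, 60] holding per-character indices (index 0 is reserved for the CTC blank);
#     # decode() later collapses repeated characters and blanks back into strings.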
|
the-stack_0_7899 | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[acm.python.remove_tags_for_certificate.complete]
import boto3
# Create ACM client
acm = boto3.client('acm')
# Remove tag(s) from the specified certificate.
response = acm.remove_tags_from_certificate(
CertificateArn='arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012',
Tags=[
{
'Key': 'TagKey1',
'Value': 'TagValue1'
},
{
'Key': 'TagKey2',
'Value': 'TagValue2'
},
]
)
print(response)
# snippet-end:[acm.python.remove_tags_for_certificate.complete]
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[remove_tags_from_certificate.py demonstrates how to remove one or more tags from an AWS Certificate Manager certificate. ]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[AWS Certificate Manager]
# snippet-service:[acm]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-12-26]
# snippet-sourceauthor:[walkerk1980]
|
the-stack_0_7900 | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateVpcResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vpc': 'Vpc'
}
attribute_map = {
'vpc': 'vpc'
}
def __init__(self, vpc=None):
"""CreateVpcResponse - a model defined in huaweicloud sdk"""
super(CreateVpcResponse, self).__init__()
self._vpc = None
self.discriminator = None
if vpc is not None:
self.vpc = vpc
@property
def vpc(self):
"""Gets the vpc of this CreateVpcResponse.
:return: The vpc of this CreateVpcResponse.
:rtype: Vpc
"""
return self._vpc
@vpc.setter
def vpc(self, vpc):
"""Sets the vpc of this CreateVpcResponse.
:param vpc: The vpc of this CreateVpcResponse.
:type: Vpc
"""
self._vpc = vpc
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateVpcResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
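# Editorial usage sketch (not part of the original SDK file; `my_vpc` is a placeholder for a
# Vpc model instance):
#
#     response = CreateVpcResponse(vpc=my_vpc)
#     print(response.to_dict())  # nested models are serialized recursively via their own to_dict()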
|
the-stack_0_7902 | # Main script for inducing LTL contrastive explanations from input set of traces
#
# ARGUMENTS:
# -d [input.json] : json file containing the required input (see README)
#
# OUTPUT:
# output.json : json output containing top-10 induced results
#
#
# Written by Joseph Kim
import argparse
from itertools import permutations
import json
from operator import itemgetter
import time
# Local imports
import ltlfunc
import interestingness
#############################################################
def run_ltl_inference(data, output_filepath=None):
# Vocabulary (lowercased, unique)
vocab = [s.lower() for s in data['vocab']]
vocab = list(set(vocab))
# Traces - organize both pos and neg clusters
cluster_A = []
for i, trace in enumerate(data['traces_pos']):
trace = [[v.lower() for v in s] for s in trace] # lowercase
temp = dict()
temp['name'] = 'a' + str(i) # Create a name id
temp['trace'] = tuple(trace) # Use tuple
cluster_A.append(temp)
cluster_B = []
for i, trace in enumerate(data['traces_neg']):
trace = [[v.lower() for v in s] for s in trace]
temp = dict()
temp['name'] = 'b' + str(i)
temp['trace'] = tuple(trace)
cluster_B.append(temp)
# X = (cluster_A, cluster_B) # Evidence
# Parameters
inference = data['params']['inference']
iterations = data['params']['iterations']
conjoin = data['params']['conjoin']
ltl_sample_cnt = data['params'].get('ltl_sample_cnt', 10)
run_reversed_inference = data['params'].get('reversed_inference', True)
verbose = data['params'].get('verbose', False)
# Default inference parameters
params = dict()
params['alpha'] = data['params'].get('alpha', 0.01)
params['beta'] = data['params'].get('beta', 0.01)
params['lambda'] = data['params'].get('lambda', 0.60)
params['epsilon'] = data['params'].get('epsilon', 0.2)
# Print statistics
print('\nSize of vocabulary = %s' % len(vocab))
# Get LTL templates
if 'probs_templates' in data:
probs_templates = data['probs_templates']
else:
probs_templates = None
templates = ltlfunc.getLTLtemplates(user_probs=probs_templates)
# Get permutation tables
perm_table = dict()
perm_table[1] = [list(i) for i in permutations(vocab, 1)]
perm_table[2] = [list(i) for i in permutations(vocab, 2)]
ltl_rundata = [
{'X': (cluster_A, cluster_B), 'reversed': False}
]
if run_reversed_inference:
ltl_rundata.append(
{'X': (cluster_B, cluster_A), 'reversed': True}
)
# Preparing json output
output_inference = list()
for data_X in ltl_rundata:
X = data_X['X']
reversed = data_X['reversed']
cluster_A_inf, cluster_B_inf = X
output = list()
print('*' * 50)
print('Running LTL inference with reversed mode = %s' % str(reversed))
print('Number of positive traces = %s' % len(cluster_A_inf))
print('Number of negative traces = %s' % len(cluster_B_inf))
#######################################################
# RUN INFERENCE
#
# 1) Metropolis-Hastings Sampling
if inference == 'mh':
# Initial guess
ltl_initial = ltlfunc.samplePrior(templates, vocab, perm_table, params['lambda'], conjoin)
print('\n-Running MH..')
print('-Initial guess = ' + ltl_initial['str_friendly'])
st = time.time()
# Preparation
burn_in_mh = 500
num_iter_mh = iterations + burn_in_mh
memory = dict()
cache = dict()
# Run MH Sampler
sampler = ltlfunc.MH_sampler(ltl_initial, X, vocab, templates, params, perm_table, memory, cache, conjoin)
sampler.runMH(num_iter_mh, burn_in_mh, verbose=verbose)
memory = sampler.memory
# Print stats
print('-MH runtime = ' + format(time.time() - st, '.3f'))
print(
'-MH number of accepted samples = %s / %s' % (sampler.num_accepts, len(sampler.accept_reject_history)))
print('-MH number of perfect valid samples = %s / %s' %
(int(sum([j for j in sampler.cscore_history if j == 1])), num_iter_mh - burn_in_mh))
# Rank posterior and print top-10 samples
print('\n-MH Top-{} Specs'.format(ltl_sample_cnt))
ranked = sorted(sampler.posterior_dict, key=sampler.posterior_dict.get, reverse=True)
i = 0
for r in ranked:
cscore = sampler.cscore_dict[r]
cscore1, cscore2 = memory[r]
cscore2 = 1 - cscore2
ltl_meaning = sampler.ltl_str_meanings[r]['meaning']
ltl = sampler.ltl_log[r]
ltl_name = ltl['name']
ltl_props = ltl['props_list'] if conjoin else [ltl['props']]
# Positive set support
positive_support = interestingness.compute_support(cluster_A_inf, ltl_name, ltl_props, vocab)
if positive_support == 0:
continue
i += 1
print('-' * 30)
print(r, end='')
print(' accuracy = %s' % cscore)
print(' (individual scores): cscore1: %f, cscore2: %f' % (cscore1, cscore2))
print(' Interestingness (support) : %f' % positive_support)
print(' Meaning: %s' % ltl_meaning)
if i >= ltl_sample_cnt:
break
# Adding to output
temp = dict()
temp['formula'] = r
temp['meaning'] = sampler.ltl_str_meanings[r]
temp['accuracy'] = cscore
temp['cscores_individual'] = (cscore1, cscore2)
temp['interestingness'] = positive_support
temp['reversed'] = reversed
output.append(temp)
# 2) Brute force search (delimited enumeration)
elif inference == 'brute':
print('\n-Running Brute Force Search')
st = time.time()
if conjoin:
# Brute force random sampler (b/c pre-enumerating everything is intractable)
print('-Collecting delimited set')
ltl_full = []
history = []
num_brute_force = iterations
# Collection loop
while len(history) < num_brute_force:
s = ltlfunc.samplePrior(templates, vocab, perm_table, conjoin=conjoin, doRandom=True)
ltl_str = s['str_friendly']
if ltl_str not in history:
ltl_full.append(s)
history.append(ltl_str)
print('-All delimited set collected. Time spent = ' + format(time.time() - st, '.3f'))
else:
# If not using conjunction, then obtain a full brute force list
ltl_full = []
for template in templates:
results = ltlfunc.instantiateLTLvariablePermutate(template, vocab)
ltl_full += results
print('-Number of total possible LTL specs (no conjunctions): %s' % len(ltl_full))
# Exact inference on collection
memory = dict()
cache = dict()
for ltl_instance in ltl_full:
log_posterior, cscore, memory = ltlfunc.computePosterior(ltl_instance, X, vocab, params, memory, cache,
conjoin)
ltl_instance['posterior'] = log_posterior
ltl_instance['cscore'] = cscore
ltl_instance['cscores_individual'] = memory[ltl_instance['str_friendly']]
print('-Brute force collection and posterior collection complete. Time spent = ' + format(time.time() - st,
'.3f'))
# Rank posterior and print top-10 samples
print('\n-Enumeration Top-{} Specs'.format(ltl_sample_cnt))
ranked = sorted(ltl_full, key=itemgetter('posterior'), reverse=True)
i = 0
for r in ranked:
cscore1, cscore2 = r['cscores_individual']
cscore2 = 1 - cscore2
ltl_name, ltl_props = r['name'], r['props_list']
# Positive set support
positive_support = interestingness.compute_support(cluster_A_inf, ltl_name, ltl_props, vocab)
if positive_support == 0:
continue
i += 1
print('-' * 30)
print(r['str_friendly'], end='')
print(' accuracy = %s' % r['cscore'])
print(' (individual scores): cscore1: %f, cscore2: %f' % (cscore1, cscore2))
print(' Interestingness (support) : %f' % positive_support)
print(' Meaning: %s' % r['str_meaning'])
if i >= ltl_sample_cnt:
break
# Adding to output
temp = dict()
temp['formula'] = r['str_friendly']
temp['meaning'] = r['str_meaning']
temp['accuracy'] = r['cscore']
temp['cscores_individual'] = (cscore1, cscore2)
temp['interestingness'] = positive_support
temp['reversed'] = reversed
output.append(temp)
else:
raise AttributeError("Wrong inference mode specified.")
#######################################################
# END OF INFERENCE
#######################################################
# Append local ltl order inference output to global output list
output_inference.extend(output)
output_inference = sorted(output_inference, key=lambda x: x['accuracy'], reverse=True)[:ltl_sample_cnt]
# Dump output
if output_filepath is not None:
with open(output_filepath, 'w') as f:
json.dump(output_inference, f, indent=4)
return output_inference
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Main script for LTL contrastive explanation induction')
parser.add_argument('-d', type=str, required=True, help='input JSON file')
parser.add_argument('-o', type=str, required=False, default='output.json', help='output JSON file')
args = parser.parse_args()
# Load input
with open(args.d) as f:
data = json.load(f)
run_ltl_inference(data, args.o)
|
the-stack_0_7904 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 11:08:40 2021
@author: leonl42
Unit test for testing the lowercase conversion in the preprocessing pipeline
"""
import unittest
import pandas as pd
from scripts.preprocessing.lower import Lower
from scripts.util import COLUMN_TWEET
class LowerTest(unittest.TestCase):
""""Test the lowercase preprocessing step"""
def setUp(self):
self._df = pd.DataFrame()
# make one random string and copy it, but replace every upper letter with the corresponding lower one in this copy
_string_to_test = "I WHISH thath this ##2#E220md STRING becomes LoWerCase ÄÜÖÄ"
self._expected_result = "i whish thath this ##2#e220md string becomes lowercase äüöä"
self._df[COLUMN_TWEET] = [_string_to_test]
self._lower = Lower()
def test_lowercase(self):
"""Test lowercase conversion on a predefined string"""
lowercase_string = self._lower.fit_transform(self._df)
self.assertEqual(lowercase_string[COLUMN_TWEET][0], self._expected_result)
if __name__ == "__main__":
unittest.main() |
the-stack_0_7905 | import os
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
class PlotUtils(object):
@staticmethod
def plot_region(df, x0, x1, y0, y1, text=True):
"""
Plot the region of the mapping space bounded by the given x and y limits.
"""
FS = (10, 8)
fig, ax = plt.subplots(figsize=FS)
pts = df[
(df.x >= x0) & (df.x <= x1)
& (df.y >= y0) & (df.y <= y1)
]
ax.scatter(pts.x, pts.y, alpha=.6)
ax.set_xlim(x0, x1)
ax.set_ylim(y0, y1)
if text:
texts = []
for label, x, y in zip(pts.short_file.values, pts.x.values, pts.y.values):
t = ax.annotate(label, xy=(x, y))
texts.append(t)
return ax
@staticmethod
def plot_region_around(df, title, margin=5, **kwargs):
"""
Plot the region of the mapping space in the neighbourhood of the image with
the given name. The margin parameter controls the size of the neighbourhood around the image.
"""
xmargin = ymargin = margin
match = df[df.short_file == title]
assert len(match) == 1
row = match.iloc[0]
return PlotUtils.plot_region(df, row.x - xmargin, row.x + xmargin, row.y - ymargin, row.y + ymargin, **kwargs)
@staticmethod
def plot_images_cluster(df, embs, output_path, width = 4000, height = 3000, max_dim = 100):
"""
Plot the images cluster.
:param df:
:param embs: tsne embeddings, an array of unnormalized 2d points.
:return:
"""
# The variable tsne contains an array of unnormalized 2d points, corresponding to the embedding.
# We normalize the embedding so that lies entirely in the range (0,1).
tx, ty = embs[:, 0], embs[:, 1]
tx = (tx - np.min(tx)) / (np.max(tx) - np.min(tx))
ty = (ty - np.min(ty)) / (np.max(ty) - np.min(ty))
full_image = Image.new('RGBA', (width, height))
# Finally, we will compose a new RGB image where the set of images have been drawn according to the t-SNE
# results. Adjust width and height to set the size in pixels of the full image, and set max_dim to
# the pixel size (on the largest size) to scale images to.
for img, x, y in zip(df['file'].values, tx, ty):
tile = Image.open(img)
rs = max(1, tile.width / max_dim, tile.height / max_dim)
tile = tile.resize((int(tile.width / rs), int(tile.height / rs)), Image.ANTIALIAS)
full_image.paste(tile, (int((width - max_dim) * x), int((height - max_dim) * y)), mask=tile.convert('RGBA'))
full_image.show()
full_image.save(os.path.join(output_path,"cluster.png"),"PNG")
resized_image = full_image.resize((int(width / 5), int(height / 5)))
resized_image.save(os.path.join(output_path, "resized_cluster.png"), "PNG")
|
the-stack_0_7906 | import numpy as np
import math
from keras.initializers import VarianceScaling
from keras.models import model_from_json
from keras.models import Sequential, Model
#from keras.engine.training import collect_trainable_weights
from keras.layers import Dense, Flatten, Input, Add, merge, Lambda
from keras.optimizers import Adam
import tensorflow as tf
import keras.backend as K
HIDDEN1_UNITS = 300
HIDDEN2_UNITS = 600
class ActorNetwork(object):
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
self.sess = sess
self.BATCH_SIZE = BATCH_SIZE
self.TAU = TAU
self.LEARNING_RATE = LEARNING_RATE
K.set_session(sess)
#Now create the model
self.model , self.weights, self.state = self.create_actor_network(state_size, action_size)
self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
grads = zip(self.params_grad, self.weights)
self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
self.sess.run(tf.initialize_all_variables())
def train(self, states, action_grads):
self.sess.run(self.optimize, feed_dict={
self.state: states,
self.action_gradient: action_grads
})
def target_train(self):
actor_weights = self.model.get_weights()
actor_target_weights = self.target_model.get_weights()
        for i in range(len(actor_weights)):
actor_target_weights[i] = self.TAU * actor_weights[i] + (1 - self.TAU)* actor_target_weights[i]
self.target_model.set_weights(actor_target_weights)
def create_actor_network(self, state_size,action_dim):
print("Now we build the model")
S = Input(shape=[state_size])
h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
action_list = []
for i in range(action_dim):
action = Dense(1,activation='sigmoid',init=lambda shape: VarianceScaling(scale=1e-4)(shape))(h1)
action_list.append(action)
#V = Add()(action_list) # mode='concat'
V = merge(action_list,mode='concat')
model = Model(output=V, input=S)
return model, model.trainable_weights, S
|
the-stack_0_7907 | from __future__ import absolute_import
import socket
from pickle import loads, dumps
from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.tests.case import (
AppCase, Mock, mock_module, depends_on_current_app,
)
class Object(object):
pass
def install_exceptions(mod):
# py3k: cannot catch exceptions not ineheriting from BaseException.
class NotFoundException(Exception):
pass
class TException(Exception):
pass
class InvalidRequestException(Exception):
pass
class UnavailableException(Exception):
pass
class TimedOutException(Exception):
pass
class AllServersUnavailable(Exception):
pass
mod.NotFoundException = NotFoundException
mod.TException = TException
mod.InvalidRequestException = InvalidRequestException
mod.TimedOutException = TimedOutException
mod.UnavailableException = UnavailableException
mod.AllServersUnavailable = AllServersUnavailable
class test_CassandraBackend(AppCase):
def setup(self):
self.app.conf.update(
CASSANDRA_SERVERS=['example.com'],
CASSANDRA_KEYSPACE='keyspace',
CASSANDRA_COLUMN_FAMILY='columns',
)
def test_init_no_pycassa(self):
with mock_module('pycassa'):
from celery.backends import cassandra as mod
prev, mod.pycassa = mod.pycassa, None
try:
with self.assertRaises(ImproperlyConfigured):
mod.CassandraBackend(app=self.app)
finally:
mod.pycassa = prev
def test_init_with_and_without_LOCAL_QUROM(self):
with mock_module('pycassa'):
from celery.backends import cassandra as mod
mod.pycassa = Mock()
install_exceptions(mod.pycassa)
cons = mod.pycassa.ConsistencyLevel = Object()
cons.LOCAL_QUORUM = 'foo'
self.app.conf.CASSANDRA_READ_CONSISTENCY = 'LOCAL_FOO'
self.app.conf.CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_FOO'
mod.CassandraBackend(app=self.app)
cons.LOCAL_FOO = 'bar'
mod.CassandraBackend(app=self.app)
# no servers raises ImproperlyConfigured
with self.assertRaises(ImproperlyConfigured):
self.app.conf.CASSANDRA_SERVERS = None
mod.CassandraBackend(
app=self.app, keyspace='b', column_family='c',
)
@depends_on_current_app
def test_reduce(self):
with mock_module('pycassa'):
from celery.backends.cassandra import CassandraBackend
self.assertTrue(loads(dumps(CassandraBackend(app=self.app))))
def test_get_task_meta_for(self):
with mock_module('pycassa'):
from celery.backends import cassandra as mod
mod.pycassa = Mock()
install_exceptions(mod.pycassa)
mod.Thrift = Mock()
install_exceptions(mod.Thrift)
x = mod.CassandraBackend(app=self.app)
Get_Column = x._get_column_family = Mock()
get_column = Get_Column.return_value = Mock()
get = get_column.get
META = get.return_value = {
'task_id': 'task_id',
'status': states.SUCCESS,
'result': '1',
'date_done': 'date',
'traceback': '',
'children': None,
}
x.decode = Mock()
x.detailed_mode = False
meta = x._get_task_meta_for('task_id')
self.assertEqual(meta['status'], states.SUCCESS)
x.detailed_mode = True
row = get.return_value = Mock()
row.values.return_value = [Mock()]
x.decode.return_value = META
meta = x._get_task_meta_for('task_id')
self.assertEqual(meta['status'], states.SUCCESS)
x.decode.return_value = Mock()
x.detailed_mode = False
get.side_effect = KeyError()
meta = x._get_task_meta_for('task_id')
self.assertEqual(meta['status'], states.PENDING)
calls = [0]
end = [10]
def work_eventually(*arg):
try:
if calls[0] > end[0]:
return META
raise socket.error()
finally:
calls[0] += 1
get.side_effect = work_eventually
x._retry_timeout = 10
x._retry_wait = 0.01
meta = x._get_task_meta_for('task')
self.assertEqual(meta['status'], states.SUCCESS)
x._retry_timeout = 0.1
calls[0], end[0] = 0, 100
with self.assertRaises(socket.error):
x._get_task_meta_for('task')
def test_store_result(self):
with mock_module('pycassa'):
from celery.backends import cassandra as mod
mod.pycassa = Mock()
install_exceptions(mod.pycassa)
mod.Thrift = Mock()
install_exceptions(mod.Thrift)
x = mod.CassandraBackend(app=self.app)
Get_Column = x._get_column_family = Mock()
cf = Get_Column.return_value = Mock()
x.detailed_mode = False
x._store_result('task_id', 'result', states.SUCCESS)
self.assertTrue(cf.insert.called)
cf.insert.reset()
x.detailed_mode = True
x._store_result('task_id', 'result', states.SUCCESS)
self.assertTrue(cf.insert.called)
def test_process_cleanup(self):
with mock_module('pycassa'):
from celery.backends import cassandra as mod
x = mod.CassandraBackend(app=self.app)
x._column_family = None
x.process_cleanup()
x._column_family = True
x.process_cleanup()
self.assertIsNone(x._column_family)
def test_get_column_family(self):
with mock_module('pycassa'):
from celery.backends import cassandra as mod
mod.pycassa = Mock()
install_exceptions(mod.pycassa)
x = mod.CassandraBackend(app=self.app)
self.assertTrue(x._get_column_family())
self.assertIsNotNone(x._column_family)
self.assertIs(x._get_column_family(), x._column_family)
|
the-stack_0_7908 | """Automatic Domain Randomization (ADR) algorithm
Introduced in:
Akkaya, Ilge, et al. "Solving rubik's cube with a robot hand."
arXiv preprint arXiv:1910.07113 (2019).
"""
import random
from inspect import signature
from typing import Any, AnyStr, Union, Sequence, Optional
import numpy as np
from collections import OrderedDict
from simmod.algorithms.udr import UniformDomainRandomization
from simmod.modification.base_modifier import BaseModifier
from simmod.common.parametrization import Parametrization
from simmod.common.parametrization import Execution
EXECUTION_POINTS = Union[Execution]
class AutomaticDomainRandomization(UniformDomainRandomization):
    def __init__(self, *modifiers: BaseModifier,
                 random_state: Optional[np.random.Generator] = None,
                 buffer_threshold, performance_thresholds: Sequence,
                 step_size, **kwargs: Any) -> None:
        if len(performance_thresholds) != 2 or performance_thresholds[0] > \
performance_thresholds[1]:
raise ValueError("'performance_thresholds' should be Tuple "
"containing two values whereas the first "
"corresponds to the lower threshold t_L and the "
"second to the upper threshold t_H (t_L < t_H)")
if random_state is None:
self.random_state = np.random.default_rng()
elif isinstance(random_state, int):
# random_state assumed to be an int
self.random_state = np.random.RandomState(random_state)
else:
self.random_state = random_state
super().__init__(*modifiers, random_state=random_state, **kwargs)
        # keep the per-parameter boundary-performance buffers on the instance for later use
        self.buffer = OrderedDict()
        for modifier in self.modifiers:
            self.buffer[modifier] = OrderedDict()
            for instrumentation in modifier.instrumentation:
                self.buffer[modifier][instrumentation] = ([], [])
def _bound_value(self, modifier: BaseModifier, instrumentation: Parametrization,
bound_low: bool):
a, b = instrumentation.parameter_values
object_name = instrumentation.object_name
setter_func = modifier.standard_setters[instrumentation.setter]
if setter_func.__defaults__ is not None: # in case there are no kwargs
n_kwargs = len(setter_func.__defaults__)
else:
n_kwargs = 0
sig = signature(setter_func)
n_params = len(
sig.parameters) - n_kwargs - 1 # Exclude name & non-positional arguments
# TODO: Randomize non-positional arguments
new_values = instrumentation.sample(n_params)
if bound_low:
new_values[0] = a
else:
new_values[1] = b
instrumentation.update(new_values)
return setter_func(object_name, *new_values)
def adapt_boundaries(self, instrumentation: Parametrization,
step_size: float, select_low: bool):
pass
def entropy(self):
n = 0
entropy = 0
for modifier in self.modifiers:
for instrumentation in modifier.instrumentation:
entropy += instrumentation.entropy
n += 1
assert n != 0
return entropy / n
def step(self, execution: EXECUTION_POINTS = 'RESET', **kwargs) -> None:
mod = random.choice(self.modifiers)
bounded_param = random.choice(mod.instrumentation)
x = self.random_state.uniform()
select_low = (x < 0.5)
for modifier in self.modifiers:
for instrumentation in modifier.instrumentation:
if instrumentation is bounded_param:
self._bound_value(modifier, instrumentation, select_low)
else:
self._randomize_object(modifier, instrumentation)
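# Editorial note (not part of the original module): step() fixes exactly one randomly chosen
# parameter at one of its current boundaries (the lower bound when x < 0.5, the upper bound
# otherwise) while every other parameter is randomized as usual. In ADR, performance measured on
# such boundary episodes is what adapt_boundaries() is intended to compare against the lower/upper
# performance_thresholds, widening the boundary by step_size when performance is high enough and
# narrowing it when performance drops below the lower threshold.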
|
the-stack_0_7910 | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import test_utils
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_stacks
from tensorflow_federated.python.core.impl.executors import executor_test_utils
def _create_tff_parallel_clients_with_dataset_reduce():
@tf.function
def reduce_fn(x, y):
return x + y
@tf.function
def dataset_reduce_fn(ds, initial_val):
return ds.reduce(initial_val, reduce_fn)
@computations.tf_computation(computation_types.SequenceType(tf.int64))
def dataset_reduce_fn_wrapper(ds):
initial_val = tf.Variable(np.int64(1.0))
return dataset_reduce_fn(ds, initial_val)
@computations.federated_computation(
computation_types.at_clients(computation_types.SequenceType(tf.int64)))
def parallel_client_run(client_datasets):
return intrinsics.federated_map(dataset_reduce_fn_wrapper, client_datasets)
return parallel_client_run
def _create_tff_parallel_clients_with_iter_dataset():
@tf.function
def reduce_fn(x, y):
return x + y
@tf.function
def dataset_reduce_fn(ds, initial_val):
for batch in iter(ds):
initial_val = reduce_fn(initial_val, batch)
return initial_val
@computations.tf_computation(computation_types.SequenceType(tf.int64))
def dataset_reduce_fn_wrapper(ds):
initial_val = tf.Variable(np.int64(1.0))
return dataset_reduce_fn(ds, initial_val)
@computations.federated_computation(
computation_types.at_clients(computation_types.SequenceType(tf.int64)))
def parallel_client_run(client_datasets):
return intrinsics.federated_map(dataset_reduce_fn_wrapper, client_datasets)
return parallel_client_run
class MultiGPUTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
test_utils.create_logical_multi_gpus()
def _create_logical_multi_gpus(self):
# Multiple logical GPU devices will be created for tests in this module.
    # Only call this function once, as logical devices have to be created before
    # being listed in each individual test.
gpu_devices = tf.config.list_physical_devices('GPU')
if not gpu_devices:
# TODO(b/168138878): switch to raise and move out of MultiGPUTest
self.skipTest('Skip GPU tests when no GPU is provided')
if len(gpu_devices) == 1:
tf.config.set_logical_device_configuration(gpu_devices[0], [
tf.config.LogicalDeviceConfiguration(memory_limit=128),
tf.config.LogicalDeviceConfiguration(memory_limit=128)
])
@parameterized.named_parameters(
('server_on_cpu', 'CPU'),
('server_on_gpu', 'GPU'),
)
def test_create_executor_with_client_mgpu(self, tf_device):
tf_devices = tf.config.list_logical_devices(tf_device)
server_tf_device = None if not tf_devices else tf_devices[0]
gpu_devices = tf.config.list_logical_devices('GPU')
unplaced_factory = executor_stacks.UnplacedExecutorFactory(
use_caching=True,
server_device=server_tf_device,
client_devices=gpu_devices)
unplaced_executor = unplaced_factory.create_executor()
self.assertIsInstance(unplaced_executor, executor_base.Executor)
@parameterized.named_parameters(
('server_on_cpu', 'CPU'),
('server_on_gpu', 'GPU'),
)
def test_local_executor_multi_gpus_iter_dataset(self, tf_device):
tf_devices = tf.config.list_logical_devices(tf_device)
server_tf_device = None if not tf_devices else tf_devices[0]
gpu_devices = tf.config.list_logical_devices('GPU')
local_executor = executor_stacks.local_executor_factory(
server_tf_device=server_tf_device, client_tf_devices=gpu_devices)
with executor_test_utils.install_executor(local_executor):
parallel_client_run = _create_tff_parallel_clients_with_iter_dataset()
client_data = [
tf.data.Dataset.range(10),
tf.data.Dataset.range(10).map(lambda x: x + 1)
]
client_results = parallel_client_run(client_data)
self.assertEqual(client_results, [np.int64(46), np.int64(56)])
@parameterized.named_parameters(
('server_on_cpu', 'CPU'),
('server_on_gpu', 'GPU'),
)
def test_local_executor_multi_gpus_dataset_reduce(self, tf_device):
tf_devices = tf.config.list_logical_devices(tf_device)
server_tf_device = None if not tf_devices else tf_devices[0]
gpu_devices = tf.config.list_logical_devices('GPU')
local_executor = executor_stacks.local_executor_factory(
server_tf_device=server_tf_device, client_tf_devices=gpu_devices)
with executor_test_utils.install_executor(local_executor):
parallel_client_run = _create_tff_parallel_clients_with_dataset_reduce()
client_data = [
tf.data.Dataset.range(10),
tf.data.Dataset.range(10).map(lambda x: x + 1)
]
# TODO(b/159180073): merge this one into iter dataset test when the
# dataset reduce function can be correctly used for GPU device.
with self.assertRaisesRegex(
ValueError,
'Detected dataset reduce op in multi-GPU TFF simulation.*'):
parallel_client_run(client_data)
if __name__ == '__main__':
absltest.main()
|
the-stack_0_7912 | #!/usr/bin/env python3
import os
import sys
import json
from argparse import ArgumentParser
from source.fmp import ProfileFMP
from source.create_sheet import create_excel
data_dir = os.path.join(os.path.dirname(__file__), 'data')
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
def fetch_data_by_symbol(symbol):
fmp_company = ProfileFMP(symbol)
return {
'symbol': symbol,
'profile': fmp_company.profile,
'rating': fmp_company.rating,
'income': fmp_company.income,
}
def load(symbol):
company = fetch_data_by_symbol(symbol)
filename = os.path.join(data_dir, symbol + '.json')
with open(filename, 'w') as file:
json.dump(company, file)
create_excel(data_dir, company, symbol)
parser = ArgumentParser()
subparsers = parser.add_subparsers(dest="action", title='Subcommands')
load_parser = subparsers.add_parser('load', help='laod data')
load_parser.add_argument('symbols', type=str, nargs='*', help='Stock symbol')
args = sys.argv[1:]
args = parser.parse_args(args)
if args.action == 'load':
symbols = args.symbols
for symbol in symbols:
print("Loading data for {}.".format(symbol))
load(symbol)
sys.exit(0)
else:
parser.error('Unknown command: ' + repr(args.action))
|
the-stack_0_7913 | import csv
from functools import lru_cache
from pathlib import Path
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
import warnings
from django.apps import AppConfig
from django.apps import apps
from django.conf import settings
import django.contrib.auth
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model
from django.http import HttpRequest
from django.utils.module_loading import import_string
from .types import Evaluator
from .types import PermName
from .types import ResolveEvaluatorFunc
from .types import ResolvePermNameFunc
from .types import UnresolvedEvaluator
from .types import UserType
def default_resolve_perm_name(
app_config: AppConfig, model: Type[Model], action: str, is_global: bool
) -> str:
if model:
default_codename = django.contrib.auth.get_permission_codename(action, model._meta)
permission_name = f"{app_config.label}.{default_codename}"
else:
permission_name = f"{app_config.label}.{action}"
return permission_name
def default_get_user_type(user: Model) -> Optional[str]:
# note that AnonymousUser won't have a user_type so we need to deal with that gracefully
return getattr(user, "user_type", None)
def _parse_csv(
file_path: Path,
resolve_permission_name_func: ResolvePermNameFunc,
) -> Tuple[
Dict[PermName, bool],
Dict[PermName, Dict[UserType, UnresolvedEvaluator]],
Iterable[str],
]:
"""
Parses the CSV of user_type permissions returns data for further processing.
See README.md for the CSV file format
:return: A tuple of three elements:
- A dict mapping permission name to bool of whether that permission is global or not
- A dict mapping a permission to a dict of user_types to partially resolved permission details:
permission_name: {
user_type1: UnresolvedEvaluator,
...
user_typeN: UnresolvedEvaluator,
}
- A list of user types
"""
with open(file_path, "r") as csv_file:
reader = csv.reader(csv_file, skipinitialspace=True)
# get first row of headers
fieldnames = next(reader)
fieldnames = [x.strip() for x in fieldnames]
prelim_headers = ["Model", "App", "Action", "Is Global"]
prelim_header_count = len(prelim_headers)
if fieldnames[:prelim_header_count] != prelim_headers:
raise ValueError(f"Invalid csv_permissions CSV column headers found in {file_path}")
user_type_headers = fieldnames[prelim_header_count:]
nonempty_user_type_headers = [user_type for user_type in user_type_headers if user_type != ""]
if len(set(nonempty_user_type_headers)) != len(nonempty_user_type_headers):
duplicates = [x for x in nonempty_user_type_headers if nonempty_user_type_headers.count(x) >= 2]
raise ValueError(f"Duplicate csv_permissions CSV column header ({duplicates[0]}) found in {file_path}")
if len(nonempty_user_type_headers) == 0:
raise ValueError(f"Missing user_type headers in {file_path}")
perm_is_global = {}
perm_user_type_unresolved: Dict[PermName, Dict[UserType, UnresolvedEvaluator]] = {}
# We can't just count the number of permissions read because we don't consider
# a file with commented out lines to be empty so keep track with a flag
was_empty = True
for line_number, row in enumerate(reader):
row = [cell.strip() for cell in row]
was_empty = False
if all(x == "" for x in row):
# ignore completely empty rows
continue
if any(row[0].strip().startswith(comment_prefix) for comment_prefix in ("//", "#", ';')):
# Ignore lines beginning with comment chars
continue
if len(row) < prelim_header_count:
raise ValueError(f"Incomplete line {line_number} in {csv_file}")
# note that model capitalisation may differ to model._meta.model_name
model_name_orig, app_label, action, is_global = row[:prelim_header_count]
app_config = apps.get_app_config(app_label)
model = app_config.get_model(model_name_orig) if model_name_orig else None
if is_global == "yes":
is_global = True
elif is_global == "no":
is_global = False
else:
raise ValueError("Invalid value for Is Global: should be 'yes' or 'no'.")
permission = resolve_permission_name_func(app_config, model, action, is_global)
if permission not in perm_is_global:
perm_is_global[permission] = is_global
perm_user_type_unresolved[permission] = {}
for i, user_type in enumerate(user_type_headers):
try:
evaluator_name = row[prelim_header_count + i]
except IndexError:
continue
if user_type == "":
# if a column has an empty user type then that's allowed but only if the entire column is empty
if evaluator_name != "":
raise ValueError(f"Columns with an empty user_type must be completely empty")
else:
perm_user_type_unresolved[permission][user_type] = UnresolvedEvaluator(
app_config=app_config,
model=model,
is_global=is_global,
permission=permission,
action=action,
evaluator_name=evaluator_name,
source_csv=file_path,
)
if was_empty:
raise ValueError("Empty permissions file")
return perm_is_global, perm_user_type_unresolved, nonempty_user_type_headers
# maxsize should be at least as large as the number of CSV files we load. This gets called by every has_perm() so it must be cached
@lru_cache(maxsize=32)
def _resolve_functions(
file_paths: Iterable[Path],
resolve_permission_name: Optional[str],
resolve_evaluators: Iterable[Union[str, ResolveEvaluatorFunc]],
) -> Tuple[
Dict[PermName, Dict[UserType, Evaluator]],
Dict[PermName, bool],
Set[str],
Set[str]
]:
"""
:param file_paths: Path to the CSV files to read.
:resolve_permission_name: the settings.CSV_PERMISSIONS_RESOLVE_PERM_NAME setting.
:resolve_evaluators: the settings.CSV_PERMISSIONS_RESOLVE_EVALUATORS setting.
:return: A tuple of:
- dictionary mapping the permissions for each UserType to a function determining if the user has access.
- dictionary mapping the permission to a boolean indicating whether the permission is object level or global level.
- set of user types
- set of permissions
"""
if resolve_permission_name is None:
resolve_permission_name = default_resolve_perm_name
else:
resolve_permission_name = import_string(resolve_permission_name)
resolve_evaluators = tuple(
import_string(resolve_evaluator) if isinstance(resolve_evaluator, str) else resolve_evaluator
for resolve_evaluator
in resolve_evaluators
)
permission_is_global: Dict[PermName, bool] = {}
permission_is_global_source_csv: Dict[PermName, Path] = {}
known_user_types: Set[UserType] = set()
known_perms: Set[PermName] = set()
permission_to_user_type_to_unresolved: Dict[PermName, Dict[UserType, UnresolvedEvaluator]] = {}
for file_path in file_paths:
file_permission_is_global, new_permission_to_user_type_to_unresolved, user_types = \
_parse_csv(file_path, resolve_permission_name)
# merge global list of known user types/permissions
known_user_types.update(set(user_types))
known_perms.update(set(file_permission_is_global.keys()))
# merge is_global settings
for permission, is_global in file_permission_is_global.items():
if permission in permission_is_global and permission_is_global[permission] != is_global:
                # permission_is_global_source_csv records which earlier CSV file
                # set is_global for this permission so the error can point to it
raise ValueError(
f"'Is Global' for {permission} in {file_path} is inconsistent "
f"with a previous CSV file ({permission_is_global_source_csv[permission]})"
)
permission_is_global.update(file_permission_is_global)
permission_is_global_source_csv.update({perm: file_path for perm in file_permission_is_global.keys()})
# merge unresolved permissions
for permission, new_user_type_to_unresolved in new_permission_to_user_type_to_unresolved.items():
if permission not in permission_to_user_type_to_unresolved:
permission_to_user_type_to_unresolved[permission] = {}
for user_type, new_unresolved in new_user_type_to_unresolved.items():
if user_type not in permission_to_user_type_to_unresolved[permission]:
permission_to_user_type_to_unresolved[permission][user_type] = new_unresolved
else:
# both the new and an older CSV file include this cell
existing_unresolved = permission_to_user_type_to_unresolved[permission][user_type]
if new_unresolved == existing_unresolved:
# they are the same so do nothing (leaves the old one in place)
pass
elif existing_unresolved.evaluator_name == "":
# old CSV cell was empty, use new one
permission_to_user_type_to_unresolved[permission][user_type] = new_unresolved
elif new_unresolved.evaluator_name == "":
# new CSV cell is empty, use old one
pass
else:
# they were not the same and neither was empty. This means they're inconsistent
raise ValueError(
f"Permission {permission} for user type {user_type} in "
f"{file_path} is inconsistent with a previous CSV file "
f"({existing_unresolved.source_csv})"
)
# now take the partially resolved functions and resolve them
permission_to_user_type_to_evaluator: Dict[PermName, Dict[UserType, Evaluator]] = {}
for permission, user_type_to_unresolved in permission_to_user_type_to_unresolved.items():
if permission not in permission_to_user_type_to_evaluator:
permission_to_user_type_to_evaluator[permission] = {}
for user_type, detail in user_type_to_unresolved.items():
try:
for resolve_evaluator in resolve_evaluators:
evaluator = resolve_evaluator(detail)
if evaluator is not None:
permission_to_user_type_to_evaluator[permission][user_type] = evaluator
break
else:
raise ValueError(f"Could not resolve {permission} for {user_type} to anything")
except Exception as e:
raise RuntimeError(f"Error resolving {permission} for {user_type}: {detail.evaluator_name} ({e})") from e
return permission_to_user_type_to_evaluator, permission_is_global, known_user_types, known_perms
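# Each entry in CSV_PERMISSIONS_RESOLVE_EVALUATORS is a callable mapping an
# UnresolvedEvaluator to an Evaluator, or returning None to defer to the next
# resolver. A minimal sketch (the "all" / "" evaluator names are illustrative):
#
#   def example_resolve_evaluator(details: UnresolvedEvaluator) -> Optional[Evaluator]:
#       if details.evaluator_name == "all":
#           return lambda user, obj=None: True
#       if details.evaluator_name == "":
#           return lambda user, obj=None: False
#       return None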
# note that django creates a new instance of an auth backend for every permission check!
class CSVPermissionsBackend:
permission_lookup: Dict[PermName, Dict[UserType, Evaluator]]
permission_is_global: Dict[PermName, bool]
known_user_types: Set[UserType]
known_perms: Set[PermName]
def __init__(self):
try:
permissions_paths = settings.CSV_PERMISSIONS_PATHS
except AttributeError:
try:
settings.CSV_PERMISSIONS_PATHS = (settings.CSV_PERMISSIONS_PATH,)
except AttributeError:
raise ImproperlyConfigured("csv_permissions requires settings.CSV_PERMISSIONS_PATHS to be set")
else:
permissions_paths = settings.CSV_PERMISSIONS_PATHS
del settings.CSV_PERMISSIONS_PATH
# make sure it's immutable so that it's hashable and _resolve_functions() can have @lru_cache() applied
if not isinstance(permissions_paths, tuple):
if isinstance(permissions_paths, (str, Path)):
raise ImproperlyConfigured("settings.CSV_PERMISSIONS_PATHS should be an iterable of paths")
permissions_paths = tuple(permissions_paths)
settings.CSV_PERMISSIONS_PATHS = permissions_paths
try:
resolve_perm_name = settings.CSV_PERMISSIONS_RESOLVE_PERM_NAME
except AttributeError:
try:
settings.CSV_PERMISSIONS_RESOLVE_PERM_NAME = settings.CSV_PERMISSIONS_RESOLVE_RULE_NAME
except AttributeError:
resolve_perm_name = None
else:
warnings.warn(
"settings.CSV_PERMISSIONS_RESOLVE_RULE_NAME is deprecated in favor of settings.CSV_PERMISSIONS_RESOLVE_PERM_NAME",
DeprecationWarning
)
resolve_perm_name = settings.CSV_PERMISSIONS_RESOLVE_RULE_NAME
try:
resolve_evaluators = settings.CSV_PERMISSIONS_RESOLVE_EVALUATORS
except AttributeError:
raise ImproperlyConfigured(
'settings.CSV_PERMISSIONS_RESOLVE_EVALUATORS must be defined. '
'For legacy 0.1.0 compatibility use "csv_permissions.legacy.legacy_resolve_evaluator".'
)
else:
if isinstance(resolve_evaluators, str):
resolve_evaluators = import_string(resolve_evaluators)
resolve_evaluators = tuple(resolve_evaluators)
self.permission_lookup, self.permission_is_global, self.known_user_types, self.known_perms = _resolve_functions(
permissions_paths,
resolve_perm_name,
resolve_evaluators,
)
def authenticate(self, request: HttpRequest, username: Optional[str] = None, password: Optional[str] = None):
return None
def is_global_perm(self, perm: str) -> bool:
try:
return self.permission_is_global[perm]
except KeyError as ke:
raise ValueError(f"Permission {perm} is not known") from ke
def has_perm(self, user: Model, perm: str, obj: Model) -> bool:
if user is None:
return False
get_user_type = getattr(settings, 'CSV_PERMISSIONS_GET_USER_TYPE', default_get_user_type)
if isinstance(get_user_type, str):
settings.CSV_PERMISSIONS_GET_USER_TYPE = import_string(settings.CSV_PERMISSIONS_GET_USER_TYPE)
get_user_type = settings.CSV_PERMISSIONS_GET_USER_TYPE
user_type = get_user_type(user)
if user_type is None:
# if there is no user_type then it's probably an AnonymousUser, but might also be a
# user using a different permissions backend; either way they're not covered by csv_permissions
return False
if getattr(settings, "CSV_PERMISSIONS_STRICT", False):
if perm not in self.known_perms:
raise LookupError(f"Permission {repr(perm)} is not known")
if user_type not in self.known_user_types:
raise LookupError(f"User Type {repr(user_type)} is not known")
try:
func = self.permission_lookup[perm][user_type]
except KeyError:
# If we get here it means that
# - the permission/user type is not known at all and CSV_PERMISSIONS_STRICT is not set
# or
# - the permission & user types are known but because there are multiple CSV files that
# particular combination doesn't appear in any CSV file
#
# in either case we allow django to try other backends
return False
return func(user, obj)
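# Minimal configuration sketch (the paths and dotted import paths below are
# hypothetical; substitute the real module locations for your project):
#
#   CSV_PERMISSIONS_PATHS = (BASE_DIR / "permissions.csv",)
#   CSV_PERMISSIONS_RESOLVE_EVALUATORS = "myproject.permissions.resolve_evaluators"
#   AUTHENTICATION_BACKENDS = [
#       "myproject.permissions.CSVPermissionsBackend",
#       "django.contrib.auth.backends.ModelBackend",
#   ]
#
# Permission checks then go through the standard Django API, e.g.
# user.has_perm("blog.view_article", article).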
|
the-stack_0_7914 | # Assign the value 1 to the variable var_teste
var_teste = 1
# Print the variable's value
print(var_teste)
# Assign the value 2 to the variable var_teste
var_teste = 2
# Print the variable's value
print(var_teste)
# Display the variable's data type
print(type(var_teste))
# Assign the value 9.5 to the variable var_teste
var_teste = 9.5
# Display the variable's data type
print(type(var_teste))
# ## Multiple Assignment
pessoa1, pessoa2, pessoa3 = "Maria", "José", "Tobias"
fruta1 = fruta2 = fruta3 = "Laranja"
# ## You can use letters, numbers and underscores (but names cannot start with a number)
x1 = 50
# ## Variables assigned to other variables and operator precedence
largura = 2
altura = 4
area = largura * altura
print(area)
perimetro = 2 * largura + 2 * altura
print(perimetro)
perimetro = 2 * (largura + 2) * altura
print(perimetro)
idade1 = 25
idade2 = 35
print(idade1 + idade2)
print(idade2 - idade1)
print(idade2 * idade1)
print(idade2 / idade1)
print(idade2 % idade1)
# ## Variable Concatenation
nome = "Steve"
sobrenome = "Jobs"
fullName = nome + " " + sobrenome
print(fullName)
# Script developed based on DSA's course material.
|
the-stack_0_7916 | # Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.text import normalize_newlines
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from cloudkittydashboard.api import cloudkitty as api
LOG = logging.getLogger(__name__)
class CreateScriptForm(forms.SelfHandlingForm):
help_text = _('Create a new rating script.')
name = forms.CharField(label=_("Name"))
source_choices = [('raw', _('Direct Input')),
('file', _('File'))]
script_source = forms.ChoiceField(
label=_('Rating Script Source'),
choices=source_choices,
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'scriptsource'}))
script_help = _("A script or set of python commands to modify rating "
"calculations.")
script_upload = forms.FileField(
label=_('Script File'),
help_text=script_help,
widget=forms.FileInput(attrs={
'class': 'switched',
'data-switch-on': 'scriptsource',
'data-scriptsource-file': _('Script File')}),
required=False)
script_data = forms.CharField(
label=_('Script Data'),
help_text=script_help,
widget=forms.widgets.Textarea(attrs={
'class': 'switched',
'data-switch-on': 'scriptsource',
'data-scriptsource-raw': _('Script Data')}),
required=False)
class Meta(object):
name = _('Create Script')
def clean(self):
cleaned = super(CreateScriptForm, self).clean()
files = self.request.FILES
script = self.clean_uploaded_files('script', files)
if script is not None:
cleaned['script_data'] = script
return cleaned
def clean_uploaded_files(self, prefix, files):
upload_str = prefix + "_upload"
has_upload = upload_str in files
if has_upload:
upload_file = files[upload_str]
log_script_name = upload_file.name
LOG.info('got upload %s' % log_script_name)
            script = upload_file.read()
            if isinstance(script, bytes):
                # uploaded files are read as bytes under Python 3
                script = script.decode("utf-8")
            if script != "":
try:
normalize_newlines(script)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
return script
else:
return None
def handle(self, request, data):
name = data['name']
LOG.info('Creating script with name %s' % (name))
ck_client = api.cloudkittyclient(request)
return ck_client.rating.pyscripts.create_script(
name=name,
data=data['script_data'])
class EditScriptForm(CreateScriptForm):
script_id = forms.CharField(label=_("Script ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
fields_order = ['script_id', 'name', 'script_source', 'script_upload',
'script_data']
class Meta(object):
name = _("Update Script")
def handle(self, request, data):
script_id = self.initial['script_id']
LOG.info('Updating script with id %s' % (script_id))
ck_client = api.cloudkittyclient(request)
return ck_client.rating.pyscripts.update_script(
script_id=script_id, name=data['name'], data=data['script_data'])
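
# Minimal sketch of how these forms are typically exposed through a Horizon
# modal view (the view, template and URL names below are hypothetical):
#
#   from horizon import forms as horizon_forms
#
#   class CreateScriptView(horizon_forms.ModalFormView):
#       form_class = CreateScriptForm
#       template_name = "pyscripts/create_script.html"
#       success_url = "horizon:admin:rating:pyscripts_tab"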
|
the-stack_0_7917 | """
xterm terminal info
Since most of the Windows virtual terminal processing schemes are based on xterm,
this file is intended to be sourced and includes the man page descriptions.
Most of this information came from the terminfo man pages, part of ncurses
More information on ncurses can be found at:
https://www.gnu.org/software/ncurses/ncurses.html
The values are as reported by infocmp on Fedora 30 with ncurses 6.1
"""
# pylint: disable=wrong-spelling-in-comment,line-too-long
# flake8: noqa: E501
BOOL_CAPS = [
'am', # (auto_right_margin) terminal has automatic margins
'bce', # (back_color_erase) screen erased with background color
# 'bw', # (auto_left_margin) cub1 wraps from column 0 to last column
# 'ccc', # (can_change) terminal can re-define existing colors
# 'chts', # (hard_cursor) cursor is hard to see
# 'cpix', # (cpi_changes_res) changing character pitch changes resolution
# 'crxm', # (cr_cancels_micro_mode) using cr turns off micro mode
# 'daisy', # (has_print_wheel) printer needs operator to change character set
# 'da', # (memory_above) display may be retained above the screen
# 'db', # (memory_below) display may be retained below the screen
# 'eo', # (erase_overstrike) can erase overstrikes with a blank
# 'eslok', # (status_line_esc_ok) escape can be used on the status line
# 'gn', # (generic_type) generic line type
# 'hc', # (hard_copy) hardcopy terminal
# 'hls', # (hue_lightness_saturation) terminal uses only HLS color notation (Tektronix)
# 'hs', # (has_status_line) has extra status line
# 'hz', # (tilde_glitch) cannot print ~'s (Hazeltine)
# 'in', # (insert_null_glitch) insert mode distinguishes nulls
'km', # (has_meta_key) Has a meta key (i.e., sets 8th-bit)
# 'lpix', # (lpi_changes_res) changing line pitch changes resolution
'mc5i', # (prtr_silent) printer will not echo on screen
'mir', # (move_insert_mode) safe to move while in insert mode
'msgr', # (move_standout_mode) safe to move while in standout mode
# 'ndscr', # (non_dest_scroll_region) scrolling region is non-destructive
'npc', # (no_pad_char) pad character does not exist
# 'nrrmc', # (non_rev_rmcup) smcup does not reverse rmcup
# 'nxon', # (needs_xon_xoff) padding will not work, xon/xoff required
# 'os', # (over_strike) terminal can overstrike
# 'sam', # (semi_auto_right_margin) printing in last column causes cr
# 'ul', # (transparent_underline) underline character overstrikes
'xenl', # (eat_newline_glitch) newline ignored after 80 cols (concept)
# 'xhpa', # (col_addr_glitch) only positive motion for hpa/mhpa caps
# 'xhp', # (ceol_standout_glitch) standout not erased by overwriting (hp)
# 'xon', # (xon_xoff) terminal uses xon/xoff handshaking
# 'xsb', # (no_esc_ctlc) beehive (f1=escape, f2=ctrl C)
# 'xt', # (dest_tabs_magic_smso) tabs destructive, magic so char (t1061)
# 'xvpa', # (row_addr_glitch) only positive motion for vpa/mvpa caps
]
NUM_CAPS = {
# 'bitwin': 0, # (bit_image_entwining) number of passes for each bit-image row
# 'bitype': 0, # (bit_image_type) type of bit-image device
# 'btns': 0, # (buttons) number of buttons on mouse
# 'bufsz': 0, # (buffer_capacity) numbers of bytes buffered before printing
'colors': 8, # (max_colors) maximum number of colors on screen
'cols': 80, # (columns) number of columns in a line
# 'cps': 0, # (print_rate) print rate in characters per second
'it': 8, # (init_tabs) tabs initially every # spaces
# 'lh': 0, # (label_height) rows in each label
'lines': 24, # (lines) number of lines on screen or page
# 'lm': 0, # (lines_of_memory) lines of memory if > line. 0 means varies
# 'lw': 0, # (label_width) columns in each label
# 'ma': 0, # (max_attributes) maximum combined attributes terminal can handle
# 'maddr': 0, # (max_micro_address) maximum value in micro_..._address
# 'mcs': 0, # (micro_col_size) character step size when in micro mode
# 'mjump': 0, # (max_micro_jump) maximum value in parm_..._micro
# 'mls': 0, # (micro_line_size) line step size when in micro mode
# 'ncv': 0, # (no_color_video) video attributes that cannot be used with colors
# 'nlab': 0, # (num_labels) number of labels on screen
# 'npins': 0, # (number_of_pins) numbers of pins in print-head
# 'orc': 0, # (output_res_char) horizontal resolution in units per line
# 'orhi': 0, # (output_res_horz_inch) horizontal resolution in units per inch
# 'orl': 0, # (output_res_line) vertical resolution in units per line
# 'orvi': 0, # (output_res_vert_inch) vertical resolution in units per inch
'pairs': 64, # (max_pairs) maximum number of color-pairs on the screen
# 'pb': 0, # (padding_baud_rate) lowest baud rate where padding needed
# 'spinh': 0, # (dot_horz_spacing) spacing of dots horizontally in dots per inch
# 'spinv': 0, # (dot_vert_spacing) spacing of pins vertically in pins per inch
# 'vt': 0, # (virtual_terminal) virtual terminal number (CB/unix)
# 'widcs': 0, # (wide_char_size) character step size when in double wide mode
# 'wnum': 0, # (maximum_windows) maximum number of definable windows
# 'wsl': 0, # (width_status_line) number of columns in status line
# 'xmc': 0, # (magic_cookie_glitch) number of blank characters left by smso or rmso
}
STR_CAPS = {
'acsc': b'``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~', # (acs_chars) graphics charset pairs, based on vt100
'bel': b'^G', # (bell) audible signal (bell) (P)
# 'bicr': b'', # (bit_image_carriage_return) Move to beginning of same row
# 'binel': b'', # (bit_image_newline) Move to next row of the bit image
# 'birep': b'', # (bit_image_repeat) Repeat bit image cell #1 #2 times
'blink': b'\x1b[5m', # (enter_blink_mode) turn on blinking
'bold': b'\x1b[1m', # (enter_bold_mode) turn on bold (extra bright) mode
'cbt': b'\x1b[Z', # (back_tab) back tab (P)
# 'chr': b'', # (change_res_horz) Change horizontal resolution to #1
'civis': b'\x1b[?25l', # (cursor_invisible) make cursor invisible
'clear': b'\x1b[H\x1b[2J', # (clear_screen) clear screen and home cursor (P*)
# 'cmdch': b'', # (command_character) terminal settable cmd character in prototype !?
'cnorm': b'\x1b[?12l\x1b[?25h', # (cursor_normal) make cursor appear normal (undo civis/cvvis)
# 'colornm': b'', # (color_names) Give name for color #1
# 'cpi': b'', # (change_char_pitch) Change number of characters per inch to #1
'cr': b'\r', # (carriage_return) carriage return (P*) (P*)
# 'csin': b'', # (code_set_init) Init sequence for multiple codesets
# 'csnm': b'', # (char_set_names) Produce #1'th item from list of character set names
'csr': b'\x1b[%i%p1%d;%p2%dr', # (change_scroll_region) change region to line #1 to line #2 (P)
'cub1': b'^H', # (cursor_left) move left one space
'cub': b'\x1b[%p1%dD', # (parm_left_cursor) move #1 characters to the left (P)
'cud1': b'\n', # (cursor_down) down one line
'cud': b'\x1b[%p1%dB', # (parm_down_cursor) down #1 lines (P*)
'cuf1': b'\x1b[C', # (cursor_right) non-destructive space (move right one space)
'cuf': b'\x1b[%p1%dC', # (parm_right_cursor) move #1 characters to the right (P*)
'cup': b'\x1b[%i%p1%d;%p2%dH', # (cursor_address) move to row #1 columns #2
'cuu1': b'\x1b[A', # (cursor_up) up one line
'cuu': b'\x1b[%p1%dA', # (parm_up_cursor) up #1 lines (P*)
# 'cvr': b'', # (change_res_vert) Change vertical resolution to #1
'cvvis': b'\x1b[?12;25h', # (cursor_visible) make cursor very visible
# 'cwin': b'', # (create_window) define a window #1 from #2,#3 to #4,#5
'dch1': b'\x1b[P', # (delete_character) delete character (P*)
'dch': b'\x1b[%p1%dP', # (parm_dch) delete #1 characters (P*)
# 'dclk': b'', # (display_clock) display clock
# 'defbi': b'', # (define_bit_image_region) Define rectangular bit image region
# 'defc': b'', # (define_char) Define a character #1, #2 dots wide, descender #3
# 'devt': b'', # (device_type) Indicate language/codeset support
# 'dial': b'', # (dial_phone) dial number #1
'dim': b'\x1b[2m', # (enter_dim_mode) turn on half-bright mode
# 'dispc': b'', # (display_pc_char) Display PC character #1
'dl1': b'\x1b[M', # (delete_line) delete line (P*)
'dl': b'\x1b[%p1%dM', # (parm_delete_line) delete #1 lines (P*)
# 'docr': b'', # (these_cause_cr) Printing any of these characters causes CR
# 'dsl': b'', # (dis_status_line) disable status line
'ech': b'\x1b[%p1%dX', # (erase_chars) erase #1 characters (P)
'ed': b'\x1b[J', # (clr_eos) clear to end of screen (P*)
'el1': b'\x1b[1K', # (clr_bol) Clear to beginning of line
'el': b'\x1b[K', # (clr_eol) clear to end of line (P)
# 'enacs': b'', # (ena_acs) enable alternate char set
# 'endbi': b'', # (end_bit_image_region) End a bit-image region
# 'ff': b'', # (form_feed) hardcopy terminal page eject (P*)
'flash': b'\x1b[?5h$<100/>\x1b[?5l', # (flash_screen) visible bell (may not move cursor)
# 'fln': b'', # (label_format) label format
# 'fsl': b'', # (from_status_line) return from status line
# 'getm': b'', # (get_mouse) Curses should get button events, parameter #1 not documented.
# 'hd': b'', # (down_half_line) half a line down
'home': b'\x1b[H', # (cursor_home) home cursor (if no cup)
# 'hook': b'', # (flash_hook) flash switch hook
'hpa': b'\x1b[%i%p1%dG', # (column_address) horizontal position #1, absolute (P)
'ht': b'^I', # (tab) tab to next 8-space hardware tab stop
'hts': b'\x1bH', # (set_tab) set a tab in every row, current columns
# 'hu': b'', # (up_half_line) half a line up
# 'hup': b'', # (hangup) hang-up phone
# 'ich1': b'', # (insert_character) insert character (P)
'ich': b'\x1b[%p1%d@', # (parm_ich) insert #1 characters (P*)
# 'if': b'', # (init_file) name of initialization file
'il1': b'\x1b[L', # (insert_line) insert line (P*)
'il': b'\x1b[%p1%dL', # (parm_insert_line) insert #1 lines (P*)
'ind': b'\n', # (scroll_forward) scroll text up (P)
'indn': b'\x1b[%p1%dS', # (parm_index) scroll forward #1 lines (P)
# 'initc': b'', # (initialize_color) initialize color #1 to (#2,#3,#4)
# 'initp': b'', # (initialize_pair) Initialize color pair #1 to fg=(#2,#3,#4), bg=(#5,#6,#7)
'invis': b'\x1b[8m', # (enter_secure_mode) turn on blank mode (characters invisible)
# 'ip': b'', # (insert_padding) insert padding after inserted character
# 'iprog': b'', # (init_prog) path name of program for initialization
# 'is1': b'', # (init_1string) initialization string
'is2': b'\x1b[!p\x1b[?3;4l\x1b[4l\x1b>', # (init_2string) initialization string
# 'is3': b'', # (init_3string) initialization string
# 'ka1': b'', # (key_a1) upper left of keypad
# 'ka3': b'', # (key_a3) upper right of keypad
'kb2': b'\x1bOE', # (key_b2) center of keypad
# 'kbeg': b'', # (key_beg) begin key
# 'kBEG': b'', # (key_sbeg) shifted begin key
'kbs': b'^?', # (key_backspace) backspace key
# 'kc1': b'', # (key_c1) lower left of keypad
# 'kc3': b'', # (key_c3) lower right of keypad
# 'kcan': b'', # (key_cancel) cancel key
# 'kCAN': b'', # (key_scancel) shifted cancel key
'kcbt': b'\x1b[Z', # (key_btab) back-tab key
# 'kclo': b'', # (key_close) close key
# 'kclr': b'', # (key_clear) clear-screen or erase key
# 'kcmd': b'', # (key_command) command key
# 'kCMD': b'', # (key_scommand) shifted command key
# 'kcpy': b'', # (key_copy) copy key
# 'kCPY': b'', # (key_scopy) shifted copy key
# 'kcrt': b'', # (key_create) create key
# 'kCRT': b'', # (key_screate) shifted create key
# 'kctab': b'', # (key_ctab) clear-tab key
'kcub1': b'\x1bOD', # (key_left) left-arrow key
'kcud1': b'\x1bOB', # (key_down) down-arrow key
'kcuf1': b'\x1bOC', # (key_right) right-arrow key
'kcuu1': b'\x1bOA', # (key_up) up-arrow key
'kDC': b'\x1b[3;2~', # (key_sdc) shifted delete- character key
'kdch1': b'\x1b[3~', # (key_dc) delete-character key
# 'kdl1': b'', # (key_dl) delete-line key
# 'kDL': b'', # (key_sdl) shifted delete-line key
# 'ked': b'', # (key_eos) clear-to-end-of- screen key
# 'kel': b'', # (key_eol) clear-to-end-of-line key
'kEND': b'\x1b[1;2F', # (key_send) shifted end key
'kend': b'\x1bOF', # (key_end) end key
'kent': b'\x1bOM', # (key_enter) enter/send key
# 'kEOL': b'', # (key_seol) shifted clear-to- end-of-line key
# 'kext': b'', # (key_exit) exit key
# 'kEXT': b'', # (key_sexit) shifted exit key
# 'kf0': b'', # (key_f0) F0 function key
'kf1': b'\x1bOP', # (key_f1) F1 function key
'kf2': b'\x1bOQ', # (key_f2) F2 function key
'kf3': b'\x1bOR', # (key_f3) F3 function key
'kf4': b'\x1bOS', # (key_f4) F4 function key
'kf5': b'\x1b[15~', # (key_f5) F5 function key
'kf6': b'\x1b[17~', # (key_f6) F6 function key
'kf7': b'\x1b[18~', # (key_f7) F7 function key
'kf8': b'\x1b[19~', # (key_f8) F8 function key
'kf9': b'\x1b[20~', # (key_f9) F9 function key
'kf10': b'\x1b[21~', # (key_f10) F10 function key
'kf11': b'\x1b[23~', # (key_f11) F11 function key
'kf12': b'\x1b[24~', # (key_f12) F12 function key
'kf13': b'\x1b[1;2P', # (key_f13) F13 function key
'kf14': b'\x1b[1;2Q', # (key_f14) F14 function key
'kf15': b'\x1b[1;2R', # (key_f15) F15 function key
'kf16': b'\x1b[1;2S', # (key_f16) F16 function key
'kf17': b'\x1b[15;2~', # (key_f17) F17 function key
'kf18': b'\x1b[17;2~', # (key_f18) F18 function key
'kf19': b'\x1b[18;2~', # (key_f19) F19 function key
'kf20': b'\x1b[19;2~', # (key_f20) F20 function key
'kf21': b'\x1b[20;2~', # (key_f21) F21 function key
'kf22': b'\x1b[21;2~', # (key_f22) F22 function key
'kf23': b'\x1b[23;2~', # (key_f23) F23 function key
'kf24': b'\x1b[24;2~', # (key_f24) F24 function key
'kf25': b'\x1b[1;5P', # (key_f25) F25 function key
'kf26': b'\x1b[1;5Q', # (key_f26) F26 function key
'kf27': b'\x1b[1;5R', # (key_f27) F27 function key
'kf28': b'\x1b[1;5S', # (key_f28) F28 function key
'kf29': b'\x1b[15;5~', # (key_f29) F29 function key
'kf30': b'\x1b[17;5~', # (key_f30) F30 function key
'kf31': b'\x1b[18;5~', # (key_f31) F31 function key
'kf32': b'\x1b[19;5~', # (key_f32) F32 function key
'kf33': b'\x1b[20;5~', # (key_f33) F33 function key
'kf34': b'\x1b[21;5~', # (key_f34) F34 function key
'kf35': b'\x1b[23;5~', # (key_f35) F35 function key
'kf36': b'\x1b[24;5~', # (key_f36) F36 function key
'kf37': b'\x1b[1;6P', # (key_f37) F37 function key
'kf38': b'\x1b[1;6Q', # (key_f38) F38 function key
'kf39': b'\x1b[1;6R', # (key_f39) F39 function key
'kf40': b'\x1b[1;6S', # (key_f40) F40 function key
'kf41': b'\x1b[15;6~', # (key_f41) F41 function key
'kf42': b'\x1b[17;6~', # (key_f42) F42 function key
'kf43': b'\x1b[18;6~', # (key_f43) F43 function key
'kf44': b'\x1b[19;6~', # (key_f44) F44 function key
'kf45': b'\x1b[20;6~', # (key_f45) F45 function key
'kf46': b'\x1b[21;6~', # (key_f46) F46 function key
'kf47': b'\x1b[23;6~', # (key_f47) F47 function key
'kf48': b'\x1b[24;6~', # (key_f48) F48 function key
'kf49': b'\x1b[1;3P', # (key_f49) F49 function key
'kf50': b'\x1b[1;3Q', # (key_f50) F50 function key
'kf51': b'\x1b[1;3R', # (key_f51) F51 function key
'kf52': b'\x1b[1;3S', # (key_f52) F52 function key
'kf53': b'\x1b[15;3~', # (key_f53) F53 function key
'kf54': b'\x1b[17;3~', # (key_f54) F54 function key
'kf55': b'\x1b[18;3~', # (key_f55) F55 function key
'kf56': b'\x1b[19;3~', # (key_f56) F56 function key
'kf57': b'\x1b[20;3~', # (key_f57) F57 function key
'kf58': b'\x1b[21;3~', # (key_f58) F58 function key
'kf59': b'\x1b[23;3~', # (key_f59) F59 function key
'kf60': b'\x1b[24;3~', # (key_f60) F60 function key
'kf61': b'\x1b[1;4P', # (key_f61) F61 function key
'kf62': b'\x1b[1;4Q', # (key_f62) F62 function key
'kf63': b'\x1b[1;4R', # (key_f63) F63 function key
# 'kfnd': b'', # (key_find) find key
# 'kFND': b'', # (key_sfind) shifted find key
# 'khlp': b'', # (key_help) help key
# 'kHLP': b'', # (key_shelp) shifted help key
'kHOM': b'\x1b[1;2H', # (key_shome) shifted home key
'khome': b'\x1bOH', # (key_home) home key
# 'khts': b'', # (key_stab) set-tab key
'kIC': b'\x1b[2;2~', # (key_sic) shifted insert- character key
'kich1': b'\x1b[2~', # (key_ic) insert-character key
# 'kil1': b'', # (key_il) insert-line key
'kind': b'\x1b[1;2B', # (key_sf) scroll-forward key
'kLFT': b'\x1b[1;2D', # (key_sleft) shifted left-arrow key
# 'kll': b'', # (key_ll) lower-left key (home down)
'kmous': b'\x1b[<', # (key_mouse) Mouse event has occurred
# 'kmov': b'', # (key_move) move key
# 'kMOV': b'', # (key_smove) shifted move key
# 'kmrk': b'', # (key_mark) mark key
# 'kmsg': b'', # (key_message) message key
# 'kMSG': b'', # (key_smessage) shifted message key
'knp': b'\x1b[6~', # (key_npage) next-page key
# 'knxt': b'', # (key_next) next key
'kNXT': b'\x1b[6;2~', # (key_snext) shifted next key
# 'kopn': b'', # (key_open) open key
# 'kopt': b'', # (key_options) options key
# 'kOPT': b'', # (key_soptions) shifted options key
'kpp': b'\x1b[5~', # (key_ppage) previous-page key
# 'kprt': b'', # (key_print) print key
# 'kPRT': b'', # (key_sprint) shifted print key
# 'kprv': b'', # (key_previous) previous key
'kPRV': b'\x1b[5;2~', # (key_sprevious) shifted previous key
# 'krdo': b'', # (key_redo) redo key
# 'kRDO': b'', # (key_sredo) shifted redo key
# 'kref': b'', # (key_reference) reference key
# 'kres': b'', # (key_resume) resume key
# 'kRES': b'', # (key_srsume) shifted resume key
# 'krfr': b'', # (key_refresh) refresh key
'kri': b'\x1b[1;2A', # (key_sr) scroll-backward key
'kRIT': b'\x1b[1;2C', # (key_sright) shifted right-arrow key
# 'krmir': b'', # (key_eic) sent by rmir or smir in insert mode
# 'krpl': b'', # (key_replace) replace key
# 'kRPL': b'', # (key_sreplace) shifted replace key
# 'krst': b'', # (key_restart) restart key
# 'ksav': b'', # (key_save) save key
# 'kSAV': b'', # (key_ssave) shifted save key
# 'kslt': b'', # (key_select) select key
# 'kSPD': b'', # (key_ssuspend) shifted suspend key
# 'kspd': b'', # (key_suspend) suspend key
# 'ktbc': b'', # (key_catab) clear-all-tabs key
# 'kUND': b'', # (key_sundo) shifted undo key
# 'kund': b'', # (key_undo) undo key
# 'lf0': b'', # (lab_f0) label on function key f0 if not f0
# 'lf10': b'', # (lab_f10) label on function key f10 if not f10
# 'lf1': b'', # (lab_f1) label on function key f1 if not f1
# 'lf2': b'', # (lab_f2) label on function key f2 if not f2
# 'lf3': b'', # (lab_f3) label on function key f3 if not f3
# 'lf4': b'', # (lab_f4) label on function key f4 if not f4
# 'lf5': b'', # (lab_f5) label on function key f5 if not f5
# 'lf6': b'', # (lab_f6) label on function key f6 if not f6
# 'lf7': b'', # (lab_f7) label on function key f7 if not f7
# 'lf8': b'', # (lab_f8) label on function key f8 if not f8
# 'lf9': b'', # (lab_f9) label on function key f9 if not f9
# 'll': b'', # (cursor_to_ll) last line, first column (if no cup)
# 'lpi': b'', # (change_line_pitch) Change number of lines per inch to #1
    'meml': b'\x1bl', # (memory_lock) lock memory above the cursor
    'memu': b'\x1bm', # (memory_unlock) unlock memory above the cursor
'mc0': b'\x1b[i', # (print_screen) print contents of screen
'mc4': b'\x1b[4i', # (prtr_off) turn off printer
'mc5': b'\x1b[5i', # (prtr_on) turn on printer
# 'mc5p': b'', # (prtr_non) turn on printer for #1 bytes
# 'mcub1': b'', # (micro_left) Like cursor_left in micro mode
# 'mcub': b'', # (parm_left_micro) Like parm_left_cursor in micro mode
# 'mcud1': b'', # (micro_down) Like cursor_down in micro mode
# 'mcud': b'', # (parm_down_micro) Like parm_down_cursor in micro mode
# 'mcuf1': b'', # (micro_right) Like cursor_right in micro mode
# 'mcuf': b'', # (parm_right_micro) Like parm_right_cursor in micro mode
# 'mcuu1': b'', # (micro_up) Like cursor_up in micro mode
# 'mcuu': b'', # (parm_up_micro) Like parm_up_cursor in micro mode
# 'mgc': b'', # (clear_margins) clear right and left soft margins
# 'mhpa': b'', # (micro_column_address) Like column_address in micro mode
# 'minfo': b'', # (mouse_info) Mouse status information
# 'mrcup': b'', # (cursor_mem_address) memory relative cursor addressing, move to row #1 columns #2
# 'mvpa': b'', # (micro_row_address) Like row_address #1 in micro mode
# 'nel': b'', # (newline) newline (behave like cr followed by lf)
# 'oc': b'', # (orig_colors) Set all color pairs to the original ones
'op': b'\x1b[39;49m', # (orig_pair) Set default pair to its original value
# 'pad': b'', # (pad_char) padding char (instead of null)
# 'pause': b'', # (fixed_pause) pause for 2-3 seconds
# 'pctrm': b'', # (pc_term_options) PC terminal options
# 'pfkey': b'', # (pkey_key) program function key #1 to type string #2
# 'pfloc': b'', # (pkey_local) program function key #1 to execute string #2
# 'pfx': b'', # (pkey_xmit) program function key #1 to transmit string #2
# 'pfxl': b'', # (pkey_plab) Program function key #1 to type string #2 and show string #3
# 'pln': b'', # (plab_norm) program label #1 to show string #2
# 'porder': b'', # (order_of_pins) Match software bits to print-head pins
# 'prot': b'', # (enter_protected_mode) turn on protected mode
# 'pulse': b'', # (pulse) select pulse dialing
# 'qdial': b'', # (quick_dial) dial number #1 without checking
# 'rbim': b'', # (stop_bit_image) Stop printing bit image graphics
'rc': b'\x1b8', # (restore_cursor) restore cursor to position of last save_cursor
# 'rcsd': b'', # (stop_char_set_def) End definition of character set #1
'rep': b'%p1%c\x1b[%p2%{1}%-%db', # (repeat_char) repeat char #1 #2 times (P*)
# 'reqmp': b'', # (req_mouse_pos) Request mouse position
'rev': b'\x1b[7m', # (enter_reverse_mode) turn on reverse video mode
# 'rf': b'', # (reset_file) name of reset file
# 'rfi': b'', # (req_for_input) send next input char (for ptys)
'ri': b'\x1bM', # (scroll_reverse) scroll text down (P)
'rin': b'\x1b[%p1%dT', # (parm_rindex) scroll back #1 lines (P)
'ritm': b'\x1b[23m', # (exit_italics_mode) End italic mode
# 'rlm': b'', # (exit_leftward_mode) End left-motion mode
'rmacs': b'\x1b(B', # (exit_alt_charset_mode) end alternate character set (P)
'rmam': b'\x1b[?7l', # (exit_am_mode) turn off automatic margins
# 'rmclk': b'', # (remove_clock) remove clock
'rmcup': b'\x1b[?1049l\x1b[23;0;0t', # (exit_ca_mode) strings to end programs using cup
# 'rmdc': b'', # (exit_delete_mode) end delete mode
# 'rmicm': b'', # (exit_micro_mode) End micro-motion mode
'rmir': b'\x1b[4l', # (exit_insert_mode) exit insert mode
'rmkx': b'\x1b[?1l\x1b>', # (keypad_local) leave 'keyboard_transmit' mode
# 'rmln': b'', # (label_off) turn off soft labels
'rmm': b'\x1b[?1034l', # (meta_off) turn off meta mode
# 'rmp': b'', # (char_padding) like ip but when in insert mode
# 'rmpch': b'', # (exit_pc_charset_mode) Exit PC character display mode
# 'rmsc': b'', # (exit_scancode_mode) Exit PC scancode mode
'rmso': b'\x1b[27m', # (exit_standout_mode) exit standout mode
'rmul': b'\x1b[24m', # (exit_underline_mode) exit underline mode
# 'rmxon': b'', # (exit_xon_mode) turn off xon/xoff handshaking
'rs1': b'\x1bc', # (reset_1string) reset string
'rs2': b'\x1b[!p\x1b[?3;4l\x1b[4l\x1b>', # (reset_2string) reset string
# 'rs3': b'', # (reset_3string) reset string
# 'rshm': b'', # (exit_shadow_mode) End shadow-print mode
# 'rsubm': b'', # (exit_subscript_mode) End subscript mode
# 'rsupm': b'', # (exit_superscript_mode) End superscript mode
# 'rum': b'', # (exit_upward_mode) End reverse character motion
# 'rwidm': b'', # (exit_doublewide_mode) End double-wide mode
# 's0ds': b'', # (set0_des_seq) Shift to codeset 0 (EUC set 0, ASCII)
# 's1ds': b'', # (set1_des_seq) Shift to codeset 1
# 's2ds': b'', # (set2_des_seq) Shift to codeset 2
# 's3ds': b'', # (set3_des_seq) Shift to codeset 3
# 'sbim': b'', # (start_bit_image) Start printing bit image graphics
'sc': b'\x1b7', # (save_cursor) save current cursor position (P)
# 'scesa': b'', # (alt_scancode_esc) Alternate escape for scancode emulation
# 'scesc': b'', # (scancode_escape) Escape for scancode emulation
# 'sclk': b'', # (set_clock) set clock, #1 hrs #2 mins #3 secs
# 'scp': b'', # (set_color_pair) Set current color pair to #1
# 'scs': b'', # (select_char_set) Select character set, #1
# 'scsd': b'', # (start_char_set_def) Start character set definition #1, with #2 characters in the set
# 'sdrfq': b'', # (enter_draft_quality) Enter draft-quality mode
'setab': b'\x1b[4%p1%dm', # (set_a_background) Set background color to #1, using ANSI escape
'setaf': b'\x1b[3%p1%dm', # (set_a_foreground) Set foreground color to #1, using ANSI escape
'setb': b'\x1b[4%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m', # (set_background) Set background color #1
# 'setcolor': b'', # (set_color_band) Change to ribbon color #1
'setf': b'\x1b[3%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m', # (set_foreground) Set foreground color #1
'sgr0': b'\x1b(B\x1b[m', # (exit_attribute_mode) turn off all attributes
'sgr': b'%?%p9%t\x1b(0%e\x1b(B%;\x1b[0%?%p6%t;1%;%?%p5%t;2%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;%?%p7%t;8%;m', # (set_attributes) define video attributes #1-#9 (PG9)
'sitm': b'\x1b[3m', # (enter_italics_mode) Enter italic mode
# 'slines': b'', # (set_page_length) Set page length to #1 lines
# 'slm': b'', # (enter_leftward_mode) Start leftward carriage motion
'smacs': b'\x1b(0', # (enter_alt_charset_mode) start alternate character set (P)
'smam': b'\x1b[?7h', # (enter_am_mode) turn on automatic margins
'smcup': b'\x1b[?1049h\x1b[22;0;0t', # (enter_ca_mode) string to start programs using cup
# 'smdc': b'', # (enter_delete_mode) enter delete mode
# 'smgb': b'', # (set_bottom_margin) Set bottom margin at current line
# 'smgbp': b'', # (set_bottom_margin_parm) Set bottom margin at line #1 or (if smgtp is not given) #2 lines from bottom
# 'smgl': b'', # (set_left_margin) set left soft margin at current column. See smgl. (ML is not in BSD termcap).
# 'smglp': b'', # (set_left_margin_parm) Set left (right) margin at column #1
# 'smglr': b'', # (set_lr_margin) Set both left and right margins to #1, #2. (ML is not in BSD termcap).
# 'smgr': b'', # (set_right_margin) set right soft margin at current column
# 'smgrp': b'', # (set_right_margin_parm) Set right margin at column #1
# 'smgtb': b'', # (set_tb_margin) Sets both top and bottom margins to #1, #2
# 'smgt': b'', # (set_top_margin) Set top margin at current line
# 'smgtp': b'', # (set_top_margin_parm) Set top (bottom) margin at row #1
# 'smicm': b'', # (enter_micro_mode) Start micro-motion mode
'smir': b'\x1b[4h', # (enter_insert_mode) enter insert mode
'smkx': b'\x1b[?1h\x1b=', # (keypad_xmit) enter 'keyboard_transmit' mode
# 'smln': b'', # (label_on) turn on soft labels
'smm': b'\x1b[?1034h', # (meta_on) turn on meta mode (8th-bit on)
# 'smpch': b'', # (enter_pc_charset_mode) Enter PC character display mode
# 'smsc': b'', # (enter_scancode_mode) Enter PC scancode mode
'smso': b'\x1b[7m', # (enter_standout_mode) begin standout mode
'smul': b'\x1b[4m', # (enter_underline_mode) begin underline mode
# 'smxon': b'', # (enter_xon_mode) turn on xon/xoff handshaking
# 'snlq': b'', # (enter_near_letter_quality) Enter NLQ mode
# 'snrmq': b'', # (enter_normal_quality) Enter normal-quality mode
# 'sshm': b'', # (enter_shadow_mode) Enter shadow-print mode
# 'ssubm': b'', # (enter_subscript_mode) Enter subscript mode
# 'ssupm': b'', # (enter_superscript_mode) Enter superscript mode
# 'subcs': b'', # (subscript_characters) List of subscriptable characters
# 'sum': b'', # (enter_upward_mode) Start upward carriage motion
# 'supcs': b'', # (superscript_characters) List of superscriptable characters
# 'swidm': b'', # (enter_doublewide_mode) Enter double-wide mode
'tbc': b'\x1b[3g', # (clear_all_tabs) clear all tab stops (P)
# 'tone': b'', # (tone) select touch tone dialing
# 'tsl': b'', # (to_status_line) move to status line, column #1
# 'u0': b'', # (user0) User string #0
# 'u1': b'', # (user1) User string #1
# 'u2': b'', # (user2) User string #2
# 'u3': b'', # (user3) User string #3
# 'u4': b'', # (user4) User string #4
# 'u5': b'', # (user5) User string #5
'u6': b'\x1b[%i%d;%dR', # (user6) User string #6 [cursor position report (equiv. to ANSI/ECMA-48 CPR)]
'u7': b'\x1b[6n', # (user7) User string #7 [cursor position request (equiv. to VT100/ANSI/ECMA-48 DSR 6)]
'u8': b'\x1b[?%[;0123456789]c', # (user8) User string #8 [terminal answerback description]
'u9': b'\x1b[c', # (user9) User string #9 [terminal enquire string (equiv. to ANSI/ECMA-48 DA)]
# 'uc': b'', # (underline_char) underline char and move past it
'vpa': b'\x1b[%i%p1%dd', # (row_address) vertical position #1 absolute (P)
# 'wait': b'', # (wait_tone) wait for dial-tone
# 'wind': b'', # (set_window) current window is lines #1-#2 cols #3-#4
# 'wingo': b'', # (goto_window) go to window #1
# 'xoffc': b'', # (xoff_character) XOFF character
# 'xonc': b'', # (xon_character) XON character
# 'zerom': b'', # (zero_motion) No motion for subsequent character
}
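
if __name__ == "__main__":  # pragma: no cover
    # Minimal illustrative sketch: write a few of the raw capability strings to
    # an xterm-compatible terminal attached to stdout. Only non-parameterized
    # sequences are used, so no terminfo parameter (tparm-style) processing is
    # needed here.
    import sys

    out = sys.stdout.buffer
    out.write(STR_CAPS['smul'])   # begin underline
    out.write(b'underlined text')
    out.write(STR_CAPS['rmul'])   # end underline
    out.write(b'\n')
    out.flush()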
|
the-stack_0_7918 | # https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py
# https://mspries.github.io/jimmy_pendulum.html
#!/usr/bin/env python3
import time
import torch
import torch.multiprocessing as mp
import os, sys
print("PyTorch Version", torch.__version__)
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
from common.logger import get_logger
from rl_main import rl_utils
from common.fast_rl.rl_agent import float32_preprocessor
from common.fast_rl import actions, rl_agent, experience_single
from common.fast_rl.common import statistics, utils
from config.parameters import PARAMETERS as params
MODEL_SAVE_DIR = os.path.join(PROJECT_HOME, "out", "model_save_files")
if not os.path.exists(MODEL_SAVE_DIR):
os.makedirs(MODEL_SAVE_DIR)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
if torch.cuda.is_available():
device = torch.device("cuda" if params.CUDA else "cpu")
else:
device = torch.device("cpu")
my_logger = get_logger("openai_pendulum_d4pg")
DELTA_Z = (params.V_MAX - params.V_MIN) / (params.N_ATOMS - 1)
def play_func(exp_queue, env, net):
print(env.action_space.low[0], env.action_space.high[0])
action_min = env.action_space.low[0]
action_max = env.action_space.high[0]
action_selector = actions.EpsilonGreedyD4PGActionSelector(epsilon=params.EPSILON_INIT)
epsilon_tracker = actions.EpsilonTracker(
action_selector=action_selector,
eps_start=params.EPSILON_INIT,
eps_final=params.EPSILON_MIN,
eps_frames=params.EPSILON_MIN_STEP
)
agent = rl_agent.AgentD4PG(
net, n_actions=1, action_selector=action_selector,
action_min=action_min, action_max=action_max, device=device, preprocessor=float32_preprocessor
)
experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(
env, agent, gamma=params.GAMMA, steps_count=params.N_STEP, step_length=-1
)
exp_source_iter = iter(experience_source)
if params.DRAW_VIZ:
stat = statistics.StatisticsForPolicyBasedRL(method="policy_gradient")
else:
stat = None
step_idx = 0
best_mean_episode_reward = 0.0
with utils.RewardTracker(params=params, frame=False, stat=stat) as reward_tracker:
while step_idx < params.MAX_GLOBAL_STEP:
            # advance one step and put the experience into exp_queue
step_idx += 1
exp = next(exp_source_iter)
exp_queue.put(exp)
epsilon_tracker.udpate(step_idx)
episode_rewards = experience_source.pop_episode_reward_lst()
if episode_rewards:
current_episode_reward = episode_rewards[0]
solved, mean_episode_reward = reward_tracker.set_episode_reward(
current_episode_reward, step_idx, epsilon=action_selector.epsilon
)
model_save_condition = [
reward_tracker.mean_episode_reward > best_mean_episode_reward,
step_idx > params.EPSILON_MIN_STEP
]
if reward_tracker.mean_episode_reward > best_mean_episode_reward:
best_mean_episode_reward = reward_tracker.mean_episode_reward
if all(model_save_condition) or solved:
rl_agent.save_model(
MODEL_SAVE_DIR, params.ENVIRONMENT_ID.value, net.__name__, net, step_idx, mean_episode_reward
)
if solved:
break
exp_queue.put(None)
def main():
mp.set_start_method('spawn')
env = rl_utils.get_environment(owner="worker", params=params)
print("env:", params.ENVIRONMENT_ID)
print("observation_space:", env.observation_space)
print("action_space:", env.action_space)
rl_algorithm = rl_utils.get_rl_algorithm(env=env, worker_id=0, logger=my_logger, params=params)
exp_queue = mp.Queue(maxsize=params.TRAIN_STEP_FREQ * 2)
play_proc = mp.Process(target=play_func, args=(exp_queue, env, rl_algorithm.model))
play_proc.start()
time.sleep(0.5)
step_idx = 0
while play_proc.is_alive():
step_idx += params.TRAIN_STEP_FREQ
exp = None
for _ in range(params.TRAIN_STEP_FREQ):
exp = exp_queue.get()
if exp is None:
play_proc.join()
break
rl_algorithm.buffer._add(exp)
if len(rl_algorithm.buffer) < params.MIN_REPLAY_SIZE_FOR_TRAIN:
continue
if exp is not None and exp.last_state is None:
for _ in range(3):
rl_algorithm.train_net(step_idx=step_idx)
if __name__ == "__main__":
main()
|
the-stack_0_7919 | #!/usr/bin/env python
import os
import numpy as np
import gippy as gp
import unittest
import gippy.test as gpt
# from nose.tools import raises
"""
Included are some tests for doing processing in NumPy instead of Gippy,
for doing speed comparisons. To see the durations of each test use:
$ nosetests test --with-timer -v
"""
class GeoRasterTests(unittest.TestCase):
""" Speed tests vs NumPy """
def setUp(self):
""" Configure options """
gp.Options.set_verbose(1)
gp.Options.set_chunksize(256.0)
def test_size(self):
""" Retrieve size and dimension in pixels """
# note that xsize and ysize are redefined in GeoRaster from
# GeoResource, thus it is tested again
geoimg = gp.GeoImage.create(xsz=500, ysz=1500)
self.assertEqual(geoimg.xsize(), 500)
self.assertEqual(geoimg.ysize(), 1500)
self.assertEqual(geoimg.size(), 1500*500)
def test_type(self):
""" Set datatype on create and verify """
geoimg = gp.GeoImage.create(dtype='uint32')
self.assertEqual(geoimg.type().string(), 'uint32')
def test_naming(self):
""" Get basename and desription """
fout = 'test-image.tif'
bname = os.path.splitext(fout)[0]
bandnames = ['red', 'green', 'blue']
geoimg = gp.GeoImage.create(fout, nb=3)
geoimg.set_bandnames(bandnames)
for i in range(0, 3):
self.assertEqual(geoimg[i].description(), bandnames[i])
self.assertEqual(geoimg[i].basename(), '%s[%s]' % (bname, i))
os.remove(fout)
# TODO - test color
def test_gain_and_offset(self):
""" Set and retrieve gain and offset """
fout = 'test-gainoffset.tif'
gains = [2.0, 3.0]
offsets = [4.0, 5.0]
geoimg = gp.GeoImage.create(fout, nb=2)
geoimg[0].set_gain(gains[0])
geoimg[1].set_gain(gains[1])
geoimg[0].set_offset(offsets[0])
geoimg[1].set_offset(offsets[1])
# check persistance
geoimg = None
geoimg = gp.GeoImage(fout)
for i in range(0, 2):
self.assertEqual(geoimg[i].gain(), gains[i])
self.assertEqual(geoimg[i].offset(), offsets[i])
os.remove(fout)
def test_nodata(self):
""" Set nodata and retrieve """
fout = 'test-nodata.tif'
geoimg = gp.GeoImage.create(fout, xsz=100, ysz=100)
geoimg.set_nodata(1)
self.assertEqual(geoimg[0].nodata(), 1)
geoimg = None
geoimg = gp.GeoImage(fout)
self.assertEqual(geoimg[0].nodata(), 1)
# check that entire array is nan
arr = np.where(geoimg.read() == np.nan)
self.assertEqual(len(arr[0]), 0)
self.assertEqual(len(arr[1]), 0)
os.remove(fout)
def test_bandmeta(self):
""" Set metadata on band and retrieve """
fout = 'test-meta.tif'
geoimg = gp.GeoImage.create(fout, xsz=100, ysz=100)
geoimg[0].add_bandmeta('TESTKEY', 'TESTVALUE')
geoimg = None
geoimg = gp.GeoImage(fout)
self.assertEqual(geoimg[0].bandmeta('TESTKEY'), 'TESTVALUE')
os.remove(fout)
# TODO - test masking
def test_stats(self):
""" Calculate statistics using gippy """
geoimg = gpt.get_test_image()
for band in geoimg:
stats = band.stats()
mask = band.data_mask() == 1
# check against numpy
arr = band.read()
self.assertAlmostEqual(arr[mask].min(), stats[0])
self.assertAlmostEqual(arr[mask].max(), stats[1])
self.assertAlmostEqual(arr[mask].mean(), stats[2], places=2)
def test_scale(self):
""" Scale image to byte range """
geoimg = gpt.get_test_image()
for band in geoimg:
band = band.autoscale(minout=1, maxout=255, percent=2.0)
self.assertTrue(band.min() == 1)
self.assertTrue(band.max() == 255)
def test_histogram(self):
""" Calculate histogram of blank data """
geoimg = gp.GeoImage.create(xsz=10, ysz=10, nb=2)
arr = np.arange(10).reshape(1, 10) + 1
for i in range(9):
arr = np.append(arr, arr, axis=0)
geoimg[0].write(arr.astype('uint8'))
hist = geoimg[0].histogram(bins=10, normalize=False)
self.assertEqual(hist[0], 10)
self.assertEqual(hist.sum(), geoimg.size())
hist = geoimg[0].histogram(bins=10)
self.assertAlmostEqual(hist.sum(), 1.0)
self.assertAlmostEqual(hist[0], 0.1)
hist = geoimg[0].histogram(bins=10, normalize=False, cumulative=True)
self.assertAlmostEqual(hist[-1], geoimg.size())
def test_real_histogram(self):
""" Calculate histogram of real data """
geoimg = gpt.get_test_image()
hist = geoimg[0].histogram(normalize=False)
self.assertEqual(len(hist), 100)
self.assertEqual(hist.sum(), geoimg.size())
def test_sqrt(self):
""" Calculate sqrt of image """
geoimg = gpt.get_test_image().select(['red', 'green', 'swir1', 'nir'])
for band in geoimg:
vals = band.sqrt().read()
mask = band.data_mask() == 1
# check against numpy
arr = band.read()
self.assertTrue((vals[mask] == np.sqrt(arr[mask])).any())
# TODO - test processing functions
# Test filters
def test_laplacian(self):
""" Test with laplacian filter """
geoimg = gp.GeoImage.create(xsz=10, ysz=10)
arr = geoimg.read()
arr[:, 0:6] = 1
geoimg[0].write(arr)
arrout = geoimg[0].laplacian().read()
self.assertEqual(arrout[0, 5], -1.)
self.assertEqual(arrout[0, 6], 1.)
def test_convolve(self):
""" Convolve an image with a 3x3 kernel """
geoimg = gp.GeoImage.create(xsz=10, ysz=10)
arr = geoimg.read() + 1
geoimg[0].write(arr)
kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
arrout = geoimg[0].convolve(kernel, boundary=False).read()
self.assertEqual(arrout[0, 0], 4)
self.assertEqual(arrout[5, 5], 9)
self.assertEqual(arrout[5, 0], 6)
def test_skeletonize(self):
""" Skeletonize a binary imager """
geoimg = gp.GeoImage.create(xsz=10, ysz=10)
arr = geoimg.read()
arr[3:8, :] = 1
geoimg[0].write(arr)
arrout = geoimg[0].skeletonize().read()
def test_write(self):
""" Write arrays of different datatype """
geoimg = gp.GeoImage.create(xsz=100, ysz=100, dtype='uint8')
arr = np.ones((100, 100)).astype('uint8')
geoimg[0].write(arr)
self.assertTrue(np.array_equal(arr, geoimg[0].read()))
arr = np.ones((100, 100)).astype('float32')
geoimg[0].write(arr)
self.assertTrue(np.array_equal(arr, geoimg[0].read()))
"""
def test_invalid_args(self):
# Check that invalid arguments throw error
geoimg = gippy.GeoImage.create(xsz=100, ysz=100, dtype='uint8')
try:
geoimg[0].write('invalid arg')
geoimg[0].write([1.0, 1.0])
self.assertTrue(False)
except:
pass
"""
|
the-stack_0_7921 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import scipy
import six
import runtime.temp_file as temp_file
import runtime.xgboost as xgboost_extended
import shap
import xgboost as xgb
from runtime import db, explainer
from runtime.dbapi.paiio import PaiIOConnection
from runtime.feature.compile import compile_ir_feature_columns
from runtime.feature.derivation import get_ordered_field_descs
from runtime.model import EstimatorType
from runtime.model.model import Model
from runtime.pai.pai_distributed import define_tf_flags
FLAGS = define_tf_flags()
def explain(datasource,
select,
explainer,
model_params,
result_table,
model,
pai_table="",
oss_model_path="",
oss_dest=None,
oss_ak=None,
oss_sk=None,
oss_endpoint=None,
oss_bucket_name=None):
"""TBD
"""
if model_params is None:
model_params = {}
summary_params = dict()
for k in model_params:
if k.startswith("summary."):
summary_key = k.replace("summary.", "")
summary_params[summary_key] = model_params[k]
bst = xgb.Booster()
if isinstance(model, six.string_types):
with temp_file.TemporaryDirectory(as_cwd=True):
model = Model.load_from_db(datasource, model)
bst.load_model("my_model")
else:
assert isinstance(model,
Model), "not supported model type %s" % type(model)
bst.load_model("my_model")
fc_map_ir = model.get_meta("features")
label_meta = model.get_meta("label").get_field_desc()[0].to_dict(
dtype_to_string=True)
field_descs = get_ordered_field_descs(fc_map_ir)
feature_column_names = [fd.name for fd in field_descs]
feature_metas = dict([(fd.name, fd.to_dict(dtype_to_string=True))
for fd in field_descs])
is_pai = True if pai_table else False
# NOTE: in the current implementation, we are generating a transform_fn
# from the COLUMN clause. The transform_fn is executed during the process
# of dumping the original data into DMatrix SVM file.
compiled_fc = compile_ir_feature_columns(fc_map_ir, EstimatorType.XGBOOST)
transform_fn = xgboost_extended.feature_column.ComposedColumnTransformer(
feature_column_names, *compiled_fc["feature_columns"])
dataset = xgb_shap_dataset(datasource, select, feature_column_names,
label_meta, feature_metas, is_pai, pai_table,
transform_fn)
if explainer == "XGBoostExplainer":
xgb_native_explain(bst, datasource, result_table)
else:
# when explainer is "" or "TreeExplainer" use SHAP by default.
shap_explain(bst,
datasource,
dataset,
summary_params,
result_table,
is_pai=is_pai,
oss_dest=oss_dest,
oss_ak=oss_ak,
oss_sk=oss_sk,
oss_endpoint=oss_endpoint,
oss_bucket_name=oss_bucket_name)
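
# Illustrative call (the datasource string, table and model names below are
# hypothetical):
#
#   explain(
#       datasource="mysql://root:root@tcp(127.0.0.1:3306)/iris",
#       select="SELECT * FROM iris.train",
#       explainer="TreeExplainer",
#       model_params={"summary.plot_type": "dot"},
#       result_table="iris.explain_result",
#       model="my_xgb_model",
#   )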
def shap_explain(booster,
datasource,
dataset,
summary_params,
result_table="",
is_pai=False,
oss_dest=None,
oss_ak=None,
oss_sk=None,
oss_endpoint=None,
oss_bucket_name=None):
tree_explainer = shap.TreeExplainer(booster)
shap_values = tree_explainer.shap_values(dataset)
if result_table:
if is_pai:
conn = PaiIOConnection.from_table(result_table)
else:
conn = db.connect_with_data_source(datasource)
        # TODO(typhoonzero): the shap_values may be a
# list of shape [3, num_samples, num_features],
# use the first dimension here, should find out
# when to use the other two. When shap_values is
# not a list it can be directly used.
if isinstance(shap_values, list):
to_write = shap_values[0]
else:
to_write = shap_values
columns = list(dataset.columns)
with db.buffered_db_writer(conn, result_table, columns) as w:
for row in to_write:
w.write(list(row))
conn.close()
if summary_params.get("plot_type") == "decision":
shap_interaction_values = tree_explainer.shap_interaction_values(
dataset)
expected_value = tree_explainer.expected_value
if isinstance(shap_interaction_values, list):
shap_interaction_values = shap_interaction_values[0]
if isinstance(expected_value, list):
expected_value = expected_value[0]
plot_func = lambda: shap.decision_plot( # noqa: E731
expected_value,
shap_interaction_values,
dataset,
show=False,
feature_display_range=slice(None, -40, -1),
alpha=1)
else:
plot_func = lambda: shap.summary_plot( # noqa: E731
shap_values, dataset, show=False, **summary_params)
explainer.plot_and_save(plot_func,
oss_dest=oss_dest,
oss_ak=oss_ak,
oss_sk=oss_sk,
oss_endpoint=oss_endpoint,
oss_bucket_name=oss_bucket_name,
filename='summary')
def xgb_native_explain(booster, datasource, result_table):
if not result_table:
raise ValueError(
"XGBoostExplainer must use with INTO to output result to a table.")
gain_map = booster.get_score(importance_type="gain")
fscore_map = booster.get_fscore()
conn = db.connect_with_data_source(datasource)
all_feature_keys = list(gain_map.keys())
all_feature_keys.sort()
columns = ["feature", "fscore", "gain"]
with db.buffered_db_writer(conn, result_table, columns) as w:
for fkey in all_feature_keys:
row = [fkey, fscore_map[fkey], gain_map[fkey]]
w.write(list(row))
conn.close()
def infer_data_type(feature):
if isinstance(feature, np.ndarray):
if feature.dtype == np.float32 or feature.dtype == np.float64:
return 'float32'
elif feature.dtype == np.int32 or feature.dtype == np.int64:
return 'int64'
else:
            raise ValueError('Unsupported data type {}'.format(
                feature.dtype))
elif isinstance(feature, (np.float32, np.float64, float)):
return 'float32'
elif isinstance(feature, (np.int32, np.int64, six.integer_types)):
return 'int64'
else:
        raise ValueError('Unsupported data type {}'.format(type(feature)))
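# For example, infer_data_type(np.array([1, 2], dtype=np.int64)) returns 'int64',
# while infer_data_type(3.5) returns 'float32'.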
def xgb_shap_dataset(datasource,
select,
feature_column_names,
label_meta,
feature_metas,
is_pai,
pai_explain_table,
transform_fn=None):
if is_pai:
        # (TODO: lhw) we may specify pai_explain_table in the datasource
        # and discard the conditional here
conn = PaiIOConnection.from_table(pai_explain_table)
stream = db.db_generator(conn, None, label_meta)
else:
conn = db.connect_with_data_source(datasource)
stream = db.db_generator(conn, select, label_meta)
selected_cols = db.selected_cols(conn, select)
if transform_fn:
feature_names = transform_fn.get_feature_column_names()
else:
feature_names = feature_column_names
xs = None
dtypes = []
sizes = []
offsets = []
i = 0
for row, label in stream():
features = db.read_features_from_row(row,
selected_cols,
feature_column_names,
feature_metas,
is_xgboost=True)
if transform_fn:
features = transform_fn(features)
flatten_features = []
for j, feature in enumerate(features):
if len(feature) == 3: # convert sparse to dense
col_indices, values, dense_shape = feature
size = int(np.prod(dense_shape))
row_indices = np.zeros(shape=[col_indices.size])
sparse_matrix = scipy.sparse.csr_matrix(
(values, (row_indices, col_indices)), shape=[1, size])
values = sparse_matrix.toarray()
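                # values is now a dense array of shape (1, size); positions not
                # listed in col_indices are filled with zeros.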
else:
values = feature[0]
if isinstance(values, np.ndarray):
flatten_features.extend(values.flatten().tolist())
if i == 0:
sizes.append(values.size)
dtypes.append(infer_data_type(values))
else:
flatten_features.append(values)
if i == 0:
sizes.append(1)
dtypes.append(infer_data_type(values))
# Create the column name according to the feature number
# of each column.
#
# If the column "c" contains only 1 feature, the result
# column name would be "c" too.
#
# If the column "c" contains 3 features,
# the result column name would be "c_0", "c_1" and "c_2"
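            # For example, with sizes == [3] and feature_names == ['c'],
            # column_names becomes ['c_0', 'c_1', 'c_2']; a single-feature
            # column keeps its original name.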
if i == 0:
offsets = np.cumsum([0] + sizes)
column_names = []
for j in six.moves.range(len(offsets) - 1):
start = offsets[j]
end = offsets[j + 1]
if end - start == 1:
column_names.append(feature_names[j])
else:
for k in six.moves.range(start, end):
column_names.append('{}_{}'.format(
feature_names[j], k))
xs = pd.DataFrame(columns=column_names)
xs.loc[i] = flatten_features
i += 1
columns = xs.columns
for i, dtype in enumerate(dtypes):
for j in six.moves.range(offsets[i], offsets[i + 1]):
xs[columns[j]] = xs[columns[j]].astype(dtype)
return xs
|
the-stack_0_7922 | from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
l,r = 0,len(nums)-1
while l<r:
mid = (l+r)//2
if nums[mid]<nums[mid+1]:
l=mid+1
else:
r=mid
return l
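# Example (illustrative): Solution().findPeakElement([1, 2, 3, 1]) returns 2, since
# nums[2] == 3 is greater than both neighbours; the binary search runs in O(log n).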
|
the-stack_0_7925 | """Support for Nanoleaf Lights."""
from __future__ import annotations
import math
from typing import Any
from aionanoleaf import Nanoleaf
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ColorMode,
LightEntity,
LightEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from . import NanoleafEntryData
from .const import DOMAIN
from .entity import NanoleafEntity
RESERVED_EFFECTS = ("*Solid*", "*Static*", "*Dynamic*")
DEFAULT_NAME = "Nanoleaf"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Nanoleaf light."""
entry_data: NanoleafEntryData = hass.data[DOMAIN][entry.entry_id]
async_add_entities([NanoleafLight(entry_data.device, entry_data.coordinator)])
class NanoleafLight(NanoleafEntity, LightEntity):
"""Representation of a Nanoleaf Light."""
_attr_supported_color_modes = {ColorMode.COLOR_TEMP, ColorMode.HS}
_attr_supported_features = LightEntityFeature.EFFECT | LightEntityFeature.TRANSITION
def __init__(self, nanoleaf: Nanoleaf, coordinator: DataUpdateCoordinator) -> None:
"""Initialize the Nanoleaf light."""
super().__init__(nanoleaf, coordinator)
self._attr_unique_id = nanoleaf.serial_no
self._attr_name = nanoleaf.name
self._attr_min_mireds = math.ceil(1000000 / nanoleaf.color_temperature_max)
self._attr_max_mireds = kelvin_to_mired(nanoleaf.color_temperature_min)
@property
def brightness(self) -> int:
"""Return the brightness of the light."""
return int(self._nanoleaf.brightness * 2.55)
@property
def color_temp(self) -> int:
"""Return the current color temperature."""
return kelvin_to_mired(self._nanoleaf.color_temperature)
@property
def effect(self) -> str | None:
"""Return the current effect."""
# The API returns the *Solid* effect if the Nanoleaf is in HS or CT mode.
# The effects *Static* and *Dynamic* are not supported by Home Assistant.
# These reserved effects are implicitly set and are not in the effect_list.
# https://forum.nanoleaf.me/docs/openapi#_byoot0bams8f
return (
None if self._nanoleaf.effect in RESERVED_EFFECTS else self._nanoleaf.effect
)
@property
def effect_list(self) -> list[str]:
"""Return the list of supported effects."""
return self._nanoleaf.effects_list
@property
def icon(self) -> str:
"""Return the icon to use in the frontend, if any."""
return "mdi:triangle-outline"
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._nanoleaf.is_on
@property
def hs_color(self) -> tuple[int, int]:
"""Return the color in HS."""
return self._nanoleaf.hue, self._nanoleaf.saturation
@property
def color_mode(self) -> ColorMode | None:
"""Return the color mode of the light."""
# According to API docs, color mode is "ct", "effect" or "hs"
# https://forum.nanoleaf.me/docs/openapi#_4qgqrz96f44d
if self._nanoleaf.color_mode == "ct":
return ColorMode.COLOR_TEMP
# Home Assistant does not have an "effect" color mode, just report hs
return ColorMode.HS
async def async_turn_on(self, **kwargs: Any) -> None:
"""Instruct the light to turn on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
hs_color = kwargs.get(ATTR_HS_COLOR)
color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
effect = kwargs.get(ATTR_EFFECT)
transition = kwargs.get(ATTR_TRANSITION)
if effect:
if effect not in self.effect_list:
raise ValueError(
f"Attempting to apply effect not in the effect list: '{effect}'"
)
await self._nanoleaf.set_effect(effect)
elif hs_color:
hue, saturation = hs_color
await self._nanoleaf.set_hue(int(hue))
await self._nanoleaf.set_saturation(int(saturation))
elif color_temp_mired:
await self._nanoleaf.set_color_temperature(
mired_to_kelvin(color_temp_mired)
)
if transition:
if brightness: # tune to the required brightness in n seconds
await self._nanoleaf.set_brightness(
int(brightness / 2.55), transition=int(kwargs[ATTR_TRANSITION])
)
else: # If brightness is not specified, assume full brightness
await self._nanoleaf.set_brightness(100, transition=int(transition))
else: # If no transition is occurring, turn on the light
await self._nanoleaf.turn_on()
if brightness:
await self._nanoleaf.set_brightness(int(brightness / 2.55))
async def async_turn_off(self, **kwargs: Any) -> None:
"""Instruct the light to turn off."""
transition: float | None = kwargs.get(ATTR_TRANSITION)
await self._nanoleaf.turn_off(None if transition is None else int(transition))
|
the-stack_0_7926 | from datetime import datetime, timedelta, date
import logging
import traceback
from decimal import *
import json
import calendar
import geojson
import requests
import io
from django.conf import settings
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import redirect
from django.utils import timezone
from dateutil.tz.tz import tzoffset
from pytz import timezone as pytimezone
from ledger.payments.models import Invoice,OracleInterface,CashTransaction
from ledger.payments.utils import oracle_parser_on_invoice,update_payments
from ledger.checkout.utils import create_basket_session, create_checkout_session, place_order_submission, get_cookie_basket
from mooring.models import (MooringArea, Mooringsite, MooringsiteRate, MooringsiteBooking, Booking, BookingInvoice, MooringsiteBookingRange, Rate, MooringAreaBookingRange,MooringAreaStayHistory, MooringsiteRate, MarinaEntryRate, BookingVehicleRego, AdmissionsBooking, AdmissionsOracleCode, AdmissionsRate, AdmissionsLine, ChangePricePeriod, CancelPricePeriod, GlobalSettings, MooringAreaGroup, AdmissionsLocation, ChangeGroup, CancelGroup, BookingPeriod, BookingPeriodOption, AdmissionsBookingInvoice, BookingAnnualAdmission)
from mooring import models
from mooring.serialisers import BookingRegoSerializer, MooringsiteRateSerializer, MarinaEntryRateSerializer, RateSerializer, MooringsiteRateReadonlySerializer, AdmissionsRateSerializer
from mooring.emails import send_booking_invoice,send_booking_confirmation
from mooring import emails
from oscar.apps.order.models import Order
from ledger.payments.invoice import utils
logger = logging.getLogger('booking_checkout')
def create_booking_by_class(campground_id, campsite_class_id, start_date, end_date, num_adult=0, num_concession=0, num_child=0, num_infant=0, num_mooring=0, vessel_size=0):
"""Create a new temporary booking in the system."""
# get campground
campground = MooringArea.objects.get(pk=campground_id)
# TODO: date range check business logic
# TODO: number of people check? this is modifiable later, don't bother
# the MooringsiteBooking table runs the risk of a race condition,
# wrap all this behaviour up in a transaction
with transaction.atomic():
# fetch all the campsites and applicable rates for the campground
sites_qs = Mooringsite.objects.filter(
mooringarea=campground,
campsite_class=campsite_class_id
)
if not sites_qs.exists():
raise ValidationError('No matching campsites found.')
# get availability for sites, filter out the non-clear runs
availability = get_campsite_availability(sites_qs, start_date, end_date)
excluded_site_ids = set()
for site_id, dates in availability.items():
if not all([v[0] == 'open' for k, v in dates.items()]):
excluded_site_ids.add(site_id)
# create a list of campsites without bookings for that period
sites = [x for x in sites_qs if x.pk not in excluded_site_ids]
if not sites:
raise ValidationError('Mooringsite class unavailable for specified time period.')
# TODO: add campsite sorting logic based on business requirements
# for now, pick the first campsite in the list
site = sites[0]
# Prevent booking if max people passed
total_people = num_adult + num_concession + num_child + num_infant + num_mooring
if total_people > site.max_people:
raise ValidationError('Maximum number of people exceeded for the selected campsite')
# Prevent booking if less than min people
if total_people < site.min_people:
raise ValidationError('Number of people is less than the minimum allowed for the selected campsite')
# Create a new temporary booking with an expiry timestamp (default 20mins)
booking = Booking.objects.create(
booking_type=3,
arrival=start_date,
departure=end_date,
details={
'num_adult': num_adult,
'num_concession': num_concession,
'num_child': num_child,
'num_infant': num_infant,
'num_mooring' : num_mooring,
'vessel_size' : vessel_size
},
expiry_time=timezone.now()+timedelta(seconds=settings.BOOKING_TIMEOUT),
mooringarea=campground
)
for i in range((end_date-start_date).days):
cb = MooringsiteBooking.objects.create(
campsite=site,
booking_type=3,
date=start_date+timedelta(days=i),
booking=booking
)
# On success, return the temporary booking
return booking
def create_booking_by_site(sites_qs, start_date, end_date, num_adult=0, num_concession=0, num_child=0, num_infant=0, num_mooring=0, vessel_size=0, cost_total=0, override_price=None, override_reason=None, override_reason_info=None, send_invoice=False, overridden_by=None, customer=None, updating_booking=False, override_checks=False):
"""Create a new temporary booking in the system for a set of specific campsites."""
# the CampsiteBooking table runs the risk of a race condition,
# wrap all this behaviour up in a transaction
campsite_qs = Mooringsite.objects.filter(pk__in=sites_qs)
with transaction.atomic():
# get availability for campsite, error out if booked/closed
availability = get_campsite_availability(campsite_qs, start_date, end_date, False)
for site_id, dates in availability.items():
if not override_checks:
if updating_booking:
if not all([v[0] in ['open','tooearly'] for k, v in dates.items()]):
raise ValidationError('Mooring unavailable for specified time period.')
else:
if not all([v[0] == 'open' for k, v in dates.items()]):
raise ValidationError('Mooring unavailable for specified time period.')
else:
if not all([v[0] in ['open','tooearly','closed'] for k, v in dates.items()]):
raise ValidationError('Mooring unavailable for specified time period.')
if not override_checks:
# Prevent booking if max people passed
total_people = num_adult + num_concession + num_child + num_infant
min_people = sum([cs.min_people for cs in campsite_qs])
max_people = sum([cs.max_people for cs in campsite_qs])
if total_people > max_people:
raise ValidationError('Maximum number of people exceeded')
# Prevent booking if less than min people
#if total_people < min_people:
# raise ValidationError('Number of people is less than the minimum allowed for the selected campsite(s)')
# Create a new temporary booking with an expiry timestamp (default 20mins)
booking = Booking.objects.create(
booking_type=3,
arrival=start_date,
departure=end_date,
details={
'num_adult': num_adult,
'num_concession': num_concession,
'num_child': num_child,
'num_infant': num_infant,
'num_mooring': num_mooring,
'vessel_size': vessel_size
},
cost_total = cost_total,
override_price = Decimal(override_price) if (override_price is not None) else None,
override_reason = override_reason,
override_reason_info = override_reason_info,
send_invoice = send_invoice,
overridden_by = overridden_by,
expiry_time=timezone.now()+timedelta(seconds=settings.BOOKING_TIMEOUT),
mooringarea=campsite_qs[0].mooringarea,
customer = customer
)
for cs in campsite_qs:
for i in range((end_date-start_date).days):
cb = MooringsiteBooking.objects.create(
campsite=cs,
booking_type=3,
date=start_date+timedelta(days=i),
booking=booking
)
# On success, return the temporary booking
return booking
def ooolldcreate_booking_by_site(campsite_id, start_date, end_date, num_adult=0, num_concession=0, num_child=0, num_infant=0,num_mooring=0,vessel_size=0,cost_total=0,customer=None,updating_booking=False):
"""Create a new temporary booking in the system for a specific campsite."""
# get campsite
sites_qs = Mooringsite.objects.filter(pk=campsite_id)
campsite = sites_qs.first()
# TODO: date range check business logic
# TODO: number of people check? this is modifiable later, don't bother
# the MooringsiteBooking table runs the risk of a race condition,
# wrap all this behaviour up in a transaction
with transaction.atomic():
# get availability for campsite, error out if booked/closed
availability = get_campsite_availability(sites_qs, start_date, end_date)
for site_id, dates in availability.items():
if updating_booking:
if not all([v[0] in ['open','tooearly'] for k, v in dates.items()]):
raise ValidationError('Mooringsite unavailable for specified time period.')
else:
if not all([v[0] == 'open' for k, v in dates.items()]):
raise ValidationError('Mooringsite unavailable for specified time period.')
# Prevent booking if max people passed
total_people = num_adult + num_concession + num_child + num_infant + num_mooring
if total_people > campsite.max_people:
raise ValidationError('Maximum number of people exceeded for the selected campsite')
# Prevent booking if less than min people
if total_people < campsite.min_people:
raise ValidationError('Number of people is less than the minimum allowed for the selected campsite')
# Create a new temporary booking with an expiry timestamp (default 20mins)
booking = Booking.objects.create(
booking_type=3,
arrival=start_date,
departure=end_date,
details={
'num_adult': num_adult,
'num_concession': num_concession,
'num_child': num_child,
'num_infant': num_infant,
'num_mooring': num_mooring,
'vessel_size': vessel_size
},
cost_total= Decimal(cost_total),
expiry_time=timezone.now()+timedelta(seconds=settings.BOOKING_TIMEOUT),
mooringarea=campsite.mooringarea,
customer = customer
)
for i in range((end_date-start_date).days):
cb = MooringsiteBooking.objects.create(
campsite=campsite,
booking_type=3,
date=start_date+timedelta(days=i),
booking=booking
)
# On success, return the temporary booking
return booking
def check_mooring_available_by_time(campsite_id, start_date_time, end_date_time):
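    # Returns True when the requested start or end time falls inside an existing
    # confirmed booking, or inside an unexpired temporary (in-progress) booking,
    # for the given mooring site; returns False otherwise.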
# Confirmed Bookings
start_time_count = MooringsiteBooking.objects.filter(
Q(campsite_id=campsite_id) &
( Q(from_dt__lte=start_date_time) & Q(to_dt__gte=start_date_time))
).exclude(booking_type__in=[3,4]).count()
end_time_count = MooringsiteBooking.objects.filter(
Q(campsite_id=campsite_id) &
( Q(from_dt__lte=end_date_time) & Q(to_dt__gte=end_date_time))
).exclude(booking_type__in=[3,4]).count()
# Temp bookings
start_time_temp_count = MooringsiteBooking.objects.filter(
Q(campsite_id=campsite_id) & Q(booking_type__in=[3]) & Q(booking__expiry_time__gte=datetime.today()) &
( Q(from_dt__lte=start_date_time) & Q(to_dt__gte=start_date_time))
).count()
end_time_temp_count = MooringsiteBooking.objects.filter(
Q(campsite_id=campsite_id) & Q(booking_type__in=[3]) & Q(booking__expiry_time__gte=datetime.today()) &
( Q(from_dt__lte=end_date_time) & Q(to_dt__gte=end_date_time))
).count()
if start_time_count > 0 or end_time_count > 0 or start_time_temp_count > 0 or end_time_temp_count >0:
return True
return False
def check_mooring_availablity(campsites_qs, start_date, end_date):
    if start_date != end_date:
        end_date = end_date - timedelta(days=1)
avail_results = get_campsite_availability(campsites_qs, start_date, end_date,None, None)
cs_array = {}
for av in avail_results:
open_periods = 0
closed_periods = 0
for date_rotate in avail_results[av]:
bp = avail_results[av][date_rotate][1]
for i in bp:
if avail_results[av][date_rotate][1][i] == 'open':
open_periods = open_periods + 1
else:
closed_periods = closed_periods + 1
cs_array[av] = { 'open_periods': open_periods, 'closed_periods': closed_periods}
return cs_array
def get_open_marinas(campsites_qs, start_date, end_date):
"""Fetch the set of Marine Parks (from a set of Mooring Sites) with spaces open over a range of visit dates."""
# short circuit: if start date is before today, return nothing
exclude_moorings = []
today = date.today()
#if start_date < today:
# return set()
campsites_qs = check_mooring_availablity(campsites_qs,start_date, end_date)
# remove from the campsite list any entries with bookings
# campsites_qs = campsites_qs.exclude(
# id__in=exclude_moorings
# mooringsitebooking__date__range=(start_date, end_date-timedelta(days=1))
# and also campgrounds where the book window is outside of the max advance range
# ).exclude(
# campground__max_advance_booking__lte=(start_date-today).days - 1
# mooringarea__max_advance_booking__lt=(start_date-today).days
# )
# get closures at campsite and campground level
# cgbr_qs = MooringAreaBookingRange.objects.filter(
# Q(campground__in=[x[0] for x in campsites_qs.distinct('mooringarea').values_list('mooringarea')]),
# Q(status=1),
# Q(range_start__lt=end_date) & (Q(range_end__gte=start_date)|Q(range_end__isnull=True))
# )
# cgbr = set([x[0] for x in cgbr_qs.values_list('campground')])
## cgbr = set([x[0] for x in cgbr_qs.values_list('campground')])
# csbr_qs = MooringsiteBookingRange.objects.filter(
# Q(campsite__in=campsites_qs),
# Q(status=1),
# Q(range_start__lt=end_date) & (Q(range_end__gte=start_date)|Q(range_end__isnull=True))
# )
# print csbr_qs
# csbr = set([x[0] for x in csbr_qs.values_list('campsite')])
# generate a campground-to-campsite-list map with closures removed
mooring_map = {}
for cs in campsites_qs:
# if cs == 5:
# pass
# else:
#mooring_map = {}
mooring_map[cs] = campsites_qs[cs]
#mooring_map.append(row)
# for cs in campsites_qs:
# if (cs.pk in csbr) or (cs.mooringarea.pk in cgbr):
# continue
# if cs.mooringarea.pk not in mooring_map:
# mooring_map[cs.mooringarea.pk] = []
# mooring_map[cs.mooringarea.pk].append(cs.pk)
return mooring_map
def generate_mooring_rate(mooringsites_qs,start_date, end_date, duration):
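    # Builds a {date: {campsite_id: MooringsiteRate}} map covering each day of the
    # requested range, using the rate whose date_start/date_end window spans that day.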
mooring_rate = {}
mooring_site_ids = []
search_filter = Q()
for ms in mooringsites_qs:
mooring_site_ids.append(ms.id)
search_filter |= Q(campsite_id=ms.id)
#print (mooring_site_ids)
mooring_rate_search_filter = Q()
mooring_rate_search_filter &= Q(search_filter)# & Q(date_start__lte=start_date) & Q(Q(date_end__gte=start_date) | Q(date_end=None))
#& Q(date_end__gte=end_date)
#& Q(date_end__lte=end_date)
mr_resp = MooringsiteRate.objects.filter(mooring_rate_search_filter).order_by('date_start')
#print (mr_resp)
for i in range(duration):
date_rotate_forward = start_date+timedelta(days=i)
mooring_rate[date_rotate_forward] = {}
for mrr in mr_resp:
# this is to account for None end dates..
if mrr.date_end is None:
mrr.date_end = datetime.today().date()+timedelta(days=90)
#+timedelta(days=90)
if mrr.date_start <= date_rotate_forward and mrr.date_end >= date_rotate_forward:
#print (str(mrr.id)+' '+str(mrr.date_start)+' '+str(mrr.date_end)+' '+str(mrr.campsite.id) )
#mooring_rate[date_rotate_forward] = {}
mooring_rate[date_rotate_forward][mrr.campsite_id] = mrr
#print (mooring_rate)
return mooring_rate
#for i in range(duration):
# date_rotate_forward = start_date+timedelta(days=i)
# print (date_rotate_forward)
# mooring_rate_search_filter = Q()
# mooring_rate_search_filter &= Q(search_filter) & Q(date_start__lte=date_rotate_forward) & Q(date_end__gte=date_rotate_forward)
# #print MooringsiteRate.objects.filter(campsite_id__in=[mooring_site_ids])
# #campsite_id__in=mooring_site_ids
# print (MooringsiteRate.objects.filter(mooring_rate_search_filter).query)
# mr = MooringsiteRate.objects.filter(mooring_rate_search_filter).order_by('date_start')
# #mr = MooringsiteRate.objects.filter(campsite_id__in=[1,2,3,4,5,6],date_start__lte=date_rotate_forward, date_end__gte=date_rotate_forward).order_by('date_start')
# for msr in mr:
# mooring_rate[date_rotate_forward] = {}
# mooring_rate[date_rotate_forward][msr.campsite.id] = msr
# # mooring_rate[date_rotate_forward][mr.campsite_id] = msr
# print (mr)
# print ("MOOO RATE")
# print (mooring_rate)
# if MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end__gte=date_rotate_forward).count() > 0:
# mooring_rate = MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end__gte=date_rotate_forward).order_by('-date_start')[0]
# else:
## if MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end=None).count() > 0:
# mooring_rate = MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end=None).order_by('-date_start')[0]
def get_campsite_availability(campsites_qs, start_date, end_date, ongoing_booking=None, request=None):
"""Fetch the availability of each mooring in a queryset over a range of visit dates."""
# fetch all of the single-day MooringsiteBooking objects within the date range for the sites
    end_date = end_date + timedelta(days=1)
start_date_time = datetime.strptime(str(start_date)+str(' 00:00'), '%Y-%m-%d %H:%M')
end_date_time = datetime.strptime(str(end_date)+str(' 23:59'), '%Y-%m-%d %H:%M')
booking_id = None
booking_period_option = None
today = date.today()
nowtime = datetime.today()
mooring_date_selected = {}
if ongoing_booking:
booking_id = ongoing_booking.id
#booking_period_option = ongoing_booking.booking_period_option
booking_old_id=None
if request is not None:
#if request.get('session', None):
if request:
if 'ps_booking_old' in request.session:
booking_old_id = request.session['ps_booking_old']
bookings_qs = MooringsiteBooking.objects.filter(
campsite__in=campsites_qs,
#date__gte=start_date,
#date__lt=end_date
from_dt__gte=start_date_time,
to_dt__lt=end_date_time,
#booking__expiry_time__gte=datetime.now()
).exclude(booking__id=booking_old_id).order_by('date', 'campsite__name')
# booking__expiry_time__gte=datetime.now()
booking_qs = None
# prefill all slots as 'open'
duration = (end_date-start_date).days
#results = {site.pk: {start_date+timedelta(days=i): ['open', ] for i in range(duration)} for site in campsites_qs}
# Build Hash of open periods
mooring_rate_hash = generate_mooring_rate(campsites_qs,start_date, end_date, duration)
results = {}
# return results
for site in campsites_qs:
results[site.pk] = {}
cgbr_qs = MooringAreaBookingRange.objects.filter(
Q(campground=site.mooringarea),
Q(status=1),
Q(range_start__lt=end_date_time+timedelta(days=1)) & (Q(range_end__gte=start_date_time-timedelta(days=3))|Q(range_end__isnull=True))
)
for i in range(duration):
date_rotate_forward = start_date+timedelta(days=i)
mooring_date_selected[date_rotate_forward] = 'notselected'
mooring_rate = None
if date_rotate_forward in mooring_rate_hash:
if site.pk in mooring_rate_hash[date_rotate_forward]:
mooring_rate = mooring_rate_hash[date_rotate_forward][site.pk]
#print mooring_rate
#print ("BOOKING PERIOD")
#print (mooring_rate.booking_period.booking_period.all())
#print ("MOORING RATE")
#print (mooring_rate)
#if MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end__gte=date_rotate_forward).count() > 0:
# mooring_rate = MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end__gte=date_rotate_forward).order_by('-date_start')[0]
#else:
# if MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end=None).count() > 0:
# mooring_rate = MooringsiteRate.objects.filter(campsite_id=site.pk,date_start__lte=date_rotate_forward, date_end=None).order_by('-date_start')[0]
#print (mooring_rate)
#print ("GET CMA 9")
#print (datetime.utcnow())
booking_period = {}
selection_period = {}
bp_result = []
if mooring_rate:
if mooring_rate.booking_period is None:
continue
bp_result = mooring_rate.booking_period.booking_period.all()
if bp_result is None:
continue
for bp in bp_result:
booking_period[bp.pk] = 'open'
selection_period[bp.pk] = 0
if bp.start_time is None or bp.finish_time is None:
booking_period[bp.pk] = 'closed'
continue
nowtimewa = nowtime+timedelta(hours=8)
start_dt = datetime.strptime(str(date_rotate_forward)+' '+str(bp.start_time), '%Y-%m-%d %H:%M:%S')
finish_dt = datetime.strptime(str(date_rotate_forward)+' '+str(bp.finish_time), '%Y-%m-%d %H:%M:%S')
if start_dt > finish_dt:
finish_dt = finish_dt+timedelta(days=1)
if date_rotate_forward < today:
booking_period[bp.pk] = 'closed'
if today == date_rotate_forward:
if ongoing_booking:
if ongoing_booking.old_booking is None:
pass
else:
if nowtime > start_dt:
pass
#booking_period[bp.pk] = 'closed'
else:
pass
#if nowtime > start_dt:
# booking_period[bp.pk] = 'closed'
for closure in cgbr_qs:
# CLOSURE INFORMATION
if closure.range_end:
c_range_end = closure.range_end
else:
c_range_end = closure.range_start
start = max(start_date, (closure.range_start+ timedelta(hours=8)).date() -timedelta(days=2))
end = min(end_date, (c_range_end + timedelta(hours=8)).date()) if c_range_end.date() else end_date
closure_range = (end-start).days + 1
closure_start = closure.range_start+ timedelta(hours=8)
closure_finish = c_range_end+timedelta(hours=8)
# BOOKING PERIOD
if closure_start.strftime('%Y-%m-%d %H:%M:%S') >= start_dt.strftime('%Y-%m-%d %H:%M:%S'):
if closure_start.strftime('%Y-%m-%d %H:%M:%S') <= finish_dt.strftime('%Y-%m-%d %H:%M:%S'):
booking_period[bp.pk] = 'closed'
if closure_finish.strftime('%Y-%m-%d %H:%M:%S') >= start_dt.strftime('%Y-%m-%d %H:%M:%S'):
if closure_finish.strftime('%Y-%m-%d %H:%M:%S') <= finish_dt.strftime('%Y-%m-%d %H:%M:%S'):
booking_period[bp.pk] = 'closed'
if closure_start.strftime('%Y-%m-%d %H:%M:%S') <= start_dt.strftime('%Y-%m-%d %H:%M:%S') and closure_finish.strftime('%Y-%m-%d %H:%M:%S') >= finish_dt.strftime('%Y-%m-%d %H:%M:%S'):
booking_period[bp.pk] = 'closed'
results[site.pk][date_rotate_forward] = ['closed',booking_period,selection_period, bp_result]
#results[site.pk][start_date+timedelta(days=i)] = ['closed',booking_period,selection_period, bp_result]
# Determine availablity
for b in bookings_qs:
if b.booking.id == booking_old_id:
continue
if b.booking.booking_type == 4:
print ("CANCELLED BOOKING")
continue
# Release booking availablity on Expired Bookings
if b.booking.booking_type == 3 or b.booking.booking_type == 5:
if b.booking.expiry_time is not None:
if b.booking.expiry_time < datetime.now(tz=timezone.utc):
continue
for i in range(duration):
date_rotate_forward = start_date+timedelta(days=i)
mooring_rate = None
if date_rotate_forward in mooring_rate_hash:
if b.campsite.id in mooring_rate_hash[date_rotate_forward]:
mooring_rate = mooring_rate_hash[date_rotate_forward][b.campsite.id]
if mooring_rate:
for bp in mooring_rate.booking_period.booking_period.all():
start_dt = datetime.strptime(str(date_rotate_forward)+' '+str(bp.start_time), '%Y-%m-%d %H:%M:%S')
finish_dt = datetime.strptime(str(date_rotate_forward)+' '+str(bp.finish_time), '%Y-%m-%d %H:%M:%S')
if start_dt > finish_dt:
finish_dt = finish_dt+timedelta(days=1)
from_dt = b.from_dt + timedelta(hours=8)
to_dt = b.to_dt + timedelta(hours=8)
if from_dt.strftime('%Y-%m-%d %H:%M:%S') >= start_dt.strftime('%Y-%m-%d %H:%M:%S'):
if from_dt.strftime('%Y-%m-%d %H:%M:%S') <= finish_dt.strftime('%Y-%m-%d %H:%M:%S'):
if date_rotate_forward in results[b.campsite.id]:
if results[b.campsite.id][date_rotate_forward][1][bp.id] != 'selected':
results[b.campsite.id][date_rotate_forward][1][bp.id] = 'closed'
if b.booking.id == booking_id:
if bp.id == b.booking_period_option.id:
results[b.campsite.id][date_rotate_forward][1][bp.id] = 'selected'
results[b.campsite.id][date_rotate_forward][2][bp.id] = b.id
mooring_date_selected[date_rotate_forward] = 'selected'
pass
if to_dt.strftime('%Y-%m-%d %H:%M:%S') >= start_dt.strftime('%Y-%m-%d %H:%M:%S'):
if to_dt.strftime('%Y-%m-%d %H:%M:%S') <= finish_dt.strftime('%Y-%m-%d %H:%M:%S'):
if date_rotate_forward in results[b.campsite.id]:
if bp.id in results[b.campsite.id][date_rotate_forward][1]:
if results[b.campsite.id][date_rotate_forward][1][bp.id] != 'selected':
results[b.campsite.id][date_rotate_forward][1][bp.id] = 'closed'
if b.booking.id == booking_id:
if bp.id == b.booking_period_option.id:
results[b.campsite.id][date_rotate_forward][1][bp.id] = 'selected'
results[b.campsite.id][date_rotate_forward][2][bp.id] = b.id
mooring_date_selected[date_rotate_forward] = 'selected'
pass
if from_dt.strftime('%Y-%m-%d %H:%M:%S') <= start_dt.strftime('%Y-%m-%d %H:%M:%S') and to_dt.strftime('%Y-%m-%d %H:%M:%S') >= finish_dt.strftime('%Y-%m-%d %H:%M:%S'):
if date_rotate_forward in results[b.campsite.id]:
if bp.id in results[b.campsite.id][date_rotate_forward][1]:
if results[b.campsite.id][date_rotate_forward][1][bp.id] != 'selected':
results[b.campsite.id][date_rotate_forward][1][bp.id] = 'closed'
if b.booking.id == booking_id:
if bp.id == b.booking_period_option.id:
results[b.campsite.id][date_rotate_forward][1][bp.id] = 'selected'
results[b.campsite.id][date_rotate_forward][2][bp.id] = b.id
mooring_date_selected[date_rotate_forward] = 'selected'
pass
# prevent other mooring from being selected for same day preventing mooring lockouts
for ma in results:
for ma_dt in results[ma]:
if mooring_date_selected[ma_dt] == 'selected':
for bp in results[ma][ma_dt][1]:
                    if results[ma][ma_dt][1][bp] == 'open':
                        results[ma][ma_dt][1][bp] = 'perday'
mooring_map = {cg[0]: [cs.pk for cs in campsites_qs if cs.mooringarea.pk == cg[0]] for cg in campsites_qs.distinct('mooringarea').values_list('mooringarea')}
today = date.today()
print ("GLOBA 1")
# strike out days after the max_advance_booking
for site in campsites_qs:
max_advance = None
max_advance_open_time = '00:00'
try:
group = MooringAreaGroup.objects.get(moorings__in=[site.mooringarea])
except:
group = None
if group:
globalsettings = GlobalSettings.objects.filter(mooring_group__in=[group,])
for gs in globalsettings:
if gs.key == 2:
#max_advance = int(GlobalSettings.objects.get(key=2, mooring_group__in=[group,]).value)
max_advance = int(gs.value)
if gs.key == 18:
max_advance_open_time = gs.value
#max_advance = int(GlobalSettings.objects.get(key=2, mooring_group__in=[group,]).value)
#if GlobalSettings.objects.filter(key=18, mooring_group__in=[group,]).count():
# max_advance_open_time = GlobalSettings.objects.get(key=18, mooring_group__in=[group,]).value
else:
qs = GlobalSettings.objects.filter(key=2)
highest_val = 0
for q in qs:
if int(q.value) > highest_val:
highest_val = int(q.value)
max_advance = highest_val
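        # Bookings for the furthest-away day only open at the configured opening time;
        # before that time of day, shorten the advance-booking window by one day.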
max_advance_open_time_dt = datetime.strptime(str(today)+' '+str(max_advance_open_time), '%Y-%m-%d %H:%M')
if nowtime > max_advance_open_time_dt:
pass
else:
max_advance = max_advance - 1
stop = today + timedelta(days=max_advance)
stop_mark = min(max(stop, start_date), end_date)
#if start_date > stop:
for i in range((end_date-stop_mark).days):
if stop_mark+timedelta(days=i) > stop:
results[site.pk][stop_mark+timedelta(days=i)][0] = 'toofar'
for b in results[site.pk][stop_mark+timedelta(days=i)][1]:
results[site.pk][stop_mark+timedelta(days=i)][1][b] = 'toofar'
# Get the current stay history
stay_history = None
if campsites_qs.count() > 0:
stay_history = MooringAreaStayHistory.objects.filter(
Q(range_start__lte=start_date,range_end__gte=start_date)|# filter start date is within period
Q(range_start__lte=end_date,range_end__gte=end_date)|# filter end date is within period
Q(Q(range_start__gt=start_date,range_end__lt=end_date)&Q(range_end__gt=today)) #filter start date is before and end date after period
,mooringarea=campsites_qs.first().mooringarea
)
if stay_history:
max_days = min([x.max_days for x in stay_history])
else:
max_days = settings.PS_MAX_BOOKING_LENGTH
# strike out days after the max_stay period
for site in campsites_qs:
stay_history = MooringAreaStayHistory.objects.filter(
Q(range_start__lte=start_date,range_end__gte=start_date)|# filter start date is within period
Q(range_start__lte=end_date,range_end__gte=end_date)|# filter end date is within period
Q(Q(range_start__gt=start_date,range_end__lt=end_date)&Q(range_end__gt=today)) #filter start date is before and end date after period
,mooringarea=site.mooringarea
)
if stay_history:
max_days = min([x.max_days for x in stay_history])
else:
max_days = settings.PS_MAX_BOOKING_LENGTH
stop = start_date + timedelta(days=max_days)
stop_mark = min(max(stop, start_date), end_date)
for i in range((end_date-stop_mark).days):
date_key = stop_mark+timedelta(days=i)
if date_key in results[site.pk]:
results[site.pk][stop_mark+timedelta(days=i)][0] = 'toofar'
for b in results[site.pk][stop_mark+timedelta(days=i)][1]:
if results[site.pk][stop_mark+timedelta(days=i)][1][b] == 'open':
results[site.pk][stop_mark+timedelta(days=i)][1][b] = 'maxstay'
return results
def get_visit_rates(campsites_qs, start_date, end_date):
"""Fetch the per-day pricing for each visitor type over a range of visit dates."""
# fetch the applicable rates for the campsites
rates_qs = MooringsiteRate.objects.filter(
Q(campsite__in=campsites_qs),
Q(date_start__lt=end_date) & (Q(date_end__gte=start_date)|Q(date_end__isnull=True))
).prefetch_related('rate')
# prefill all slots
duration = (end_date-start_date).days+1
results = {
site.pk: {
start_date+timedelta(days=i): {
'mooring': '0.00',
'adult': '0.00',
'child': '0.00',
'concession': '0.00',
'infant': '0.00',
'booking_period' : []
} for i in range(duration)
} for site in campsites_qs
}
# make a record of the earliest MooringsiteRate for each site
early_rates = {}
for rate in rates_qs:
#if rate.campsite.pk not in early_rates:
# early_rates[rate.campsite.pk] = rate
#elif early_rates[rate.campsite.pk].date_start > rate.date_start:
# early_rates[rate.campsite.pk] = rate
# for the period of the visit overlapped by the rate, set the amounts
start = max(start_date, rate.date_start)
end = min(end_date, rate.date_end) if rate.date_end else end_date
for i in range((end-start).days+1):
if rate.booking_period is None:
continue
booking_period = rate.booking_period.booking_period.all()
results[rate.campsite.pk][start+timedelta(days=i)]['mooring'] = str(rate.rate.mooring)
results[rate.campsite.pk][start+timedelta(days=i)]['adult'] = str(rate.rate.adult)
results[rate.campsite.pk][start+timedelta(days=i)]['concession'] = str(rate.rate.concession)
results[rate.campsite.pk][start+timedelta(days=i)]['child'] = str(rate.rate.child)
results[rate.campsite.pk][start+timedelta(days=i)]['infant'] = str(rate.rate.infant)
for b in booking_period:
if b.caption is None:
b.caption = ''
booking_period_row = {'id':b.id, 'period_name' : b.period_name, 'small_price': format(b.small_price,'.2f'), 'medium_price': format(b.medium_price,'.2f'), 'large_price' : format(b.large_price,'.2f'), 'start_time' : b.start_time, 'finish_time' : b.finish_time,'all_day' : b.all_day, 'caption': b.caption, 'created' : b.created }
# booking_period_row = {}
# booking_period_row['id'] = b.id
# booking_period_row['period_name'] = b.period_name
# , 'period_name' : b.period_name, 'small_price': str(b.small_price), 'medium_price': str(b.medium_price), 'large_price' : str(b.large_price), 'start_time' : str(b.start_time), 'finish_time' : str(b.finish_time),'all_day' : str(b.all_day), 'created' : str(b.created) )
results[rate.campsite.pk][start+timedelta(days=i)]['booking_period'].append(booking_period_row)
# complain if there's a Mooringsite without a MooringsiteRate
if len(early_rates) < rates_qs.count():
print('Missing Mooring Site Rate coverage!')
# for ease of testing against the old datasets, if the visit dates are before the first
# MooringsiteRate date, use that MooringsiteRate as the pricing model.
for site_pk, rate in early_rates.items():
if start_date < rate.date_start:
start = start_date
end = rate.date_start
for i in range((end-start).days):
results[site_pk][start+timedelta(days=i)]['mooring'] = str(rate.rate.mooring)
results[site_pk][start+timedelta(days=i)]['adult'] = str(rate.rate.adult)
results[site_pk][start+timedelta(days=i)]['concession'] = str(rate.rate.concession)
results[site_pk][start+timedelta(days=i)]['child'] = str(rate.rate.child)
results[site_pk][start+timedelta(days=i)]['infant'] = str(rate.rate.infant)
if rate.booking_period is None:
continue
for b in rate.booking_period.booking_period.all():
booking_period_row = {'id':b.id, 'period_name' : b.period_name, 'small_price': format(b.small_price,'.2f'), 'medium_price': format(b.medium_price,'.2f'), 'large_price' : format(b.large_price,'.2f'), 'start_time' : b.start_time, 'finish_time' : b.finish_time,'all_day' : b.all_day, 'created' : b.created }
results[site_pk][start+timedelta(days=i)]['booking_period'].append(booking_period_row)
return results
def get_available_campsitetypes(campground_id,start_date,end_date,_list=True):
try:
cg = MooringArea.objects.get(id=campground_id)
if _list:
available_campsiteclasses = []
else:
available_campsiteclasses = {}
for _class in cg.campsite_classes:
sites_qs = Mooringsite.objects.all()
# sites_qs = Mooringsite.objects.filter(
# campground=campground_id,
# mooringsite_class=_class
# )
            # NOTE: sites_qs was previously reset to None here, which made the
            # .exists() call below raise AttributeError; keep the queryset assigned above.
            if sites_qs.exists():
# get availability for sites, filter out the non-clear runs
availability = get_campsite_availability(sites_qs, start_date, end_date)
excluded_site_ids = set()
for site_id, dates in availability.items():
if not all([v[0] == 'open' for k, v in dates.items()]):
excluded_site_ids.add(site_id)
# create a list of campsites without bookings for that period
sites = [x for x in sites_qs if x.pk not in excluded_site_ids]
if sites:
if not _list:
available_campsiteclasses[_class] = sites
else:
available_campsiteclasses.append(_class)
return available_campsiteclasses
except MooringArea.DoesNotExist:
raise Exception('The campsite you are searching does not exist')
except:
raise
def get_available_campsites_list(campsite_qs,request, start_date, end_date):
from mooring.serialisers import MooringsiteSerialiser
campsites = get_campsite_availability(campsite_qs, start_date, end_date)
available = []
for camp in campsites:
av = [item for sublist in campsites[camp].values() for item in sublist]
if ('booked' not in av):
if ('closed' not in av):
available.append(MooringsiteSerialiser(Mooringsite.objects.filter(id = camp),many=True,context={'request':request}).data[0])
return available
def get_available_campsites_list_booking(campsite_qs,request, start_date, end_date,booking):
'''
Used to get the available campsites in the selected period
and the ones currently attached to a booking
'''
from mooring.serialisers import MooringsiteSerialiser
campsites = get_campsite_availability(campsite_qs, start_date, end_date)
available = []
for camp in campsites:
av = [item for sublist in campsites[camp].values() for item in sublist]
if ('booked' not in av or camp in booking.campsite_id_list):
if ('closed' not in av):
available.append(MooringsiteSerialiser(Mooringsite.objects.filter(id = camp),many=True,context={'request':request}).data[0])
#complete = [MooringsiteSerialiser(Mooringsite.objects.filter(id = camp),many=True,context={'request':request}).data[0]]
return available
def get_campsite_current_rate(request,campsite_id,start_date,end_date):
res = []
if start_date and end_date:
start_date = datetime.strptime(start_date,"%Y-%m-%d").date()
end_date = datetime.strptime(end_date,"%Y-%m-%d").date()
for single_date in daterange(start_date, end_date):
price_history = MooringsiteRate.objects.filter(campsite=campsite_id,date_start__lte=single_date).order_by('-date_start')
if price_history:
rate = RateSerializer(price_history[0].rate,context={'request':request}).data
rate['campsite'] = campsite_id
res.append({
"date" : single_date.strftime("%Y-%m-%d"),
"rate" : rate
})
return res
def get_park_entry_rate(request,start_date):
res = []
if start_date:
start_date = datetime.strptime(start_date,"%Y-%m-%d").date()
price_history = MarinaEntryRate.objects.filter(period_start__lte = start_date).order_by('-period_start')
if price_history:
serializer = MarinaEntryRateSerializer(price_history,many=True,context={'request':request})
res = serializer.data[0]
return res
def override_lineitems(override_price, override_reason, total_price, oracle_code, override_reason_info=""):
invoice_line = []
if oracle_code:
#if override_reason:
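        # a - a - a == -a: the override is recorded as a negative (discount) line amount.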
discount = Decimal(override_price) - Decimal(override_price) - Decimal(override_price)
invoice_line.append({"ledger_description": '{} - {}'.format(override_reason.text, override_reason_info), "quantity": 1, 'price_incl_tax': discount, 'oracle_code': oracle_code, 'line_status': 1})
return invoice_line
def nononline_booking_lineitems(oracle_code, request):
invoice_line = []
if oracle_code:
group = MooringAreaGroup.objects.filter(members__in=[request.user])
value = GlobalSettings.objects.get(mooring_group=group, key=0).value
if Decimal(value) > 0:
invoice_line.append({'ledger_description': 'Phone Booking Fee', 'quantity': 1, 'price_incl_tax': Decimal(value), 'oracle_code': oracle_code, 'line_status': 1})
# invoice_line.append({'ledger_description': 'Phone Booking Fee', 'quantity': 1, 'price_incl_tax': Decimal(value), 'oracle_code': oracle_code})
return invoice_line
def admission_lineitems(lines):
invoice_lines = []
if lines:
for line in lines:
if line['guests'] > 0:
invoice_lines.append({'ledger_description': 'Admissions {} - {} ({} guests)'.format(line['from'], line['to'], line['guests']), "quantity": 1, 'price_incl_tax': line['admissionFee'], "oracle_code": line['oracle_code'], 'line_status': 1})
# invoice_lines.append({'ledger_description': 'Admissions {} - {} ({} guests)'.format(line['from'], line['to'], line['guests']), "quantity": 1, 'price_incl_tax': line['admissionFee'], "oracle_code": line['oracle_code']})
return invoice_lines
def calculate_price_booking_cancellation(booking, overide_cancel_fees=False):
current_date_time = datetime.strptime(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
nowtime = datetime.today()
nowtimec = datetime.strptime(nowtime.strftime('%Y-%m-%d'),'%Y-%m-%d')
mg = MooringAreaGroup.objects.all()
booking = MooringsiteBooking.objects.filter(booking=booking)
cancellation_fees = []
adjustment_fee = Decimal('0.00')
#{'additional_fees': 'true', 'description': 'Booking Change Fee','amount': Decimal('0.00')}
for ob in booking:
changed = True
#for bc in booking_changes:
# if bc.campsite == ob.campsite and ob.from_dt == bc.from_dt and ob.to_dt == bc.to_dt and ob.booking_period_option == bc.booking_period_option:
# changed = False
from_dt = datetime.strptime(ob.from_dt.strftime('%Y-%m-%d'),'%Y-%m-%d')
daystillbooking = (from_dt-nowtimec).days
cancel_policy = None
cancel_fee_amount = '0.00'
#change_price_period = CancelPricePeriod.objects.filter(id=ob.booking_period_option.cancel_group_id).order_by('days')
cancel_group = CancelGroup.objects.get(id=ob.booking_period_option.cancel_group_id)
cancel_price_period = cancel_group.cancel_period.all().order_by('days')
mooring_group =None
for i in mg:
if i.moorings.count() > 0:
mooring_group = i.moorings.all()[0].id
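        # Walk the cancellation periods in ascending "days" order and keep the last
        # one whose day threshold is still within the days left before the booking starts.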
for cpp in cancel_price_period:
if daystillbooking < 0:
daystillbooking = 0
if daystillbooking >= cpp.days:
cancel_policy =cpp
if cancel_policy:
if cancel_policy.calulation_type == 0:
# Percentage
cancel_fee_amount = float(ob.amount) * (cancel_policy.percentage / 100)
elif cancel_policy.calulation_type == 1:
cancel_fee_amount = cancel_policy.amount
# Fixed Pricing
description = 'Mooring {} ({} - {})'.format(ob.campsite.mooringarea.name,ob.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'),ob.to_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'))
if overide_cancel_fees is True:
cancellation_fees.append({'additional_fees': 'true', 'description': 'Refund - '+description,'amount': str(ob.amount - ob.amount - ob.amount), 'mooring_group': mooring_group, 'oracle_code': str(ob.campsite.mooringarea.oracle_code)})
else:
if datetime.strptime(ob.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%Y-%m-%d %H:%M:%S'),'%Y-%m-%d %H:%M:%S') < current_date_time:
#cancellation_fees.append({'additional_fees': 'true', 'description': 'Past Booking - '+description,'amount': Decimal('0.00'), 'mooring_group': mooring_group})
cancellation_fees.append({'additional_fees': 'true', 'description': 'Past Booking - '+description,'amount': Decimal('0.00'), 'mooring_group': mooring_group, 'oracle_code': str(ob.campsite.mooringarea.oracle_code)})
else:
#change_fees['amount'] = str(refund_amount)
cancellation_fees.append({'additional_fees': 'true', 'description': 'Cancel Fee - '+description,'amount': cancel_fee_amount, 'mooring_group': mooring_group, 'oracle_code': str(ob.campsite.mooringarea.oracle_code)})
cancellation_fees.append({'additional_fees': 'true', 'description': 'Refund - '+description,'amount': str(ob.amount - ob.amount - ob.amount), 'mooring_group': mooring_group, 'oracle_code': str(ob.campsite.mooringarea.oracle_code)})
#cancellation_fees.append({'additional_fees': 'true', 'description': 'Cancel Fee - '+description,'amount': cancel_fee_amount, 'mooring_group': mooring_group})
#cancellation_fees.append({'additional_fees': 'true', 'description': 'Refund - '+description,'amount': str(ob.amount - ob.amount - ob.amount), 'mooring_group': mooring_group})
else:
print ("NO CANCELATION POLICY")
#else:
# adjustment_fee = ob.amount + adjustment_fee
#change_fees.append({'additional_fees': 'true', 'description': 'Mooring Adjustment Credit' ,'amount': str(adjustment_fee - adjustment_fee - adjustment_fee)})
return cancellation_fees
def calculate_price_booking_change(old_booking, new_booking,overide_change_fees=False):
nowtime = datetime.today()
nowtimec = datetime.strptime(nowtime.strftime('%Y-%m-%d'),'%Y-%m-%d')
old_booking_mooring = MooringsiteBooking.objects.filter(booking=old_booking)
booking_changes = MooringsiteBooking.objects.filter(booking=new_booking)
change_fees = []
adjustment_fee = Decimal('0.00')
mg = MooringAreaGroup.objects.all()
#{'additional_fees': 'true', 'description': 'Booking Change Fee','amount': Decimal('0.00')}
for ob in old_booking_mooring:
changed = True
for bc in booking_changes:
if bc.campsite == ob.campsite and ob.from_dt == bc.from_dt and ob.to_dt == bc.to_dt and ob.booking_period_option == bc.booking_period_option:
changed = False
from_dt = datetime.strptime(ob.from_dt.strftime('%Y-%m-%d'),'%Y-%m-%d')
daystillbooking = (from_dt-nowtimec).days
refund_policy = None
for i in mg:
if i.moorings.count() > 0:
mooring_group = i.moorings.all()[0].id
if changed is True:
change_fee_amount = '0.00'
# change_price_period = ChangePricePeriod.objects.filter(id=ob.booking_period_option.change_group_id).order_by('-days')
change_group = ChangeGroup.objects.get(id=ob.booking_period_option.change_group_id)
change_price_period = change_group.change_period.all().order_by('days')
for cpp in change_price_period:
if daystillbooking < 0:
daystillbooking = 0
# if cpp.days >= daystillbooking:
if daystillbooking >= cpp.days:
refund_policy =cpp
if refund_policy:
if refund_policy.calulation_type == 0:
# Percentage
change_fee_amount = float(ob.amount) * (refund_policy.percentage / 100)
elif refund_policy.calulation_type == 1:
change_fee_amount = refund_policy.amount
# Fixed Pricing
description = 'Mooring {} ({} - {})'.format(ob.campsite.mooringarea.name,ob.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'),ob.to_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'))
if overide_change_fees is True:
change_fees.append({'additional_fees': 'true', 'description': 'Refund - '+description,'amount': str(format(ob.amount - ob.amount - ob.amount, '.2f')), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group, 'line_status': 3})
else:
#change_fees['amount'] = str(refund_amount)
#change_fees.append({'additional_fees': 'true', 'description': 'Change Fee - '+description,'amount': float(change_fee_amount), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group})
#change_fees.append({'additional_fees': 'true', 'description': 'Refund - '+description,'amount': str(ob.amount - ob.amount - ob.amount), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group})
change_fees.append({'additional_fees': 'true', 'description': 'Change Fee - '+description,'amount': str(format(change_fee_amount, '.2f')), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group, 'line_status': 2})
change_fees.append({'additional_fees': 'true', 'description': 'Refund - '+description,'amount': str(format(ob.amount - ob.amount - ob.amount, '.2f')), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group, 'line_status': 3})
else:
print ("NO REFUND POLICY")
else:
#description = 'Mooring {} ({} - {})'.format(ob.campsite.mooringarea.name,ob.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'),ob.to_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'))
adjustment_fee = float('0.00')
adjustment_fee = float(ob.amount) + adjustment_fee
description = 'Mooring {} ({} - {})'.format(ob.campsite.mooringarea.name,ob.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'),ob.to_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'))
# change_fees.append({'additional_fees': 'true', 'description': 'Adjustment - '+description ,'amount': str(adjustment_fee - adjustment_fee - adjustment_fee), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group})
change_fees.append({'additional_fees': 'true', 'description': 'Adjustment - '+description ,'amount': str(format(adjustment_fee - adjustment_fee - adjustment_fee, '.2f')), 'oracle_code': str(ob.campsite.mooringarea.oracle_code), 'mooring_group': mooring_group, 'line_status': 3})
return change_fees
def calculate_price_admissions_cancel(adBooking, change_fees, overide_cancel_fees=False):
ad_lines = AdmissionsLine.objects.filter(admissionsBooking=adBooking)
for line in ad_lines:
if line.arrivalDate > date.today() or overide_cancel_fees is True:
description = "Admission ({}) for {} guest(s)".format(datetime.strftime(line.arrivalDate, '%d/%m/%Y'), adBooking.total_admissions)
oracle_code = AdmissionsOracleCode.objects.filter(mooring_group=line.location.mooring_group)[0]
change_fees.append({'additional_fees': 'true', 'description': 'Refund - ' + description,'amount': str(line.cost - line.cost - line.cost), 'oracle_code': str(oracle_code.oracle_code), 'mooring_group': line.location.mooring_group.id, 'line_status': 3})
return change_fees
def calculate_price_admissions_change(adBooking, change_fees):
ad_lines = AdmissionsLine.objects.filter(admissionsBooking=adBooking)
for line in ad_lines:
description = "Admission ({}) for {} guest(s)".format(datetime.strftime(line.arrivalDate, '%d/%m/%Y'), adBooking.total_admissions)
oracle_code = AdmissionsOracleCode.objects.filter(mooring_group=line.location.mooring_group)[0]
# Fees
change_fees.append({'additional_fees': 'true', 'description': 'Adjustment - ' + description,'amount': str(line.cost - line.cost - line.cost), 'oracle_code': str(oracle_code.oracle_code), 'mooring_group': line.location.mooring_group.id, 'line_status': 3 })
return change_fees
def price_or_lineitems(request,booking,campsite_list,lines=True,old_booking=None):
total_price = Decimal(0)
booking_mooring = MooringsiteBooking.objects.filter(booking=booking)
booking_mooring_old = []
if booking.old_booking:
booking_mooring_old = MooringsiteBooking.objects.filter(booking=booking.old_booking)
invoice_lines = []
if lines:
for bm in booking_mooring:
line_status = 1
amount = bm.amount
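            # Apply any per-line price override recorded against this MooringsiteBooking id.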
if str(bm.id) in booking.override_lines:
amount = Decimal(booking.override_lines[str(bm.id)])
for ob in booking_mooring_old:
if bm.campsite == ob.campsite and ob.from_dt == bm.from_dt and ob.to_dt == bm.to_dt and ob.booking_period_option == bm.booking_period_option:
line_status = 2
invoice_lines.append({'ledger_description':'Mooring {} ({} - {})'.format(bm.campsite.mooringarea.name,bm.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'),bm.to_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p')),"quantity":1,"price_incl_tax":amount,"oracle_code":bm.campsite.mooringarea.oracle_code, 'line_status': line_status})
# invoice_lines.append({'ledger_description':'Mooring {} ({} - {})'.format(bm.campsite.mooringarea.name,bm.from_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p'),bm.to_dt.astimezone(pytimezone('Australia/Perth')).strftime('%d/%m/%Y %H:%M %p')),"quantity":1,"price_incl_tax":bm.amount,"oracle_code":bm.campsite.mooringarea.oracle_code})
return invoice_lines
else:
return total_price
def price_or_lineitems_extras(request,booking,change_fees,invoice_lines=[]):
total_price = Decimal(0)
booking_mooring = MooringsiteBooking.objects.filter(booking=booking)
for cf in change_fees:
# invoice_lines.append({'ledger_description':cf['description'],"quantity":1,"price_incl_tax":cf['amount'],"oracle_code":cf['oracle_code']})
invoice_lines.append({'ledger_description':cf['description'],"quantity":1,"price_incl_tax":cf['amount'],"oracle_code":cf['oracle_code'], 'line_status': cf['line_status']})
return invoice_lines
def old_price_or_lineitems(request,booking,campsite_list,lines=True,old_booking=None):
total_price = Decimal(0)
rate_list = {}
invoice_lines = []
if not lines and not old_booking:
raise Exception('An old booking is required if lines is set to false')
# Create line items for customers
daily_rates = [get_campsite_current_rate(request,c,booking.arrival.strftime('%Y-%m-%d'),booking.departure.strftime('%Y-%m-%d')) for c in campsite_list]
if not daily_rates:
raise Exception('There was an error while trying to get the daily rates.')
for rates in daily_rates:
for c in rates:
if c['rate']['campsite'] not in rate_list.keys():
rate_list[c['rate']['campsite']] = {c['rate']['id']:{'start':c['date'],'end':c['date'],'mooring': c['rate']['mooring'] ,'adult':c['rate']['adult'],'concession':c['rate']['concession'],'child':c['rate']['child'],'infant':c['rate']['infant']}}
else:
if c['rate']['id'] not in rate_list[c['rate']['campsite']].keys():
rate_list[c['rate']['campsite']] = {c['rate']['id']:{'start':c['date'],'end':c['date'],'mooring': c['rate']['mooring'], 'adult':c['rate']['adult'],'concession':c['rate']['concession'],'child':c['rate']['child'],'infant':c['rate']['infant']}}
else:
rate_list[c['rate']['campsite']][c['rate']['id']]['end'] = c['date']
# Get Guest Details
#guests = {}
#for k,v in booking.details.items():
# if 'num_' in k:
# guests[k.split('num_')[1]] = v
    ##### Above is for people quantity (moorings are not priced per person, they are priced per vessel)
    # guests is used as the quantity for the checkout basket items.
guests = {}
guests['mooring'] = 1
for k,v in guests.items():
if int(v) > 0:
for c,p in rate_list.items():
for i,r in p.items():
price = Decimal(0)
end = datetime.strptime(r['end'],"%Y-%m-%d").date()
start = datetime.strptime(r['start'],"%Y-%m-%d").date()
num_days = int ((end - start).days) + 1
campsite = Mooringsite.objects.get(id=c)
if lines:
price = str((num_days * Decimal(r[k])))
#if not booking.mooringarea.oracle_code:
# raise Exception('The mooringarea selected does not have an Oracle code attached to it.')
end_date = end + timedelta(days=1)
                        invoice_lines.append({'ledger_description':'Mooring fee {} ({} - {})'.format(k,start.strftime('%d-%m-%Y'),end_date.strftime('%d-%m-%Y')),"quantity":v,"price_incl_tax":price,"oracle_code":booking.mooringarea.oracle_code})
else:
price = (num_days * Decimal(r[k])) * v
total_price += price
# Create line items for vehicles
if lines:
vehicles = booking.regos.all()
else:
vehicles = old_booking.regos.all()
if vehicles:
if booking.mooringarea.park.entry_fee_required:
# Update the booking vehicle regos with the park entry requirement
vehicles.update(park_entry_fee=True)
if not booking.mooringarea.park.oracle_code:
raise Exception('A marine park entry Oracle code has not been set for the park that the mooringarea belongs to.')
park_entry_rate = get_park_entry_rate(request,booking.arrival.strftime('%Y-%m-%d'))
vehicle_dict = {
'vessel' : vehicles.filter(entry_fee=True, type='vessel'),
#'vehicle': vehicles.filter(entry_fee=True, type='vehicle'),
'motorbike': vehicles.filter(entry_fee=True, type='motorbike'),
'concession': vehicles.filter(entry_fee=True, type='concession')
}
for k,v in vehicle_dict.items():
if v.count() > 0:
if lines:
price = park_entry_rate[k]
regos = ', '.join([x[0] for x in v.values_list('rego')])
invoice_lines.append({
'ledger_description': 'Mooring fee - {}'.format(k),
'quantity': v.count(),
'price_incl_tax': price,
'oracle_code': booking.mooringarea.park.oracle_code
})
else:
price = Decimal(park_entry_rate[k]) * v.count()
total_price += price
if lines:
return invoice_lines
else:
return total_price
def get_admissions_entry_rate(request,start_date, location):
res = []
if start_date:
start_date = datetime.strptime(start_date,"%Y-%m-%d").date()
group = location.mooring_group
price_history = AdmissionsRate.objects.filter(mooring_group__in=[group,], period_start__lte = start_date).order_by('-period_start')
if price_history:
serializer = AdmissionsRateSerializer(price_history,many=True,context={'request':request})
res = serializer.data[0]
return res
def admissions_price_or_lineitems(request, admissionsBooking,lines=True):
total_price = Decimal(0)
rate_list = {}
invoice_lines = []
line = lines
daily_rates = []
# Create line items for customers
admissionsLines = AdmissionsLine.objects.filter(admissionsBooking=admissionsBooking)
for adLine in admissionsLines:
rate = get_admissions_entry_rate(request,adLine.arrivalDate.strftime('%Y-%m-%d'), adLine.location)
daily_rate = {'date' : adLine.arrivalDate.strftime('%d/%m/%Y'), 'rate' : rate}
daily_rates.append(daily_rate)
oracle_codes = AdmissionsOracleCode.objects.filter(mooring_group__in=[adLine.location.mooring_group,])
if not oracle_codes.count() > 0:
if request.user.is_staff:
raise Exception('Admissions Oracle Code missing, please set up in administration tool.')
else:
                raise Exception('Please alert {} of the following error message:\nAdmissions Oracle Code missing.'.format(adLine.location.mooring_group))
if not daily_rates or daily_rates == []:
raise Exception('There was an error while trying to get the daily rates.')
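    # A "Family" ticket bundles 2 adults + 2 children. When there is more than one adult and
    # more than one child, pair up as many bundles as possible and price the remainder
    # individually, e.g. 4 adults + 2 children -> 1 family ticket + 2 adults.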
family = 0
adults = admissionsBooking.noOfAdults
children = admissionsBooking.noOfChildren
if adults > 1 and children > 1:
if adults == children:
if adults % 2 == 0:
family = adults//2
adults = 0
children = 0
else:
adults -= 1
family = adults//2
adults = 1
children = 1
elif adults > children: #Adults greater - tickets based on children
if children % 2 == 0:
family = children//2
adults -= children
children = 0
else:
children -= 1
family = children//2
adults -= children
children = 1
else: #Children greater - tickets based on adults
if adults % 2 == 0:
family = adults//2
children -= adults
adults = 0
else:
adults -= 1
family = adults//2
children -= adults
adults = 1
people = {'Adults': adults,'Concessions': admissionsBooking.noOfConcessions,'Children': children,'Infants': admissionsBooking.noOfInfants, 'Family': family}
for adLine in admissionsLines:
for group, amount in people.items():
if line:
if (amount > 0):
if group == 'Adults':
gr = 'adult'
elif group == 'Children':
gr = group
elif group == 'Infants':
gr = 'infant'
elif group == 'Family':
gr = 'family'
if adLine.overnightStay:
costfield = gr.lower() + "_overnight_cost"
overnightStay = "Overnight Included"
else:
costfield = gr.lower() + "_cost"
overnightStay = "Day Visit Only"
daily_rate = next(item for item in daily_rates if item['date'] == adLine.arrivalDate.strftime('%d/%m/%Y'))['rate']
price = daily_rate.get(costfield)
oracle_codes = AdmissionsOracleCode.objects.filter(mooring_group=adLine.location.mooring_group)
if oracle_codes.count() > 0:
oracle_code = oracle_codes[0].oracle_code
invoice_lines.append({'ledger_description':'Admission fee on {} ({}) {}'.format(adLine.arrivalDate, group, overnightStay),"quantity":amount,"price_incl_tax":price, "oracle_code":oracle_code, 'line_status': 1})
else:
daily_rate = daily_rates[adLine.arrivalDate.strftime('%d/%m/%Y')]
price = Decimal(daily_rate)
                total_price += price
if line:
return invoice_lines
else:
return total_price
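# Compares the stay of two bookings: returns 1 for additional days, 2 for reduced days,
# 4 when arrival and departure both match, and 3 when the dates differ entirely.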
def check_date_diff(old_booking,new_booking):
if old_booking.arrival == new_booking.arrival and old_booking.departure == new_booking.departure:
return 4 # same days
elif old_booking.arrival == new_booking.arrival:
old_booking_days = int((old_booking.departure - old_booking.arrival).days)
new_days = int((new_booking.departure - new_booking.arrival).days)
if new_days > old_booking_days:
return 1 #additional days
else:
return 2 #reduced days
elif old_booking.departure == new_booking.departure:
old_booking_days = int((old_booking.departure - old_booking.arrival).days)
new_days = int((new_booking.departure - new_booking.arrival).days)
if new_days > old_booking_days:
return 1 #additional days
else:
return 2 #reduced days
else:
return 3 # different days
def get_diff_days(old_booking,new_booking,additional=True):
if additional:
return int((new_booking.departure - old_booking.departure).days)
return int((old_booking.departure - new_booking.departure).days)
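# Rebuilds the booking with the new dates/sites, moves the vehicles across, generates and
# pays a fresh invoice, transfers funds from the old booking's (now voided) invoices and
# re-points the new invoice at the old booking record.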
def create_temp_bookingupdate(request,arrival,departure,booking_details,old_booking,total_price):
    # delete all the campsites in the old booking so as to transfer them to the new booking
old_booking.campsites.all().delete()
booking = create_booking_by_site(booking_details['campsites'],
start_date = arrival,
end_date = departure,
num_adult = booking_details['num_adult'],
num_concession= booking_details['num_concession'],
num_child= booking_details['num_child'],
num_infant= booking_details['num_infant'],
num_mooring = booking_details['num_mooring'],
cost_total = total_price,
customer = old_booking.customer,
override_price=old_booking.override_price,
updating_booking = True,
override_checks=True
)
# Move all the vehicles to the new booking
for r in old_booking.regos.all():
r.booking = booking
r.save()
lines = price_or_lineitems(request,booking,booking.campsite_id_list)
booking_arrival = booking.arrival.strftime('%d-%m-%Y')
booking_departure = booking.departure.strftime('%d-%m-%Y')
reservation = u'Reservation for {} confirmation PS{}'.format(
u'{} {}'.format(booking.customer.first_name, booking.customer.last_name), booking.id)
# Proceed to generate invoice
checkout_response = checkout(request,booking,lines,invoice_text=reservation,internal=True)
# FIXME: replace with session check
invoice = None
if 'invoice=' in checkout_response.url:
invoice = checkout_response.url.split('invoice=', 1)[1]
else:
for h in reversed(checkout_response.history):
if 'invoice=' in h.url:
invoice = h.url.split('invoice=', 1)[1]
break
# create the new invoice
new_invoice = internal_create_booking_invoice(booking, invoice)
# Check if the booking is a legacy booking and doesn't have an invoice
if old_booking.legacy_id and old_booking.invoices.count() < 1:
        # Create a cash transaction in order to fix the outstanding invoice payment
CashTransaction.objects.create(
invoice = Invoice.objects.get(reference=new_invoice.invoice_reference),
amount = old_booking.cost_total,
type = 'move_in',
source = 'cash',
details = 'Transfer of funds from migrated booking',
movement_reference='Migrated Booking Funds'
)
# Update payment details for the new invoice
update_payments(new_invoice.invoice_reference)
# Attach new invoices to old booking
for i in old_booking.invoices.all():
inv = Invoice.objects.get(reference=i.invoice_reference)
inv.voided = True
#transfer to the new invoice
inv.move_funds(inv.transferable_amount,Invoice.objects.get(reference=new_invoice.invoice_reference),'Transfer of funds from {}'.format(inv.reference))
inv.save()
# Change the booking for the selected invoice
new_invoice.booking = old_booking
new_invoice.save()
return booking
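# Resolves the annual admission price for a vessel: finds the booking period group, the
# period option active right now and the vessel size category covering vessel_size,
# returning the matching price record (the response stays 'error' if any lookup misses).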
def get_annual_admissions_pricing_info(annual_booking_period_id,vessel_size):
nowdt = datetime.now()
price = '0.00'
annual_admissions = {'response': 'error', 'abpg': {}, 'abpo': {}, 'abpovc': {}}
if models.AnnualBookingPeriodGroup.objects.filter(id=int(annual_booking_period_id)).count() > 0:
abpg = models.AnnualBookingPeriodGroup.objects.get(id=int(annual_booking_period_id))
vsc = models.VesselSizeCategory.objects.filter(start_size__lte=Decimal(vessel_size),end_size__gte=Decimal(vessel_size))
abpo= models.AnnualBookingPeriodOption.objects.filter(start_time__lte=nowdt,finish_time__gte=nowdt,annual_booking_period_group=abpg)
if abpo.count() > 0 and vsc.count() > 0:
abpovc = models.AnnualBookingPeriodOptionVesselCategoryPrice.objects.filter(annual_booking_period_option=abpo[0],vessel_category=vsc[0])
price = abpovc[0].price
annual_admissions['abpg'] = abpg
if abpo.count() > 0:
annual_admissions['abpo'] = abpo[0]
if abpovc.count() > 0:
annual_admissions['abpovc'] = abpovc[0]
annual_admissions['response'] = 'success'
return annual_admissions
def iiiicreate_temp_bookingupdate(request,arrival,departure,booking_details,old_booking,total_price):
    # delete all the campsites in the old booking so as to transfer them to the new booking
old_booking.campsites.all().delete()
booking = create_booking_by_site(booking_details['campsites'][0],
start_date = arrival,
end_date = departure,
num_adult = booking_details['num_adult'],
num_concession= booking_details['num_concession'],
num_child= booking_details['num_child'],
num_infant= booking_details['num_infant'],
num_mooring = booking_details['num_mooring'],
cost_total = total_price,
customer = old_booking.customer,
updating_booking = True
)
# Move all the vehicles to the new booking
for r in old_booking.regos.all():
r.booking = booking
r.save()
lines = price_or_lineitems(request,booking,booking.campsite_id_list)
booking_arrival = booking.arrival.strftime('%d-%m-%Y')
booking_departure = booking.departure.strftime('%d-%m-%Y')
reservation = "Reservation for {} from {} to {} at {}".format('{} {}'.format(booking.customer.first_name,booking.customer.last_name),booking_arrival,booking_departure,booking.mooringarea.name)
# Proceed to generate invoice
checkout_response = checkout(request,booking,lines,invoice_text=reservation,internal=True)
internal_create_booking_invoice(booking, checkout_response)
# Get the new invoice
new_invoice = booking.invoices.first()
# Check if the booking is a legacy booking and doesn't have an invoice
if old_booking.legacy_id and old_booking.invoices.count() < 1:
        # Create a cash transaction in order to fix the outstanding invoice payment
CashTransaction.objects.create(
invoice = Invoice.objects.get(reference=new_invoice.invoice_reference),
amount = old_booking.cost_total,
type = 'move_in',
source = 'cash',
details = 'Transfer of funds from migrated booking',
movement_reference='Migrated Booking Funds'
)
# Update payment details for the new invoice
update_payments(new_invoice.invoice_reference)
# Attach new invoices to old booking
for i in old_booking.invoices.all():
inv = Invoice.objects.get(reference=i.invoice_reference)
inv.voided = True
#transfer to the new invoice
inv.move_funds(inv.transferable_amount,Invoice.objects.get(reference=new_invoice.invoice_reference),'Transfer of funds from {}'.format(inv.reference))
inv.save()
# Change the booking for the selected invoice
new_invoice.booking = old_booking
new_invoice.save()
return booking
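# Applies changes to an existing booking inside a transaction: compares dates, sites, guest
# details and vehicle regos with the old booking and returns early if nothing changed;
# otherwise rebuilds the booking via create_temp_bookingupdate, moves its site bookings and
# regos back onto the old booking and emails the updated invoice/confirmation.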
def update_booking(request,old_booking,booking_details):
same_dates = False
same_campsites = False
same_campground = False
same_details = False
same_vehicles = True
with transaction.atomic():
try:
set_session_booking(request.session, old_booking)
new_details = {}
new_details.update(old_booking.details)
# Update the guests
new_details['num_adult'] = booking_details['num_adult']
new_details['num_concession'] = booking_details['num_concession']
new_details['num_child'] = booking_details['num_child']
new_details['num_infant'] = booking_details['num_infant']
booking = Booking(
arrival = booking_details['start_date'],
departure =booking_details['end_date'],
details = new_details,
customer=old_booking.customer,
mooringarea = MooringArea.objects.get(id=booking_details['mooringarea']))
# Check that the departure is not less than the arrival
if booking.departure < booking.arrival:
raise Exception('The departure date cannot be before the arrival date')
today = datetime.now().date()
if today > old_booking.departure:
raise ValidationError('You cannot change a booking past the departure date.')
# Check if it is the same campground
if old_booking.mooringarea.id == booking.mooringarea.id:
same_campground = True
# Check if dates are the same
if (old_booking.arrival == booking.arrival) and (old_booking.departure == booking.departure):
same_dates = True
# Check if the campsite is the same
if sorted(old_booking.campsite_id_list) == sorted(booking_details['campsites']):
same_campsites = True
# Check if the details have changed
if new_details == old_booking.details:
same_details = True
# Check if the vehicles have changed
current_regos = old_booking.regos.all()
current_vehicle_regos= sorted([r.rego for r in current_regos])
# Add history
new_history = old_booking._generate_history(user=request.user)
if request.data.get('entryFees').get('regos'):
new_regos = request.data['entryFees'].pop('regos')
sent_regos = [r['rego'] for r in new_regos]
regos_serializers = []
update_regos_serializers = []
for n in new_regos:
if n['rego'] not in current_vehicle_regos:
n['booking'] = old_booking.id
regos_serializers.append(BookingRegoSerializer(data=n))
same_vehicles = False
else:
booking_rego = BookingVehicleRego.objects.get(booking=old_booking,rego=n['rego'])
n['booking'] = old_booking.id
if booking_rego.type != n['type'] or booking_rego.entry_fee != n['entry_fee']:
update_regos_serializers.append(BookingRegoSerializer(booking_rego,data=n))
# Create the new regos if they are there
if regos_serializers:
for r in regos_serializers:
r.is_valid(raise_exception=True)
r.save()
# Update the new regos if they are there
if update_regos_serializers:
for r in update_regos_serializers:
r.is_valid(raise_exception=True)
r.save()
same_vehicles = False
# Check if there are regos in place that need to be removed
stale_regos = []
for r in current_regos:
if r.rego not in sent_regos:
stale_regos.append(r.id)
# delete stale regos
if stale_regos:
same_vehicles = False
BookingVehicleRego.objects.filter(id__in=stale_regos).delete()
else:
same_vehicles = False
if current_regos:
current_regos.delete()
if same_campsites and same_dates and same_vehicles and same_details:
if new_history is not None:
new_history.delete()
return old_booking
# Check difference of dates in booking
old_booking_days = int((old_booking.departure - old_booking.arrival).days)
new_days = int((booking_details['end_date'] - booking_details['start_date']).days)
date_diff = check_date_diff(old_booking,booking)
total_price = price_or_lineitems(request,booking,booking_details['campsites'],lines=False,old_booking=old_booking)
price_diff = True
if old_booking.cost_total != total_price:
price_diff = True
if price_diff:
booking = create_temp_bookingupdate(request,booking.arrival,booking.departure,booking_details,old_booking,total_price)
# Attach campsite booking objects to old booking
for c in booking.campsites.all():
c.booking = old_booking
c.save()
                # Move all the vehicles in the new booking to the old booking
for r in booking.regos.all():
r.booking = old_booking
r.save()
old_booking.cost_total = booking.cost_total
old_booking.departure = booking.departure
old_booking.arrival = booking.arrival
old_booking.details.update(booking.details)
if not same_campground:
                    old_booking.mooringarea = booking.mooringarea
old_booking.save()
booking.delete()
delete_session_booking(request.session)
send_booking_invoice(old_booking)
# send out the confirmation email if the booking is paid or over paid
if old_booking.status == 'Paid' or old_booking.status == 'Over Paid':
send_booking_confirmation(old_booking,request)
return old_booking
except:
delete_session_booking(request.session)
print(traceback.print_exc())
raise
def create_or_update_booking(request,booking_details,updating=False,override_checks=False):
booking = None
if not updating:
booking = create_booking_by_site(booking_details['campsites'],
start_date = booking_details['start_date'],
end_date=booking_details['end_date'],
num_adult=booking_details['num_adult'],
num_concession=booking_details['num_concession'],
num_child=booking_details['num_child'],
num_infant=booking_details['num_infant'],
num_mooring=booking_details['num_mooring'],
vessel_size=booking_details['vessel_size'],
cost_total=booking_details['cost_total'],
override_price=booking_details['override_price'],
override_reason=booking_details['override_reason'],
override_reason_info=booking_details['override_reason_info'],
overridden_by=booking_details['overridden_by'],
customer=booking_details['customer'],
override_checks=override_checks
)
booking.details['first_name'] = booking_details['first_name']
booking.details['last_name'] = booking_details['last_name']
booking.details['phone'] = booking_details['phone']
booking.details['country'] = booking_details['country']
booking.details['postcode'] = booking_details['postcode']
# Add booking regos
if 'regos' in booking_details:
regos = booking_details['regos']
for r in regos:
r['booking'] = booking.id
regos_serializers = [BookingRegoSerializer(data=r) for r in regos]
for r in regos_serializers:
r.is_valid(raise_exception=True)
r.save()
booking.save()
return booking
def old_create_or_update_booking(request,booking_details,updating=False):
booking = None
if not updating:
booking = create_booking_by_site(campsite_id= booking_details['campsite_id'],
start_date = booking_details['start_date'],
end_date=booking_details['end_date'],
num_adult=booking_details['num_adult'],
num_concession=booking_details['num_concession'],
num_child=booking_details['num_child'],
num_infant=booking_details['num_infant'],
num_mooring=booking_details['num_mooring'],
vessel_size=booking_details['vessel_size'],
cost_total=booking_details['cost_total'],
customer=booking_details['customer'])
booking.details['first_name'] = booking_details['first_name']
booking.details['last_name'] = booking_details['last_name']
booking.details['phone'] = booking_details['phone']
booking.details['country'] = booking_details['country']
booking.details['postcode'] = booking_details['postcode']
# Add booking regos
if request.data.get('parkEntry').get('regos'):
regos = request.data['parkEntry'].pop('regos')
for r in regos:
r[u'booking'] = booking.id
regos_serializers = [BookingRegoSerializer(data=r) for r in regos]
for r in regos_serializers:
r.is_valid(raise_exception=True)
r.save()
booking.save()
return booking
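# Creates a ledger basket and checkout session for an admissions booking ('AD-' reference).
# Internal calls place the order immediately; public calls get a JSON redirect to the
# checkout, with the basket hash set as a cookie so anonymous users keep their basket.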
def admissionsCheckout(request, admissionsBooking, lines, invoice_text=None, vouchers=[], internal=False):
basket_params = {
'products': lines,
'vouchers': vouchers,
'system': settings.PS_PAYMENT_SYSTEM_ID,
'custom_basket': True,
'booking_reference': 'AD-'+str(admissionsBooking.id)
}
basket, basket_hash = create_basket_session(request, basket_params)
checkout_params = {
'system': settings.PS_PAYMENT_SYSTEM_ID,
'fallback_url': request.build_absolute_uri('/'),
'return_url': request.build_absolute_uri(reverse('public_admissions_success')),
'return_preload_url': request.build_absolute_uri(reverse('public_admissions_success')),
'force_redirect': True,
'proxy': True if internal else False,
'invoice_text': invoice_text,
}
if internal or request.user.is_anonymous():
checkout_params['basket_owner'] = admissionsBooking.customer.id
create_checkout_session(request, checkout_params)
if internal:
responseJson = place_order_submission(request)
else:
print(reverse('checkout:index'))
responseJson = HttpResponse(geojson.dumps({'status': 'success','redirect': reverse('checkout:index'),}), content_type='application/json')
# response = HttpResponseRedirect(reverse('checkout:index'))
# inject the current basket into the redirect response cookies
# or else, anonymous users will be directionless
responseJson.set_cookie(
settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
)
return responseJson
def get_basket(request):
return get_cookie_basket(settings.OSCAR_BASKET_COOKIE_OPEN,request)
def annual_admission_checkout(request, booking, lines, invoice_text=None, vouchers=[], internal=False):
basket_params = {
'products': lines,
'vouchers': vouchers,
'system': settings.PS_PAYMENT_SYSTEM_ID,
'custom_basket': True,
'booking_reference': 'AA-'+str(booking.id)
}
basket, basket_hash = create_basket_session(request, basket_params)
checkout_params = {
'system': settings.PS_PAYMENT_SYSTEM_ID,
'fallback_url': request.build_absolute_uri('/'),
'return_url': request.build_absolute_uri(reverse('public_booking_annual_admission_success')),
'return_preload_url': request.build_absolute_uri(reverse('public_booking_annual_admission_success')),
'force_redirect': True,
'proxy': True if internal else False,
'invoice_text': invoice_text,
}
if internal or request.user.is_anonymous():
checkout_params['basket_owner'] = booking.customer.id
create_checkout_session(request, checkout_params)
# if internal:
# response = place_order_submission(request)
# else:
response = HttpResponseRedirect(reverse('checkout:index'))
# inject the current basket into the redirect response cookies
# or else, anonymous users will be directionless
response.set_cookie(
settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
)
#if booking.cost_total < 0:
# response = HttpResponseRedirect('/refund-payment')
# response.set_cookie(
# settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
# max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
# secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
# )
## Zero booking costs
#if booking.cost_total < 1 and booking.cost_total > -1:
# response = HttpResponseRedirect('/no-payment')
# response.set_cookie(
# settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
# max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
# secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
# )
return response
def checkout(request, booking, lines, invoice_text=None, vouchers=[], internal=False):
basket_params = {
'products': lines,
'vouchers': vouchers,
'system': settings.PS_PAYMENT_SYSTEM_ID,
'custom_basket': True,
'booking_reference': 'PS-'+str(booking.id)
}
basket, basket_hash = create_basket_session(request, basket_params)
checkout_params = {
'system': settings.PS_PAYMENT_SYSTEM_ID,
'fallback_url': request.build_absolute_uri('/'),
'return_url': request.build_absolute_uri(reverse('public_booking_success')),
'return_preload_url': request.build_absolute_uri(reverse('public_booking_success')),
'force_redirect': True,
'proxy': True if internal else False,
'invoice_text': invoice_text,
}
# if not internal:
# checkout_params['check_url'] = request.build_absolute_uri('/api/booking/{}/booking_checkout_status.json'.format(booking.id))
if internal or request.user.is_anonymous():
checkout_params['basket_owner'] = booking.customer.id
print ("BOOKING ID 3")
print (request.session['ps_booking'])
create_checkout_session(request, checkout_params)
print ("BOOKING ID 4")
print (request.session['ps_booking'])
# if internal:
# response = place_order_submission(request)
# else:
#response = HttpResponseRedirect(reverse('checkout:index'))
response = HttpResponse("<script> window.location='"+reverse('checkout:index')+"';</script> <a href='"+reverse('checkout:index')+"'> Redirecting please wait: "+reverse('checkout:index')+"</a>")
# inject the current basket into the redirect response cookies
# or else, anonymous users will be directionless
response.set_cookie(
settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
)
if booking.cost_total < 0:
response = HttpResponseRedirect('/refund-payment')
response.set_cookie(
settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
)
# Zero booking costs
if booking.cost_total < 1 and booking.cost_total > -1:
response = HttpResponseRedirect('/no-payment')
response.set_cookie(
settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
)
return response
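# Creates a 'Refund Allocation Pool' invoice/order for the booking (PS-/AD-/AA- reference
# depending on the booking class) and attaches the resulting system invoice record to it.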
def allocate_failedrefund_to_unallocated(request, booking, lines, invoice_text=None, internal=False, order_total='0.00',user=None):
booking_reference = None
if booking.__class__.__name__ == 'AdmissionsBooking':
booking_reference = 'AD-'+str(booking.id)
elif booking.__class__.__name__ == 'BookingAnnualAdmission':
booking_reference = 'AA-'+str(booking.id)
else:
booking_reference = 'PS-'+str(booking.id)
basket_params = {
'products': lines,
'vouchers': [],
'system': settings.PS_PAYMENT_SYSTEM_ID,
'custom_basket': True,
'booking_reference': booking_reference
}
basket, basket_hash = create_basket_session(request, basket_params)
ci = utils.CreateInvoiceBasket()
order = ci.create_invoice_and_order(basket, total=None, shipping_method='No shipping required',shipping_charge=False, user=user, status='Submitted', invoice_text='Refund Allocation Pool', )
#basket.status = 'Submitted'
#basket.save()
#new_order = Order.objects.get(basket=basket)
new_invoice = Invoice.objects.get(order_number=order.number)
update_payments(new_invoice.reference)
if booking.__class__.__name__ == 'AdmissionsBooking':
print ("AdmissionsBooking")
book_inv, created = AdmissionsBookingInvoice.objects.get_or_create(admissions_booking=booking, invoice_reference=new_invoice.reference, system_invoice=True)
elif booking.__class__.__name__ == 'BookingAnnualAdmission':
print ("BookingAnnualAdmission")
book_inv, created = models.BookingAnnualInvoice.objects.get_or_create(booking_annual_admission=booking, invoice_reference=new_invoice.reference, system_invoice=True)
else:
book_inv, created = BookingInvoice.objects.get_or_create(booking=booking, invoice_reference=new_invoice.reference, system_invoice=True)
return order
def allocate_refund_to_invoice(request, booking, lines, invoice_text=None, internal=False, order_total='0.00',user=None):
booking_reference = None
if booking.__class__.__name__ == 'AdmissionsBooking':
booking_reference = 'AD-'+str(booking.id)
elif booking.__class__.__name__ == 'BookingAnnualAdmission':
booking_reference = 'AA-'+str(booking.id)
else:
booking_reference = 'PS-'+str(booking.id)
basket_params = {
'products': lines,
'vouchers': [],
'system': settings.PS_PAYMENT_SYSTEM_ID,
'custom_basket': True,
'booking_reference': booking_reference
}
basket, basket_hash = create_basket_session(request, basket_params)
ci = utils.CreateInvoiceBasket()
order = ci.create_invoice_and_order(basket, total=None, shipping_method='No shipping required',shipping_charge=False, user=user, status='Submitted', invoice_text='Oracle Allocation Pools', )
#basket.status = 'Submitted'
#basket.save()
#new_order = Order.objects.get(basket=basket)
new_invoice = Invoice.objects.get(order_number=order.number)
update_payments(new_invoice.reference)
if booking.__class__.__name__ == 'AdmissionsBooking':
print ("AdmissionsBooking")
book_inv, created = AdmissionsBookingInvoice.objects.get_or_create(admissions_booking=booking, invoice_reference=new_invoice.reference, system_invoice=True)
elif booking.__class__.__name__ == 'BookingAnnualAdmission':
print ("BookingAnnualAdmission")
book_inv, created = models.BookingAnnualInvoice.objects.get_or_create(booking_annual_admission=booking, invoice_reference=new_invoice.reference, system_invoice=True)
else:
book_inv, created = BookingInvoice.objects.get_or_create(booking=booking, invoice_reference=new_invoice.reference, system_invoice=True)
return order
def old_internal_create_booking_invoice(booking, checkout_response):
if not checkout_response.history:
raise Exception('There was a problem retrieving the invoice for this booking')
last_redirect = checkout_response.history[-2]
reference = last_redirect.url.split('=')[1]
try:
Invoice.objects.get(reference=reference)
except Invoice.DoesNotExist:
raise Exception("There was a problem attaching an invoice for this booking")
book_inv = BookingInvoice.objects.get_or_create(booking=booking,invoice_reference=reference)
return book_inv
def internal_create_booking_invoice(booking, reference):
try:
Invoice.objects.get(reference=reference)
except Invoice.DoesNotExist:
raise Exception("There was a problem attaching an invoice for this booking")
book_inv = BookingInvoice.objects.get_or_create(booking=booking,invoice_reference=reference)
return book_inv
def internal_booking(request,booking_details,internal=True,updating=False):
json_booking = request.data
booking = None
try:
booking = create_or_update_booking(request, booking_details, updating, override_checks=internal)
with transaction.atomic():
set_session_booking(request.session,booking)
# Get line items
booking_arrival = booking.arrival.strftime('%d-%m-%Y')
booking_departure = booking.departure.strftime('%d-%m-%Y')
reservation = u"Reservation for {} confirmation PS{}".format(u'{} {}'.format(booking.customer.first_name,booking.customer.last_name), booking.id)
lines = price_or_lineitems(request,booking,booking.campsite_id_list)
# Proceed to generate invoice
checkout_response = checkout(request,booking,lines,invoice_text=reservation,internal=True)
# Change the type of booking
booking.booking_type = 0
booking.save()
# FIXME: replace with session check
invoice = None
if 'invoice=' in checkout_response.url:
invoice = checkout_response.url.split('invoice=', 1)[1]
else:
for h in reversed(checkout_response.history):
if 'invoice=' in h.url:
invoice = h.url.split('invoice=', 1)[1]
break
print ("-== internal_booking ==-")
internal_create_booking_invoice(booking, invoice)
delete_session_booking(request.session)
send_booking_invoice(booking)
return booking
except:
if booking:
booking.delete()
raise
def old_internal_booking(request,booking_details,internal=True,updating=False):
json_booking = request.data
booking = None
try:
booking = create_or_update_booking(request,booking_details,updating)
with transaction.atomic():
set_session_booking(request.session,booking)
# Get line items
booking_arrival = booking.arrival.strftime('%d-%m-%Y')
booking_departure = booking.departure.strftime('%d-%m-%Y')
reservation = u"Reservation for {} from {} to {} at {}".format(u'{} {}'.format(booking.customer.first_name,booking.customer.last_name),booking_arrival,booking_departure,booking.mooringarea.name)
lines = price_or_lineitems(request,booking,booking.campsite_id_list)
# Proceed to generate invoice
checkout_response = checkout(request,booking,lines,invoice_text=reservation,internal=True)
# Change the type of booking
booking.booking_type = 0
booking.save()
internal_create_booking_invoice(booking, checkout_response)
delete_session_booking(request.session)
send_booking_invoice(booking)
return booking
except:
if booking:
booking.delete()
raise
def set_session_booking(session, booking):
session['ps_booking'] = booking.id
session.modified = True
def get_session_admissions_booking(session):
if 'ad_booking' in session:
booking_id = session['ad_booking']
else:
raise Exception('Admissions booking not in Session')
try:
return AdmissionsBooking.objects.get(id=booking_id)
except AdmissionsBooking.DoesNotExist:
raise Exception('Admissions booking not found for booking_id {}'.format(booking_id))
def get_annual_admission_session_booking(session):
if 'annual_admission_booking' in session:
booking_id = session['annual_admission_booking']
else:
raise Exception('Annual Admission Booking not in Session')
try:
return BookingAnnualAdmission.objects.get(id=booking_id)
except BookingAnnualAdmission.DoesNotExist:
raise Exception('Annual Admission Booking not found for booking_id {}'.format(booking_id))
def delete_annual_admission_session_booking(session):
if 'annual_admission_booking' in session:
del session['annual_admission_booking']
session.modified = True
def delete_session_admissions_booking(session):
if 'ad_booking' in session:
del session['ad_booking']
session.modified = True
def get_session_booking(session):
if 'ps_booking' in session:
booking_id = session['ps_booking']
else:
raise Exception('Booking not in Session')
try:
return Booking.objects.get(id=booking_id)
except Booking.DoesNotExist:
raise Exception('Booking not found for booking_id {}'.format(booking_id))
def delete_session_booking(session):
if 'ps_booking' in session:
del session['ps_booking']
session.modified = True
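# Yields each date from start_date (inclusive) up to end_date (exclusive),
# e.g. daterange(date(2020, 1, 1), date(2020, 1, 3)) yields 1 Jan and 2 Jan.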
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def oracle_integration(date,override):
system = '0516'
oracle_codes = oracle_parser_on_invoice(date,system,'Mooring Booking',override=override)
def admissions_lines(booking_mooring):
lines = []
for bm in booking_mooring:
# Convert the from and to dates of this booking to just plain dates in local time.
# Append them to a list.
if bm.campsite.mooringarea.park.entry_fee_required:
from_dt = bm.from_dt
timestamp = calendar.timegm(from_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
from_dt = local_dt.replace(microsecond=from_dt.microsecond)
to_dt = bm.to_dt
timestamp = calendar.timegm(to_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
to_dt = local_dt.replace(microsecond=to_dt.microsecond)
group = MooringAreaGroup.objects.filter(moorings__in=[bm.campsite.mooringarea,])[0].id
lines.append({'from': from_dt, 'to': to_dt, 'group':group})
# Sort the list by date from.
new_lines = sorted(lines, key=lambda line: line['from'])
i = 0
lines = []
latest_from = None
latest_to = None
# Loop through the list, if first instance, then this line's from date is the first admission fee.
# Then compare this TO value to the next FROM value. If they are not the same or overlapping dates
# add this date to the list, using the latest from and this TO value.
while i < len(new_lines):
if i == 0:
latest_from = new_lines[i]['from'].date()
if i < len(new_lines)-1:
if new_lines[i]['to'].date() < new_lines[i+1]['from'].date():
latest_to = new_lines[i]['to'].date()
else:
# if new_lines[i]['from'].date() > new_lines[i-1]['to'].date():
latest_to = new_lines[i]['to'].date()
if latest_to:
lines.append({"rowid":'admission_fee_id'+str(i), 'id': i,'from':datetime.strftime(latest_from, '%d %b %Y'), 'to': datetime.strftime(latest_to, '%d %b %Y'), 'admissionFee': 0, 'group': new_lines[i]['group']})
if i < len(new_lines)-1:
latest_from = new_lines[i+1]['from'].date()
latest_to = None
i+= 1
return lines
# Access Level check for Group
def mooring_group_access_level_change(pk,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
if ChangeGroup.objects.filter(pk=pk,mooring_group__in=mooring_groups).count() > 0:
return True
return False
def mooring_group_access_level_cancel(pk,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
if CancelGroup.objects.filter(pk=pk,mooring_group__in=mooring_groups).count() > 0:
return True
return False
def mooring_group_access_level_change_options(cg,pk,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
cpp = ChangePricePeriod.objects.get(id=pk)
if ChangeGroup.objects.filter(id=cg,change_period__in=[cpp],mooring_group__in=mooring_groups).count() > 0:
return True
return False
def mooring_group_access_level_cancel_options(cg,pk,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
cpp = CancelPricePeriod.objects.get(id=pk)
if CancelGroup.objects.filter(id=cg,cancel_period__in=[cpp],mooring_group__in=mooring_groups).count() > 0:
return True
return False
def mooring_group_access_level_booking_period(pk,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
if BookingPeriod.objects.filter(pk=pk,mooring_group__in=mooring_groups).count() > 0:
return True
return False
def mooring_group_access_level_annual_booking_period(pk,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
if models.AnnualBookingPeriodGroup.objects.filter(pk=pk,mooring_group__in=mooring_groups).count() > 0:
return True
return False
def mooring_group_access_level_booking_period_option(pk,bp_group_id,request):
mooring_groups = MooringAreaGroup.objects.filter(members__in=[request.user,])
if request.user.is_superuser is True:
return True
else:
bpo = BookingPeriodOption.objects.get(id=pk)
if BookingPeriod.objects.filter(pk=bp_group_id,booking_period__in=[bpo],mooring_group__in=mooring_groups).count() > 0:
return True
return False
def check_mooring_admin_access(request):
if request.user.is_superuser is True:
return True
else:
if request.user.groups.filter(name__in=['Mooring Admin']).exists():
return True
return False
def get_provinces(country_code):
provinces = []
read_data = ""
json_response = []
with io.open(settings.BASE_DIR+'/mooring/data/provinces.json', "r", encoding="utf-8") as my_file:
read_data = my_file.read()
provinces = json.loads(read_data)
for p in provinces:
if p['country'] == country_code:
if 'short' in p:
json_response.append(p)
return json_response
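# Finalises a paid mooring booking returned from the payment gateway: links the invoice,
# cancels the old booking being replaced (including its admissions payment), applies any
# overridden line amounts, recalculates arrival/departure from the site bookings, confirms
# the booking, updates payments, sends the invoice/confirmation emails and records the
# vessel details. Returns the context used by the success page.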
def booking_success(basket, booking, context_processor):
print("MLINE 1.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
order = Order.objects.get(basket=basket[0])
invoice = Invoice.objects.get(order_number=order.number)
print("MLINE 1.02", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
invoice_ref = invoice.reference
book_inv, created = BookingInvoice.objects.get_or_create(booking=booking, invoice_reference=invoice_ref)
print("MLINE 1.03", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
#invoice_ref = request.GET.get('invoice')
if booking.booking_type == 3:
print("MLINE 2.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
try:
inv = Invoice.objects.get(reference=invoice_ref)
order = Order.objects.get(number=inv.order_number)
order.user = booking.customer
order.save()
except Invoice.DoesNotExist:
print ("INVOICE ERROR")
logger.error('{} tried making a booking with an incorrect invoice'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user'))
return redirect('public_make_booking')
if inv.system not in ['0516']:
print ("SYSTEM ERROR")
logger.error('{} tried making a booking with an invoice from another system with reference number {}'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user',inv.reference))
return redirect('public_make_booking')
print("MLINE 3.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
if book_inv:
print("MLINE 4.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
if booking.old_booking:
old_booking = Booking.objects.get(id=booking.old_booking.id)
old_booking.booking_type = 4
old_booking.cancelation_time = datetime.now()
old_booking.canceled_by = booking.created_by #request.user
old_booking.save()
booking_items = MooringsiteBooking.objects.filter(booking=old_booking)
# Find admissions booking for old booking
if old_booking.admission_payment:
old_booking.admission_payment.booking_type = 4
old_booking.admission_payment.cancelation_time = datetime.now()
old_booking.admission_payment.canceled_by = booking.created_by #request.user
old_booking.admission_payment.save()
for bi in booking_items:
bi.booking_type = 4
bi.save()
print("MLINE 5.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
booking_items_current = MooringsiteBooking.objects.filter(booking=booking)
for bi in booking_items_current:
if str(bi.id) in booking.override_lines:
bi.amount = Decimal(booking.override_lines[str(bi.id)])
bi.save()
print("MLINE 6.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
msb = MooringsiteBooking.objects.filter(booking=booking).order_by('from_dt')
from_date = msb[0].from_dt
to_date = msb[msb.count()-1].to_dt
timestamp = calendar.timegm(from_date.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
from_dt = local_dt.replace(microsecond=from_date.microsecond)
from_date_converted = from_dt.date()
timestamp = calendar.timegm(to_date.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
to_dt = local_dt.replace(microsecond=to_date.microsecond)
to_date_converted = to_dt.date()
booking.arrival = from_date_converted
booking.departure = to_date_converted
# set booking to be permanent fixture
booking.booking_type = 1 # internet booking
booking.expiry_time = None
print("MLINE 7.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
update_payments(invoice_ref)
print("MLINE 8.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
#Calculate Admissions and create object
if booking.admission_payment:
ad_booking = AdmissionsBooking.objects.get(pk=booking.admission_payment.pk)
#if request.user.__class__.__name__ == 'EmailUser':
ad_booking.created_by = booking.created_by
ad_booking.booking_type=1
print("MLINE 8.02", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
ad_booking.save()
print("MLINE 8.03", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
ad_invoice = AdmissionsBookingInvoice.objects.get_or_create(admissions_booking=ad_booking, invoice_reference=invoice_ref)
print("MLINE 8.04", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
for al in ad_booking.override_lines.keys():
ad_line = AdmissionsLine.objects.get(id=int(al))
ad_line.cost = ad_booking.override_lines[str(al)]
ad_line.save()
print("MLINE 8.05", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
# booking.admission_payment = ad_booking
booking.save()
print("MLINE 9.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
#if not request.user.is_staff:
# print "USER IS NOT STAFF."
#request.session['ps_last_booking'] = booking.id
#utils.delete_session_booking(request.session)
# send out the invoice before the confirmation is sent if total is greater than zero
#if booking.cost_total > 0:
print("MLINE 10.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
try:
emails.send_booking_invoice(booking,context_processor)
except Exception as e:
print ("Error Sending Invoice ("+str(booking.id)+") :"+str(e))
        # for fully paid bookings, fire off confirmation email
#if booking.invoice_status == 'paid':
print("MLINE 11.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
try:
emails.send_booking_confirmation(booking,context_processor)
except Exception as e:
print ("Error Sending Booking Confirmation ("+str(booking.id)+") :"+str(e))
print("MLINE 12.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
refund_failed = None
if models.RefundFailed.objects.filter(booking=booking).count() > 0:
refund_failed = models.RefundFailed.objects.filter(booking=booking)
# Create/Update Vessel in VesselDetails Table
print("MLINE 13.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
try:
if models.VesselDetail.objects.filter(rego_no=booking.details['vessel_rego']).count() > 0:
vd = models.VesselDetail.objects.filter(rego_no=booking.details['vessel_rego'])
p = vd[0]
p.vessel_size=booking.details['vessel_size']
p.vessel_draft=booking.details['vessel_draft']
p.vessel_beam=booking.details['vessel_beam']
p.vessel_weight=booking.details['vessel_weight']
p.save()
else:
models.VesselDetail.objects.create(rego_no=booking.details['vessel_rego'],
vessel_size=booking.details['vessel_size'],
vessel_draft=booking.details['vessel_draft'],
vessel_beam=booking.details['vessel_beam'],
vessel_weight=booking.details['vessel_weight']
)
print("MLINE 14.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
except:
print ("ERROR: create vesseldetails on booking success")
context = {
'booking': booking,
'book_inv': [book_inv],
'refund_failed' : refund_failed
}
print("MLINE 15.01", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
return context
def booking_annual_admission_success(basket, booking, context_processor):
order = Order.objects.get(basket=basket[0])
invoice = Invoice.objects.get(order_number=order.number)
invoice_ref = invoice.reference
book_inv, created = models.BookingAnnualInvoice.objects.get_or_create(booking_annual_admission=booking, invoice_reference=invoice_ref)
#invoice_ref = request.GET.get('invoice')
if booking.booking_type == 3:
try:
inv = Invoice.objects.get(reference=invoice_ref)
order = Order.objects.get(number=inv.order_number)
order.user = booking.customer
order.save()
except Invoice.DoesNotExist:
print ("INVOICE ERROR")
logger.error('{} tried making a booking with an incorrect invoice'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user'))
return redirect('public_make_booking')
if inv.system not in ['0516']:
print ("SYSTEM ERROR")
logger.error('{} tried making a booking with an invoice from another system with reference number {}'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user',inv.reference))
return redirect('public_make_booking')
if book_inv:
# set booking to be permanent fixture
booking.booking_type = 1 # internet booking
booking.expiry_time = None
update_payments(invoice_ref)
#Calculate Admissions and create object
booking.save()
#if not request.user.is_staff:
# print "USER IS NOT STAFF."
print ("SEND EMAIL")
try:
emails.send_annual_admission_booking_invoice(booking,context_processor)
except Exception as e:
print ("Error Sending Invoice ("+str(booking.id)+") :"+str(e))
try:
emails.send_new_annual_admission_booking_internal(booking,context_processor)
except Exception as e:
print ("Error Sending Booking Confirmation ("+str(booking.id)+") :"+str(e))
        # for fully paid bookings, fire off confirmation email
#if booking.invoice_status == 'paid':
context = {
'booking': booking,
'book_inv': [book_inv],
}
try:
if models.VesselDetail.objects.filter(rego_no=booking.details['vessel_rego']).count() > 0:
vd = models.VesselDetail.objects.filter(rego_no=booking.details['vessel_rego'])
p = vd[0]
p.vessel_name=booking.details['vessel_name']
p.save()
except:
print ("ERROR: create vesseldetails on booking success")
print ("COMPLETED SUCCESS")
return context
def booking_admission_success(basket, booking, context_processor):
arrival = models.AdmissionsLine.objects.filter(admissionsBooking=booking)[0].arrivalDate
overnight = models.AdmissionsLine.objects.filter(admissionsBooking=booking)[0].overnightStay
order = Order.objects.get(basket=basket[0])
invoice = Invoice.objects.get(order_number=order.number)
invoice_ref = invoice.reference
#invoice_ref = request.GET.get('invoice')
if booking.booking_type == 3:
try:
inv = Invoice.objects.get(reference=invoice_ref)
order = Order.objects.get(number=inv.order_number)
order.user = booking.customer
order.save()
except Invoice.DoesNotExist:
logger.error('{} tried making a booking with an incorrect invoice'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user'))
return redirect('admissions', args=(booking.location.key,))
if inv.system not in ['0516']:
logger.error('{} tried making a booking with an invoice from another system with reference number {}'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user',inv.reference))
return redirect('admissions', args=(booking.location.key,))
try:
b = AdmissionsBookingInvoice.objects.get(invoice_reference=invoice_ref)
logger.error('{} tried making an admission booking with an already used invoice with reference number {}'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user',inv.reference))
return redirect('admissions', args=(booking.location.key,))
except AdmissionsBookingInvoice.DoesNotExist:
logger.info('{} finished temporary booking {}, creating new AdmissionBookingInvoice with reference {}'.format('User {} with id {}'.format(booking.customer.get_full_name(),booking.customer.id) if booking.customer else 'An anonymous user',booking.id, invoice_ref))
# FIXME: replace with server side notify_url callback
admissionsInvoice = AdmissionsBookingInvoice.objects.get_or_create(admissions_booking=booking, invoice_reference=invoice_ref)
#if request.user.__class__.__name__ == 'EmailUser':
# booking.created_by = request.user
# set booking to be permanent fixture
booking.booking_type = 1 # internet booking
booking.save()
#request.session['ad_last_booking'] = booking.id
#utils.delete_session_admissions_booking(request.session)
try:
# send out the invoice before the confirmation is sent
emails.send_admissions_booking_invoice(booking,context_processor)
except Exception as e:
print ("Error Sending Invoice ("+str(booking.id)+") :"+str(e))
try:
# for fully paid bookings, fire off confirmation email
emails.send_admissions_booking_confirmation(booking,context_processor)
except Exception as e:
print ("Error Sending Booking Confirmation ("+str(booking.id)+") :"+str(e))
context = {
'admissionsBooking': booking,
'arrival' : arrival,
'overnight': overnight,
'admissionsInvoice': [invoice_ref]
}
|
the-stack_0_7927 | #!/usr/bin/env python3
import sys
import os
import struct
import select
import time
import getopt
import tqdm
import socket
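# Streams a local file to host:port: a 4-byte big-endian length header followed by the raw
# file bytes in 1024-byte chunks (16-byte chunks with a short delay in slow "-s" mode),
# then replaces this process with telnet to the same host/port.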
try:
optlist, args = getopt.getopt(sys.argv[1:], 's')
timeout = 0.01
n = 1024
slow = False
for o, a in optlist:
if o == "-s":
slow = True
n = 16
print('Running in slow mode')
out = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host, port = args[1].split(':')
out.connect((host, int(port)))
size = os.path.getsize(args[0])
out.send(struct.pack('>I', size))
with open(args[0], 'rb') as f:
data = f.read()
for i in tqdm.tqdm(range(0, len(data), n)):
out.send(data[i:i+n])
if slow:
time.sleep(timeout)
out.close()
os.execlp('telnet', 'telnet', host, port.strip())
except getopt.GetoptError as err:
print(str(err))
print('Usage: send.py [-s] file host:port')
|
the-stack_0_7928 | """Sensor classes represent modbus registers for an inverter."""
from __future__ import annotations
import logging
from math import modf
from typing import Any, Dict, List, Sequence, Tuple, Union
import attr
_LOGGER = logging.getLogger(__name__)
def ensure_tuple(val: Any) -> Tuple[int]:
"""Return a tuple."""
if isinstance(val, tuple):
return val # type: ignore
if isinstance(val, int):
return (val,)
return tuple(val) # type: ignore
@attr.define(slots=True)
class Sensor:
"""Sunsynk sensor."""
reg_address: Tuple[int, ...] = attr.field(converter=ensure_tuple)
name: str = attr.field()
unit: str = attr.field(default="")
factor: float = attr.field(default=1)
value: Union[float, int, str, None] = None
# func: Union[
# None, Callable[[Tuple[int, ...]], str], Callable[[float], Any]
# ] = attr.field(default=None)
reg_value: Tuple[int, ...] = attr.field(init=False, factory=tuple)
def append_to(self, arr: List[Sensor]) -> Sensor:
"""Append to a list of sensors."""
arr.append(self)
return self
def reg_to_value(self, value: Tuple[int, ...]) -> Union[float, int, str, None]:
"""Update the reg_value and update."""
if isinstance(value, tuple):
self.reg_value = value
else:
self.reg_value = (value,)
self.update_value()
return self.value
@property
def id(self) -> str: # pylint: disable=invalid-name
"""Get the sensor ID."""
return slug(self.name)
def update_value(self) -> None:
"""Update the value from the reg_value."""
hval = self.reg_value[1] if len(self.reg_value) > 1 else 0
lval = self.reg_value[0]
_LOGGER.debug(
"%s low=%d high=%d value=%s%s",
self.name,
lval,
hval,
self.value,
self.unit,
)
self.value = (lval + (hval << 16)) * self.factor
if self.factor < 0: # Indicate this register is signed
self.value = -self.value
# Value might be negative.
if self.value > 0x7FFF:
self.value -= 0xFFFF
# if self.func:
# self.value = self.func(self.value) # type: ignore
# make integer/round?
if isinstance(self.value, float):
if modf(self.value)[0] == 0:
self.value = int(self.value)
else:
self.value = round(self.value, 2)
class HSensor(Sensor):
"""Hybrid sensor."""
class RWSensor(Sensor):
"""Read & write sensor."""
def group_sensors(
sensors: Sequence[Sensor], allow_gap: int = 3
) -> Sequence[Sequence[int]]:
"""Group sensor registers into blocks for reading."""
if not sensors:
return []
regs = set()
for sen in sensors:
regs |= set(sen.reg_address)
adr = sorted(regs)
cgroup = [adr[0]]
groups = [cgroup]
for idx in range(1, len(adr)):
gap = adr[idx] - adr[idx - 1]
if gap > allow_gap or len(cgroup) >= 60:
cgroup = []
groups.append(cgroup)
cgroup.append(adr[idx])
return groups
def update_sensors(sensors: Sequence[Sensor], registers: Dict[int, int]) -> None:
"""Update sensors."""
for sen in sensors:
try:
sen.reg_value = tuple(registers[i] for i in sen.reg_address)
except KeyError:
continue
sen.update_value()
def slug(name: str) -> str:
"""Create a slug."""
return name.lower().replace(" ", "_")
class TemperatureSensor(Sensor):
"""Offset by 100 for temperature."""
def update_value(self) -> None:
"""Offset by 100 for temperature."""
super().update_value()
try:
self.value = round(float(self.value) - 100, 2) # type: ignore
except (TypeError, ValueError) as err:
self.value = 0
_LOGGER.error("Could not decode temperature: %s", err)
class TimeRWSensor(RWSensor):
"""Extract the time."""
def update_value(self) -> None:
"""Extract the time."""
sval = str(self.reg_value[0])
self.value = f"{sval[:-2]}:{sval[-2:]}"
class SDStatusSensor(Sensor):
"""SD card status."""
def update_value(self) -> None:
"""SD card status."""
self.value = {
1000: "fault",
2000: "ok",
}.get(self.reg_value[0]) or f"unknown {self.reg_value[0]}"
class InverterStateSensor(Sensor):
"""Inverter status."""
def update_value(self) -> None:
"""Inverter status."""
if self.reg_value[0] == 2:
self.value = "ok"
else:
self.value = f"unknown {self.reg_value[0]}"
class SerialSensor(Sensor):
"""Decode Inverter serial number."""
def update_value(self) -> None:
"""Decode Inverter serial number."""
res = ""
for b16 in self.reg_value:
res += chr(b16 >> 8)
res += chr(b16 & 0xFF)
self.value = res
class FaultSensor(Sensor):
"""Decode Inverter faults."""
def update_value(self) -> None:
"""Decode Inverter faults."""
faults = {
13: "Working mode change",
18: "AC over current",
20: "DC over current",
23: "F23 AC leak current or transient over current",
24: "F24 DC insulation impedance",
26: "F26 DC busbar imbalanced",
29: "Parallel comms cable",
35: "No AC grid",
42: "AC line low voltage",
47: "AC freq high/low",
56: "DC busbar voltage low",
63: "ARC fault",
64: "Heat sink tempfailure",
}
err = []
off = 0
for b16 in self.reg_value:
for bit in range(16):
msk = 1 << bit
if msk & b16:
msg = f"F{bit+off+1:02} " + faults.get(off + msk, "")
err.append(msg.strip())
off += 16
self.value = ", ".join(err)
|
the-stack_0_7929 | from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
# hex: 0x0D0900
_REQUEST_MESSAGE_TYPE = 854272
# hex: 0x0D0901
_RESPONSE_MESSAGE_TYPE = 854273
_REQUEST_INITIAL_FRAME_SIZE = REQUEST_HEADER_SIZE
def encode_request(name):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
StringCodec.encode(buf, name, True)
return OutboundMessage(buf, False)
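# Minimal usage sketch (not part of the generated codec): encoding only needs
# the distributed object name; the second OutboundMessage argument is the
# retryable flag (False here).
if __name__ == "__main__":
    message = encode_request("my-distributed-object")  # hypothetical object name
    print(type(message).__name__)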
|
the-stack_0_7930 | import gevent
import pytest
import requests
import responses
from eth_keys.exceptions import BadSignature, ValidationError
from eth_utils import decode_hex, keccak, to_canonical_address
from raiden.api.v1.encoding import CapabilitiesSchema
from raiden.exceptions import InvalidSignature
from raiden.network.utils import get_average_http_response_time
from raiden.settings import CapabilitiesConfig
from raiden.transfer.utils.secret import decrypt_secret, encrypt_secret
from raiden.utils.capabilities import capconfig_to_dict, capdict_to_config
from raiden.utils.keys import privatekey_to_publickey
from raiden.utils.signer import LocalSigner, Signer, recover
from raiden.utils.typing import UserID
def test_privatekey_to_publickey():
privkey = keccak(b"secret")
pubkey = (
"c283b0507c4ec6903a49fac84a5aead951f3c38b2c72b69da8a70a5bac91e9c"
"705f70c7554b26e82b90d2d1bbbaf711b10c6c8b807077f4070200a8fb4c6b771"
)
assert pubkey == privatekey_to_publickey(privkey).hex()
def test_signer_sign():
privkey = keccak(b"secret") # 0x38e959391dD8598aE80d5d6D114a7822A09d313A
message = b"message"
# generated with Metamask's web3.personal.sign
signature = decode_hex(
"0x1eff8317c59ab169037f5063a5129bb1bab0299fef0b5621d866b07be59e2c0a"
"6a404e88d3360fb58bd13daf577807c2cf9b6b26d80fc929c52e952769a460981c"
)
signer: Signer = LocalSigner(privkey)
assert signer.sign(message) == signature
def test_encrypt_secret():
privkey = keccak(b"secret")
message = b"message"
signer: Signer = LocalSigner(privkey)
signature = signer.sign(message)
encrypted_secret = encrypt_secret(
message, {"user_id": UserID(message.decode()), "displayname": signature.hex()}, 0, 0
)
decrypted_message, amount, payment_id = decrypt_secret(encrypted_secret, privkey)
assert decrypted_message == message
assert amount == 0
assert payment_id == 0
def test_recover():
account = to_canonical_address("0x38e959391dD8598aE80d5d6D114a7822A09d313A")
message = b"message"
# generated with Metamask's web3.personal.sign
signature = decode_hex(
"0x1eff8317c59ab169037f5063a5129bb1bab0299fef0b5621d866b07be59e2c0a"
"6a404e88d3360fb58bd13daf577807c2cf9b6b26d80fc929c52e952769a460981c"
)
assert recover(data=message, signature=signature) == account
@pytest.mark.parametrize(
("signature", "nested_exception"),
[
pytest.param(b"\x00" * 65, BadSignature, id="BadSignature"),
pytest.param(b"bla", ValidationError, id="ValidationError"),
],
)
def test_recover_exception(signature, nested_exception):
with pytest.raises(InvalidSignature) as exc_info:
recover(b"bla", signature)
assert isinstance(exc_info.value.__context__, nested_exception)
def test_get_http_rtt_happy(requests_responses):
"""Ensure get_http_rtt returns the average RTT over the number of samples."""
delay = iter([0.05, 0.05, 0.2])
def response(_):
gevent.sleep(next(delay))
return 200, {}, ""
requests_responses.add_callback(responses.GET, "http://url", callback=response)
result = get_average_http_response_time(url="http://url", method="get", samples=3)
assert 0.1 <= result[1] < 0.11 # exact answer is 0.1, but we have some overhead
def test_get_http_rtt_ignore_failing(requests_responses):
"""Ensure get_http_rtt ignores failing servers."""
# RequestException (e.g. DNS not resolvable, server not reachable)
requests_responses.add(responses.GET, "http://url1", body=requests.RequestException())
assert get_average_http_response_time(url="http://url1", method="get") is None
# Server misconfigured
requests_responses.add(responses.GET, "http://url2", status=404)
assert get_average_http_response_time(url="http://url2", method="get") is None
# Internal server error
requests_responses.add(responses.GET, "http://url3", status=500)
assert get_average_http_response_time(url="http://url3", method="get") is None
def test_deserialize_capabilities():
capabilities_schema = CapabilitiesSchema()
base_url = "mxc://raiden.network/cap"
capstring = f"{base_url}?foo=1&toad=1&bar=max&form=1&agar=1¬true=0&l=one&l=2"
parsed = capabilities_schema.load({"capabilities": capstring})["capabilities"]
assert parsed.get("foo") is True
assert parsed.get("toad") is True
assert parsed.get("bar") == "max"
assert parsed.get("agar") is True
assert parsed.get("nottrue") is False
assert parsed.get("l") == ["one", "2"]
assert not parsed.get("nothing")
assert capabilities_schema.dump({"capabilities": parsed})["capabilities"] == f"{capstring}"
parsed["false"] = False
# Explicit new value changes the serialization format
assert (
capabilities_schema.dump({"capabilities": parsed})["capabilities"] != f"mxc://{capstring}"
)
assert capabilities_schema.load({"capabilities": ""})["capabilities"] == {}
assert capabilities_schema.load({})["capabilities"] == "mxc://"
def test_capconfig_to_dict():
# test method supports adding unknown keys
config = CapabilitiesConfig()
config.foo = True
as_dict = capconfig_to_dict(config)
assert as_dict.get("foo") is True
assert as_dict.get("bar") is None
assert capdict_to_config(as_dict) == config
as_dict["bar"] = True
assert capdict_to_config(as_dict).bar is True # pylint: disable=no-member
|
the-stack_0_7931 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# statistics.py: collect statistic data
#
# Copyright (C) 2014 Politecnico di Torino, Italy
# TORSEC group -- http://security.polito.it
#
# Author: Roberto Sassu <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see
# <http://www.gnu.org/licenses/>.
import time
class Timer(object):
start_time = 0
last_get_time = 0
current_time = 0
@classmethod
def start(self):
Timer.start_time = time.time()
Timer.current_time = Timer.start_time
@classmethod
def get_current(self):
return str(round(time.time() - Timer.start_time, 5))
@classmethod
def get_elapsed(self):
Timer.last_get_time = Timer.current_time
Timer.current_time = time.time()
return str(round(Timer.current_time - Timer.last_get_time, 5))
def __del__(cls):
print('Delete Timer object in statistics.py')
cls.start_time = 0
cls.last_get_time = 0
cls.current_time = 0
class Statistics(object):
global_stat = dict(time_parse_ima_list=0, time_exec_query=0,
time_build_graph=0, time_load_time_analysis=0,
time_run_time_analysis=0, time_total=0,
n_meas_code=0, n_meas_code_known=0,
n_meas_struct_data=0, n_meas_struct_data_known=0,
n_meas_unstruct_data=0, n_meas_violation=0,
n_tot_meas=0)
@classmethod
def inc_arch_stat(self, arch=None):
Statistics.arch_stat[arch] += 1
current_arch = Statistics.global_stat['distro_arch']
if (arch != current_arch and
Statistics.arch_stat[arch] >
Statistics.arch_stat[current_arch]):
Statistics.global_stat['distro_arch'] = arch
@classmethod
def inc_stat(self, stat_key=None, stat_value=None):
Statistics.global_stat[stat_key] += 1
@classmethod
def dec_stat(self, stat_key=None, stat_value=None):
Statistics.global_stat[stat_key] -= 1
@classmethod
def set_stat(self, stat_key=None, stat_value=None):
Statistics.global_stat[stat_key] = stat_value
@classmethod
def get_stat(self, stat_key=None):
return Statistics.global_stat[stat_key]
@classmethod
def start_timer(self):
Timer.start()
@classmethod
def set_elapsed_time(self, stat_key=None):
Statistics.global_stat[stat_key] = Timer.get_elapsed()
@classmethod
def set_current_time(self, stat_key=None):
Statistics.global_stat[stat_key] = Timer.get_current()
def __init__(self):
return
def __del__(cls):
print('Delete Statistics object in statistics.py')
cls.global_stat = dict(time_parse_ima_list=0, time_exec_query=0,
time_build_graph=0, time_load_time_analysis=0,
time_run_time_analysis=0, time_total=0,
n_meas_code=0, n_meas_code_known=0,
n_meas_struct_data=0,
n_meas_struct_data_known=0,
n_meas_unstruct_data=0, n_meas_violation=0,
n_tot_meas=0)
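# Hypothetical usage sketch (not part of the original module): time one phase of
# the analysis and store the elapsed time under one of the predefined keys.
if __name__ == "__main__":
    Statistics.start_timer()
    Statistics.inc_stat('n_tot_meas')
    Statistics.set_elapsed_time('time_parse_ima_list')
    print(Statistics.get_stat('time_parse_ima_list'),
          Statistics.get_stat('n_tot_meas'))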
|
the-stack_0_7932 | from typing import Any, Optional, Union
from chia.types.blockchain_format.sized_bytes import bytes32
import click
async def show_async(
rpc_port: Optional[int],
state: bool,
show_connections: bool,
exit_node: bool,
add_connection: str,
remove_connection: str,
block_header_hash_by_height: str,
block_by_header_hash: str,
) -> None:
import aiohttp
import time
import traceback
from time import localtime, struct_time
from typing import List, Optional
from chia.consensus.block_record import BlockRecord
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.server.outbound_message import NodeType
from chia.types.full_block import FullBlock
from chia.util.bech32m import encode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16
from chia.util.misc import format_bytes
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
if state:
blockchain_state = await client.get_blockchain_state()
if blockchain_state is None:
print("There is no blockchain found yet. Try again shortly")
return None
peak: Optional[BlockRecord] = blockchain_state["peak"]
difficulty = blockchain_state["difficulty"]
sub_slot_iters = blockchain_state["sub_slot_iters"]
synced = blockchain_state["sync"]["synced"]
sync_mode = blockchain_state["sync"]["sync_mode"]
total_iters = peak.total_iters if peak is not None else 0
num_blocks: int = 10
network_name = config["selected_network"]
genesis_challenge = config["farmer"]["network_overrides"]["constants"][network_name]["GENESIS_CHALLENGE"]
full_node_port = config["full_node"]["port"]
full_node_rpc_port = config["full_node"]["rpc_port"]
print(f"Network: {network_name} Port: {full_node_port} Rpc Port: {full_node_rpc_port}")
print(f"Genesis Challenge: {genesis_challenge}")
if synced:
print("Current Blockchain Status: Full Node Synced")
print("\nPeak: Hash:", peak.header_hash if peak is not None else "")
elif peak is not None and sync_mode:
sync_max_block = blockchain_state["sync"]["sync_tip_height"]
sync_current_block = blockchain_state["sync"]["sync_progress_height"]
print(f"Current Blockchain Status: Syncing {sync_current_block}/{sync_max_block}.")
print("Peak: Hash:", peak.header_hash if peak is not None else "")
elif peak is not None:
print(f"Current Blockchain Status: Not Synced. Peak height: {peak.height}")
else:
print("\nSearching for an initial chain\n")
print("You may be able to expedite with 'chia show -a host:port' using a known node.\n")
if peak is not None:
if peak.is_transaction_block:
peak_time = peak.timestamp
else:
peak_hash = peak.header_hash
curr = await client.get_block_record(peak_hash)
while curr is not None and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
peak_time = curr.timestamp
peak_time_struct = struct_time(localtime(peak_time))
print(
" Time:",
f"{time.strftime('%a %b %d %Y %T %Z', peak_time_struct)}",
f" Height: {peak.height:>10}\n",
)
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
print(f"Current difficulty: {difficulty}")
print(f"Current VDF sub_slot_iters: {sub_slot_iters}")
print("Total iterations since the start of the blockchain:", total_iters)
print("")
print(" Height: | Hash:")
added_blocks: List[BlockRecord] = []
curr = await client.get_block_record(peak.header_hash)
while curr is not None and len(added_blocks) < num_blocks and curr.height > 0:
added_blocks.append(curr)
curr = await client.get_block_record(curr.prev_hash)
for b in added_blocks:
print(f"{b.height:>9} | {b.header_hash}")
else:
print("Blockchain has no blocks yet")
# if called together with show_connections, leave a blank line
if show_connections:
print("")
if show_connections:
connections = await client.get_connections()
print("Connections:")
print(
"Type IP Ports NodeID Last Connect"
+ " MiB Up|Dwn"
)
for con in connections:
last_connect_tuple = struct_time(localtime(con["last_message_time"]))
last_connect = time.strftime("%b %d %T", last_connect_tuple)
mb_down = con["bytes_read"] / (1024 * 1024)
mb_up = con["bytes_written"] / (1024 * 1024)
host = con["peer_host"]
# Strip IPv6 brackets
host = host.strip("[]")
# Nodetype length is 9 because INTRODUCER will be deprecated
if NodeType(con["type"]) is NodeType.FULL_NODE:
peak_height = con["peak_height"]
connection_peak_hash = con["peak_hash"]
if connection_peak_hash is None:
connection_peak_hash = "No Info"
else:
if connection_peak_hash.startswith(("0x", "0X")):
connection_peak_hash = connection_peak_hash[2:]
connection_peak_hash = f"{connection_peak_hash[:8]}..."
if peak_height is None:
peak_height = 0
con_str = (
f"{NodeType(con['type']).name:9} {host:38} "
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
f"{mb_up:7.1f}|{mb_down:<7.1f}"
f"\n "
f"-SB Height: {peak_height:8.0f} -Hash: {connection_peak_hash}"
)
else:
con_str = (
f"{NodeType(con['type']).name:9} {host:38} "
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
f"{mb_up:7.1f}|{mb_down:<7.1f}"
)
print(con_str)
# if called together with state, leave a blank line
if state:
print("")
if exit_node:
node_stop = await client.stop_node()
print(node_stop, "Node stopped")
if add_connection:
if ":" not in add_connection:
print("Enter a valid IP and port in the following format: 10.5.4.3:8000")
else:
ip, port = (
":".join(add_connection.split(":")[:-1]),
add_connection.split(":")[-1],
)
print(f"Connecting to {ip}, {port}")
try:
await client.open_connection(ip, int(port))
except Exception:
print(f"Failed to connect to {ip}:{port}")
if remove_connection:
result_txt = ""
if len(remove_connection) != 8:
result_txt = "Invalid NodeID. Do not include '.'"
else:
connections = await client.get_connections()
for con in connections:
if remove_connection == con["node_id"].hex()[:8]:
print("Attempting to disconnect", "NodeID", remove_connection)
try:
await client.close_connection(con["node_id"])
except Exception:
result_txt = f"Failed to disconnect NodeID {remove_connection}"
                        else:
                            result_txt = (
                                f"NodeID {remove_connection}... {NodeType(con['type']).name} "
                                f"{con['peer_host']} disconnected"
                            )
elif result_txt == "":
result_txt = f"NodeID {remove_connection}... not found"
print(result_txt)
if block_header_hash_by_height != "":
block_header = await client.get_block_record_by_height(block_header_hash_by_height)
if block_header is not None:
print(f"Header hash of block {block_header_hash_by_height}: " f"{block_header.header_hash.hex()}")
else:
print("Block height", block_header_hash_by_height, "not found")
if block_by_header_hash != "":
block: Optional[BlockRecord] = await client.get_block_record(hexstr_to_bytes(block_by_header_hash))
full_block: Optional[FullBlock] = await client.get_block(hexstr_to_bytes(block_by_header_hash))
# Would like to have a verbose flag for this
if block is not None:
assert full_block is not None
prev_b = await client.get_block_record(block.prev_hash)
if prev_b is not None:
difficulty = block.weight - prev_b.weight
else:
difficulty = block.weight
if block.is_transaction_block:
assert full_block.transactions_info is not None
block_time = struct_time(
localtime(
full_block.foliage_transaction_block.timestamp
if full_block.foliage_transaction_block
else None
)
)
block_time_string = time.strftime("%a %b %d %Y %T %Z", block_time)
cost = str(full_block.transactions_info.cost)
tx_filter_hash: Union[str, bytes32] = "Not a transaction block"
if full_block.foliage_transaction_block:
tx_filter_hash = full_block.foliage_transaction_block.filter_hash
fees: Any = block.fees
else:
block_time_string = "Not a transaction block"
cost = "Not a transaction block"
tx_filter_hash = "Not a transaction block"
fees = "Not a transaction block"
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
farmer_address = encode_puzzle_hash(block.farmer_puzzle_hash, address_prefix)
pool_address = encode_puzzle_hash(block.pool_puzzle_hash, address_prefix)
pool_pk = (
full_block.reward_chain_block.proof_of_space.pool_public_key
if full_block.reward_chain_block.proof_of_space.pool_public_key is not None
else "Pay to pool puzzle hash"
)
print(
f"Block Height {block.height}\n"
f"Header Hash 0x{block.header_hash.hex()}\n"
f"Timestamp {block_time_string}\n"
f"Weight {block.weight}\n"
f"Previous Block 0x{block.prev_hash.hex()}\n"
f"Difficulty {difficulty}\n"
f"Sub-slot iters {block.sub_slot_iters}\n"
f"Cost {cost}\n"
f"Total VDF Iterations {block.total_iters}\n"
f"Is a Transaction Block?{block.is_transaction_block}\n"
f"Deficit {block.deficit}\n"
f"PoSpace 'k' Size {full_block.reward_chain_block.proof_of_space.size}\n"
f"Plot Public Key 0x{full_block.reward_chain_block.proof_of_space.plot_public_key}\n"
f"Pool Public Key {pool_pk}\n"
f"Tx Filter Hash {tx_filter_hash}\n"
f"Farmer Address {farmer_address}\n"
f"Pool Address {pool_address}\n"
f"Fees Amount {fees}\n"
)
else:
print("Block with header hash", block_header_hash_by_height, "not found")
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node rpc is running at {rpc_port}")
print("This is normal if full node is still starting up")
else:
tb = traceback.format_exc()
print(f"Exception from 'show' {tb}")
client.close()
await client.await_closed()
@click.command("show", short_help="Show node information")
@click.option(
"-p",
"--rpc-port",
help=(
"Set the port where the Full Node is hosting the RPC interface. "
"See the rpc_port under full_node in config.yaml"
),
type=int,
default=None,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-s", "--state", help="Show the current state of the blockchain", is_flag=True, type=bool, default=False)
@click.option(
"-c", "--connections", help="List nodes connected to this Full Node", is_flag=True, type=bool, default=False
)
@click.option("-e", "--exit-node", help="Shut down the running Full Node", is_flag=True, default=False)
@click.option("-a", "--add-connection", help="Connect to another Full Node by ip:port", type=str, default="")
@click.option(
"-r", "--remove-connection", help="Remove a Node by the first 8 characters of NodeID", type=str, default=""
)
@click.option(
"-bh", "--block-header-hash-by-height", help="Look up a block header hash by block height", type=str, default=""
)
@click.option("-b", "--block-by-header-hash", help="Look up a block by block header hash", type=str, default="")
def show_cmd(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
state: bool,
connections: bool,
exit_node: bool,
add_connection: str,
remove_connection: str,
block_header_hash_by_height: str,
block_by_header_hash: str,
) -> None:
import asyncio
asyncio.run(
show_async(
rpc_port,
state,
connections,
exit_node,
add_connection,
remove_connection,
block_header_hash_by_height,
block_by_header_hash,
)
        )
    )
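# Example invocations (hypothetical, using the flags defined above):
#   chia show --state --connections
#   chia show -bh 1000000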
|
the-stack_0_7933 | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note that the unit_tests/__init__.py has the following lines to stop
# side effects from the imorts from charm helpers.
# sys.path.append('./lib')
# mock out some charmhelpers libraries as they have apt install side effects
# sys.modules['charmhelpers.contrib.openstack.utils'] = mock.MagicMock()
# sys.modules['charmhelpers.contrib.network.ip'] = mock.MagicMock()
from __future__ import absolute_import
import unit_tests.utils as utils
import charms_openstack.sdn.ovs as ovs
class TestCharmOpenStackSDNOVS(utils.BaseTestCase):
def test_set_manager(self):
self.patch_object(ovs, 'subprocess')
ovs.set_manager('myurl')
self.subprocess.check_call.assert_called_once_with(
['ovs-vsctl', 'set-manager', 'myurl'])
def test__get_ovstbl(self):
self.patch_object(ovs, 'subprocess')
self.subprocess.check_output.return_value = 'ovstbl'
self.assertEqual(ovs._get_ovstbl(), 'ovstbl')
self.subprocess.check_output.assert_called_once_with(
['ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid'])
def test_set_config(self):
self.patch_object(ovs, 'subprocess')
self.patch_object(ovs, '_get_ovstbl')
self._get_ovstbl.return_value = 'a_uuid'
ovs.set_config('mykey', 'myvalue', 'mytable')
self.subprocess.check_call.assert_called_once_with(
['ovs-vsctl', 'set', 'Open_vSwitch', 'a_uuid',
'mytable:mykey=myvalue'])
|
the-stack_0_7936 | from block_getter import *
from pprint import pprint
import sys
import time
import os
def get_head_to_tail_blocks(tail_block_height, head_block_height, api_interval):
wanted_block_numbers = [block_number for block_number in range(tail_block_height, head_block_height + 1)]
lack_block_numbers = check_lack_blocks(wanted_block_numbers)
task_size = len(lack_block_numbers)
failed_times = 0
while True:
for lack_block_number in lack_block_numbers[:]:
if fetch_block_file(lack_block_number) == True:
lack_block_numbers.remove(lack_block_number)
sys.stdout.write('\rTask ' + str(task_size - len(lack_block_numbers)) + '/' + str(task_size))
sys.stdout.flush()
time.sleep(api_interval)
if len(lack_block_numbers) == 0:
break
else:
failed_times += 1
if failed_times > 10:
return False
merge_blocks_file(os.path.dirname(os.path.abspath(__file__)) + '/Outputs/MONA_Blocks.json')
make_transactions_file(os.path.dirname(os.path.abspath(__file__)) + '/Outputs/MONA_Blocks.json', os.path.dirname(os.path.abspath(__file__)) + '/Outputs/MONA_Transactions.json')
print('')
return True
if __name__ == '__main__':
    head_block_height = 0
    if len(sys.argv) == 1:
        merge_blocks_file(os.path.dirname(os.path.abspath(__file__)) + '/Outputs/MONA_Blocks.json')
        make_transactions_file(os.path.dirname(os.path.abspath(__file__)) + '/Outputs/MONA_Blocks.json', os.path.dirname(os.path.abspath(__file__)) + '/Outputs/MONA_Transactions.json')
        sys.exit(0)
    # Parse the tail height only after checking that it was actually supplied.
    tail_block_height = int(sys.argv[1])
if len(sys.argv) == 3:
# head to tail
head_block_height = int(sys.argv[2])
get_head_to_tail_blocks(tail_block_height, head_block_height, 1)
sys.exit(0)
else:
# highest to tail
while True:
head_block_height = get_max_height()
if head_block_height == -1:
head_block_height = tail_block_height
get_head_to_tail_blocks(tail_block_height, head_block_height, 1)
time.sleep(10)
sys.exit(-1) |
the-stack_0_7938 | #!/usr/bin/env python3
import argparse
from distutils.util import strtobool
import logging
from espnet.transform.transformation import Transformation
from espnet.utils.cli_readers import file_reader_helper
from espnet.utils.cli_utils import get_commandline_args
from espnet.utils.cli_utils import is_scipy_wav_style
from espnet.utils.cli_writers import file_writer_helper
def get_parser():
parser = argparse.ArgumentParser(
description="copy feature with preprocessing",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument(
"--in-filetype",
type=str,
default="mat",
choices=["mat", "hdf5", "sound.hdf5", "sound"],
help="Specify the file format for the rspecifier. "
'"mat" is the matrix format in kaldi',
)
parser.add_argument(
"--out-filetype",
type=str,
default="mat",
choices=["mat", "hdf5", "sound.hdf5", "sound"],
help="Specify the file format for the wspecifier. "
'"mat" is the matrix format in kaldi',
)
parser.add_argument(
"--write-num-frames", type=str, help="Specify wspecifer for utt2num_frames"
)
parser.add_argument(
"--compress", type=strtobool, default=False, help="Save in compressed format"
)
parser.add_argument(
"--compression-method",
type=int,
default=2,
help="Specify the method(if mat) or " "gzip-level(if hdf5)",
)
parser.add_argument(
"--preprocess-conf",
type=str,
default=None,
help="The configuration file for the pre-processing",
)
parser.add_argument(
"rspecifier", type=str, help="Read specifier for feats. e.g. ark:some.ark"
)
parser.add_argument(
"wspecifier", type=str, help="Write specifier. e.g. ark:some.ark"
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
# logging info
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
if args.verbose > 0:
logging.basicConfig(level=logging.INFO, format=logfmt)
else:
logging.basicConfig(level=logging.WARN, format=logfmt)
logging.info(get_commandline_args())
if args.preprocess_conf is not None:
preprocessing = Transformation(args.preprocess_conf)
logging.info("Apply preprocessing: {}".format(preprocessing))
else:
preprocessing = None
with file_writer_helper(
args.wspecifier,
filetype=args.out_filetype,
write_num_frames=args.write_num_frames,
compress=args.compress,
compression_method=args.compression_method,
) as writer:
for utt, mat in file_reader_helper(args.rspecifier, args.in_filetype):
if is_scipy_wav_style(mat):
# If data is sound file, then got as Tuple[int, ndarray]
rate, mat = mat
if preprocessing is not None:
mat = preprocessing(mat, uttid_list=utt)
# shape = (Time, Channel)
if args.out_filetype in ["sound.hdf5", "sound"]:
# Write Tuple[int, numpy.ndarray] (scipy style)
writer[utt] = (rate, mat)
else:
writer[utt] = mat
if __name__ == "__main__":
main()
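# Example invocation (hypothetical file names):
#   python copy_feats.py --in-filetype mat --out-filetype hdf5 \
#       --write-num-frames ark,t:utt2num_frames ark:feats.ark ark:feats.h5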
|
the-stack_0_7939 | from collections import OrderedDict
from inspect import Signature, Parameter
from typing import Any
from typing import List
import torch
from nncf.common.graph.definitions import MODEL_INPUT_OP_NAME
from nncf.common.graph.definitions import MODEL_OUTPUT_OP_NAME
from nncf.torch.dynamic_graph.patch_pytorch import register_operator
from nncf.torch.dynamic_graph.graph_tracer import ModelInputInfo, create_mock_tensor
from nncf.torch.utils import is_tensor, is_traced_tensor
from nncf.torch.nested_objects_traversal import objwalk
from nncf.common.utils.logger import logger as nncf_logger
@register_operator(name=MODEL_INPUT_OP_NAME)
def nncf_model_input(tensor: 'torch.Tensor'):
return tensor
@register_operator(name=MODEL_OUTPUT_OP_NAME)
def nncf_model_output(tensor: 'torch.Tensor'):
return tensor
def wrap_nncf_model_inputs_with_objwalk(model_args, model_kwargs):
model_args = objwalk(model_args, is_tensor, nncf_model_input)
model_kwargs = objwalk(model_kwargs, is_tensor, nncf_model_input)
return model_args, model_kwargs
def wrap_nncf_model_outputs_with_objwalk(model_outputs):
model_outputs = objwalk(model_outputs, is_traced_tensor, nncf_model_output)
return model_outputs
def replicate_same_tensors(obj: Any) -> Any:
"""
Required to handle the situation when multiple references to one and the
same tensor are present in the input. If tensor replication is not done, then
at runtime one and the same tensor could be wrapped by input/output wrappers twice,
which will disrupt the traced graph structure and possibly hook calls.
"""
observed_tensor_object_ids = set() # type: Set[int]
def replicate_fn(tensor: torch.Tensor) -> torch.Tensor:
tensor_object_id = id(tensor)
if tensor_object_id in observed_tensor_object_ids:
return tensor.clone()
observed_tensor_object_ids.add(tensor_object_id)
return tensor
obj = objwalk(obj, is_tensor, replicate_fn)
return obj
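# Minimal sketch of the behaviour described in the docstring above (assumes torch
# is importable at runtime): when the same tensor object appears twice in the
# input, the duplicate reference is replaced by a clone, so later input/output
# wrapping never touches one tensor twice.
def _example_replicate_same_tensors() -> None:
    t = torch.ones(2)
    out = replicate_same_tensors((t, t))
    assert out[0] is not out[1]          # duplicates are now distinct objects
    assert torch.equal(out[0], out[1])   # but hold the same values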
class InputInfoWrapManager:
INPUTS_MISMATCH_WARNING_TEXT = "Compression with regards to this input may occur incorrectly. Make sure " \
"you call the compressed model with inputs that correspond to what NNCF was " \
"configured to expect (either via NNCF config's input_infos, or custom" \
"dummy_forward_fn/wrap_inputs_fn parameters), or that you know what you are " \
"doing. This warning will not be shown again."
ARGS_INPUTS_MISMATCH_FORMAT_STRING = "Inputs mismatch - could not find arg with idx {} in NNCF-wrapped model " \
"input args! " + INPUTS_MISMATCH_WARNING_TEXT
KWARGS_INPUTS_MISMATCH_FORMAT_STRING = "Inputs mismatch - could not find kwarg '{}' in NNCF-wrapped model input " \
"kwargs! " + INPUTS_MISMATCH_WARNING_TEXT
def __init__(self, input_infos: List[ModelInputInfo],
fwd_signature: Signature,
module_ref_for_device: torch.nn.Module = None):
self._module_ref_for_device = module_ref_for_device
arg_iis_list = [ii for ii in input_infos if ii.keyword is None]
kwarg_iis_list = [(ii.keyword, ii) for ii in input_infos if ii.keyword is not None]
kwarg_iis = OrderedDict()
arg_iis = tuple(arg_iis_list)
for kw, ii in kwarg_iis_list:
kwarg_iis[kw] = ii
bound_params = fwd_signature.bind(*arg_iis, **kwarg_iis)
self._fwd_params_to_input_infos_odict = bound_params.arguments
self._fwd_signature = fwd_signature # type: Signature
def set_device(self, device: str):
self._device = device
def wrap_inputs(self, model_args, model_kwargs):
bound_model_params = self._fwd_signature.bind(*model_args, **model_kwargs)
for param_name in self._fwd_params_to_input_infos_odict:
param_kind = self._fwd_signature.parameters[param_name].kind
if param_kind is Parameter.VAR_POSITIONAL or param_kind is Parameter.VAR_KEYWORD:
nncf_logger.warning("An input_info tensor was bound to a *args or **kwargs variadic parameter in the"
"forward's signature! This is currently unsupported by NNCF. Input compression may "
"be incorrect.")
# Currently won't support input info mapping to *args or **kwargs-mapped parameters
continue
if param_name not in bound_model_params.arguments:
nncf_logger.warning("A call to a compressed model's forward occured without one of the params"
"specified in input_infos! Input compression may be incorrect. Trying to recover "
"by wrapping the default value for the parameter.")
bound_model_params.apply_defaults()
potential_tensor = bound_model_params.arguments[param_name]
if potential_tensor is not None:
bound_model_params.arguments[param_name] = nncf_model_input(bound_model_params.arguments[param_name])
else:
# Default was None - cannot wrap as-is. Will wrap a dummy tensor as specified in
# input infos - will conserve the call order of nncf_model_input nodes,
# and the post-hooks for the input node will execute. The result won't go anywhere, though.
nncf_logger.warning("Wrapping a dummy tensor for input {}".format(param_name))
info_for_missing_input = self._fwd_params_to_input_infos_odict[param_name]
device = 'cuda'
if self._module_ref_for_device is not None:
device = next(self._module_ref_for_device.parameters()).device
dummy_tensor = create_mock_tensor(info_for_missing_input, device)
_ = nncf_model_input(dummy_tensor)
return bound_model_params.args, bound_model_params.kwargs
|
the-stack_0_7941 | # -*- coding: utf-8 -*-
import logging
import rest_framework_swagger.renderers as rest_swagger_renderers
from django.core import urlresolvers
from zope.dottedname import resolve
logger = logging.getLogger(__name__)
def resolve_swagger_doc(url, method):
resolve_result = urlresolvers.resolve(url)
swaggerdoc = getattr(resolve_result.func, '__swaggerdoc__', None)
if swaggerdoc:
return swaggerdoc
view_class = resolve.resolve(resolve_result.view_name)
view = getattr(view_class, method.lower(), None)
return getattr(view, '__swaggerdoc__', None)
def overwrite_data(url, method, data, swaggerdoc):
additional_data = dict(swaggerdoc).get(method, {})
try:
data['paths'][url][method].update(additional_data)
except (KeyError, TypeError, AttributeError) as err:
logger.debug('Cannot update swagger data: %r', err)
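# Small sketch (hypothetical data, not a real Django view): the hand-written
# swagger doc for a method is merged into the generated path entry in place.
def _example_overwrite_data():
    data = {'paths': {'/items/': {'get': {'summary': 'generated'}}}}
    swaggerdoc = {'get': {'summary': 'curated summary', 'tags': ['items']}}
    overwrite_data('/items/', 'get', data, swaggerdoc)
    assert data['paths']['/items/']['get']['summary'] == 'curated summary'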
class SwaggerAdditinalDocRenderer(rest_swagger_renderers.OpenAPIRenderer):
def add_customizations(self, data, renderer_context):
super(SwaggerAdditinalDocRenderer, self).add_customizations(
data, renderer_context)
for url, path in data['paths'].items():
for method, method_data in path.items():
swaggerdoc = resolve_swagger_doc(url, method)
if swaggerdoc:
overwrite_data(url, method, data, swaggerdoc)
|
the-stack_0_7943 | import artm
import numpy as np
import shutil
import pytest
import warnings
from ..cooking_machine.models.topic_model import TopicModel
from ..cooking_machine.dataset import Dataset, W_DIFF_BATCHES_1
from ..viewers import top_documents_viewer
NUM_TOPICS = 5
NUM_DOCUMENT_PASSES = 1
NUM_ITERATIONS = 10
class TestTopDocumentsViewer:
""" """
topic_model = None
theta = None
top_documents_viewer = None
@classmethod
def setup_class(cls):
""" """
with warnings.catch_warnings():
warnings.filterwarnings(action="ignore", message=W_DIFF_BATCHES_1)
dataset = Dataset('tests/test_data/test_dataset.csv')
dictionary = dataset.get_dictionary()
batch_vectorizer = dataset.get_batch_vectorizer()
model_artm = artm.ARTM(
num_topics=NUM_TOPICS,
cache_theta=True,
num_document_passes=NUM_DOCUMENT_PASSES,
dictionary=dictionary,
scores=[artm.PerplexityScore(name='PerplexityScore')],)
cls.topic_model = TopicModel(model_artm, model_id='model_id')
cls.topic_model._fit(batch_vectorizer, num_iterations=NUM_ITERATIONS)
cls.theta = cls.topic_model.get_theta(dataset=dataset)
cls.top_documents_viewer = top_documents_viewer.TopDocumentsViewer(model=cls.topic_model)
@classmethod
def teardown_class(cls):
""" """
shutil.rmtree("tests/test_data/test_dataset_batches")
def test_check_output_format(self):
""" """
topics_documents = TestTopDocumentsViewer.top_documents_viewer.view()
assert isinstance(topics_documents, list), 'Result of view() not of type "list"'
assert all(isinstance(topic_documents, list) for topic_documents in topics_documents),\
'Some elements in the result list of view() not of type "list"'
def test_check_output_content(self):
""" """
num_documents = TestTopDocumentsViewer.theta.shape[1]
documents_indices = list(range(num_documents))
topics_documents_from_viewer = TestTopDocumentsViewer.top_documents_viewer.view()
documents_from_viewer = merge_lists(topics_documents_from_viewer)
assert sorted(documents_from_viewer) == documents_indices,\
'Viewer returned as documents "{0}".' \
'But expected to get documents\' indices from "0" to "{1}"'.format(
documents_from_viewer, num_documents - 1)
def test_check_precomputed_distances_parameter_workable(self):
""" """
index_of_topic_to_be_nearest_to_all_documents = 0
distances_all_one_except_to_one_topic = np.ones_like(TestTopDocumentsViewer.theta.values)
distances_all_one_except_to_one_topic[:, index_of_topic_to_be_nearest_to_all_documents] = 0
documents_viewer = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
precomputed_distances=distances_all_one_except_to_one_topic)
topics_documents = documents_viewer.view()
num_documents_in_nearest_topic = len(
topics_documents[index_of_topic_to_be_nearest_to_all_documents])
num_documents = TestTopDocumentsViewer.theta.shape[1]
assert num_documents_in_nearest_topic == num_documents,\
'Expected to see all documents in one topic.' \
'But the topic has "{}" documents instead of "{}"'.format(
num_documents_in_nearest_topic, num_documents)
@pytest.mark.parametrize("max_num_top_documents", [0, 1])
def test_check_max_top_documents_number_parameter_workable(self, max_num_top_documents):
""" """
documents_viewer = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
max_top_number=max_num_top_documents)
topics_documents = documents_viewer.view()
assert all(len(topic_documents) <= max_num_top_documents
for topic_documents in topics_documents),\
'Not all top documents lists from "{}" have less elements than required "{}"'.format(
topics_documents, max_num_top_documents)
def test_check_object_clusters_parameter_workable(self):
""" """
num_documents = TestTopDocumentsViewer.theta.shape[1]
cluster_label_to_be_same_for_all_documents = 0
cluster_labels = list(
cluster_label_to_be_same_for_all_documents for _ in range(num_documents))
documents_viewer = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
object_clusters=cluster_labels)
topics_documents = documents_viewer.view()
num_documents_with_given_cluster_label = len(
topics_documents[cluster_label_to_be_same_for_all_documents])
assert num_documents_with_given_cluster_label == num_documents,\
'Marked all documents with label "{}".' \
'Expected to see all "{}" documents in that topic,' \
'but there are only "{}" documents'.format(
cluster_label_to_be_same_for_all_documents, num_documents,
num_documents_with_given_cluster_label)
@pytest.mark.parametrize("illegal_cluster_label", [-1, NUM_TOPICS])
def test_check_object_clusters_parameter_validates_range_of_input_labels(
self, illegal_cluster_label):
""" """
num_documents = TestTopDocumentsViewer.theta.shape[1]
cluster_labels = list(0 for _ in range(num_documents))
cluster_labels[0] = illegal_cluster_label
with pytest.raises(ValueError):
_ = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
object_clusters=cluster_labels).view()
def merge_lists(iterable_of_lists):
""" """
result = []
for i in iterable_of_lists:
result += i
return result
|
the-stack_0_7948 |
import tifffile
import tqdm
import os
import numpy as np
import sys
import fnmatch
def scandir(path, pat, cb):
for root, dirs, files in os.walk(path):
head, tail = os.path.split(root)
for file in files:
if fnmatch.fnmatch(file, pat):
fn = os.path.join(root, file)
cb(fn)
def sumframes(tiffinput, tiffoutput, numframes):
print (tiffoutput)
with tifffile.TiffWriter(tiffoutput) as out_tif:
with tifffile.TiffFile(tiffinput) as in_tif:
total = len(in_tif.pages)
framesum = in_tif.pages[0].asarray()*0
n = 0
for f in range(total):
framesum += in_tif.pages[f].asarray()
n += 1
if (n==numframes):
out_tif.save(framesum.astype(dtype=np.uint16))
sys.stdout.write(f"\rframe {f}/{total} ({f/total*100:.2f}%)")
n=0
framesum *= 0
print()
def _process(args):
path, outdir, numframes = args
print(f"pid={os.getpid()}: {path}")
filename = os.path.split(path)[1]
os.makedirs(outdir, exist_ok=True)
outfile = outdir + filename
sumframes(path,outfile,numframes)
def sumframes_dir(inputdir, outputdir, numframes):
params = []
def cb(fn):
args=[ fn, outputdir, numframes]
#params.append(args)
_process(args)
scandir(inputdir, "*.tif", cb)
# p = Pool(8)
# p.map(_process, params)
if __name__ == "__main__":
sumframes_dir('O:/mod/', 'O:/mod-sumframes/', 6)
# split_tiff('../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-1_0.tif',
# '../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-psplit-1_0.tif', 0.5, 100.2, 1/0.47, 300,
# '../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-psplit-1_0-bg.tif') |
the-stack_0_7949 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: RShirohara
import argparse
import inspect
import sys
import novelconverter
_EXTENSION = (
"markdown",
"ddmarkdown",
"pixiv"
)
def get_args():
_parser = argparse.ArgumentParser(
description=novelconverter.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
_parser.add_argument(
"from_format", type=str, help="Format of the original text"
)
_parser.add_argument(
"to_format", type=str, help="Format of the output text"
)
_parser.add_argument(
"-o", "--output", type=str, help="File path of the output text"
)
_parser.add_argument(
"-i", "--input", type=str, help="File path of the original text"
)
args = _parser.parse_args()
return args
def load_data(path):
if path:
with open(path, "r") as _f:
source = _f.read()
else:
source = sys.stdin.read()
return source
def export_data(source, path):
if path:
with open(path, "w") as _f:
_f.write(source)
else:
print(source)
def load_extension(ext_name, proc_name):
    # Check that ext_name exists
if ext_name not in _EXTENSION:
raise ValueError(f"No extension named {ext_name} exists.")
ext = eval(f"novelconverter.extension.{ext_name}")
_in_processor = [
x[0] for x in inspect.getmembers(ext, inspect.isfunction)
]
processor = {
x.replace("build_", ""): eval(f"ext.{x}", {"ext": ext})
for x in _in_processor
}
    # Check that proc_name exists
if proc_name not in processor.keys():
sys.stderr.write(f"No processor named {proc_name} exists.\n")
return novelconverter.util.Processor()
return processor[proc_name]()
class NovelConverter(novelconverter.NovelConverter):
def build_registry(self, from_form, to_form):
self.inlineparser = load_extension(from_form, "inlineparser")
self.blockparser = load_extension(from_form, "blockparser")
self.renderer = load_extension(to_form, "renderer")
self.preprocessor = load_extension(from_form, "preprocessor")
self.postprocessor = load_extension(to_form, "postprocessor")
def main():
args = get_args()
nv = NovelConverter()
nv.build_registry(args.from_format, args.to_format)
source = load_data(args.input)
result = nv.convert(source)
export_data(result, args.output)
if __name__ == "__main__":
main()
|
the-stack_0_7950 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place <[email protected]>`
'''
# Import python libs
from __future__ import absolute_import
import copy
import os
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
# Import salt libs
from salt import minion
from salt.utils import event
from salt.exceptions import SaltSystemExit
import salt.syspaths
import tornado
ensure_in_syspath('../')
__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MinionTestCase(TestCase):
def test_invalid_master_address(self):
with patch.dict(__opts__, {'ipv6': False, 'master': float('127.0'), 'master_port': '4555', 'retry_dns': False}):
self.assertRaises(SaltSystemExit, minion.resolve_dns, __opts__)
@skipIf(os.geteuid() != 0, 'You must be logged in as root to run this test')
def test_sock_path_len(self):
'''
This tests whether or not a larger hash causes the sock path to exceed
the system's max sock path length. See the below link for more
information.
https://github.com/saltstack/salt/issues/12172#issuecomment-43903643
'''
opts = {
'id': 'salt-testing',
'hash_type': 'sha512',
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
'extension_modules': ''
}
with patch.dict(__opts__, opts):
try:
event_publisher = event.AsyncEventPublisher(__opts__)
result = True
except SaltSystemExit:
result = False
self.assertTrue(result)
# Tests for _handle_decoded_payload in the salt.minion.Minion() class: 3
def test_handle_decoded_payload_jid_match_in_jid_queue(self):
'''
Tests that the _handle_decoded_payload function returns when a jid is given that is already present
in the jid_queue.
Note: This test doesn't contain all of the patch decorators above the function like the other tests
for _handle_decoded_payload below. This is essential to this test as the call to the function must
return None BEFORE any of the processes are spun up because we should be avoiding firing duplicate
jobs.
'''
mock_opts = {'cachedir': '',
'extension_modules': ''}
mock_data = {'fun': 'foo.bar',
'jid': 123}
mock_jid_queue = [123]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
ret = minion._handle_decoded_payload(mock_data)
self.assertEqual(minion.jid_queue, mock_jid_queue)
self.assertIsNone(ret)
finally:
minion.destroy()
@patch('salt.minion.Minion.ctx', MagicMock(return_value={}))
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True))
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True))
def test_handle_decoded_payload_jid_queue_addition(self):
'''
Tests that the _handle_decoded_payload function adds a jid to the minion's jid_queue when the new
jid isn't already present in the jid_queue.
'''
mock_jid = 11111
mock_opts = {'cachedir': '',
'extension_modules': '',
'minion_jid_queue_hwm': 100}
mock_data = {'fun': 'foo.bar',
'jid': mock_jid}
mock_jid_queue = [123, 456]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
# Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
# This can help debug any test failures if the _handle_decoded_payload call fails.
self.assertEqual(minion.jid_queue, mock_jid_queue)
# Call the _handle_decoded_payload function and update the mock_jid_queue to include the new
# mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't
# previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal.
minion._handle_decoded_payload(mock_data)
mock_jid_queue.append(mock_jid)
self.assertEqual(minion.jid_queue, mock_jid_queue)
finally:
minion.destroy()
@patch('salt.minion.Minion.ctx', MagicMock(return_value={}))
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True))
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True))
def test_handle_decoded_payload_jid_queue_reduced_minion_jid_queue_hwm(self):
'''
Tests that the _handle_decoded_payload function removes a jid from the minion's jid_queue when the
minion's jid_queue high water mark (minion_jid_queue_hwm) is hit.
'''
mock_opts = {'cachedir': '',
'extension_modules': '',
'minion_jid_queue_hwm': 2}
mock_data = {'fun': 'foo.bar',
'jid': 789}
mock_jid_queue = [123, 456]
try:
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
# Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
# This can help debug any test failures if the _handle_decoded_payload call fails.
self.assertEqual(minion.jid_queue, mock_jid_queue)
# Call the _handle_decoded_payload function and check that the queue is smaller by one item
# and contains the new jid
minion._handle_decoded_payload(mock_data)
self.assertEqual(len(minion.jid_queue), 2)
self.assertEqual(minion.jid_queue, [456, 789])
finally:
minion.destroy()
if __name__ == '__main__':
from integration import run_tests
run_tests(MinionTestCase, needs_daemon=False)
|
the-stack_0_7952 | #!/usr/bin/env python
import sys, time, re
from splunklib.searchcommands import \
dispatch, EventingCommand, Configuration, Option, validators
from libtf.logparsers import TFAuthLog, TFHttpLog, TFGenericLog
import ConfigParser
import os
import StringIO
import subprocess
@Configuration()
class ReaperCommand(EventingCommand):
""" Filters out noise from Splunk queries by leveraging the Threshing Floor
API.
##Syntax
.. code-block::
reaper logtype=<http, auth, generic> <port=<int>:<'udp|tcp'>>
##Description
The :code:`reaper` command filters network security noise from HTTP logs,
ssh access logs, and generic log files.
"""
BASE_URI = "https://api.threshingfloor.io"
API_KEY = ""
logtype = Option(
doc='''**Syntax:** **type'=***<event-type>*
**Description:** The type of events you wish to reduce. Can be `http`, `auth`, or `generic`.''',
name='type',
validate=validators.Set('http', 'auth', 'generic'))
ports = Option()
def get_config(self, conf_file_name, section):
env = dict()
env.update(os.environ)
splunk_home = env.get('SPLUNK_HOME', '/Applications/Splunk')
btool = os.path.join(splunk_home, "bin", "btool")
tmp = subprocess.Popen([btool, conf_file_name, "list"],
stdout=subprocess.PIPE, env=env)
(output, error) = tmp.communicate()
f = StringIO.StringIO()
f.write(output)
f.seek(0)
cfgparse = ConfigParser.RawConfigParser()
cfgparse.readfp(f)
cfg = dict()
for opt in cfgparse.options(section):
cfg[opt] = cfgparse.get(section, opt)
return cfg
def transform(self, events):
# We have like, 3 copies of the events which is not optimal
dictEvent = []
rawEvents = []
# Save off the events so they can be parsed by the library
for event in events:
dictEvent.append(event)
rawEvents.append(event['_raw'])
# Set to generic mode if ports are present and no type is specified
if self.logtype == None and self.ports != None:
self.logtype = 'generic'
else:
self.logtype = self.guessType(rawEvents)
# Send an error if
if self.logtype == 'generic' and self.ports == None:
raise Exception("Generic mode requires the port option.")
# Get the ports of we have them
if self.ports:
ports = self.ports.split(";")
# Initialize the correct log type
if self.logtype == 'auth':
analyzed = TFAuthLog(rawEvents, self.API_KEY, self.BASE_URI)
elif self.logtype == 'http':
analyzed = TFHttpLog(rawEvents, self.API_KEY, self.BASE_URI)
elif self.logtype == 'generic':
analyzed = TFGenericLog(rawEvents, ports, self.API_KEY, self.BASE_URI)
else:
raise TFException("Failed to parse the query.")
reduced = analyzed.reduce()
reducedItem = reduced.next()
for i in range(0, len(dictEvent)):
if dictEvent[i]['_raw'] == reducedItem:
yield dictEvent[i]
reducedItem = reduced.next()
return
def guessType(self, logfile, baseName=None):
REGEX_HTTP = "^\[(?P<timestamp>.+)?\]\s\"(?P<request>.+?)\"\s(?P<responseCode>\d+)\s(?P<size>\d+)(?P<combinedFields>.*)"
# If we can't do that, we will read 10 lines in, then try to match with a regular expression
logline = logfile[min(10, len(logfile)-1)]
try:
# See if it's http
splitLine = logline.split()
m = re.search(REGEX_HTTP, " ".join(splitLine[3:]))
if m:
return 'http'
# See if it's auth
try:
# Try and make a timestamp from the beginning of the line
if int(time.mktime(time.strptime(" ".join(splitLine[0:3]) + " " + "2017", "%b %d %H:%M:%S %Y"))) > 0:
return 'auth'
except Exception as e:
pass
# If we haven't returned by now, we can't figure out the type
raise TFException("Unable to automatically identify the log type. Please specify a type with the -t flag.")
except IOError as e:
exit()
def __init__(self):
EventingCommand.__init__(self)
conf = self.get_config('threshingfloor', 'api-config')
self.BASE_URI = conf.get('base_uri', None)
self.API_KEY = conf.get('api_key', None)
dispatch(ReaperCommand, sys.argv, sys.stdin, sys.stdout, __name__)
|
the-stack_0_7954 | import time
import numpy as np
from scipy import optimize
from statsmodels.distributions.empirical_distribution import ECDF
from numba import njit, prange, double, int64, boolean
# plotting
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
# consav
from consav.misc import nonlinspace
from consav.misc import normal_gauss_hermite
from consav import linear_interp
from consav import ModelClass # only used with numba
##########################
# 1. pure Python version #
##########################
class ConsumptionSavingModelClass():
#########
# setup #
#########
def __init__(self,name='baseline',solmethod='EGM',**kwargs):
""" setup model sub-classes and parameters in .par """
# a. set baseline parameters
self.name = name
self.solmethod = solmethod
# parameters and grids
class ParClass: None
self.par = ParClass()
# solution
class SolClass: None
self.sol = SolClass()
# simulation
class SimClass: None
self.sim = SimClass()
self.setup()
# b. update parameters
for key,val in kwargs.items():
setattr(self.par,key,val) # like par.key = val
def setup(self):
""" baseline parameters in .par """
par = self.par
# a. demographics
par.T = 200
par.TR = par.T # retirement age (end-of-period), no retirement if TR = T
par.age_min = 25 # only relevant for figures
# b. preferences
par.rho = 2
par.beta = 0.96
# c. income parameters
# growth
par.G = 1.02
# standard deviations
par.sigma_xi = 0.1
par.sigma_psi = 0.1
# low income shock
par.low_p = 0.005 # called pi in slides
par.low_val = 0.0 # called mu in slides
# life-cycle
par.L = np.ones(par.T) # if ones then no life-cycle
# d. saving and borrowing
par.R = 1.04
par.borrowingfac = 0.0
# e. numerical integration and grids
par.a_max = 20.0 # maximum point i grid for a
par.a_phi = 1.1 # curvature parameters
par.m_max = 20.0 # maximum point i grid for m
par.m_phi = 1.1 # curvature parameters
# number of elements
par.Nxi = 8 # number of quadrature points for xi
par.Npsi = 8 # number of quadrature points for psi
par.Na = 500 # number of points in grid for a
par.Nm = 100 # number of points in grid for m
# f. simulation
par.sim_mini = 2.5 # initial m in simulation
par.simN = 100_000 # number of persons in simulation
par.simT = 100 # number of periods in simulation
par.simlifecycle = 0 # = 0 simulate infinite horizon model
def create_grids(self):
""" create grids and other preperations for solving the model"""
par = self.par
# a. perfect foresight or buffer-stock model
if par.sigma_xi == 0 and par.sigma_psi == 0 and par.low_p == 0: # no risk
self.model = 'pf' # perfect foresight
else:
self.model = 'bs' # buffer-stock
# b. shocks
# i. basic GuassHermite
psi, psi_w = normal_gauss_hermite(sigma=par.sigma_psi,n=par.Npsi)
xi, xi_w = normal_gauss_hermite(sigma=par.sigma_xi,n=par.Nxi)
# ii. add low income shock to xi
if par.low_p > 0:
# a. weights
xi_w *= (1.0-par.low_p)
xi_w = np.insert(xi_w,0,par.low_p)
# b. values
xi = (xi-par.low_val*par.low_p)/(1.0-par.low_p)
xi = np.insert(xi,0,par.low_val)
# iii. vectorize tensor product of shocks and total weight
psi_vec,xi_vec = np.meshgrid(psi,xi,indexing='ij')
psi_w_vec,xi_w_vec = np.meshgrid(psi_w,xi_w,indexing='ij')
par.psi_vec = psi_vec.ravel()
par.xi_vec = xi_vec.ravel()
par.w = xi_w_vec.ravel()*psi_w_vec.ravel()
assert 1-np.sum(par.w) < 1e-8 # == summing to 1
# iv. count number of shock nodes
par.Nshocks = par.w.size
# c. minimum a
if par.borrowingfac == 0:
            par.a_min = np.zeros(par.T) # never any borrowing
else:
# using formula from slides
psi_min = np.min(par.psi_vec)
xi_min = np.min(par.xi_vec)
par.a_min = np.nan*np.ones(par.T)
for t in reversed(range(par.T-1)):
if t >= par.TR-1: # in retirement
Omega = 0
elif t == par.TR-2: # next period is retirement
Omega = par.R**(-1)*par.G*par.L[t+1]*psi_min*xi_min
else: # before retirement
Omega = par.R**(-1)*(np.fmin(Omega,par.borrowingfac)+xi_min)*par.G*par.L[t+1]*psi_min
par.a_min[t] = -np.fmin(Omega,par.borrowingfac)*par.G*par.L[t+1]*psi_min
# d. end-of-period assets and cash-on-hand
par.grid_a = np.nan*np.ones((par.T,par.Na))
par.grid_m = np.nan*np.ones((par.T,par.Nm))
for t in range(par.T):
par.grid_a[t,:] = nonlinspace(par.a_min[t]+1e-6,par.a_max,par.Na,par.a_phi)
par.grid_m[t,:] = nonlinspace(par.a_min[t]+1e-6,par.m_max,par.Nm,par.m_phi)
# e. conditions
par.FHW = par.G/par.R
par.AI = (par.R*par.beta)**(1/par.rho)
par.GI = par.AI*np.sum(par.w*par.psi_vec**(-1))/par.G
par.RI = par.AI/par.R
par.WRI = par.low_p**(1/par.rho)*par.AI/par.R
par.FVA = par.beta*np.sum(par.w*(par.G*par.psi_vec)**(1-par.rho))
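        # Rough guide to these factors (cf. Carroll's buffer-stock theory):
        # FHW = G/R (finite human wealth), AI = (R*beta)^(1/rho) (absolute patience),
        # GI/RI = growth/return impatience, WRI = weak return impatience,
        # FVA = finite value of autarky; the checks below require GI<1 or RI<1 in the
        # perfect-foresight case and FVA<1 and WRI<1 with income risk.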
# f. fast solution with EGM
# grid_a tiled with the number of shocks
par.grid_a_tile = np.ones((par.TR,par.Na*par.Nshocks))
for t in range(par.TR):
par.grid_a_tile[t,:] = np.tile(par.grid_a[t,:],par.Nshocks)
# xi, psi and w repeated with the number of grid points for a
par.xi_vec_rep = np.repeat(par.xi_vec,par.Na)
par.psi_vec_rep = np.repeat(par.psi_vec,par.Na)
par.w_rep = np.repeat(par.w,par.Na)
# g. check for existance of solution
self.print_and_check_parameters(do_print=False)
def print_and_check_parameters(self,do_print=True):
""" print and check parameters """
par = self.par
if do_print:
print(f'FHW = {par.FHW:.3f}, AI = {par.AI:.3f}, GI = {par.GI:.3f}, RI = {par.RI:.3f}, WRI = {par.WRI:.3f}, FVA = {par.FVA:.3f}')
# check for existance of solution
if self.model == 'pf' and par.GI >= 1 and par.RI >= 1:
raise Exception('GI >= 1 and RI >= 1: no solution')
if self.model == 'bs' and (par.FVA >= 1 or par.WRI >= 1):
raise Exception('FVA >= 1 or WRI >= 1: no solution')
def utility(self,c):
""" utility function """
return c**(1-self.par.rho)/(1-self.par.rho)
def marg_utility(self,c):
""" marginal utility function """
return c**(-self.par.rho)
def inv_marg_utility(self,u):
""" inverse marginal utility function """
return u**(-1/self.par.rho)
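# quick worked check of the CRRA relations above (illustrative numbers only):
# with rho = 2 and c = 2: utility(2) = 2**(-1)/(-1) = -0.5,
# marg_utility(2) = 2**(-2) = 0.25, inv_marg_utility(0.25) = 0.25**(-0.5) = 2.0,
# so inv_marg_utility(marg_utility(c)) recovers c as expected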
#########
# solve #
#########
def solve(self,do_print=True):
""" gateway for solving the model """
# a. create (or re-create) grids
self.create_grids()
# b. solve
if self.solmethod in ['EGM','EGMvec']:
self.solve_EGM(do_print=do_print)
elif self.solmethod == 'VFI':
self.solve_VFI(do_print=do_print)
else:
raise Exception(f'{self.solmethod} is an unknown solution method')
def solve_EGM(self,do_print):
""" solve model using EGM """
t0 = time.time()
par = self.par
sol = self.sol
# a. allocate
sol.m = np.zeros((par.T,par.Na+1))
sol.c = np.zeros((par.T,par.Na+1))
sol.inv_v = np.zeros((par.T,par.Na+1))
# working memory
m = np.zeros(par.Na)
c = np.zeros(par.Na)
inv_v = np.zeros(par.Na)
# b. last period (= consume all)
sol.m[-1,:] = np.linspace(0,par.a_max,par.Na+1)
sol.c[-1,:] = sol.m[-1,:]
sol.inv_v[-1,0] = 0
sol.inv_v[-1,1:] = 1.0/self.utility(sol.c[-1,1:])
# c. before last period
for t in reversed(range(par.T-1)):
# i. solve by EGM
if self.solmethod == 'EGM':
self.EGM(t,m,c,inv_v)
elif self.solmethod == 'EGMvec':
self.EGMvec(t,m,c,inv_v)
# ii. add zero consumption
sol.m[t,0] = par.a_min[t]
sol.m[t,1:] = m
sol.c[t,0] = 0
sol.c[t,1:] = c
sol.inv_v[t,0] = 0
sol.inv_v[t,1:] = inv_v
if do_print:
print(f'model solved in {time.time()-t0:.1f} secs')
def EGM(self,t,m,c,inv_v):
""" EGM with partly vectorized code """
par = self.par
sol = self.sol
# loop over end-of-period assets
for i_a in range(par.Na):
# a. prep
a = par.grid_a[t,i_a]
if t+1 <= par.TR-1: # still working in next period
fac = par.G*par.L[t]*par.psi_vec
w = par.w
xi = par.xi_vec
else:
fac = par.G*par.L[t]
w = 1
xi = 1
inv_fac = 1.0/fac
# b. future m and c (vectors)
m_plus = inv_fac*par.R*a + xi
c_plus = np.zeros(m_plus.size)
linear_interp.interp_1d_vec(sol.m[t+1,:],sol.c[t+1,:],m_plus,c_plus)
inv_v_plus = np.zeros(m_plus.size)
linear_interp.interp_1d_vec(sol.m[t+1,:],sol.inv_v[t+1,:],m_plus,inv_v_plus)
v_plus = 1.0/inv_v_plus
# c. average future marginal utility (number)
marg_u_plus = self.marg_utility(fac*c_plus)
avg_marg_u_plus = np.sum(w*marg_u_plus)
avg_v_plus = np.sum(w*(fac**(1-par.rho))*v_plus)
# d. current c
c[i_a] = self.inv_marg_utility(par.beta*par.R*avg_marg_u_plus)
# e. current m
m[i_a] = a + c[i_a]
# f. current v
if c[i_a] > 0:
inv_v[i_a] = 1.0/(self.utility(c[i_a]) + par.beta*avg_v_plus)
else:
inv_v[i_a] = 0
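# (for reference: the EGM step above inverts the consumption Euler equation
# u'(c_t) = beta*R*E_t[(G*L_t*psi_{t+1})**(-rho) * u'(c_{t+1}(m_{t+1}))],
# where m_{t+1} = R*a_t/(G*L_t*psi_{t+1}) + xi_{t+1}; the endogenous grid point
# is then simply m_t = a_t + c_t, so no root-finding is required)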
def EGMvec(self,t,m,c,inv_v):
""" EGM with fully vectorized code """
par = self.par
sol = self.sol
# a. prep
if t+1 <= par.TR-1: # still working in next period
a = par.grid_a_tile[t,:]
fac = par.G*par.L[t]*par.psi_vec_rep
w = par.w_rep
xi = par.xi_vec_rep
Nshocks = par.Nshocks
else: # retired in next period (no shocks)
a = par.grid_a[t,:]
fac = par.G*par.L[t]
w = 1
xi = 1
Nshocks = 1
inv_fac = 1.0/fac
# b. future m and c
m_plus = inv_fac*par.R*a + xi
c_plus = np.zeros(m_plus.size)
linear_interp.interp_1d_vec(sol.m[t+1,:],sol.c[t+1,:],m_plus,c_plus)
inv_v_plus = np.zeros(m_plus.size)
linear_interp.interp_1d_vec(sol.m[t+1,:],sol.inv_v[t+1,:],m_plus,inv_v_plus)
v_plus = 1.0/inv_v_plus
# c. average future marginal utility
marg_u_plus = self.marg_utility(fac*c_plus)
avg_marg_u_plus = np.sum( (w*marg_u_plus).reshape((Nshocks,par.Na) ),axis=0)
avg_v_plus = np.sum( (w*(fac**(1-par.rho))*v_plus).reshape((Nshocks,par.Na) ),axis=0)
# d. current c
c[:] = self.inv_marg_utility(par.beta*par.R*avg_marg_u_plus)
# e. current m
m[:] = par.grid_a[t,:] + c
# f. current v
I = c > 0
inv_v[I] = 1.0/(self.utility(c[I]) + par.beta*avg_v_plus[I])
inv_v[~I] = 0.0
def solve_VFI(self,do_print):
""" solve model with VFI """
t0 = time.time()
par = self.par
sol = self.sol
# a. allocate solution
sol.m = np.nan*np.ones((par.T,par.Nm))
sol.c = np.nan*np.ones((par.T,par.Nm))
sol.inv_v = np.nan*np.ones((par.T,par.Nm))
# b. last period (= consume all)
sol.m[-1,:] = par.grid_m[-1,:]
sol.c[-1,:] = sol.m[-1,:]
sol.inv_v[-1,:] = 1.0/self.utility(sol.c[-1,:])
# c. before last period
for t in reversed(range(par.T-1)):
for i_m in range(par.Nm):
m = par.grid_m[t,i_m]
result = optimize.minimize_scalar(
lambda c: self.value_of_choice(c,t,m),method='bounded',
bounds=(0,m))
sol.c[t,i_m] = result.x
sol.inv_v[t,i_m]= -1/result.fun
# save grid for m
sol.m[t,:] = par.grid_m[t,:]
if do_print:
print(f'model solved in {time.time()-t0:.1f} secs')
def value_of_choice(self,c,t,m):
""" value of choice of c used in VFI """
par = self.par
sol = self.sol
# a. end-of-period assets
a = m-c
# b. next-period cash-on-hand
if t+1 <= par.TR-1: # still working in next period
fac = par.G*par.L[t]*par.psi_vec
w = par.w
xi = par.xi_vec
else:
fac = par.G*par.L[t]
w = 1
xi = 1
m_plus = (par.R/fac)*a + xi
# c. continuation value
inv_v_plus = np.zeros(m_plus.size)
linear_interp.interp_1d_vec(sol.m[t+1,:],sol.inv_v[t+1,:],m_plus,inv_v_plus)
v_plus = 1/inv_v_plus
# d. value-of-choice
total = self.utility(c) + par.beta*np.sum(w*fac**(1-par.rho)*v_plus)
return -total
############
# simulate #
############
def simulate(self,seed=2017, do_print = True):
""" simulate the model """
np.random.seed(seed)
par = self.par
sim = self.sim
t0 = time.time()
# a. allocate
sim.m = np.nan*np.zeros((par.simN,par.simT))
sim.c = np.nan*np.zeros((par.simN,par.simT))
sim.a = np.nan*np.zeros((par.simN,par.simT))
sim.p = np.nan*np.zeros((par.simN,par.simT))
sim.y = np.nan*np.zeros((par.simN,par.simT))
# b. shocks
_shocki = np.random.choice(par.Nshocks,size=(par.simN,par.simT),p=par.w)
sim.psi = par.psi_vec[_shocki]
sim.xi = par.xi_vec[_shocki]
# c. initial values
sim.m[:,0] = par.sim_mini
sim.p[:,0] = 0.0
# d. simulation
self.simulate_timeloop()
# e. renormalize to levels
sim.P = np.exp(sim.p)
sim.Y = np.exp(sim.y)
sim.M = sim.m*sim.P
sim.C = sim.c*sim.P
sim.A = sim.a*sim.P
if do_print:
print(f'model simulated in {time.time()-t0:.1f} secs')
def simulate_timeloop(self):
""" simulate model with loop over time """
par = self.par
sol = self.sol
sim = self.sim
# loop over time
for t in range(par.simT):
# a. solution
if par.simlifecycle == 0:
grid_m = sol.m[0,:]
grid_c = sol.c[0,:]
else:
grid_m = sol.m[t,:]
grid_c = sol.c[t,:]
# b. consumption
linear_interp.interp_1d_vec(grid_m,grid_c,sim.m[:,t],sim.c[:,t])
sim.a[:,t] = sim.m[:,t] - sim.c[:,t]
# c. next-period states
if t < par.simT-1:
if t+1 > par.TR-1:
sim.m[:,t+1] = par.R*sim.a[:,t] / (par.G*par.L[t]) + 1
sim.p[:,t+1] = np.log(par.G) + np.log(par.L[t]) + sim.p[:,t]
sim.y[:,t+1] = sim.p[:,t+1]
else:
sim.m[:,t+1] = par.R*sim.a[:,t] / (par.G*par.L[t]*sim.psi[:,t+1]) + sim.xi[:,t+1]
sim.p[:,t+1] = np.log(par.G) + np.log(par.L[t]) + sim.p[:,t] + np.log(sim.psi[:,t+1])
I = sim.xi[:,t+1] > 0
sim.y[I,t+1] = sim.p[I,t+1] + np.log(sim.xi[I,t+1])
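# Example usage of the full class (a sketch, not from the original file; the exact
# constructor is assumed to mirror the Numba subclass defined near the end of this file):
# model = ConsumptionSavingModelClass(name='baseline', solmethod='EGM')
# model.solve() # solve backwards with EGM
# model.simulate() # simulate par.simN households for par.simT periods
# model.plot_buffer_stock_target() # figures are written under figs/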
##################
# solution plots #
##################
def plot_value_function_convergence(self):
par = self.par
sol = self.sol
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
for t in [par.T-1, par.T-2, par.T-6, par.T-11, 100, 50, 0]:
if t > par.T-1 or t < 0: continue
ax.plot(sol.m[t,:],-sol.inv_v[t,:],label=f'$n = {par.T-t}$')
# limits
ax.set_xlim([np.min(par.a_min), 5])
ax.set_ylim([0, 1])
# layout
bbox = {'boxstyle':'square','ec':'white','fc':'white'}
ax.text(1.5,0.4,f'$\\beta = {par.beta:.2f}$, $R = {par.R:.2f}$, $G = {par.G:.2f}$',bbox=bbox)
ax.set_xlabel('$m_t$')
ax.set_ylabel('$-1.0/v_t(m_t)$')
ax.legend(loc='upper right',frameon=True)
fig.savefig(f'figs/val_converge_{self.name}.pdf')
def plot_consumption_function_convergence(self):
par = self.par
sol = self.sol
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
for t in [par.T-1, par.T-2, par.T-6, par.T-11, 100, 50, 0]:
if t > par.T-1 or t < 0: continue
ax.plot(sol.m[t,:],sol.c[t,:],label=f'$n = {par.T-t}$')
# limits
ax.set_xlim([np.min(par.a_min), 5])
ax.set_ylim([0, 5])
# layout
bbox = {'boxstyle':'square','ec':'white','fc':'white'}
ax.text(1.5,0.5,f'$\\beta = {par.beta:.2f}$, $R = {par.R:.2f}$, $G = {par.G:.2f}$',bbox=bbox)
ax.set_xlabel('$m_t$')
ax.set_ylabel('$c(m_t)$')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/cons_converge_{self.name}.pdf')
def plot_consumption_function_convergence_age(self):
par = self.par
sol = self.sol
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
# consumption function for various ages
for age in [25, 35, 45, 55, 65, 75, par.T+par.age_min-2, par.T+par.age_min-1]:
ax.plot(sol.m[age-par.age_min],sol.c[age-par.age_min],label=f'age = {age}')
# limits
ax.set_xlim([min(par.a_min), 5])
ax.set_ylim([0, 5])
# layout
bbox = {'boxstyle':'square','ec':'white','fc':'white'}
ax.text(1.5,0.5,f'$\\beta = {par.beta:.2f}$, $R = {par.R:.2f}$, $G = {par.G:.2f}$',bbox=bbox)
ax.set_xlabel('$m_t$')
ax.set_ylabel('$c(m_t)$')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/cons_converge_age_{self.name}.pdf')
def plot_consumption_function_pf(self):
par = self.par
sol = self.sol
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
# perfect foresight consumption
c_pf = (1-par.RI)*(sol.m[0,:]+(1-par.FHW)**(-1)-1)
# consumption function deviation from perfect foresight
ax.plot(sol.m[0,:],sol.c[0,:]-c_pf,'-',lw=1.5)
# limits
ax.set_xlim([1, 500])
ylim_now = ax.get_ylim()
if np.max(np.abs(ylim_now)) < 1e-4:
ax.set_ylim([-1,1])
# layout
ax.set_xlabel('$m_t$')
ax.set_ylabel('$c(m_t) - c^{PF}(m_t)$')
fig.savefig(f'figs/cons_converge_pf_{self.name}.pdf')
def plot_buffer_stock_target(self):
par = self.par
sol = self.sol
# a. find a and avg. m_plus and c_plus
# allocate
a = np.nan*np.ones(par.Na+1)
m_plus = np.nan*np.ones(par.Na+1)
C_plus = np.nan*np.ones(par.Na+1)
delta_log_C_plus = np.nan*np.ones(par.Na+1)
delta_log_C_plus_approx_2 = np.nan*np.ones(par.Na+1)
fac = 1.0/(par.G*par.psi_vec)
for i_a in range(par.Na+1):
# a. a and m
a[i_a] = sol.m[0,i_a]-sol.c[0,i_a]
m_plus[i_a] = np.sum(par.w*(fac*par.R*a[i_a] + par.xi_vec))
# b. C_plus
m_plus_vec = fac*par.R*a[i_a] + par.xi_vec
c_plus_vec = np.zeros(m_plus_vec.size)
linear_interp.interp_1d_vec(sol.m[0,:],sol.c[0,:],m_plus_vec,c_plus_vec)
C_plus_vec = par.G*par.psi_vec*c_plus_vec
C_plus[i_a] = np.sum(par.w*C_plus_vec)
# c. approx
if self.model == 'bs' and sol.c[0,i_a] > 0:
delta_log_C_plus[i_a] = np.sum(par.w*(np.log(par.G*C_plus_vec)))-np.log(sol.c[0,i_a])
var_C_plus = np.sum(par.w*(np.log(par.G*C_plus_vec) - np.log(sol.c[0,i_a]) - delta_log_C_plus[i_a])**2)
delta_log_C_plus_approx_2[i_a] = par.rho**(-1)*(np.log(par.R*par.beta)) + 2/par.rho*var_C_plus + np.log(par.G)
# b. find target
i = np.argmin(np.abs(m_plus-sol.m[0,:]))
m_target = sol.m[0,i]
# c. figure 1 - buffer-stock target
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
# limits
ax.set_xlim([np.min(par.a_min), 5])
ax.set_ylim([0, 5])
# layout
bbox = {'boxstyle':'square','ec':'white','fc':'white'}
ax.text(2.1,0.25,f'$\\beta = {par.beta:.2f}$, $R = {par.R:.2f}$, $G = {par.G:.2f}$',bbox=bbox)
ax.set_xlabel('$m_t$')
ax.set_ylabel('')
# i. consumption
ax.plot(sol.m[0,:],sol.c[0,:],'-',lw=1.5,label='$c(m_t)$')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/buffer_stock_target_{self.name}_c.pdf')
# ii. perfect foresight solution
if par.FHW < 1 and par.RI < 1:
c_pf = (1-par.RI)*(sol.m[0,:]+(1-par.FHW)**(-1)-1)
ax.plot(sol.m[0,:],c_pf,':',lw=1.5,color='black',label='$c^{PF}(m_t)$')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/buffer_stock_target_{self.name}_pf.pdf')
# iii. a
ax.plot(sol.m[0,:],a,'-',lw=1.5,label=r'$a_t=m_t-c^{\star}(m_t)$')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/buffer_stock_target_{self.name}_a.pdf')
# iv. m_plus
ax.plot(sol.m[0,:],m_plus,'-',lw=1.5,label='$E[m_{t+1} | a_t]$')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/buffer_stock_target_{self.name}_m_plus.pdf')
# v. 45
ax.plot([0,5],[0,5],'-',lw=1.5,color='black',label='45 degree')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/buffer_stock_target_{self.name}_45.pdf')
# vi. target
if self.model == 'bs' and par.GI < 1:
ax.plot([m_target,m_target],[0,5],'--',lw=1.5,color='black',label=f'target = {m_target:.2f}')
ax.legend(loc='upper left',frameon=True)
fig.savefig(f'figs/buffer_stock_target_{self.name}.pdf')
# STOP
if self.model == 'pf':
return
# d. figure 2 - C ratio
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
I = sol.c[0,:] > 0
ax.plot(sol.m[0,I],(C_plus[I]/sol.c[0,I]),'-',lw=1.5,label='$E[C_{t+1}/C_t]$')
ax.plot([m_target,m_target],[0,10],'--',lw=1.5,color='black',label='target')
ax.plot([np.min(par.a_min),500],[par.G,par.G],':',lw=1.5,color='black',label='$G$')
ax.plot([np.min(par.a_min),500],[(par.R*par.beta)**(1/par.rho),(par.R*par.beta)**(1/par.rho)],
'-',lw=1.5,color='black',label=r'$(\beta R)^{1/\rho}$')
# limit
ax.set_xlim([np.min(par.a_min),10])
ax.set_ylim([0.95,1.1])
# layout
ax.set_xlabel('$m_t$')
ax.set_ylabel('$C_{t+1}/C_t$')
ax.legend(loc='upper right',frameon=True)
fig.savefig(f'figs/cons_growth_{self.name}.pdf')
# e. figure 3 - euler approx
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(sol.m[0,:],delta_log_C_plus,'-',lw=1.5,label=r'$E[\Delta \log C_{t+1}]$')
ax.plot(sol.m[0,:],par.rho**(-1)*np.log(par.R*par.beta)*np.ones(par.Na+1)+np.log(par.G),'-',lw=1.5,label='1st order approx.')
ax.plot(sol.m[0,:],delta_log_C_plus_approx_2,'-',lw=1.5,label='2nd order approx.')
ax.plot([m_target,m_target],[-10 ,10],'--',lw=1.5,color='black',label='target')
# limit
ax.set_xlim([np.min(par.a_min),10])
ax.set_ylim([-0.03,0.12])
# layout
ax.set_xlabel('$m_t$')
ax.set_ylabel(r'$E[\Delta \log C_{t+1}]$')
ax.legend(loc='upper right',frameon=True)
fig.savefig(f'figs/euler_approx_{self.name}.pdf')
####################
# simulation plots #
####################
def plot_simulate_cdf_cash_on_hand(self):
par = self.par
sim = self.sim
# figure
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
for t in [0,1,2,4,9,29,49,par.simT-1]:
ecdf = ECDF(sim.m[:,t])
ax.plot(ecdf.x,ecdf.y,lw=1.5,label=f'$t = {t}$')
# limits
ax.set_xlim([np.min(par.a_min),4])
# layout
ax.set_xlabel('$m_t$')
ax.set_ylabel('CDF')
ax.legend(loc='upper right',frameon=True)
fig.savefig(f'figs/sim_cdf_cash_on_hand_{self.name}.pdf')
def plot_simulate_consumption_growth(self):
par = self.par
sim = self.sim
# 1. consumption growth
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
y = np.mean(np.log(sim.C[:,1:])-np.log(sim.C[:,:-1]),axis=0)
ax.plot(np.arange(par.simT-1),y,'-',lw=1.5,label=r'$E[\Delta\log(C_t)]$')
y = np.log(np.mean(sim.C[:,1:],axis=0))-np.log(np.mean(sim.C[:,:-1],axis=0))
ax.plot(np.arange(par.simT-1),y,'-',lw=1.5,
label=r'$\Delta\log(E[C_t])$')
ax.axhline(np.log(par.G),ls='-',lw=1.5,color='black',label='$\\log(G)$')
ax.axhline(np.log(par.G)-0.5*par.sigma_psi**2,ls='--',lw=1.5,color='black',label=r'$\log(G)-0.5\sigma_{\psi}^2$')
# layout
ax.set_xlabel('time')
ax.set_ylabel('')
ax.legend(loc='lower right',frameon=True)
fig.savefig(f'figs/sim_cons_growth_{self.name}.pdf')
# b. cash-on-hand
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(np.arange(par.simT),np.mean(sim.m,axis=0),'-',lw=1.5,label='mean')
ax.plot(np.arange(par.simT),np.percentile(sim.m,25,axis=0),'--',lw=1.5,color='black',label='25th percentile')
ax.plot(np.arange(par.simT),np.percentile(sim.m,75,axis=0),'--',lw=1.5,color='black',label='75th percentile')
# layout
ax.set_xlabel('time')
ax.set_ylabel('$m_t$')
ax.legend(loc='upper right',frameon=True)
fig.savefig(f'figs/sim_cash_on_hand_{self.name}.pdf')
####################
# life-cycle plots #
####################
def plot_life_cycle_income(self):
par = self.par
sim = self.sim
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(par.age_min+np.arange(1,par.simT),np.nanmean(sim.Y[:,1:],axis=0),'-',lw=1.5)
# layout
ax.set_ylabel('income, $Y_t$')
ax.set_xlabel('age')
fig.savefig(f'figs/sim_Y_{self.name}.pdf')
def plot_life_cycle_cashonhand(self):
par = self.par
sim = self.sim
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(par.age_min+np.arange(par.simT),np.mean(sim.M,axis=0),'-',lw=1.5)
# layout
ax.set_ylabel('cash-on-hand, $M_t$')
ax.set_xlabel('age')
fig.savefig(f'figs/sim_M_{self.name}.pdf')
def plot_life_cycle_consumption(self):
par = self.par
sim = self.sim
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(par.age_min+np.arange(par.simT),np.mean(sim.C,axis=0),'-',lw=1.5)
# layout
ax.set_ylabel('consumption, $C_t$')
ax.set_xlabel('age')
fig.savefig(f'figs/sim_C_{self.name}.pdf')
def plot_life_cycle_assets(self):
par = self.par
sim = self.sim
fig = plt.figure(figsize=(6,4),dpi=100)
ax = fig.add_subplot(1,1,1)
ax.plot(par.age_min+np.arange(par.simT),np.mean(sim.A,axis=0),'-',lw=1.5)
# layout
ax.set_ylabel('assets, $A_t$')
ax.set_xlabel('age')
fig.savefig(f'figs/sim_A_{self.name}.pdf')
####################
# 2. numba version #
####################
## same results with faster code
class ConsumptionSavingModelClassNumba(ModelClass,ConsumptionSavingModelClass):
def __init__(self,name='baseline',solmethod='EGM',**kwargs):
# a. set baseline parameters
self.name = name
self.solmethod = solmethod
# b. define subclasses
parlist = [
# setup
('T',int64),
('TR',int64),
('age_min',int64),
('rho',double),
('beta',double),
('G',double),
('sigma_xi',double),
('sigma_psi',double),
('low_p',double),
('low_val',double),
('L',double[:]),
('R',double),
('borrowingfac',double),
('a_max',double),
('a_phi',double),
('m_max',double),
('m_phi',double),
('Npsi',int64),
('Nxi',int64),
('Na',int64),
('Nm',int64),
('sim_mini',double),
('simN',int64),
('simT',int64),
('simlifecycle',boolean),
# create grids
('psi_vec',double[:]),
('psi_w_vec',double[:]),
('xi_vec',double[:]),
('xi_w_vec',double[:]),
('w',double[:]),
('Nshocks',int64),
('a_min',double[:]),
('grid_a',double[:,:]),
('grid_m',double[:,:]),
('FHW',double),
('AI',double),
('GI',double),
('RI',double),
('WRI',double),
('FVA',double),
('grid_a_tile',double[:,:]),
('psi_vec_rep',double[:]),
('xi_vec_rep',double[:]),
('w_rep',double[:]),
]
sollist = [
('m',double[:,:]),
('c',double[:,:]),
('inv_v',double[:,:]),
]
simlist = [
('m',double[:,:]),
('c',double[:,:]),
('a',double[:,:]),
('p',double[:,:]),
('y',double[:,:]),
('psi',double[:,:]),
('xi',double[:,:]),
('P',double[:,:]),
('Y',double[:,:]),
('M',double[:,:]),
('C',double[:,:]),
('A',double[:,:]),
]
# c. create subclasses
self.par,self.sol,self.sim = self.create_subclasses(parlist,sollist,simlist)
self.setup()
# d. update parameters
for key,val in kwargs.items():
setattr(self.par,key,val) # like par.key = val
def EGM(self,t,m,c,inv_v):
""" overwrite method with numba version """
EGM(self.par,self.sol,t,m,c,inv_v)
def simulate_timeloop(self):
""" overwrite method with numba version """
simulate_timeloop(self.par,self.sol,self.sim)
# jitted utility function
@njit
def utility(par,c):
return c**(1-par.rho)/(1-par.rho)
@njit
def marg_utility(par,c):
return c**(-par.rho)
@njit
def inv_marg_utility(par,u):
return u**(-1/par.rho)
# jitted EGM
@njit(parallel=True)
def EGM(par,sol,t,m,c,inv_v):
""" EGM with fully unrolled loops """
# loop over end-of-period assets
for i_a in prange(par.Na):
a = par.grid_a[t,i_a]
still_working_next_period = t+1 <= par.TR-1
Nshocks = par.Nshocks if still_working_next_period else 1
# loop over shocks
avg_marg_u_plus = 0
avg_v_plus = 0
for i_shock in range(Nshocks):
# a. prep
if still_working_next_period:
fac = par.G*par.L[t]*par.psi_vec[i_shock]
w = par.w[i_shock]
xi = par.xi_vec[i_shock]
else:
fac = par.G*par.L[t]
w = 1
xi = 1
inv_fac = 1.0/fac
# b. future m and c
m_plus = inv_fac*par.R*a + xi
c_plus = linear_interp.interp_1d(sol.m[t+1,:],sol.c[t+1,:],m_plus)
inv_v_plus = linear_interp.interp_1d(sol.m[t+1,:],sol.inv_v[t+1,:],m_plus)
v_plus = 1.0/inv_v_plus
# c. average future marginal utility
marg_u_plus = marg_utility(par,fac*c_plus)
avg_marg_u_plus += w*marg_u_plus
avg_v_plus += w*(fac**(1-par.rho))*v_plus
# d. current c
c[i_a] = inv_marg_utility(par,par.beta*par.R*avg_marg_u_plus)
# e. current m
m[i_a] = a + c[i_a]
# f. current v
if c[i_a] > 0:
inv_v[i_a] = 1.0/(utility(par,c[i_a]) + par.beta*avg_v_plus)
else:
inv_v[i_a] = 0
# jitted simulate_timeloop
@njit(parallel=True)
def simulate_timeloop(par,sol,sim):
""" simulate model with parallelization over households """
# unpack (helps numba)
m = sim.m
p = sim.p
y = sim.y
c = sim.c
a = sim.a
# loop over first households and then time
for i in prange(par.simN):
for t in range(par.simT):
# a. solution
if par.simlifecycle == 0:
grid_m = sol.m[0,:]
grid_c = sol.c[0,:]
else:
grid_m = sol.m[t,:]
grid_c = sol.c[t,:]
# b. consumption
c[i,t] = linear_interp.interp_1d(grid_m,grid_c,m[i,t])
a[i,t] = m[i,t] - c[i,t]
# c. next-period
if t < par.simT-1:
if t+1 > par.TR-1:
m[i,t+1] = par.R*a[i,t] / (par.G*par.L[t]) + 1
p[i,t+1] = np.log(par.G) + np.log(par.L[t]) + p[i,t]
y[i,t+1] = p[i,t+1]
else:
m[i,t+1] = par.R*a[i,t] / (par.G*par.L[t]*sim.psi[i,t+1]) + sim.xi[i,t+1]
p[i,t+1] = np.log(par.G) + np.log(par.L[t]) + p[i,t] + np.log(sim.psi[i,t+1])
if sim.xi[i,t+1] > 0:
y[i,t+1] = p[i,t+1] + np.log(sim.xi[i,t+1]) |
the-stack_0_7955 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Gaussian Process
================
In this example we show how to use NUTS to sample from the posterior
over the hyperparameters of a gaussian process.
"""
import argparse
import os
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import jax
from jax import vmap
import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
matplotlib.use('Agg') # noqa: E402
# squared exponential kernel with diagonal noise term
def kernel(X, Z, var, length, noise, jitter=1.0e-6, include_noise=True):
deltaXsq = jnp.power((X[:, None] - Z) / length, 2.0)
k = var * jnp.exp(-0.5 * deltaXsq)
if include_noise:
k += (noise + jitter) * jnp.eye(X.shape[0])
return k
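# (for reference: the kernel above is the squared exponential
# k(x, z) = var * exp(-0.5 * (x - z)**2 / length**2),
# with (noise + jitter) added on the diagonal when include_noise=True)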
def model(X, Y):
# set uninformative log-normal priors on our three kernel hyperparameters
var = numpyro.sample("kernel_var", dist.LogNormal(0.0, 10.0))
noise = numpyro.sample("kernel_noise", dist.LogNormal(0.0, 10.0))
length = numpyro.sample("kernel_length", dist.LogNormal(0.0, 10.0))
# compute kernel
k = kernel(X, X, var, length, noise)
# sample Y according to the standard gaussian process formula
numpyro.sample("Y", dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=k),
obs=Y)
# helper function for doing hmc inference
def run_inference(model, args, rng_key, X, Y):
start = time.time()
kernel = NUTS(model)
mcmc = MCMC(kernel, args.num_warmup, args.num_samples, num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True)
mcmc.run(rng_key, X, Y)
mcmc.print_summary()
print('\nMCMC elapsed time:', time.time() - start)
return mcmc.get_samples()
# do GP prediction for a given set of hyperparameters. this makes use of the well-known
# formula for gaussian process predictions
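# (concretely, with K_train = k(X,X) + (noise + jitter)*I the code below computes
# mean = k(X*,X) @ K_train^{-1} @ Y and
# cov = k(X*,X*) + noise*I - k(X*,X) @ K_train^{-1} @ k(X,X*),
# then draws one predictive sample using only the diagonal of cov)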
def predict(rng_key, X, Y, X_test, var, length, noise):
# compute kernels between train and test data, etc.
k_pp = kernel(X_test, X_test, var, length, noise, include_noise=True)
k_pX = kernel(X_test, X, var, length, noise, include_noise=False)
k_XX = kernel(X, X, var, length, noise, include_noise=True)
K_xx_inv = jnp.linalg.inv(k_XX)
K = k_pp - jnp.matmul(k_pX, jnp.matmul(K_xx_inv, jnp.transpose(k_pX)))
sigma_noise = jnp.sqrt(jnp.clip(jnp.diag(K), a_min=0.)) * jax.random.normal(rng_key, X_test.shape[:1])
mean = jnp.matmul(k_pX, jnp.matmul(K_xx_inv, Y))
# we return both the mean function and a sample from the posterior predictive for the
# given set of hyperparameters
return mean, mean + sigma_noise
# create artificial regression dataset
def get_data(N=30, sigma_obs=0.15, N_test=400):
np.random.seed(0)
X = jnp.linspace(-1, 1, N)
Y = X + 0.2 * jnp.power(X, 3.0) + 0.5 * jnp.power(0.5 + X, 2.0) * jnp.sin(4.0 * X)
Y += sigma_obs * np.random.randn(N)
Y -= jnp.mean(Y)
Y /= jnp.std(Y)
assert X.shape == (N,)
assert Y.shape == (N,)
X_test = jnp.linspace(-1.3, 1.3, N_test)
return X, Y, X_test
def main(args):
X, Y, X_test = get_data(N=args.num_data)
# do inference
rng_key, rng_key_predict = random.split(random.PRNGKey(0))
samples = run_inference(model, args, rng_key, X, Y)
# do prediction
vmap_args = (random.split(rng_key_predict, args.num_samples * args.num_chains), samples['kernel_var'],
samples['kernel_length'], samples['kernel_noise'])
means, predictions = vmap(lambda rng_key, var, length, noise:
predict(rng_key, X, Y, X_test, var, length, noise))(*vmap_args)
mean_prediction = np.mean(means, axis=0)
percentiles = np.percentile(predictions, [5.0, 95.0], axis=0)
# make plots
fig, ax = plt.subplots(1, 1)
# plot training data
ax.plot(X, Y, 'kx')
# plot 90% confidence level of predictions
ax.fill_between(X_test, percentiles[0, :], percentiles[1, :], color='lightblue')
# plot mean prediction
ax.plot(X_test, mean_prediction, 'blue', ls='solid', lw=2.0)
ax.set(xlabel="X", ylabel="Y", title="Mean predictions with 90% CI")
plt.tight_layout()
plt.savefig("gp_plot.pdf")
if __name__ == "__main__":
assert numpyro.__version__.startswith('0.4.0')
parser = argparse.ArgumentParser(description="Gaussian Process example")
parser.add_argument("-n", "--num-samples", nargs="?", default=1000, type=int)
parser.add_argument("--num-warmup", nargs='?', default=1000, type=int)
parser.add_argument("--num-chains", nargs='?', default=1, type=int)
parser.add_argument("--num-data", nargs='?', default=25, type=int)
parser.add_argument("--device", default='cpu', type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
|
the-stack_0_7956 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends XCBC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more XCBC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import XCBCTestFramework
from test_framework.address import AddressType
from test_framework.util import (
assert_equal,
set_node_times,
)
import collections
from decimal import Decimal
import enum
import itertools
import random
Call = enum.Enum("Call", "single multiaddress multiscript")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data address_type rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def do_import(self, timestamp):
"""Call one key import RPC."""
rescan = self.rescan == Rescan.yes
assert_equal(self.address["solvable"], True)
assert_equal(self.address["isscript"], self.address_type == AddressType.p2sh_segwit)
assert_equal(self.address["iswitness"], self.address_type == AddressType.bech32)
if self.address["isscript"]:
assert_equal(self.address["embedded"]["isscript"], False)
assert_equal(self.address["embedded"]["iswitness"], True)
if self.call == Call.single:
if self.data == Data.address:
response = self.node.importaddress(address=self.address["address"], label=self.label, rescan=rescan)
elif self.data == Data.pub:
response = self.node.importpubkey(pubkey=self.address["pubkey"], label=self.label, rescan=rescan)
elif self.data == Data.priv:
response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=rescan)
assert_equal(response, None)
elif self.call in (Call.multiaddress, Call.multiscript):
request = {
"scriptPubKey": {
"address": self.address["address"]
} if self.call == Call.multiaddress else self.address["scriptPubKey"],
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}
if self.address_type == AddressType.p2sh_segwit and self.data != Data.address:
# We need solving data when providing a pubkey or privkey as data
request.update({"redeemscript": self.address['embedded']['scriptPubKey']})
response = self.node.importmulti(
requests=[request],
options={"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)},
)
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmation_height=None):
"""Verify that listtransactions/listreceivedbyaddress return expected values."""
txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)
current_height = self.node.getblockcount()
assert_equal(len(txs), self.expected_txs)
addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["label"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], 1 + current_height - confirmation_height)
assert_equal("trusted" not in tx, True)
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], 1 + current_height - confirmation_height)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, AddressType, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
AMOUNT_DUST = 0.00000546
def get_rand_amount():
r = random.uniform(AMOUNT_DUST, 1)
return Decimal(str(round(r, 8)))
class ImportRescanTest(XCBCTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
self.supports_cli = False
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
self.extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
# Import keys with pruning disabled
self.start_nodes(extra_args=[[]] * self.num_nodes)
self.import_deterministic_coinbase_privkeys()
self.stop_nodes()
self.start_nodes()
for i in range(1, self.num_nodes):
self.connect_nodes(i, 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(
label=variant.label,
address_type=variant.address_type.value,
))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = get_rand_amount()
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
self.sync_all() # Conclude sync before calling setmocktime to avoid timeouts
# Generate a block further in the future (past the rescan window).
assert_equal(self.nodes[0].getrawmempool(), [])
set_node_times(
self.nodes,
self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + TIMESTAMP_WINDOW + 1,
)
self.nodes[0].generate(1)
self.sync_all()
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
self.log.info('Run import for variant {}'.format(variant))
expect_rescan = variant.rescan == Rescan.yes
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(variant.timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, variant.confirmation_height)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = get_rand_amount()
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
assert_equal(self.nodes[0].getrawmempool(), [])
self.sync_all()
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
self.log.info('Run check for variant {}'.format(variant))
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, variant.confirmation_height)
if __name__ == "__main__":
ImportRescanTest().main()
|
the-stack_0_7957 | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pdb
from tqdm import tqdm
import argparse
import pandas as pd
import sys
BASE_DIR=os.path.dirname(os.getcwd())
sys.path.append(BASE_DIR)
sys.path.append('/home/tam63/geometric-js')
import torch
import scipy.stats
from scipy.stats import norm
from scipy.special import logsumexp
from vae.utils.modelIO import save_model, load_model, load_metadata
from notebooks.utils import PlotParams
# from utils.helpers import (create_safe_directory, get_device, set_seed,
# get_n_param)
TRAIN_MODELS_DIR = "/home/tam63/results/alpha-experiments"
DATA_DIR = "/home/tam63/geometric-js/data"
SAVE_DIR = "/home/tam63/figures/alpha-experiments"
def parse_arguments(args_to_parse):
"""Parse the command line arguments.
Parameters
----------
args_to_parse: list of str
Arguments to parse (split on whitespace).
"""
description = "PyTorch implementation and evaluation of Variational " + \
"AutoEncoders and metrics."
parser = argparse.ArgumentParser(description=description)
# General options
general = parser.add_argument_group('General options')
general.add_argument('--dataset', type=str, choices=['mnist', 'fashion', 'dsprites'],
help="Name of the dataset being plotted.")
general.add_argument('--divergence', type=str, choices=['dGJS', 'GJS', 'both'],
help="Type of geometric-JS divergence to be plotted on comparison plot.")
general.add_argument('--model-loc', type=str,
help="Location of the trained models to be used to generate plots.")
args = parser.parse_args(args_to_parse)
print(args)
return args
def bootstrap(x, low, high, n_samples):
mu = x.mean()
n = len(x)
X = np.random.choice(x, size=n_samples*n).reshape(n_samples, n)
mu_star = X.mean(axis=1)
d_star = np.sort(mu_star - mu)
return mu, mu + d_star[int(low*n_samples)], mu + d_star[int(high*n_samples)]
def compute_samples(model, data, num_samples, debug=False):
"""
Description
---------------------------------------------------------------
Sample from importance distribution z_samples ~ q(z|X) and
compute p(z_samples), q(z_samples) for importance sampling
Inputs
---------------------------------------------------------------
model : pytorch nn.Module
VAE model implemented in pytorch which has been
trained on the training data corresponding to the
passed test data, which is contained in the variable
'data'.
data : pytorch Tensor
Tensor of shape [batch_size, 1, im_size, im_size],
where im_size is the dimension size of the images used
to train the model, and batch size is the number of
data instances passed, which is therefore also the
number of estimates of the probability distribution
which will be produced.
num_samples : int
For each passed data instance, the probability
distribution p(x|z) will be estimated using a monte
carlo integration with num_samples samples.
returns
---------------------------------------------------------------
z_samples, pz, qz : numpy array
Returns arrays containing the representation of each
passed input image in latent space in z_samples, and the
probability densities qz and pz, i.e. the densities of those
samples under the encoder's variational posterior q(z|x) (qz)
and under the standard normal prior over the latent space (pz).
"""
data = data.cuda()
z_mean, z_log_sigma = model.encoder(data)
z_mean = z_mean.cpu().detach().numpy()
z_log_sigma = z_log_sigma.cpu().detach().numpy()
z_samples = []
qz = []
for m, s in zip(z_mean, z_log_sigma):
# len(m) = len(s) = 10 = size of the latent space dimension
#
# z_vals is num_samples (= 128) samples drawn from the normal
# distribution defined by the mean and std (m[i], s[i])
#
# qz_vals is the normal distribution defined by the samples
# in the vector z_vals
z_vals = [np.random.normal(m[i], np.exp(s[i]), num_samples) for i in range(len(m))]
qz_vals = [norm.pdf(z_vals[i], loc=m[i], scale=np.exp(s[i])) for i in range(len(m))]
z_samples.append(z_vals)
qz.append(qz_vals)
z_samples = np.array(z_samples)
pz = norm.pdf(z_samples)
qz = np.array(qz)
# pdb.set_trace()
# Check why the axes are being swapped
z_samples = np.swapaxes(z_samples, 1, 2)
pz = np.swapaxes(pz, 1, 2)
qz = np.swapaxes(qz, 1, 2)
return z_samples, pz, qz
def estimate_logpx_batch(model, data, num_samples, debug=False, digit_size=32):
"""
"""
z_samples, pz, qz = compute_samples(model, data, num_samples)
assert len(z_samples) == len(data)
assert len(z_samples) == len(pz)
assert len(z_samples) == len(qz)
z_samples = torch.tensor(z_samples).float().cuda()
result = []
for i in range(len(data)):
x_predict = model.decoder(z_samples[i]).reshape(-1, digit_size ** 2)
x_predict = x_predict.cpu().detach().numpy()
x_predict = np.clip(x_predict, np.finfo(float).eps, 1. - np.finfo(float).eps)
p_vals = pz[i]
q_vals = qz[i]
# pdb.set_trace()
datum = data[i].cpu().reshape(digit_size ** 2).numpy() #.reshape(digit_size ** 2)
# \log p(x|z) = Binary cross entropy
logp_xz = np.sum(datum * np.log(x_predict + 1e-9) + (1. - datum) * np.log(1.0 - x_predict + 1e-9), axis=-1)
logpz = np.sum(np.log(p_vals + 1e-9), axis=-1)
logqz = np.sum(np.log(q_vals + 1e-9), axis=-1)
argsum = logp_xz + logpz - logqz
logpx = -np.log(num_samples + 1e-9) + logsumexp(argsum)
result.append(logpx)
return np.array(result)
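# (note: estimate_logpx_batch above is a plain importance-sampling estimate of the
# marginal likelihood: log p(x) ~= -log(S) + logsumexp_s[log p(x|z_s) + log p(z_s) - log q(z_s|x)]
# with S = num_samples and z_s ~ q(z|x), i.e. a Monte Carlo average of
# p(x|z) p(z) / q(z|x) evaluated in log space for numerical stability)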
def estimate_logpx(model, data, num_samples, verbosity=0, digit_size=32):
batches = []
iterations = int(np.ceil(1. * len(data) / 100))
for b in tqdm(range(iterations)):
batch_data = data[b * 100:(b + 1) * 100]
batches.append(estimate_logpx_batch(model, batch_data, num_samples, digit_size=digit_size))
if verbosity and b % max(11 - verbosity, 1) == 0:
print("Batch %d [%d, %d): %.2f" % (b, b * 100, (b+1) * 100, np.mean(np.concatenate(batches))))
log_probs = np.concatenate(batches)
mu, lb, ub = bootstrap(log_probs, 0.025, 0.975, 1000)
return mu, lb, ub
def main(args):
device = 'cuda'
plotter = PlotParams()
plotter.set_params()
DATA_DIR = os.path.join(os.pardir, 'data')
FIG_DIR = os.path.join(os.pardir, 'figs')
RES_DIR = os.path.join(os.pardir, 'results')
# 1) select dataset to load:
if args.dataset == 'dsprites':
X_test = np.load(os.path.join(DATA_DIR, 'dsprites', 'dsprite_train.npz'))['imgs']
X_test = torch.tensor(X_test).unsqueeze(1).float() / 255.0
digit_size = 64
X_test = X_test[:10000]
X_test = X_test.to(device)
elif args.dataset == 'fashion':
X_test = torch.load(os.path.join(DATA_DIR, 'fashionMnist', 'FashionMNIST', 'processed', 'test.pt'))
digit_size = 32
X_test = X_test[0].unsqueeze(1).float() / 255.0
X_test = torch.nn.functional.pad(X_test, pad=(2, 2, 2, 2))
X_test = X_test[:10000]
X_test = X_test.to(device)
elif args.dataset == 'mnist':
X_test = torch.load(os.path.join(DATA_DIR, 'mnist', 'MNIST', 'processed', 'test.pt'))
digit_size = 32
X_test = X_test[0].unsqueeze(1).float() / 255.0
X_test = torch.nn.functional.pad(X_test, pad=(2, 2, 2, 2))
X_test = X_test[:10000]
X_test = X_test.to(device)
# 2) Get the trained alpha dGJS probabilities:
av_a = []
log_probs_lb = []
log_probs_ub = []
log_probs_mu = []
log_probs_best = -np.inf
if args.divergence in ['GJS', 'dGJS']:
divergence = args.divergence
for initial_a in [i/10 for i in range(11)]:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/{divergence}-A_0={initial_a}"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu += [logpx_mu]
log_probs_lb += [logpx_lb]
log_probs_ub += [logpx_ub]
if logpx_mu > log_probs_best:
model_best = model_path
log_probs_best = logpx_mu
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# 3) Get the comparison divergences probabilities:
av_a_i = []
log_probs_lb_i = []
log_probs_ub_i = []
log_probs_mu_i = []
log_probs_best_i = -np.inf
model_names = []
# KL:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/KL"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu_i += [logpx_mu]
log_probs_lb_i += [logpx_lb]
log_probs_ub_i += [logpx_ub]
model_names.append("KL")
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# fwdKL:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/fwdKL"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu_i += [logpx_mu]
log_probs_lb_i += [logpx_lb]
log_probs_ub_i += [logpx_ub]
model_names.append("fwdKL")
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# MMD:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/MMD"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu_i += [logpx_mu]
log_probs_lb_i += [logpx_lb]
log_probs_ub_i += [logpx_ub]
model_names.append("MMD")
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# no-constraint:
# model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/no-constraint"
# model = load_model(model_path)
# logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
# log_probs_mu_i += [logpx_mu]
# log_probs_lb_i += [logpx_lb]
# log_probs_ub_i += [logpx_ub]
# model_names.append("no-constraint")
# print(model_path)
# print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# 4) Plot:
fig = plt.figure(figsize=(10, 10))
yerr_bar = np.array(log_probs_ub) - np.array(log_probs_lb)
yerr_bar_i = np.array(log_probs_ub_i) - np.array(log_probs_lb_i)
initial_a = [i/10 for i in range(11)]
plt.errorbar(initial_a, log_probs_mu, yerr=yerr_bar, label=args.divergence)
for i in range(len(model_names)):
plt.errorbar(initial_a, [log_probs_mu_i[i]] * len(initial_a), yerr=[yerr_bar_i[i]] * len(initial_a), label=model_names[i])
plt.xlabel(r'Initial $\alpha$')
plt.ylabel(r'$\log(p_{\theta}(X))$')
plt.legend()
plt.title("Log model evidence vs initial alpha")
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance.pdf")
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance.png", dpi=200)
# save tight layout version:
fig = plt.figure(figsize=(10, 10))
yerr_bar = np.array(log_probs_ub) - np.array(log_probs_lb)
yerr_bar_i = np.array(log_probs_ub_i) - np.array(log_probs_lb_i)
initial_a = [i/10 for i in range(11)]
plt.errorbar(initial_a, log_probs_mu, yerr=yerr_bar, label=args.divergence)
for i in range(len(model_names)):
plt.errorbar(initial_a, [log_probs_mu_i[i]] * len(initial_a), yerr=[yerr_bar_i[i]] * len(initial_a), label=model_names[i])
plt.xlabel(r'Initial $\alpha$')
plt.ylabel(r'$\log(p_{\theta}(X))$')
plt.legend()
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance-tight-layout.pdf")
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance-tight-layout.png", dpi=200)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args) |
the-stack_0_7958 | import argparse
from utils.helpers import read_lines
from gector.gec_model import GecBERTModel
def predict_for_file(input_file, output_file, model, batch_size=32):
test_data = read_lines(input_file)
predictions = []
cnt_corrections = 0
batch = []
count = 0
for sent in test_data:
batch.append(sent.split())
if len(batch) == batch_size:
preds, cnt = model.handle_batch(batch, count, batch_size)
predictions.extend(preds)
cnt_corrections += cnt
batch = []
count += 1
if batch:
preds, cnt = model.handle_batch(batch, count, batch_size)
predictions.extend(preds)
cnt_corrections += cnt
with open(output_file, 'w') as f:
f.write("\n".join([" ".join(x) for x in predictions]) + '\n')
return cnt_corrections
def main(args):
# get all paths
model = GecBERTModel(vocab_path=args.vocab_path,
model_paths=args.model_path,
max_len=args.max_len, min_len=args.min_len,
iterations=args.iteration_count,
min_error_probability=args.min_error_probability,
lowercase_tokens=args.lowercase_tokens,
model_name=args.transformer_model,
special_tokens_fix=args.special_tokens_fix,
log=False,
confidence=args.additional_confidence,
is_ensemble=args.is_ensemble,
weigths=args.weights)
cnt_corrections = predict_for_file(args.input_file, args.output_file, model,
batch_size=args.batch_size)
# evaluate with m2 or ERRANT
print(f"Produced overall corrections: {cnt_corrections}")
if __name__ == '__main__':
# read parameters
parser = argparse.ArgumentParser()
parser.add_argument('--model_path',
help='Path to the model file.', nargs='+',
required=True)
parser.add_argument('--vocab_path',
help='Path to the model file.',
default='data/output_vocabulary' # to use pretrained models
)
parser.add_argument('--input_file',
help='Path to the evalset file',
required=True)
parser.add_argument('--output_file',
help='Path to the output file',
required=True)
parser.add_argument('--max_len',
type=int,
help='The max sentence length'
'(all longer will be truncated)',
default=50)
parser.add_argument('--min_len',
type=int,
help='The minimum sentence length'
'(all longer will be returned w/o changes)',
default=3)
parser.add_argument('--batch_size',
type=int,
help='The size of hidden unit cell.',
default=128)
parser.add_argument('--lowercase_tokens',
type=int,
help='Whether to lowercase tokens.',
default=0)
parser.add_argument('--transformer_model',
choices=['bert', 'gpt2', 'transformerxl', 'xlnet', 'distilbert', 'roberta', 'albert'],
help='Name of the transformer model.',
default='roberta')
parser.add_argument('--iteration_count',
type=int,
help='The number of iterations of the model.',
default=5)
parser.add_argument('--additional_confidence',
type=float,
help='How many probability to add to $KEEP token.',
default=0)
parser.add_argument('--min_error_probability',
type=float,
help='Minimum probability for each action to apply. '
'Also, minimum error probability, as described in the paper.',
default=0.0)
parser.add_argument('--special_tokens_fix',
type=int,
help='Whether to fix problem with [CLS], [SEP] tokens tokenization. '
'For reproducing reported results it should be 0 for BERT/XLNet and 1 for RoBERTa.',
default=1)
parser.add_argument('--is_ensemble',
type=int,
help='Whether to do ensembling.',
default=0)
parser.add_argument('--weights',
help='Used to calculate weighted average', nargs='+',
default=None)
args = parser.parse_args()
main(args)
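# Example invocation (a sketch; the script and file names below are placeholders,
# only the flags come from the argparse definitions above):
# python predict.py --model_path ./model/roberta_gector.th \
# --input_file ./data/eval_sentences.txt \
# --output_file ./data/corrected_sentences.txt \
# --transformer_model roberta --special_tokens_fix 1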
|
the-stack_0_7959 |
class BinarySearchTree(object):
class Node(object):
def __init__(self, key, value):
self.left = None
self.right = None
self.key = key
self.value = value
def __repr__(self):
return "Node(key={}, value={}, left={}, right={})".format(self.key, self.value, self.left, self.right)
def match_and_parent(self, key, parent=None):
if self.key == key:
return self, parent
elif self.key < key and self.right is not None:
return self.right.match_and_parent(key, self)
elif self.key > key and self.left is not None:
return self.left.match_and_parent(key, self)
else:
return None, self
def add_child(self, node):
if self.key < node.key:
assert self.right is None
self.right = node
elif self.key > node.key:
assert self.left is None
self.left = node
else:
raise ValueError('Adding child with equal key')
def remove_child(self, node):
if node is self.left:
self.left = None
elif node is self.right:
self.right = None
else:
raise ValueError("Not this node's child")
def __init__(self):
self.root = None
self.size = 0
def get(self, key):
if self.root is None:
raise IndexError('Key {} not Found'.format(key))
node, _ = self.root.match_and_parent(key)
if node is None:
raise IndexError('Key {} not Found'.format(key))
return node.value
def set(self, key, value):
if self.root is None:
self.root = self.Node(key, value)
self.size += 1
return
node, parent = self.root.match_and_parent(key)
if node is None:
node = self.Node(key, value)
parent.add_child(node)
self.size += 1
else:
node.value = value
def remove(self, key):
if self.root is None:
raise IndexError('Key {} not Found'.format(key))
node, parent = self.root.match_and_parent(key, self.root)
if node is None:
raise IndexError('Key {} not Found'.format(key))
elif node is parent:
self.root = None
self.size = 0
else:
parent.remove_child(node)
self.size -= 1
def __len__(self):
return self.size
def __eq__(self, other):
if self.root is None and other.root is None:
return True
elif self.root is None or other.root is None:
return False
elif len(self) != len(other):
return False
for i in range(len(self)):
a = self.get(i)
b = other.get(i)
if a != b:
return False
return True
class BSTString(object):
"""Implementation of a string using a binary search tree.
This is pretty inefficient for everything; it is intended as an
exercise in building a string on top of a bare-bones data
structure, rather than something trivial like a list.
"""
def __init__(self, s=""):
self._data = BinarySearchTree()
for i, c in enumerate(s):
self._data.set(i, c)
def __getitem__(self, position):
return self._data.get(position)
def __add__(self, other):
result = BSTString("")
n = len(self)
for i in range(n):
result._data.set(i, self[i])
for i in range(n, n + len(other)):
result._data.set(i, other[i - n])
return result
def __eq__(self, other):
return self._data == other._data
def __len__(self):
return len(self._data)
def __repr__(self):
return ''.join([self._data.get(i) for i in range(len(self))])
def split(self, n):
indices = [i for i in range(len(self))]
index_list = [indices[i:i+n] for i in range(0, len(self), n)]
result = []
for indices in index_list:
result.append(BSTString([self._data.get(i) for i in indices]))
return result
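# Example usage (a sketch, not part of the original module):
# s = BSTString("hello") + BSTString(" world")
# print(s) # hello world (repr joins the characters in key order)
# print(len(s)) # 11
# print(s.split(4)) # four-character chunks, each itself a BSTString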
|