from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from absl import flags
from absl.testing import absltest
flags.DEFINE_string('seen_in_crittenden', 'alleged mountain lion',
'This tests if unicode input to these functions works.')
class FlagsUnicodeLiteralsTest(absltest.TestCase):
def testUnicodeFlagNameAndValueAreGood(self):
alleged_mountain_lion = flags.FLAGS.seen_in_crittenden
self.assertTrue(
isinstance(alleged_mountain_lion, type(u'')),
msg='expected flag value to be a {} not {}'.format(
type(u''), type(alleged_mountain_lion)))
self.assertEqual(alleged_mountain_lion, u'alleged mountain lion')
if __name__ == '__main__':
absltest.main()
|
import asyncio
from datetime import timedelta
from pyecobee import ECOBEE_API_KEY, ECOBEE_REFRESH_TOKEN, Ecobee, ExpiredTokenError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import config_validation as cv
from homeassistant.util import Throttle
from .const import (
_LOGGER,
CONF_REFRESH_TOKEN,
DATA_ECOBEE_CONFIG,
DOMAIN,
ECOBEE_PLATFORMS,
)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=180)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Optional(CONF_API_KEY): cv.string})}, extra=vol.ALLOW_EXTRA
)
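# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above
# (the api_key value is a placeholder, not a real key):
#
#   ecobee:
#     api_key: YOUR_API_KEY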
async def async_setup(hass, config):
"""
Ecobee uses config flow for configuration.
However, an "ecobee:" entry in configuration.yaml will trigger an import flow
if a config entry doesn't already exist. If ecobee.conf exists, the import
flow will attempt to import it and create a config entry, to assist users
migrating from the old ecobee component. Otherwise, the user will have to
continue setting up the integration via the config flow.
"""
hass.data[DATA_ECOBEE_CONFIG] = config.get(DOMAIN, {})
if not hass.config_entries.async_entries(DOMAIN) and hass.data[DATA_ECOBEE_CONFIG]:
# No config entry exists and configuration.yaml config exists, trigger the import flow.
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up ecobee via a config entry."""
api_key = entry.data[CONF_API_KEY]
refresh_token = entry.data[CONF_REFRESH_TOKEN]
data = EcobeeData(hass, entry, api_key=api_key, refresh_token=refresh_token)
if not await data.refresh():
return False
await data.update()
if data.ecobee.thermostats is None:
_LOGGER.error("No ecobee devices found to set up")
return False
hass.data[DOMAIN] = data
for component in ECOBEE_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
class EcobeeData:
"""
Handle getting the latest data from ecobee.com so platforms can use it.
Also handle refreshing tokens and updating config entry with refreshed tokens.
"""
def __init__(self, hass, entry, api_key, refresh_token):
"""Initialize the Ecobee data object."""
self._hass = hass
self._entry = entry
self.ecobee = Ecobee(
config={ECOBEE_API_KEY: api_key, ECOBEE_REFRESH_TOKEN: refresh_token}
)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def update(self):
"""Get the latest data from ecobee.com."""
try:
await self._hass.async_add_executor_job(self.ecobee.update)
_LOGGER.debug("Updating ecobee")
except ExpiredTokenError:
_LOGGER.debug("Refreshing expired ecobee tokens")
await self.refresh()
async def refresh(self) -> bool:
"""Refresh ecobee tokens and update config entry."""
_LOGGER.debug("Refreshing ecobee tokens and updating config entry")
if await self._hass.async_add_executor_job(self.ecobee.refresh_tokens):
self._hass.config_entries.async_update_entry(
self._entry,
data={
CONF_API_KEY: self.ecobee.config[ECOBEE_API_KEY],
CONF_REFRESH_TOKEN: self.ecobee.config[ECOBEE_REFRESH_TOKEN],
},
)
return True
_LOGGER.error("Error refreshing ecobee tokens")
return False
async def async_unload_entry(hass, config_entry):
"""Unload the config entry and platforms."""
hass.data.pop(DOMAIN)
tasks = []
for platform in ECOBEE_PLATFORMS:
tasks.append(
hass.config_entries.async_forward_entry_unload(config_entry, platform)
)
return all(await asyncio.gather(*tasks))
|
import tensorflow as tf
import time
from datetime import datetime
import math
import argparse
import sys
from nets.mobilenet import mobilenet, mobilenet_arg_scope
import numpy as np
slim = tf.contrib.slim
def time_tensorflow_run(session, target, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target)
duration = time.time() - start_time
if i >= num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def time_tensorflow_run_placeholder(session, target, feed_dict, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target,feed_dict=feed_dict)
duration = time.time() - start_time
if i >= num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def run_benchmark():
if FLAGS.quantized:
graph_filename = FLAGS.quantized_graph
# Create a graph def object to read the graph
with tf.gfile.GFile(graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
with tf.device('/'+FLAGS.mode+':0'):
image_size = 224
if FLAGS.quantized:
inputs = np.random.random((FLAGS.batch_size, image_size, image_size, 3))
tf.import_graph_def(graph_def)
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=config)
# Look up the input node to feed and the output node to fetch
input_node = graph.get_tensor_by_name('import/MobileNet/input_images:0')
output_node = graph.get_tensor_by_name('import/MobileNet/Predictions/Softmax:0')
time_tensorflow_run_placeholder(sess, output_node, {input_node: inputs}, "Forward")
else:
image_size = 224
inputs = tf.Variable(tf.random_normal([FLAGS.batch_size,
image_size,
image_size, 3],
dtype=tf.float32,
stddev=1e-1))
with slim.arg_scope(mobilenet_arg_scope()):
logits, end_points = mobilenet(inputs, is_training=False)
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=config)
sess.run(init)
time_tensorflow_run(sess, logits, "Forward")
# Add a simple objective so we can calculate the backward pass.
objective = tf.nn.l2_loss(logits)
# Compute the gradient with respect to all the parameters.
grad = tf.gradients(objective, tf.trainable_variables())
# Run the backward benchmark.
time_tensorflow_run(sess, grad, "Forward-backward")
def main(_):
run_benchmark()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch_size',
type=int,
default=1,
help='Batch size.'
)
parser.add_argument(
'--num_batches',
type=int,
default=100,
help='Number of batches to run.'
)
parser.add_argument(
'--mode',
type=str,
default='cpu',
help='gpu/cpu mode.'
)
parser.add_argument(
'--quantized',
# store_true avoids the argparse pitfall where type=bool parses any
# non-empty string (including "False") as True
action='store_true',
help='Benchmark quantized graph.'
)
parser.add_argument(
'--quantized_graph',
type=str,
default='',
help='Path to quantized graph file.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
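# Illustrative invocations (script name and graph path are placeholders):
#   python mobilenet_benchmark.py --mode gpu --batch_size 8 --num_batches 100
#   python mobilenet_benchmark.py --quantized --quantized_graph /path/to/quantized_graph.pb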
|
import voluptuous as vol
from homeassistant.components.geo_location import DOMAIN
from homeassistant.const import CONF_EVENT, CONF_PLATFORM, CONF_SOURCE, CONF_ZONE
from homeassistant.core import HassJob, callback
from homeassistant.helpers import condition, config_validation as cv
from homeassistant.helpers.config_validation import entity_domain
from homeassistant.helpers.event import TrackStates, async_track_state_change_filtered
# mypy: allow-untyped-defs, no-check-untyped-defs
EVENT_ENTER = "enter"
EVENT_LEAVE = "leave"
DEFAULT_EVENT = EVENT_ENTER
TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "geo_location",
vol.Required(CONF_SOURCE): cv.string,
vol.Required(CONF_ZONE): entity_domain("zone"),
vol.Required(CONF_EVENT, default=DEFAULT_EVENT): vol.Any(
EVENT_ENTER, EVENT_LEAVE
),
}
)
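# Illustrative automation trigger accepted by TRIGGER_SCHEMA
# (the source and zone values are examples only):
#
#   trigger:
#     platform: geo_location
#     source: nsw_rural_fire_service_feed
#     zone: zone.bushfire_alert_zone
#     event: enter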
def source_match(state, source):
"""Check if the state matches the provided source."""
return state and state.attributes.get("source") == source
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
source = config.get(CONF_SOURCE).lower()
zone_entity_id = config.get(CONF_ZONE)
trigger_event = config.get(CONF_EVENT)
job = HassJob(action)
@callback
def state_change_listener(event):
"""Handle specific state changes."""
# Skip if the event's source does not match the trigger's source.
from_state = event.data.get("old_state")
to_state = event.data.get("new_state")
if not source_match(from_state, source) and not source_match(to_state, source):
return
zone_state = hass.states.get(zone_entity_id)
from_match = condition.zone(hass, zone_state, from_state)
to_match = condition.zone(hass, zone_state, to_state)
if (
trigger_event == EVENT_ENTER
and not from_match
and to_match
or trigger_event == EVENT_LEAVE
and from_match
and not to_match
):
hass.async_run_hass_job(
job,
{
"trigger": {
"platform": "geo_location",
"source": source,
"entity_id": event.data.get("entity_id"),
"from_state": from_state,
"to_state": to_state,
"zone": zone_state,
"event": trigger_event,
"description": f"geo_location - {source}",
}
},
event.context,
)
return async_track_state_change_filtered(
hass, TrackStates(False, set(), {DOMAIN}), state_change_listener
).async_remove
|
import argparse
import itertools
import pathlib
import textwrap
parser = argparse.ArgumentParser()
parser.add_argument("filepaths", nargs="+", type=pathlib.Path)
args = parser.parse_args()
filepaths = sorted(p for p in args.filepaths if p.is_file())
def extract_short_test_summary_info(lines):
up_to_start_of_section = itertools.dropwhile(
lambda l: "=== short test summary info ===" not in l,
lines,
)
up_to_section_content = itertools.islice(up_to_start_of_section, 1, None)
section_content = itertools.takewhile(
lambda l: l.startswith("FAILED"), up_to_section_content
)
content = "\n".join(section_content)
return content
def format_log_message(path):
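# Assumes log file names of the form "<prefix>-<python version>-<...>",
# e.g. "logs-3.9-ubuntu-latest.txt" (example name only).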
py_version = path.name.split("-")[1]
summary = f"Python {py_version} Test Summary Info"
with open(path) as f:
data = extract_short_test_summary_info(line.rstrip() for line in f)
message = (
textwrap.dedent(
"""\
<details><summary>{summary}</summary>
```
{data}
```
</details>
"""
)
.rstrip()
.format(summary=summary, data=data)
)
return message
print("Parsing logs ...")
message = "\n\n".join(format_log_message(path) for path in filepaths)
output_file = pathlib.Path("pytest-logs.txt")
print(f"Writing output file to: {output_file.absolute()}")
output_file.write_text(message)
|
from __future__ import print_function
"""
Warning: do not use this library. It is unstable and most of the routines
here have been superseded by other libraries (e.g. genmsg). These
routines will likely be *deleted* in future releases.
"""
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
import os
import sys
import roslib.manifest
import roslib.names
import roslib.packages
import roslib.resources
import rospkg
VERBOSE = False
# @return: True if msg-related scripts should print verbose output
def is_verbose():
return VERBOSE
# set whether msg-related scripts should print verbose output
def set_verbose(v):
global VERBOSE
VERBOSE = v
EXT = '.msg'
SEP = '/' # e.g. std_msgs/String
# character that designates a constant assignment rather than a field
CONSTCHAR = '='
COMMENTCHAR = '#'
class MsgSpecException(Exception):
pass
# TODOXXX: unit test
def base_msg_type(type_):
"""
Compute the base data type, e.g. for arrays, get the underlying array item type
@param type_: ROS msg type (e.g. 'std_msgs/String')
@type type_: str
@return: base type
@rtype: str
"""
if type_ is None:
return None
if '[' in type_:
return type_[:type_.find('[')]
return type_
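# Illustrative results (not exhaustive):
#   base_msg_type('std_msgs/String[10]') -> 'std_msgs/String'
#   base_msg_type('uint8[]')             -> 'uint8'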
def resolve_type(type_, package_context):
"""
Resolve type name based on current package context.
NOTE: in ROS Diamondback, 'Header' resolves to
'std_msgs/Header'. In previous releases, it resolves to
'roslib/Header' (REP 100).
e.g.::
resolve_type('String', 'std_msgs') -> 'std_msgs/String'
resolve_type('String[]', 'std_msgs') -> 'std_msgs/String[]'
resolve_type('std_msgs/String', 'foo') -> 'std_msgs/String'
resolve_type('uint16', 'std_msgs') -> 'uint16'
resolve_type('uint16[]', 'std_msgs') -> 'uint16[]'
"""
bt = base_msg_type(type_)
if bt in BUILTIN_TYPES:
return type_
elif bt == 'Header':
return 'std_msgs/Header'
elif SEP in type_:
return type_
else:
return '%s%s%s' % (package_context, SEP, type_)
# NOTE: this assumes that we aren't going to support multi-dimensional
def parse_type(type_):
"""
Parse ROS message field type
@param type_: ROS field type
@type type_: str
@return: base_type, is_array, array_length
@rtype: str, bool, int
@raise MsgSpecException: if type_ cannot be parsed
"""
if not type_:
raise MsgSpecException('Invalid empty type')
if '[' in type_:
var_length = type_.endswith('[]')
splits = type_.split('[')
if len(splits) > 2:
raise MsgSpecException('Currently only support 1-dimensional array types: %s' % type_)
if var_length:
return type_[:-2], True, None
else:
try:
length = int(splits[1][:-1])
return splits[0], True, length
except ValueError:
raise MsgSpecException('Invalid array dimension: [%s]' % splits[1][:-1])
else:
return type_, False, None
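# Illustrative results:
#   parse_type('uint8')     -> ('uint8', False, None)
#   parse_type('uint8[]')   -> ('uint8', True, None)
#   parse_type('uint8[16]') -> ('uint8', True, 16)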
################################################################################
# name validation
def is_valid_msg_type(x):
"""
@return: True if the name is a syntactically legal message type name
@rtype: bool
"""
if not x or len(x) != len(x.strip()):
return False
base = base_msg_type(x)
if not roslib.names.is_legal_resource_name(base):
return False
# parse array indices
x = x[len(base):]
state = 0
for c in x:
if state == 0:
if c != '[':
return False
state = 1 # open
elif state == 1:
if c == ']':
state = 0 # closed
else:
try:
int(c)
except Exception:
return False
return state == 0
def is_valid_constant_type(x):
"""
@return: True if the name is a legal constant type. Only simple types are allowed.
@rtype: bool
"""
return x in PRIMITIVE_TYPES
def is_valid_msg_field_name(x):
"""
@return: True if the name is a syntactically legal message field name
@rtype: bool
"""
return roslib.names.is_legal_resource_base_name(x)
# msg spec representation ##########################################
class Constant(object):
"""
Container class for holding a Constant declaration
"""
__slots__ = ['type', 'name', 'val', 'val_text']
def __init__(self, type_, name, val, val_text):
"""
@param type_: constant type
@type type_: str
@param name: constant name
@type name: str
@param val: constant value
@type val: str
@param val_text: Original text definition of ``val``
@type val_text: str
"""
if type_ is None or name is None or val is None or val_text is None:
raise ValueError('Constant must have non-None parameters')
self.type = type_
self.name = name.strip() # names are always stripped of whitespace
self.val = val
self.val_text = val_text
def __eq__(self, other):
if not isinstance(other, Constant):
return False
return self.type == other.type and self.name == other.name and self.val == other.val
def __repr__(self):
return '%s %s=%s' % (self.type, self.name, self.val)
def __str__(self):
return '%s %s=%s' % (self.type, self.name, self.val)
def _strify_spec(spec, buff=None, indent=''):
"""
Convert spec into a string representation. Helper routine for MsgSpec.
@param indent: internal use only
@type indent: str
@param buff: internal use only
@type buff: StringIO
@return: string representation of spec
@rtype: str
"""
if buff is None:
buff = StringIO()
for c in spec.constants:
buff.write('%s%s %s=%s\n' % (indent, c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
buff.write('%s%s %s\n' % (indent, type_, name))
base_type = base_msg_type(type_)
if base_type not in BUILTIN_TYPES:
subspec = get_registered(base_type)
_strify_spec(subspec, buff, indent + ' ')
return buff.getvalue()
class Field(object):
"""
Container class for storing information about a single field in a MsgSpec
Contains:
name
type
base_type
is_array
array_len
is_builtin
is_header
"""
def __init__(self, name, type):
self.name = name
self.type = type
(self.base_type, self.is_array, self.array_len) = parse_type(type)
self.is_header = is_header_type(self.base_type)
self.is_builtin = is_builtin(self.base_type)
def __repr__(self):
return '[%s, %s, %s, %s, %s]' % (self.name, self.type, self.base_type, self.is_array, self.array_len)
class MsgSpec(object):
"""
Container class for storing loaded msg description files. Field
types and names are stored in separate lists with 1-to-1
correspondence. MsgSpec can also return an md5 of the source text.
"""
def __init__(self, types, names, constants, text, full_name='', short_name='', package=''):
"""
@param types: list of field types, in order of declaration
@type types: [str]
@param names: list of field names, in order of declaration
@type names: [str]
@param constants: Constant declarations
@type constants: [L{Constant}]
@param text: text of declaration
@type text: str
@raise MsgSpecException: if spec is invalid (e.g. fields with the same name)
"""
self.types = types
if len(set(names)) != len(names):
raise MsgSpecException('Duplicate field names in message: %s' % names)
self.names = names
self.constants = constants
assert len(self.types) == len(self.names), 'len(%s) != len(%s)' % (self.types, self.names)
# Header.msg support
if (len(self.types)):
self.header_present = is_header_type(self.types[0]) and self.names[0] == 'header'
else:
self.header_present = False
self.text = text
self.full_name = full_name
self.short_name = short_name
self.package = package
self._parsed_fields = [Field(name, type) for (name, type) in zip(self.names, self.types)]
def fields(self):
"""
@return: zip list of types and names (e.g. [('int32', 'x'), ('int32', 'y')])
@rtype: [(str,str),]
"""
return list(zip(self.types, self.names)) # py3k
def parsed_fields(self):
"""
@return: list of Field classes
@rtype: [Field,]
"""
return self._parsed_fields
def has_header(self):
"""
@return: True if msg description contains a 'Header header'
declaration at the beginning
@rtype: bool
"""
return self.header_present
def __eq__(self, other):
if not other or not isinstance(other, MsgSpec):
return False
return self.types == other.types and self.names == other.names and \
self.constants == other.constants and self.text == other.text
def __ne__(self, other):
if not other or not isinstance(other, MsgSpec):
return True
return not self.__eq__(other)
def __repr__(self):
if self.constants:
return 'MsgSpec[%s, %s, %s]' % (repr(self.constants), repr(self.types), repr(self.names))
else:
return 'MsgSpec[%s, %s]' % (repr(self.types), repr(self.names))
def __str__(self):
return _strify_spec(self)
# msg spec loading utilities ##########################################
def reinit():
"""
Reinitialize roslib.msgs. This API is for message generators
(e.g. genpy) that need to re-initialize the registration table.
"""
global _initialized, _loaded_packages
# unset the initialized state and unregister everything
_initialized = False
del _loaded_packages[:]
REGISTERED_TYPES.clear()
_init()
_initialized = False
def _init():
# lazy-init
global _initialized
if _initialized:
return
fname = '%s%s' % (HEADER, EXT)
std_msgs_dir = roslib.packages.get_pkg_dir('std_msgs')
if std_msgs_dir is None:
raise MsgSpecException('Unable to locate std_msgs: %s files cannot be loaded' % EXT)
header = os.path.join(std_msgs_dir, 'msg', fname)
if not os.path.isfile(header):
sys.stderr.write("ERROR: cannot locate %s. Expected to find it at '%s'\n" % (fname, header))
return False
# register Header under both contexted and de-contexted name
_, spec = load_from_file(header, '')
register(HEADER, spec)
register('std_msgs/'+HEADER, spec)
# backwards compat, REP 100
register('roslib/'+HEADER, spec)
for k, spec in EXTENDED_BUILTINS.items():
register(k, spec)
_initialized = True
# .msg file routines ##############################################################
def _msg_filter(f):
"""
Predicate for filtering directory list. matches message files
@param f: filename
@type f: str
"""
return os.path.isfile(f) and f.endswith(EXT)
# also used by doxymaker
def list_msg_types(package, include_depends):
"""
List all messages in the specified package
@param package str: name of package to search
@param include_depends bool: if True, will also list messages in package dependencies
@return [str]: message type names
"""
types = roslib.resources.list_package_resources(package, include_depends, 'msg', _msg_filter)
return [x[:-len(EXT)] for x in types]
def msg_file(package, type_):
"""
Determine the file system path for the specified .msg
resource. .msg resource does not have to exist.
@param package: name of package .msg file is in
@type package: str
@param type_: type name of message, e.g. 'Point2DFloat32'
@type type_: str
@return: file path of .msg file in specified package
@rtype: str
"""
return roslib.packages.resource_file(package, 'msg', type_+EXT)
def get_pkg_msg_specs(package):
"""
List all messages that a package contains.
@param package: package to load messages from
@type package: str
@return: list of message type names and specs for package, as well as a list
of message names that could not be processed.
@rtype: [(str, L{MsgSpec}), [str]]
"""
_init()
types = list_msg_types(package, False)
specs = [] # no fancy list comprehension as we want to show errors
failures = []
for t in types:
try:
typespec = load_from_file(msg_file(package, t), package)
specs.append(typespec)
except Exception as e:
failures.append(t)
print('ERROR: unable to load %s, %s' % (t, e))
return specs, failures
def load_package_dependencies(package, load_recursive=False):
"""
Register all messages that the specified package depends on.
@param load_recursive: (optional) if True, load all dependencies,
not just direct dependencies. By default, this is false to
prevent packages from incorrectly inheriting dependencies.
@type load_recursive: bool
"""
global _loaded_packages
_init()
if VERBOSE:
print('Load dependencies for package', package)
if not load_recursive:
manifest_file = roslib.manifest.manifest_file(package, True)
m = roslib.manifest.parse_file(manifest_file)
depends = [d.package for d in m.depends] # #391
else:
depends = rospkg.RosPack().get_depends(package, implicit=True)
msgs = []
failures = []
for d in depends:
if VERBOSE:
print('Load dependency', d)
# check if already loaded
# - we are dependent on manifest.getAll returning first-order dependencies first
if d in _loaded_packages or d == package:
continue
_loaded_packages.append(d)
specs, failed = get_pkg_msg_specs(d)
msgs.extend(specs)
failures.extend(failed)
for key, spec in msgs:
register(key, spec)
def load_package(package):
"""
Load package into the local registered namespace. All messages found
in the package will be registered if they are successfully
loaded. This should only be done with one package (i.e. the 'main'
package) per Python instance.
@param package: package name
@type package: str
"""
global _loaded_packages
_init()
if VERBOSE:
print('Load package', package)
# check if already loaded
# - we are dependent on manifest.getAll returning first-order dependencies first
if package in _loaded_packages:
if VERBOSE:
print('Package %s is already loaded' % package)
return
_loaded_packages.append(package)
specs, failed = get_pkg_msg_specs(package)
if VERBOSE:
print('Package contains the following messages: %s' % specs)
for key, spec in specs:
# register spec under both local and fully-qualified key
register(key, spec)
register(package + roslib.names.PRN_SEPARATOR + key, spec)
def _convert_val(type_, val):
"""
Convert constant value declaration to python value. Does not do
type-checking, so ValueError or other exceptions may be raised.
@param type_: ROS field type
@type type_: str
@param val: string representation of constant
@type val: str:
@raise ValueError: if unable to convert to python representation
@raise MsgSpecException: if value exceeds specified integer width
"""
if type_ in ['float32', 'float64']:
return float(val)
elif type_ in ['string']:
return val.strip() # string constants are always stripped
elif type_ in ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'char', 'byte']:
# bounds checking
bits = [('int8', 8), ('uint8', 8), ('int16', 16), ('uint16', 16),
('int32', 32), ('uint32', 32), ('int64', 64), ('uint64', 64),
('byte', 8), ('char', 8)]
b = [b for t, b in bits if t == type_][0]
import math
if type_[0] == 'u' or type_ == 'char':
lower = 0
upper = int(math.pow(2, b)-1)
else:
upper = int(math.pow(2, b-1)-1)
lower = -upper - 1 # two's complement min
val = int(val) # python will autocast to long if necessary
if val > upper or val < lower:
raise MsgSpecException('cannot coerce [%s] to %s (out of bounds)' % (val, type_))
return val
elif type_ == 'bool':
# TODO: need to nail down constant spec for bool
return True if eval(val) else False
raise MsgSpecException('invalid constant type: [%s]' % type_)
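# Illustrative conversions:
#   _convert_val('int8', '-1')     -> -1
#   _convert_val('float32', '0.5') -> 0.5
#   _convert_val('uint8', '256')   raises MsgSpecException (out of bounds)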
def load_by_type(msgtype, package_context=''):
"""
Load message specification for specified type
@param package_context: package name to use for the type name or
'' to use the local (relative) naming convention.
@type package_context: str
@return: Message type name and message specification
@rtype: (str, L{MsgSpec})
"""
pkg, basetype = roslib.names.package_resource_name(msgtype)
pkg = pkg or package_context # convert '' -> local package
try:
m_f = msg_file(pkg, basetype)
except roslib.packages.InvalidROSPkgException:
raise MsgSpecException('Cannot locate message type [%s], package [%s] does not exist' % (msgtype, pkg))
return load_from_file(m_f, pkg)
def load_from_string(text, package_context='', full_name='', short_name=''):
"""
Load message specification from a string.
@param text: .msg text
@type text: str
@param package_context: package name to use for the type name or
'' to use the local (relative) naming convention.
@type package_context: str
@return: Message specification
@rtype: L{MsgSpec}
@raise MsgSpecException: if syntax errors or other problems are detected in file
"""
types = []
names = []
constants = []
for orig_line in text.split('\n'):
l = orig_line.split(COMMENTCHAR)[0].strip() # strip comments
if not l:
continue # ignore empty lines
splits = [s for s in [x.strip() for x in l.split(' ')] if s] # split type/name, filter out empties
type_ = splits[0]
if not is_valid_msg_type(type_):
raise MsgSpecException('%s is not a legal message type' % type_)
if CONSTCHAR in l:
if not is_valid_constant_type(type_):
raise MsgSpecException('%s is not a legal constant type' % type_)
if type_ == 'string':
# strings contain anything to the right of the equals sign, there are no comments allowed
idx = orig_line.find(CONSTCHAR)
name = orig_line[orig_line.find(' ')+1:idx]
val = orig_line[idx+1:]
else:
splits = [x.strip() for x in ' '.join(splits[1:]).split(CONSTCHAR)] # resplit on '='
if len(splits) != 2:
raise MsgSpecException('Invalid declaration: %s' % l)
name = splits[0]
val = splits[1]
try:
val_converted = _convert_val(type_, val)
except Exception as e:
raise MsgSpecException('Invalid declaration: %s' % e)
constants.append(Constant(type_, name, val_converted, val.strip()))
else:
if len(splits) != 2:
raise MsgSpecException('Invalid declaration: %s' % l)
name = splits[1]
if not is_valid_msg_field_name(name):
raise MsgSpecException('%s is not a legal message field name' % name)
if package_context and SEP not in type_:
if not base_msg_type(type_) in RESERVED_TYPES:
# print "rewrite", type_, "to", "%s/%s"%(package_context, type_)
type_ = '%s/%s' % (package_context, type_)
types.append(type_)
names.append(name)
return MsgSpec(types, names, constants, text, full_name, short_name, package_context)
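# Illustrative call (the text is a minimal example .msg definition):
#   spec = load_from_string('string name\nuint8 LEVEL=1\n', 'my_pkg')
#   spec.fields()           -> [('string', 'name')]
#   str(spec.constants[0])  -> 'uint8 LEVEL=1'
# Non-builtin field types are prefixed with the package context, e.g. a
# 'Point32 p' field loaded in package 'geometry_msgs' gets type 'geometry_msgs/Point32'.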
def load_from_file(file_path, package_context=''):
"""
Convert the .msg representation in the file to a MsgSpec instance.
This does *not* register the object.
@param file_path: path of file to load from
@type file_path: str:
@param package_context: package name to prepend to type name or
'' to use local (relative) naming convention.
@type package_context: str
@return: Message type name and message specification
@rtype: (str, L{MsgSpec})
@raise MsgSpecException: if syntax errors or other problems are detected in file
"""
if VERBOSE:
if package_context:
print('Load spec from', file_path, 'into package [%s]' % package_context)
else:
print('Load spec from', file_path)
file_name = os.path.basename(file_path)
type_ = file_name[:-len(EXT)]
base_type_ = type_
# determine the type name
if package_context:
while package_context.endswith(SEP):
package_context = package_context[:-1] # strip message separators
type_ = '%s%s%s' % (package_context, SEP, type_)
if not roslib.names.is_legal_resource_name(type_):
raise MsgSpecException('%s: [%s] is not a legal type name' % (file_path, type_))
f = open(file_path, 'r')
try:
try:
text = f.read()
return (type_, load_from_string(text, package_context, type_, base_type_))
except MsgSpecException as e:
raise MsgSpecException('%s: %s' % (file_name, e))
finally:
f.close()
# data structures and builtins specification ###########################
# adjustable constants, in case we change our minds
HEADER = 'Header'
TIME = 'time'
DURATION = 'duration'
def is_header_type(type_):
"""
@param type_: message type name
@type type_: str
@return: True if ``type_`` refers to the ROS Header type
@rtype: bool
"""
# for backwards compatibility, include roslib/Header. REP 100
return type_ in [HEADER, 'std_msgs/Header', 'roslib/Header']
# time and duration types are represented as aggregate data structures
# for the purposes of serialization from the perspective of
# roslib.msgs. genmsg_py will do the additional special handling required
# to convert them into rospy.msg.Time/Duration instances.
# time as msg spec. time is unsigned
TIME_MSG = 'uint32 secs\nuint32 nsecs'
# duration as msg spec. duration is just like time except signed
DURATION_MSG = 'int32 secs\nint32 nsecs'
# primitive types are those for which we allow constants, i.e. have primitive representation
PRIMITIVE_TYPES = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float32', 'float64',
'string',
'bool',
# deprecated:
'char', 'byte']
BUILTIN_TYPES = PRIMITIVE_TYPES + [TIME, DURATION]
def is_builtin(msg_type_name):
"""
@param msg_type_name: name of message type
@type msg_type_name: str
@return: True if msg_type_name is a builtin/primitive type
@rtype: bool
"""
return msg_type_name in BUILTIN_TYPES
# extended builtins are builtin types that can be represented as MsgSpec instances
EXTENDED_BUILTINS = {TIME: load_from_string(TIME_MSG), DURATION: load_from_string(DURATION_MSG)}
RESERVED_TYPES = BUILTIN_TYPES + [HEADER]
REGISTERED_TYPES = {}
_loaded_packages = [] # keep track of packages so that we only load once (note: bug #59)
def is_registered(msg_type_name):
"""
@param msg_type_name: name of message type
@type msg_type_name: str
@return: True if msg spec for specified msg type name is
registered. NOTE: builtin types are not registered.
@rtype: bool
"""
return msg_type_name in REGISTERED_TYPES
def get_registered(msg_type_name, default_package=None):
"""
@param msg_type_name: name of message type
@type msg_type_name: str
@return: msg spec for msg type name
@rtype: L{MsgSpec}
"""
if msg_type_name in REGISTERED_TYPES:
return REGISTERED_TYPES[msg_type_name]
elif default_package:
# if msg_type_name has no package specifier, try with default package resolution
p, n = roslib.names.package_resource_name(msg_type_name)
if not p:
return REGISTERED_TYPES[roslib.names.resource_name(default_package, msg_type_name)]
raise KeyError(msg_type_name)
def register(msg_type_name, msg_spec):
"""
Load MsgSpec into the type dictionary
@param msg_type_name: name of message type
@type msg_type_name: str
@param msg_spec: spec to load
@type msg_spec: L{MsgSpec}
"""
if VERBOSE:
print('Register msg %s' % msg_type_name)
REGISTERED_TYPES[msg_type_name] = msg_spec
|
import pytest
import voluptuous as vol
from homeassistant import core
from homeassistant.components import light
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PLATFORM,
ENTITY_MATCH_ALL,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.exceptions import Unauthorized
from homeassistant.setup import async_setup_component
from homeassistant.util import color
from tests.common import async_mock_service
orig_Profiles = light.Profiles
async def test_methods(hass):
"""Test if methods call the services as expected."""
# Test is_on
hass.states.async_set("light.test", STATE_ON)
assert light.is_on(hass, "light.test")
hass.states.async_set("light.test", STATE_OFF)
assert not light.is_on(hass, "light.test")
# Test turn_on
turn_on_calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_ON)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "entity_id_val",
light.ATTR_TRANSITION: "transition_val",
light.ATTR_BRIGHTNESS: "brightness_val",
light.ATTR_RGB_COLOR: "rgb_color_val",
light.ATTR_XY_COLOR: "xy_color_val",
light.ATTR_PROFILE: "profile_val",
light.ATTR_COLOR_NAME: "color_name_val",
light.ATTR_WHITE_VALUE: "white_val",
},
blocking=True,
)
assert len(turn_on_calls) == 1
call = turn_on_calls[-1]
assert call.domain == light.DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == "entity_id_val"
assert call.data.get(light.ATTR_TRANSITION) == "transition_val"
assert call.data.get(light.ATTR_BRIGHTNESS) == "brightness_val"
assert call.data.get(light.ATTR_RGB_COLOR) == "rgb_color_val"
assert call.data.get(light.ATTR_XY_COLOR) == "xy_color_val"
assert call.data.get(light.ATTR_PROFILE) == "profile_val"
assert call.data.get(light.ATTR_COLOR_NAME) == "color_name_val"
assert call.data.get(light.ATTR_WHITE_VALUE) == "white_val"
# Test turn_off
turn_off_calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_OFF)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_OFF,
{
ATTR_ENTITY_ID: "entity_id_val",
light.ATTR_TRANSITION: "transition_val",
},
blocking=True,
)
assert len(turn_off_calls) == 1
call = turn_off_calls[-1]
assert call.domain == light.DOMAIN
assert call.service == SERVICE_TURN_OFF
assert call.data[ATTR_ENTITY_ID] == "entity_id_val"
assert call.data[light.ATTR_TRANSITION] == "transition_val"
# Test toggle
toggle_calls = async_mock_service(hass, light.DOMAIN, SERVICE_TOGGLE)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TOGGLE,
{ATTR_ENTITY_ID: "entity_id_val", light.ATTR_TRANSITION: "transition_val"},
blocking=True,
)
assert len(toggle_calls) == 1
call = toggle_calls[-1]
assert call.domain == light.DOMAIN
assert call.service == SERVICE_TOGGLE
assert call.data[ATTR_ENTITY_ID] == "entity_id_val"
assert call.data[light.ATTR_TRANSITION] == "transition_val"
async def test_services(hass, mock_light_profiles):
"""Test the provided services."""
platform = getattr(hass.components, "test.light")
platform.init()
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
# Test init
assert light.is_on(hass, ent1.entity_id)
assert not light.is_on(hass, ent2.entity_id)
assert not light.is_on(hass, ent3.entity_id)
# Test basic turn_on, turn_off, toggle services
await hass.services.async_call(
light.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ent1.entity_id}, blocking=True
)
await hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ent2.entity_id}, blocking=True
)
assert not light.is_on(hass, ent1.entity_id)
assert light.is_on(hass, ent2.entity_id)
# turn on all lights
await hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True
)
assert light.is_on(hass, ent1.entity_id)
assert light.is_on(hass, ent2.entity_id)
assert light.is_on(hass, ent3.entity_id)
# turn off all lights
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
assert not light.is_on(hass, ent1.entity_id)
assert not light.is_on(hass, ent2.entity_id)
assert not light.is_on(hass, ent3.entity_id)
# turn off all lights by setting brightness to 0
await hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True
)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL, light.ATTR_BRIGHTNESS: 0},
blocking=True,
)
assert not light.is_on(hass, ent1.entity_id)
assert not light.is_on(hass, ent2.entity_id)
assert not light.is_on(hass, ent3.entity_id)
# toggle all lights
await hass.services.async_call(
light.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True
)
assert light.is_on(hass, ent1.entity_id)
assert light.is_on(hass, ent2.entity_id)
assert light.is_on(hass, ent3.entity_id)
# toggle all lights
await hass.services.async_call(
light.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True
)
assert not light.is_on(hass, ent1.entity_id)
assert not light.is_on(hass, ent2.entity_id)
assert not light.is_on(hass, ent3.entity_id)
# Ensure all attributes process correctly
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent1.entity_id,
light.ATTR_TRANSITION: 10,
light.ATTR_BRIGHTNESS: 20,
light.ATTR_COLOR_NAME: "blue",
},
blocking=True,
)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent2.entity_id,
light.ATTR_RGB_COLOR: (255, 255, 255),
light.ATTR_WHITE_VALUE: 255,
},
blocking=True,
)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent3.entity_id,
light.ATTR_XY_COLOR: (0.4, 0.6),
},
blocking=True,
)
_, data = ent1.last_call("turn_on")
assert data == {
light.ATTR_TRANSITION: 10,
light.ATTR_BRIGHTNESS: 20,
light.ATTR_HS_COLOR: (240, 100),
}
_, data = ent2.last_call("turn_on")
assert data == {light.ATTR_HS_COLOR: (0, 0), light.ATTR_WHITE_VALUE: 255}
_, data = ent3.last_call("turn_on")
assert data == {light.ATTR_HS_COLOR: (71.059, 100)}
# Ensure attributes are filtered when light is turned off
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent1.entity_id,
light.ATTR_TRANSITION: 10,
light.ATTR_BRIGHTNESS: 0,
light.ATTR_COLOR_NAME: "blue",
},
blocking=True,
)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent2.entity_id,
light.ATTR_BRIGHTNESS: 0,
light.ATTR_RGB_COLOR: (255, 255, 255),
light.ATTR_WHITE_VALUE: 0,
},
blocking=True,
)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent3.entity_id,
light.ATTR_BRIGHTNESS: 0,
light.ATTR_XY_COLOR: (0.4, 0.6),
},
blocking=True,
)
assert not light.is_on(hass, ent1.entity_id)
assert not light.is_on(hass, ent2.entity_id)
assert not light.is_on(hass, ent3.entity_id)
_, data = ent1.last_call("turn_off")
assert data == {light.ATTR_TRANSITION: 10}
_, data = ent2.last_call("turn_off")
assert data == {}
_, data = ent3.last_call("turn_off")
assert data == {}
# One of the light profiles
mock_light_profiles["relax"] = (35.932, 69.412, 144, 0)
prof_name, prof_h, prof_s, prof_bri, prof_t = "relax", 35.932, 69.412, 144, 0
# Test light profiles
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ent1.entity_id, light.ATTR_PROFILE: prof_name},
blocking=True,
)
# Specify a profile and a brightness attribute to overwrite it
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent2.entity_id,
light.ATTR_PROFILE: prof_name,
light.ATTR_BRIGHTNESS: 100,
light.ATTR_TRANSITION: 1,
},
blocking=True,
)
_, data = ent1.last_call("turn_on")
assert data == {
light.ATTR_BRIGHTNESS: prof_bri,
light.ATTR_HS_COLOR: (prof_h, prof_s),
light.ATTR_TRANSITION: prof_t,
}
_, data = ent2.last_call("turn_on")
assert data == {
light.ATTR_BRIGHTNESS: 100,
light.ATTR_HS_COLOR: (prof_h, prof_s),
light.ATTR_TRANSITION: 1,
}
# Test toggle with parameters
await hass.services.async_call(
light.DOMAIN,
SERVICE_TOGGLE,
{
ATTR_ENTITY_ID: ent3.entity_id,
light.ATTR_PROFILE: prof_name,
light.ATTR_BRIGHTNESS_PCT: 100,
},
blocking=True,
)
_, data = ent3.last_call("turn_on")
assert data == {
light.ATTR_BRIGHTNESS: 255,
light.ATTR_HS_COLOR: (prof_h, prof_s),
light.ATTR_TRANSITION: prof_t,
}
# Test bad data
await hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True
)
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ent1.entity_id, light.ATTR_PROFILE: -1},
blocking=True,
)
with pytest.raises(vol.MultipleInvalid):
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ent2.entity_id, light.ATTR_XY_COLOR: ["bla-di-bla", 5]},
blocking=True,
)
with pytest.raises(vol.MultipleInvalid):
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ent3.entity_id, light.ATTR_RGB_COLOR: [255, None, 2]},
blocking=True,
)
_, data = ent1.last_call("turn_on")
assert data == {}
_, data = ent2.last_call("turn_on")
assert data == {}
_, data = ent3.last_call("turn_on")
assert data == {}
# faulty attributes will not trigger a service call
with pytest.raises(vol.MultipleInvalid):
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent1.entity_id,
light.ATTR_PROFILE: prof_name,
light.ATTR_BRIGHTNESS: "bright",
},
blocking=True,
)
with pytest.raises(vol.MultipleInvalid):
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent1.entity_id,
light.ATTR_RGB_COLOR: "yellowish",
},
blocking=True,
)
with pytest.raises(vol.MultipleInvalid):
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ent2.entity_id, light.ATTR_WHITE_VALUE: "high"},
blocking=True,
)
_, data = ent1.last_call("turn_on")
assert data == {}
_, data = ent2.last_call("turn_on")
assert data == {}
async def test_light_profiles(hass, mock_light_profiles):
"""Test light profiles."""
platform = getattr(hass.components, "test.light")
platform.init()
mock_light_profiles["test"] = color.color_xy_to_hs(0.4, 0.6) + (100, 0)
mock_light_profiles["test_off"] = 0, 0, 0, 0
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
ent1, _, _ = platform.ENTITIES
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ent1.entity_id,
light.ATTR_PROFILE: "test",
},
blocking=True,
)
_, data = ent1.last_call("turn_on")
assert light.is_on(hass, ent1.entity_id)
assert data == {
light.ATTR_HS_COLOR: (71.059, 100),
light.ATTR_BRIGHTNESS: 100,
light.ATTR_TRANSITION: 0,
}
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ent1.entity_id, light.ATTR_PROFILE: "test_off"},
blocking=True,
)
_, data = ent1.last_call("turn_off")
assert not light.is_on(hass, ent1.entity_id)
assert data == {light.ATTR_TRANSITION: 0}
async def test_default_profiles_group(hass, mock_light_profiles):
"""Test default turn-on light profile for all lights."""
platform = getattr(hass.components, "test.light")
platform.init()
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
mock_light_profiles["group.all_lights.default"] = color.color_xy_to_hs(0.4, 0.6) + (
99,
2,
)
ent, _, _ = platform.ENTITIES
await hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ent.entity_id}, blocking=True
)
_, data = ent.last_call("turn_on")
assert data == {
light.ATTR_HS_COLOR: (71.059, 100),
light.ATTR_BRIGHTNESS: 99,
light.ATTR_TRANSITION: 2,
}
async def test_default_profiles_light(hass, mock_light_profiles):
"""Test default turn-on light profile for a specific light."""
platform = getattr(hass.components, "test.light")
platform.init()
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
mock_light_profiles["group.all_lights.default"] = color.color_xy_to_hs(0.3, 0.5) + (
200,
0,
)
mock_light_profiles["light.ceiling_2.default"] = color.color_xy_to_hs(0.6, 0.6) + (
100,
3,
)
dev = next(filter(lambda x: x.entity_id == "light.ceiling_2", platform.ENTITIES))
await hass.services.async_call(
light.DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: dev.entity_id,
},
blocking=True,
)
_, data = dev.last_call("turn_on")
assert data == {
light.ATTR_HS_COLOR: (50.353, 100),
light.ATTR_BRIGHTNESS: 100,
light.ATTR_TRANSITION: 3,
}
async def test_light_context(hass, hass_admin_user):
"""Test that light context works."""
platform = getattr(hass.components, "test.light")
platform.init()
assert await async_setup_component(hass, "light", {"light": {"platform": "test"}})
await hass.async_block_till_done()
state = hass.states.get("light.ceiling")
assert state is not None
await hass.services.async_call(
"light",
"toggle",
{"entity_id": state.entity_id},
blocking=True,
context=core.Context(user_id=hass_admin_user.id),
)
state2 = hass.states.get("light.ceiling")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
async def test_light_turn_on_auth(hass, hass_admin_user):
"""Test that light context works."""
platform = getattr(hass.components, "test.light")
platform.init()
assert await async_setup_component(hass, "light", {"light": {"platform": "test"}})
await hass.async_block_till_done()
state = hass.states.get("light.ceiling")
assert state is not None
hass_admin_user.mock_policy({})
with pytest.raises(Unauthorized):
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": state.entity_id},
blocking=True,
context=core.Context(user_id=hass_admin_user.id),
)
async def test_light_brightness_step(hass):
"""Test that light context works."""
platform = getattr(hass.components, "test.light")
platform.init()
entity = platform.ENTITIES[0]
entity.supported_features = light.SUPPORT_BRIGHTNESS
entity.brightness = 100
assert await async_setup_component(hass, "light", {"light": {"platform": "test"}})
await hass.async_block_till_done()
state = hass.states.get(entity.entity_id)
assert state is not None
assert state.attributes["brightness"] == 100
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_step": -10},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 90, data
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_step_pct": 10},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 126, data
async def test_light_brightness_pct_conversion(hass):
"""Test that light brightness percent conversion."""
platform = getattr(hass.components, "test.light")
platform.init()
entity = platform.ENTITIES[0]
entity.supported_features = light.SUPPORT_BRIGHTNESS
entity.brightness = 100
assert await async_setup_component(hass, "light", {"light": {"platform": "test"}})
await hass.async_block_till_done()
state = hass.states.get(entity.entity_id)
assert state is not None
assert state.attributes["brightness"] == 100
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_pct": 1},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 3, data
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_pct": 2},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 5, data
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_pct": 50},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 128, data
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_pct": 99},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 252, data
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": entity.entity_id, "brightness_pct": 100},
blocking=True,
)
_, data = entity.last_call("turn_on")
assert data["brightness"] == 255, data
def test_deprecated_base_class(caplog):
"""Test deprecated base class."""
class CustomLight(light.Light):
pass
CustomLight()
assert "Light is deprecated, modify CustomLight" in caplog.text
async def test_profiles(hass):
"""Test profiles loading."""
profiles = orig_Profiles(hass)
await profiles.async_initialize()
assert profiles.data == {
"concentrate": (35.932, 69.412, 219, 0),
"energize": (43.333, 21.176, 203, 0),
"reading": (38.88, 49.02, 240, 0),
"relax": (35.932, 69.412, 144, 0),
}
|
import os
import click
from molecule import config
from molecule import logger
from molecule import util
from molecule.command import base as command_base
from molecule.command.init import base
LOG = logger.get_logger(__name__)
class Role(base.Base):
"""
.. program:: molecule init role --role-name foo
.. option:: molecule init role --role-name foo
Initialize a new role.
.. program:: molecule init role --role-name foo --template path
.. option:: molecule init role --role-name foo --template path
Initialize a new role using a local *cookiecutter* template. This
allows the customization of a role while still using the upstream
``molecule`` folder. This is similar to an
``ansible-galaxy init`` skeleton. Please refer to the ``init scenario``
command in order to generate a custom ``molecule`` scenario.
"""
def __init__(self, command_args):
self._command_args = command_args
def execute(self):
"""
Execute the actions necessary to perform a `molecule init role` and
return None.
:return: None
"""
role_name = self._command_args['role_name']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
template_directory = ''
if 'template' in self._command_args.keys():
template_directory = self._command_args['template']
else:
template_directory = 'role'
self._process_templates(template_directory, self._command_args,
role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg)
@click.command()
@click.pass_context
@click.option(
'--dependency-name',
type=click.Choice(['galaxy']),
default='galaxy',
help='Name of dependency to initialize. (galaxy)')
@click.option(
'--driver-name',
'-d',
type=click.Choice(config.molecule_drivers()),
default='docker',
help='Name of driver to initialize. (docker)')
@click.option(
'--lint-name',
type=click.Choice(['yamllint']),
default='yamllint',
help='Name of lint to initialize. (yamllint)')
@click.option(
'--provisioner-name',
type=click.Choice(['ansible']),
default='ansible',
help='Name of provisioner to initialize. (ansible)')
@click.option(
'--role-name', '-r', required=True, help='Name of the role to create.')
@click.option(
'--verifier-name',
type=click.Choice(config.molecule_verifiers()),
default='testinfra',
help='Name of verifier to initialize. (testinfra)')
@click.option(
'--template',
'-t',
type=click.Path(
exists=True, dir_okay=True, readable=True, resolve_path=True),
help="Path to a cookiecutter custom template to initialize the role. "
"The upstream molecule folder will be added to this template")
def role(ctx, dependency_name, driver_name, lint_name, provisioner_name,
role_name, verifier_name, template): # pragma: no cover
""" Initialize a new role for use with Molecule. """
command_args = {
'dependency_name': dependency_name,
'driver_name': driver_name,
'lint_name': lint_name,
'provisioner_name': provisioner_name,
'role_name': role_name,
'scenario_name': command_base.MOLECULE_DEFAULT_SCENARIO_NAME,
'subcommand': __name__,
'verifier_name': verifier_name,
}
if verifier_name == 'inspec':
command_args['verifier_lint_name'] = 'rubocop'
if verifier_name == 'goss':
command_args['verifier_lint_name'] = 'yamllint'
if template is not None:
command_args['template'] = template
r = Role(command_args)
r.execute()
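# Illustrative CLI usage (the role name is an example):
#   molecule init role --role-name my_role --driver-name docker --verifier-name testinfra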
|
from homeassistant import data_entry_flow
from homeassistant.components.agent_dvr import config_flow
from homeassistant.components.agent_dvr.const import SERVER_URL
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_PORT, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from . import init_integration
from tests.common import load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_show_user_form(hass: HomeAssistant) -> None:
"""Test that the user set up form is served."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "user"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_user_device_exists_abort(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort flow if Agent device already configured."""
await init_integration(hass, aioclient_mock)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "example.local", CONF_PORT: 8090},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_connection_error(hass: HomeAssistant, aioclient_mock) -> None:
"""Test we show user form on Agent connection error."""
aioclient_mock.get("http://example.local:8090/command.cgi?cmd=getStatus", text="")
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "example.local", CONF_PORT: 8090},
)
assert result["errors"] == {"base": "cannot_connect"}
assert result["step_id"] == "user"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_full_user_flow_implementation(
hass: HomeAssistant, aioclient_mock
) -> None:
"""Test the full manual user flow from start to finish."""
aioclient_mock.get(
"http://example.local:8090/command.cgi?cmd=getStatus",
text=load_fixture("agent_dvr/status.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://example.local:8090/command.cgi?cmd=getObjects",
text=load_fixture("agent_dvr/objects.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER},
)
assert result["step_id"] == "user"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "example.local", CONF_PORT: 8090}
)
assert result["data"][CONF_HOST] == "example.local"
assert result["data"][CONF_PORT] == 8090
assert result["data"][SERVER_URL] == "http://example.local:8090/"
assert result["title"] == "DESKTOP"
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
entries = hass.config_entries.async_entries(config_flow.DOMAIN)
assert entries[0].unique_id == "c0715bba-c2d0-48ef-9e3e-bc81c9ea4447"
|
import flatbuffers
class Invocation(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsInvocation(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Invocation()
x.Init(buf, n + offset)
return x
# Invocation
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Invocation
def Request(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Invocation
def Registration(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Invocation
def Payload(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Invocation
def PayloadAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Invocation
def PayloadLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Invocation
def EncAlgo(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Invocation
def EncSerializer(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Invocation
def EncKey(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Invocation
def EncKeyAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Invocation
def EncKeyLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Invocation
def Procedure(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Invocation
def Timeout(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Invocation
def ReceiveProgress(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Invocation
def Caller(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Invocation
def CallerAuthid(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Invocation
def CallerAuthrole(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def InvocationStart(builder): builder.StartObject(12)
def InvocationAddRequest(builder, request): builder.PrependUint64Slot(0, request, 0)
def InvocationAddRegistration(builder, registration): builder.PrependUint64Slot(1, registration, 0)
def InvocationAddPayload(builder, payload): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(payload), 0)
def InvocationStartPayloadVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def InvocationAddEncAlgo(builder, encAlgo): builder.PrependUint8Slot(3, encAlgo, 0)
def InvocationAddEncSerializer(builder, encSerializer): builder.PrependUint8Slot(4, encSerializer, 0)
def InvocationAddEncKey(builder, encKey): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(encKey), 0)
def InvocationStartEncKeyVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def InvocationAddProcedure(builder, procedure): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(procedure), 0)
def InvocationAddTimeout(builder, timeout): builder.PrependUint32Slot(7, timeout, 0)
def InvocationAddReceiveProgress(builder, receiveProgress): builder.PrependBoolSlot(8, receiveProgress, 0)
def InvocationAddCaller(builder, caller): builder.PrependUint64Slot(9, caller, 0)
def InvocationAddCallerAuthid(builder, callerAuthid): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(callerAuthid), 0)
def InvocationAddCallerAuthrole(builder, callerAuthrole): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(callerAuthrole), 0)
def InvocationEnd(builder): return builder.EndObject()
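# Illustrative builder usage (not part of the generated schema code; the field
# values and procedure name below are made up):
#
#     builder = flatbuffers.Builder(0)
#     procedure = builder.CreateString('com.example.echo')
#     InvocationStart(builder)
#     InvocationAddRequest(builder, 1)
#     InvocationAddRegistration(builder, 2)
#     InvocationAddProcedure(builder, procedure)
#     invocation = InvocationEnd(builder)
#     builder.Finish(invocation)
#     buf = builder.Output()
#     parsed = Invocation.GetRootAsInvocation(buf, 0)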
|
import getpass
import pytest
from mock import patch, Mock, call
from pymongo.errors import OperationFailure
from pymongo.read_preferences import Primary
from arctic.hooks import get_mongodb_uri
from arctic.scripts import arctic_enable_sharding as mes
from ...util import run_as_main
def test_enable_sharding(mongo_host, arctic, mongo_server, user_library, user_library_name):
c = mongo_server.api
with patch.object(c, 'admin') as admin:
with patch('pymongo.MongoClient', return_value=c) as mc:
run_as_main(mes.main, '--host', mongo_host, '--library', user_library_name)
assert mc.call_args_list == [call(get_mongodb_uri(mongo_host))]
assert len(admin.command.call_args_list) == 3
assert call('buildinfo', read_preference=Primary(), session=None) in admin.command.call_args_list or call('buildinfo', read_preference=Primary()) in admin.command.call_args_list
assert call('shardCollection', 'arctic_' + user_library_name, key={'symbol': 'hashed'}) in admin.command.call_args_list
assert call('enablesharding', 'arctic_' + getpass.getuser()) in admin.command.call_args_list
def test_enable_sharding_already_on_db(mongo_host, arctic, mongo_server, user_library, user_library_name):
c = mongo_server.api
with patch.object(c, 'admin') as admin:
admin.command = Mock(return_value=[OperationFailure("failed: already enabled"),
None])
with patch('pymongo.MongoClient', return_value=c) as mc:
run_as_main(mes.main, '--host', mongo_host, '--library', user_library_name)
assert mc.call_args_list == [call(get_mongodb_uri(mongo_host))]
assert len(admin.command.call_args_list) == 3
assert call('buildinfo', read_preference=Primary(), session=None) in admin.command.call_args_list or call('buildinfo', read_preference=Primary()) in admin.command.call_args_list
assert call('shardCollection', 'arctic_' + user_library_name, key={'symbol': 'hashed'}) in admin.command.call_args_list
assert call('enablesharding', 'arctic_' + getpass.getuser()) in admin.command.call_args_list
def test_enable_sharding_on_db_other_failure(mongo_host, arctic, mongo_server, user_library, user_library_name):
    # Create the user against the current mongo database
c = mongo_server.api
with pytest.raises(OperationFailure):
with patch.object(c, 'admin') as admin:
with patch('pymongo.MongoClient', return_value=c):
admin.command = Mock(side_effect=OperationFailure('OOPS'))
run_as_main(mes.main, '--host', mongo_host, '--library', user_library_name)
|
import sys
import code
from typing import MutableSequence
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt
from PyQt5.QtWidgets import QTextEdit, QWidget, QVBoxLayout, QApplication
from PyQt5.QtGui import QTextCursor
from qutebrowser.config import stylesheet
from qutebrowser.misc import cmdhistory, miscwidgets
from qutebrowser.utils import utils, objreg
console_widget = None
class ConsoleLineEdit(miscwidgets.CommandLineEdit):
"""A QLineEdit which executes entered code and provides a history.
Attributes:
_history: The command history of executed commands.
Signals:
execute: Emitted when a commandline should be executed.
"""
execute = pyqtSignal(str)
def __init__(self, _namespace, parent):
"""Constructor.
Args:
_namespace: The local namespace of the interpreter.
"""
super().__init__(parent=parent)
self._history = cmdhistory.History(parent=self)
self.returnPressed.connect(self.on_return_pressed)
@pyqtSlot()
def on_return_pressed(self):
"""Execute the line of code which was entered."""
self._history.stop()
text = self.text()
if text:
self._history.append(text)
self.execute.emit(text)
self.setText('')
def history_prev(self):
"""Go back in the history."""
try:
if not self._history.is_browsing():
item = self._history.start(self.text().strip())
else:
item = self._history.previtem()
except (cmdhistory.HistoryEmptyError,
cmdhistory.HistoryEndReachedError):
return
self.setText(item)
def history_next(self):
"""Go forward in the history."""
if not self._history.is_browsing():
return
try:
item = self._history.nextitem()
except cmdhistory.HistoryEndReachedError:
return
self.setText(item)
def keyPressEvent(self, e):
"""Override keyPressEvent to handle special keypresses."""
if e.key() == Qt.Key_Up:
self.history_prev()
e.accept()
elif e.key() == Qt.Key_Down:
self.history_next()
e.accept()
elif e.modifiers() & Qt.ControlModifier and e.key() == Qt.Key_C:
self.setText('')
e.accept()
else:
super().keyPressEvent(e)
class ConsoleTextEdit(QTextEdit):
"""Custom QTextEdit for console output."""
def __init__(self, parent=None):
super().__init__(parent)
self.setAcceptRichText(False)
self.setReadOnly(True)
self.setFocusPolicy(Qt.ClickFocus)
def __repr__(self):
return utils.get_repr(self)
def append_text(self, text):
"""Append new text and scroll output to bottom.
We can't use Qt's way to append stuff because that inserts weird
newlines.
"""
self.moveCursor(QTextCursor.End)
self.insertPlainText(text)
scrollbar = self.verticalScrollBar()
scrollbar.setValue(scrollbar.maximum())
class ConsoleWidget(QWidget):
"""A widget with an interactive Python console.
Attributes:
_lineedit: The line edit in the console.
_output: The output widget in the console.
_vbox: The layout which contains everything.
_more: A flag which is set when more input is expected.
_buffer: The buffer for multi-line commands.
_interpreter: The InteractiveInterpreter to execute code with.
"""
STYLESHEET = """
ConsoleWidget > ConsoleTextEdit, ConsoleWidget > ConsoleLineEdit {
font: {{ conf.fonts.debug_console }};
}
"""
def __init__(self, parent=None):
super().__init__(parent)
if not hasattr(sys, 'ps1'):
sys.ps1 = '>>> '
if not hasattr(sys, 'ps2'):
sys.ps2 = '... '
namespace = {
'__name__': '__console__',
'__doc__': None,
'q_app': QApplication.instance(),
# We use parent as self here because the user "feels" the whole
# console, not just the line edit.
'self': parent,
'objreg': objreg,
}
self._more = False
self._buffer: MutableSequence[str] = []
self._lineedit = ConsoleLineEdit(namespace, self)
self._lineedit.execute.connect(self.push)
self._output = ConsoleTextEdit()
self.write(self._curprompt())
self._vbox = QVBoxLayout()
self._vbox.setSpacing(0)
self._vbox.addWidget(self._output)
self._vbox.addWidget(self._lineedit)
stylesheet.set_register(self)
self.setLayout(self._vbox)
self._lineedit.setFocus()
self._interpreter = code.InteractiveInterpreter(namespace)
def __repr__(self):
return utils.get_repr(self, visible=self.isVisible())
def write(self, line):
"""Write a line of text (without added newline) to the output."""
self._output.append_text(line)
@pyqtSlot(str)
def push(self, line):
"""Push a line to the interpreter."""
self._buffer.append(line)
source = '\n'.join(self._buffer)
self.write(line + '\n')
# We do two special things with the context managers here:
# - We replace stdout/stderr to capture output. Even if we could
# override InteractiveInterpreter's write method, most things are
# printed elsewhere (e.g. by exec). Other Python GUI shells do the
# same.
# - We disable our exception hook, so exceptions from the console get
# printed and don't open a crashdialog.
with utils.fake_io(self.write), utils.disabled_excepthook():
self._more = self._interpreter.runsource(source, '<console>')
self.write(self._curprompt())
if not self._more:
self._buffer = []
def _curprompt(self):
"""Get the prompt which is visible currently."""
return sys.ps2 if self._more else sys.ps1
def init():
"""Initialize a global console."""
global console_widget
console_widget = ConsoleWidget()
|
from yeelight import BulbType
from homeassistant.components.yeelight import (
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
)
from homeassistant.const import CONF_DEVICES, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
from . import (
CONFIG_ENTRY_DATA,
ENTITY_AMBILIGHT,
ENTITY_BINARY_SENSOR,
ENTITY_LIGHT,
ENTITY_NIGHTLIGHT,
ID,
IP_ADDRESS,
MODULE,
MODULE_CONFIG_FLOW,
_mocked_bulb,
_patch_discovery,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_setup_discovery(hass: HomeAssistant):
"""Test setting up Yeelight by discovery."""
config_entry = MockConfigEntry(domain=DOMAIN, data=CONFIG_ENTRY_DATA)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(MODULE), patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is not None
assert hass.states.get(ENTITY_LIGHT) is not None
# Unload
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert hass.states.get(ENTITY_BINARY_SENSOR) is None
assert hass.states.get(ENTITY_LIGHT) is None
async def test_setup_import(hass: HomeAssistant):
"""Test import from yaml."""
mocked_bulb = _mocked_bulb()
name = "yeelight"
with patch(f"{MODULE}.Bulb", return_value=mocked_bulb), patch(
f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb
):
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DEVICES: {
IP_ADDRESS: {
CONF_NAME: name,
CONF_NIGHTLIGHT_SWITCH_TYPE: NIGHTLIGHT_SWITCH_TYPE_LIGHT,
}
}
}
},
)
await hass.async_block_till_done()
assert hass.states.get(f"binary_sensor.{name}_nightlight") is not None
assert hass.states.get(f"light.{name}") is not None
assert hass.states.get(f"light.{name}_nightlight") is not None
async def test_unique_ids_device(hass: HomeAssistant):
"""Test Yeelight unique IDs from yeelight device IDs."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
**CONFIG_ENTRY_DATA,
CONF_NIGHTLIGHT_SWITCH: True,
},
unique_id=ID,
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
mocked_bulb.bulb_type = BulbType.WhiteTempMood
with _patch_discovery(MODULE), patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
er = await entity_registry.async_get_registry(hass)
assert er.async_get(ENTITY_BINARY_SENSOR).unique_id == ID
assert er.async_get(ENTITY_LIGHT).unique_id == ID
assert er.async_get(ENTITY_NIGHTLIGHT).unique_id == f"{ID}-nightlight"
assert er.async_get(ENTITY_AMBILIGHT).unique_id == f"{ID}-ambilight"
async def test_unique_ids_entry(hass: HomeAssistant):
"""Test Yeelight unique IDs from entry IDs."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
**CONFIG_ENTRY_DATA,
CONF_NIGHTLIGHT_SWITCH: True,
},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
mocked_bulb.bulb_type = BulbType.WhiteTempMood
with _patch_discovery(MODULE), patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
er = await entity_registry.async_get_registry(hass)
assert er.async_get(ENTITY_BINARY_SENSOR).unique_id == config_entry.entry_id
assert er.async_get(ENTITY_LIGHT).unique_id == config_entry.entry_id
assert (
er.async_get(ENTITY_NIGHTLIGHT).unique_id
== f"{config_entry.entry_id}-nightlight"
)
assert (
er.async_get(ENTITY_AMBILIGHT).unique_id == f"{config_entry.entry_id}-ambilight"
)
|
import logging
from httpcore import ConnectError
import voluptuous as vol
from wolf_smartset.token_auth import InvalidAuth
from wolf_smartset.wolf_client import WolfClient
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import ( # pylint:disable=unused-import
DEVICE_GATEWAY,
DEVICE_ID,
DEVICE_NAME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Wolf SmartSet Service."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize with empty username and password."""
self.username = None
self.password = None
self.fetched_systems = None
async def async_step_user(self, user_input=None):
"""Handle the initial step to get connection parameters."""
errors = {}
if user_input is not None:
wolf_client = WolfClient(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
)
try:
self.fetched_systems = await wolf_client.fetch_system_list()
except ConnectError:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
self.username = user_input[CONF_USERNAME]
self.password = user_input[CONF_PASSWORD]
return await self.async_step_device()
return self.async_show_form(
step_id="user", data_schema=USER_SCHEMA, errors=errors
)
async def async_step_device(self, user_input=None):
"""Allow user to select device from devices connected to specified account."""
errors = {}
if user_input is not None:
device_name = user_input[DEVICE_NAME]
system = [
device for device in self.fetched_systems if device.name == device_name
]
device_id = system[0].id
await self.async_set_unique_id(device_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=user_input[DEVICE_NAME],
data={
CONF_USERNAME: self.username,
CONF_PASSWORD: self.password,
DEVICE_NAME: device_name,
DEVICE_GATEWAY: system[0].gateway,
DEVICE_ID: device_id,
},
)
data_schema = vol.Schema(
{
vol.Required(DEVICE_NAME): vol.In(
[info.name for info in self.fetched_systems]
)
}
)
return self.async_show_form(
step_id="device", data_schema=data_schema, errors=errors
)
|
import os
import abc
from molecule import util
from molecule.verifier.lint import flake8
from molecule.verifier.lint import rubocop
from molecule.verifier.lint import yamllint
class Base(object):
__metaclass__ = abc.ABCMeta
def __init__(self, config):
"""
Base initializer for all :ref:`Verifier` classes.
:param config: An instance of a Molecule config.
:returns: None
"""
self._config = config
@abc.abstractproperty
def name(self): # pragma: no cover
"""
        Name of the verifier.
:returns: str
"""
pass
@abc.abstractproperty
def default_options(self): # pragma: no cover
"""
        Default CLI arguments provided to ``cmd``.
:return: dict
"""
pass
@abc.abstractproperty
def default_env(self): # pragma: no cover
"""
        Default env variables provided to ``cmd``.
:return: dict
"""
pass
@abc.abstractmethod
def execute(self): # pragma: no cover
"""
        Execute ``cmd``.
:return: None
"""
pass
@property
def enabled(self):
return self._config.config['verifier']['enabled']
@property
def directory(self):
return os.path.join(self._config.scenario.directory,
self._config.config['verifier']['directory'])
@property
def options(self):
return util.merge_dicts(self.default_options,
self._config.config['verifier']['options'])
@property
def env(self):
return util.merge_dicts(self.default_env,
self._config.config['verifier']['env'])
@property
@util.memoize
def lint(self):
lint_name = self._config.config['verifier']['lint']['name']
if lint_name == 'flake8':
return flake8.Flake8(self._config)
if lint_name == 'rubocop':
return rubocop.RuboCop(self._config)
if lint_name == 'yamllint':
return yamllint.Yamllint(self._config)
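# The properties above assume a molecule config containing, roughly, a section
# shaped like the following (illustrative values only):
#
#     verifier:
#       name: testinfra
#       enabled: True
#       directory: tests
#       options: {}
#       env: {}
#       lint:
#         name: flake8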
|
import asyncio
from collections import namedtuple
import ctypes
import logging
import struct
import threading
import async_timeout
import pyads
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICE,
CONF_IP_ADDRESS,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DATA_ADS = "data_ads"
# Supported Types
ADSTYPE_BOOL = "bool"
ADSTYPE_BYTE = "byte"
ADSTYPE_DINT = "dint"
ADSTYPE_INT = "int"
ADSTYPE_UDINT = "udint"
ADSTYPE_UINT = "uint"
CONF_ADS_FACTOR = "factor"
CONF_ADS_TYPE = "adstype"
CONF_ADS_VALUE = "value"
CONF_ADS_VAR = "adsvar"
CONF_ADS_VAR_BRIGHTNESS = "adsvar_brightness"
CONF_ADS_VAR_POSITION = "adsvar_position"
STATE_KEY_STATE = "state"
STATE_KEY_BRIGHTNESS = "brightness"
STATE_KEY_POSITION = "position"
DOMAIN = "ads"
SERVICE_WRITE_DATA_BY_NAME = "write_data_by_name"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DEVICE): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SCHEMA_SERVICE_WRITE_DATA_BY_NAME = vol.Schema(
{
vol.Required(CONF_ADS_TYPE): vol.In(
[
ADSTYPE_INT,
ADSTYPE_UINT,
ADSTYPE_BYTE,
ADSTYPE_BOOL,
ADSTYPE_DINT,
ADSTYPE_UDINT,
]
),
vol.Required(CONF_ADS_VALUE): vol.Coerce(int),
vol.Required(CONF_ADS_VAR): cv.string,
}
)
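# Example service-call data accepted by the schema above (variable name and
# value are illustrative):
#     {"adsvar": "GVL.target_temperature", "adstype": "int", "value": 21}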
def setup(hass, config):
"""Set up the ADS component."""
conf = config[DOMAIN]
net_id = conf[CONF_DEVICE]
ip_address = conf.get(CONF_IP_ADDRESS)
port = conf[CONF_PORT]
client = pyads.Connection(net_id, port, ip_address)
AdsHub.ADS_TYPEMAP = {
ADSTYPE_BOOL: pyads.PLCTYPE_BOOL,
ADSTYPE_BYTE: pyads.PLCTYPE_BYTE,
ADSTYPE_DINT: pyads.PLCTYPE_DINT,
ADSTYPE_INT: pyads.PLCTYPE_INT,
ADSTYPE_UDINT: pyads.PLCTYPE_UDINT,
ADSTYPE_UINT: pyads.PLCTYPE_UINT,
}
AdsHub.ADSError = pyads.ADSError
AdsHub.PLCTYPE_BOOL = pyads.PLCTYPE_BOOL
AdsHub.PLCTYPE_BYTE = pyads.PLCTYPE_BYTE
AdsHub.PLCTYPE_DINT = pyads.PLCTYPE_DINT
AdsHub.PLCTYPE_INT = pyads.PLCTYPE_INT
AdsHub.PLCTYPE_UDINT = pyads.PLCTYPE_UDINT
AdsHub.PLCTYPE_UINT = pyads.PLCTYPE_UINT
try:
ads = AdsHub(client)
except pyads.ADSError:
_LOGGER.error(
"Could not connect to ADS host (netid=%s, ip=%s, port=%s)",
net_id,
ip_address,
port,
)
return False
hass.data[DATA_ADS] = ads
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, ads.shutdown)
def handle_write_data_by_name(call):
"""Write a value to the connected ADS device."""
ads_var = call.data.get(CONF_ADS_VAR)
ads_type = call.data.get(CONF_ADS_TYPE)
value = call.data.get(CONF_ADS_VALUE)
try:
ads.write_by_name(ads_var, value, ads.ADS_TYPEMAP[ads_type])
except pyads.ADSError as err:
_LOGGER.error(err)
hass.services.register(
DOMAIN,
SERVICE_WRITE_DATA_BY_NAME,
handle_write_data_by_name,
schema=SCHEMA_SERVICE_WRITE_DATA_BY_NAME,
)
return True
# Tuple to hold data needed for notification
NotificationItem = namedtuple(
"NotificationItem", "hnotify huser name plc_datatype callback"
)
class AdsHub:
"""Representation of an ADS connection."""
def __init__(self, ads_client):
"""Initialize the ADS hub."""
self._client = ads_client
self._client.open()
# All ADS devices are registered here
self._devices = []
self._notification_items = {}
self._lock = threading.Lock()
def shutdown(self, *args, **kwargs):
"""Shutdown ADS connection."""
_LOGGER.debug("Shutting down ADS")
for notification_item in self._notification_items.values():
_LOGGER.debug(
"Deleting device notification %d, %d",
notification_item.hnotify,
notification_item.huser,
)
try:
self._client.del_device_notification(
notification_item.hnotify, notification_item.huser
)
except pyads.ADSError as err:
_LOGGER.error(err)
try:
self._client.close()
except pyads.ADSError as err:
_LOGGER.error(err)
def register_device(self, device):
"""Register a new device."""
self._devices.append(device)
def write_by_name(self, name, value, plc_datatype):
"""Write a value to the device."""
with self._lock:
try:
return self._client.write_by_name(name, value, plc_datatype)
except pyads.ADSError as err:
_LOGGER.error("Error writing %s: %s", name, err)
def read_by_name(self, name, plc_datatype):
"""Read a value from the device."""
with self._lock:
try:
return self._client.read_by_name(name, plc_datatype)
except pyads.ADSError as err:
_LOGGER.error("Error reading %s: %s", name, err)
def add_device_notification(self, name, plc_datatype, callback):
"""Add a notification to the ADS devices."""
attr = pyads.NotificationAttrib(ctypes.sizeof(plc_datatype))
with self._lock:
try:
hnotify, huser = self._client.add_device_notification(
name, attr, self._device_notification_callback
)
except pyads.ADSError as err:
_LOGGER.error("Error subscribing to %s: %s", name, err)
else:
hnotify = int(hnotify)
self._notification_items[hnotify] = NotificationItem(
hnotify, huser, name, plc_datatype, callback
)
_LOGGER.debug(
"Added device notification %d for variable %s", hnotify, name
)
def _device_notification_callback(self, notification, name):
"""Handle device notifications."""
contents = notification.contents
hnotify = int(contents.hNotification)
_LOGGER.debug("Received notification %d", hnotify)
# get dynamically sized data array
data_size = contents.cbSampleSize
data = (ctypes.c_ubyte * data_size).from_address(
ctypes.addressof(contents)
+ pyads.structs.SAdsNotificationHeader.data.offset
)
try:
with self._lock:
notification_item = self._notification_items[hnotify]
except KeyError:
_LOGGER.error("Unknown device notification handle: %d", hnotify)
return
# Parse data to desired datatype
if notification_item.plc_datatype == self.PLCTYPE_BOOL:
value = bool(struct.unpack("<?", bytearray(data))[0])
elif notification_item.plc_datatype == self.PLCTYPE_INT:
value = struct.unpack("<h", bytearray(data))[0]
elif notification_item.plc_datatype == self.PLCTYPE_BYTE:
value = struct.unpack("<B", bytearray(data))[0]
elif notification_item.plc_datatype == self.PLCTYPE_UINT:
value = struct.unpack("<H", bytearray(data))[0]
elif notification_item.plc_datatype == self.PLCTYPE_DINT:
value = struct.unpack("<i", bytearray(data))[0]
elif notification_item.plc_datatype == self.PLCTYPE_UDINT:
value = struct.unpack("<I", bytearray(data))[0]
else:
value = bytearray(data)
_LOGGER.warning("No callback available for this datatype")
notification_item.callback(notification_item.name, value)
class AdsEntity(Entity):
"""Representation of ADS entity."""
def __init__(self, ads_hub, name, ads_var):
"""Initialize ADS binary sensor."""
self._name = name
self._unique_id = ads_var
self._state_dict = {}
self._state_dict[STATE_KEY_STATE] = None
self._ads_hub = ads_hub
self._ads_var = ads_var
self._event = None
async def async_initialize_device(
self, ads_var, plctype, state_key=STATE_KEY_STATE, factor=None
):
"""Register device notification."""
def update(name, value):
"""Handle device notifications."""
_LOGGER.debug("Variable %s changed its value to %d", name, value)
if factor is None:
self._state_dict[state_key] = value
else:
self._state_dict[state_key] = value / factor
asyncio.run_coroutine_threadsafe(async_event_set(), self.hass.loop)
self.schedule_update_ha_state()
async def async_event_set():
"""Set event in async context."""
self._event.set()
self._event = asyncio.Event()
await self.hass.async_add_executor_job(
self._ads_hub.add_device_notification, ads_var, plctype, update
)
try:
with async_timeout.timeout(10):
await self._event.wait()
except asyncio.TimeoutError:
_LOGGER.debug("Variable %s: Timeout during first update", ads_var)
@property
def name(self):
"""Return the default name of the binary sensor."""
return self._name
@property
def unique_id(self):
"""Return an unique identifier for this entity."""
return self._unique_id
@property
def should_poll(self):
"""Return False because entity pushes its state to HA."""
return False
@property
def available(self):
"""Return False if state has not been updated yet."""
return self._state_dict[STATE_KEY_STATE] is not None
|
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.netatmo import config_flow
from homeassistant.components.netatmo.const import (
CONF_NEW_AREA,
CONF_WEATHER_AREAS,
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.async_mock import patch
from tests.common import MockConfigEntry
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
VALID_CONFIG = {}
async def test_abort_if_existing_entry(hass):
"""Check flow abort when an entry already exist."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
flow = config_flow.NetatmoFlowHandler()
flow.hass = hass
result = await hass.config_entries.flow.async_init(
"netatmo", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
result = await hass.config_entries.flow.async_init(
"netatmo",
context={"source": "homekit"},
data={"host": "0.0.0.0", "properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_full_flow(hass, aiohttp_client, aioclient_mock, current_request):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
"netatmo",
{
"netatmo": {CONF_CLIENT_ID: CLIENT_ID, CONF_CLIENT_SECRET: CLIENT_SECRET},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
"netatmo", context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
scope = "+".join(
[
"access_camera",
"access_presence",
"read_camera",
"read_homecoach",
"read_presence",
"read_smokedetector",
"read_station",
"read_thermostat",
"write_camera",
"write_presence",
"write_thermostat",
]
)
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}&scope={scope}"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.netatmo.async_setup_entry", return_value=True
) as mock_setup:
await hass.config_entries.flow.async_configure(result["flow_id"])
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
async def test_option_flow(hass):
"""Test config flow options."""
valid_option = {
"lat_ne": 32.91336,
"lon_ne": -117.187429,
"lat_sw": 32.83336,
"lon_sw": -117.26743,
"show_on_map": False,
"area_name": "Home",
"mode": "avg",
}
expected_result = {
"lat_ne": 32.9133601,
"lon_ne": -117.1874289,
"lat_sw": 32.8333601,
"lon_sw": -117.26742990000001,
"show_on_map": False,
"area_name": "Home",
"mode": "avg",
}
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=DOMAIN,
data=VALID_CONFIG,
options={},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "public_weather_areas"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_NEW_AREA: "Home"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "public_weather"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input=valid_option
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "public_weather_areas"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
for k, v in expected_result.items():
assert config_entry.options[CONF_WEATHER_AREAS]["Home"][k] == v
async def test_option_flow_wrong_coordinates(hass):
"""Test config flow options with mixed up coordinates."""
valid_option = {
"lat_ne": 32.1234567,
"lon_ne": -117.2345678,
"lat_sw": 32.2345678,
"lon_sw": -117.1234567,
"show_on_map": False,
"area_name": "Home",
"mode": "avg",
}
expected_result = {
"lat_ne": 32.2345678,
"lon_ne": -117.1234567,
"lat_sw": 32.1234567,
"lon_sw": -117.2345678,
"show_on_map": False,
"area_name": "Home",
"mode": "avg",
}
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=DOMAIN,
data=VALID_CONFIG,
options={},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "public_weather_areas"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_NEW_AREA: "Home"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "public_weather"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input=valid_option
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "public_weather_areas"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
for k, v in expected_result.items():
assert config_entry.options[CONF_WEATHER_AREAS]["Home"][k] == v
|
import configparser
import os
import pytest
from twtxt.config import Config
from twtxt.models import Source
@pytest.fixture(scope="session")
def config_dir(tmpdir_factory):
cfg = configparser.ConfigParser()
cfg.add_section("twtxt")
cfg.set("twtxt", "nick", "foo")
cfg.set("twtxt", "twtfile", "~/foo.txt")
cfg.set("twtxt", "twturl", "https://axample.org/twtxt.txt")
cfg.set("twtxt", "check_following", "False")
cfg.set("twtxt", "use_pager", "True")
cfg.set("twtxt", "use_cache", "False")
cfg.set("twtxt", "porcelain", "True")
cfg.set("twtxt", "character_limit", "150")
cfg.set("twtxt", "character_warning", "150")
cfg.set("twtxt", "disclose_identity", "True")
cfg.set("twtxt", "limit_timeline", "50")
cfg.set("twtxt", "timeline_update_interval", "20")
cfg.set("twtxt", "timeout", "1.0")
cfg.set("twtxt", "sorting", "ascending")
cfg.set("twtxt", "post_tweet_hook", "echo {twtfile")
cfg.set("twtxt", "pre_tweet_hook", "echo {twtfile")
cfg.add_section("following")
cfg.set("following", "foo", "https://example.org/foo.twtxt")
config_dir = tmpdir_factory.mktemp("config")
Config.config_dir = str(config_dir)
with open(str(config_dir.join(Config.config_name)), "w") as config_file:
cfg.write(config_file)
# Manually create an invalid config file.
with open(str(config_dir.join("config_sanity")), "w") as config_file:
config_file.write("[twtxt]\n")
config_file.write("nick = altoyr\n")
config_file.write("twtfile = ~/twtxt.txt\n")
config_file.write("check_following = TTrue\n")
config_file.write("use_pager = Falste\n")
config_file.write("use_cache = Trute\n")
config_file.write("porcelain = Faltse\n")
config_file.write("disclose_identity = Ftalse\n")
config_file.write("limit_timeline = 2t0\n")
config_file.write("timeout = 5t.0\n")
config_file.write("sorting = destcending\n")
config_file.write("[following]\n")
config_file.write("twtxt = https://buckket.org/twtxt_news.txt\n")
return config_dir
def test_defaults():
empty_cfg = configparser.ConfigParser()
empty_conf = Config("foobar", empty_cfg)
assert empty_conf.nick == os.environ.get("USER", "")
assert empty_conf.twtfile == "twtxt.txt"
assert empty_conf.twturl is None
assert empty_conf.check_following is True
assert empty_conf.use_pager is False
assert empty_conf.use_cache is True
assert empty_conf.porcelain is False
assert empty_conf.character_limit is None
assert empty_conf.character_warning is None
assert empty_conf.disclose_identity is False
assert empty_conf.limit_timeline == 20
assert empty_conf.timeline_update_interval == 10
assert empty_conf.timeout == 5.0
assert empty_conf.sorting == "descending"
assert empty_conf.post_tweet_hook is None
assert empty_conf.pre_tweet_hook is None
def check_cfg(cfg):
assert cfg.nick == "foo"
assert cfg.twtfile == os.path.expanduser("~/foo.txt")
assert cfg.twturl == "https://axample.org/twtxt.txt"
assert cfg.check_following is False
assert cfg.use_pager is True
assert cfg.use_cache is False
assert cfg.porcelain is True
assert cfg.character_limit == 150
assert cfg.character_warning == 150
assert cfg.disclose_identity is True
assert cfg.limit_timeline == 50
assert cfg.timeline_update_interval == 20
assert cfg.timeout == 1.0
assert cfg.sorting == "ascending"
assert cfg.post_tweet_hook == "echo {twtfile"
assert cfg.pre_tweet_hook == "echo {twtfile"
    assert cfg.check_config_sanity() is True
def test_from_file(config_dir):
with pytest.raises(ValueError) as e:
Config.from_file("invalid")
assert "Config file not found." in str(e.value)
with open(str(config_dir.join("empty")), "a") as fh:
fh.write("XXX")
with pytest.raises(ValueError) as e:
Config.from_file(str(config_dir.join("empty")))
assert "Config file is invalid." in str(e.value)
conf = Config.from_file(str(config_dir.join(Config.config_name)))
check_cfg(conf)
def test_discover():
conf = Config.discover()
check_cfg(conf)
def test_create_config(config_dir):
config_dir_old = Config.config_dir
Config.config_dir = str(config_dir.join("new"))
conf_w = Config.create_config(os.path.join(Config.config_dir, Config.config_name),
"bar", "batz.txt", "https://example.org", False, True)
conf_r = Config.discover()
assert conf_r.nick == "bar"
assert conf_r.twtfile == "batz.txt"
assert conf_r.twturl == "https://example.org"
assert conf_r.character_limit == 140
assert conf_r.character_warning == 140
assert conf_r.following[0].nick == "twtxt"
assert conf_r.following[0].url == "https://buckket.org/twtxt_news.txt"
assert set(conf_r.options.keys()) == {"nick", "twtfile", "twturl", "disclose_identity", "character_limit",
"character_warning"}
conf_r.cfg.remove_section("twtxt")
assert conf_r.options == {}
conf_r.cfg.remove_section("following")
assert conf_r.following == []
Config.config_dir = config_dir_old
def test_add_get_remove_source():
conf = Config.discover()
conf.cfg.remove_section("following")
assert conf.remove_source_by_nick("foo") is False
assert conf.get_source_by_nick("baz") is None
conf.add_source(Source("foo", "bar"))
source = conf.get_source_by_nick("foo")
assert source.nick == "foo"
assert source.url == "bar"
assert conf.remove_source_by_nick("baz") is False
assert conf.remove_source_by_nick("foo") is True
assert conf.following == []
def test_build_default_map():
empty_cfg = configparser.ConfigParser()
empty_conf = Config("foobar", empty_cfg)
default_map = {
"following": {
"check": empty_conf.check_following,
"timeout": empty_conf.timeout,
"porcelain": empty_conf.porcelain,
},
"tweet": {
"twtfile": empty_conf.twtfile,
},
"timeline": {
"pager": empty_conf.use_pager,
"cache": empty_conf.use_cache,
"limit": empty_conf.limit_timeline,
"timeout": empty_conf.timeout,
"sorting": empty_conf.sorting,
"porcelain": empty_conf.porcelain,
"twtfile": empty_conf.twtfile,
"update_interval": empty_conf.timeline_update_interval,
},
"view": {
"pager": empty_conf.use_pager,
"cache": empty_conf.use_cache,
"limit": empty_conf.limit_timeline,
"timeout": empty_conf.timeout,
"sorting": empty_conf.sorting,
"porcelain": empty_conf.porcelain,
"update_interval": empty_conf.timeline_update_interval,
}
}
assert empty_conf.build_default_map() == default_map
def test_check_config_file_sanity(capsys, config_dir):
with pytest.raises(ValueError) as e:
Config.from_file(str(config_dir.join("config_sanity")))
assert "Error in config file." in str(e.value)
out, err = capsys.readouterr()
for line in ["✗ Config error on limit_timeline - invalid literal for int() with base 10: '2t0'",
"✗ Config error on check_following - Not a boolean: TTrue",
"✗ Config error on porcelain - Not a boolean: Faltse",
"✗ Config error on disclose_identity - Not a boolean: Ftalse",
"✗ Config error on timeout - could not convert string to float: '5t.0'",
"✗ Config error on use_pager - Not a boolean: Falste",
"✗ Config error on use_cache - Not a boolean: Trute"]:
assert line in out
|
import asyncio
from bravia_tv import BraviaRC
from homeassistant.const import CONF_HOST, CONF_MAC
from .const import BRAVIARC, DOMAIN, UNDO_UPDATE_LISTENER
PLATFORMS = ["media_player"]
async def async_setup(hass, config):
"""Set up the Bravia TV component."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up a config entry."""
host = config_entry.data[CONF_HOST]
mac = config_entry.data[CONF_MAC]
undo_listener = config_entry.add_update_listener(update_listener)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = {
BRAVIARC: BraviaRC(host, mac),
UNDO_UPDATE_LISTENER: undo_listener,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in PLATFORMS
]
)
)
hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
|
from collections import deque
__all__ = ('DummyLock', 'LaxBoundedSemaphore')
class LaxBoundedSemaphore:
"""Asynchronous Bounded Semaphore.
Lax means that the value will stay within the specified
range even if released more times than it was acquired.
Example:
        >>> x = LaxBoundedSemaphore(2)
        >>> x.acquire(print, 'HELLO 1')
        HELLO 1
        >>> x.acquire(print, 'HELLO 2')
        HELLO 2
        >>> x.acquire(print, 'HELLO 3')
        >>> x._waiting  # private, do not access directly
        [(print, ('HELLO 3',), {})]
>>> x.release()
HELLO 3
"""
def __init__(self, value):
self.initial_value = self.value = value
self._waiting = deque()
self._add_waiter = self._waiting.append
self._pop_waiter = self._waiting.popleft
def acquire(self, callback, *partial_args, **partial_kwargs):
"""Acquire semaphore.
This will immediately apply ``callback`` if
the resource is available, otherwise the callback is suspended
until the semaphore is released.
Arguments:
callback (Callable): The callback to apply.
*partial_args (Any): partial arguments to callback.
"""
value = self.value
if value <= 0:
self._add_waiter((callback, partial_args, partial_kwargs))
return False
else:
self.value = max(value - 1, 0)
callback(*partial_args, **partial_kwargs)
return True
def release(self):
"""Release semaphore.
Note:
If there are any waiters this will apply the first waiter
that is waiting for the resource (FIFO order).
"""
try:
waiter, args, kwargs = self._pop_waiter()
except IndexError:
self.value = min(self.value + 1, self.initial_value)
else:
waiter(*args, **kwargs)
def grow(self, n=1):
"""Change the size of the semaphore to accept more users."""
self.initial_value += n
self.value += n
        for _ in range(n):
            self.release()
def shrink(self, n=1):
"""Change the size of the semaphore to accept less users."""
self.initial_value = max(self.initial_value - n, 0)
self.value = max(self.value - n, 0)
def clear(self):
"""Reset the semaphore, which also wipes out any waiting callbacks."""
self._waiting.clear()
self.value = self.initial_value
def __repr__(self):
return '<{} at {:#x} value:{} waiting:{}>'.format(
self.__class__.__name__, id(self), self.value, len(self._waiting),
)
class DummyLock:
"""Pretending to be a lock."""
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
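# Minimal usage sketch (illustrative, not part of the original module): callbacks
# are applied immediately while capacity remains and are queued FIFO otherwise;
# release() hands the freed slot to the oldest waiter.
if __name__ == '__main__':
    sem = LaxBoundedSemaphore(1)
    sem.acquire(print, 'first')   # capacity available -> prints 'first'
    sem.acquire(print, 'second')  # queued, nothing printed yet
    sem.release()                 # applies the queued callback -> prints 'second'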
|
import argparse
import numpy as np
import re
from chainer import Link
import chainer.links.caffe.caffe_function as caffe
from chainer import serializers
from chainercv.links.model.ssd import Normalize
def rename(name):
m = re.match(r'conv(\d+)_([123])$', name)
if m:
i, j = map(int, m.groups())
if i >= 6:
i += 2
return 'extractor/conv{:d}_{:d}'.format(i, j)
m = re.match(r'fc([67])$', name)
if m:
return 'extractor/conv{:d}'.format(int(m.group(1)))
if name == r'conv4_3_norm':
return 'extractor/norm4'
m = re.match(r'conv4_3_norm_mbox_(loc|conf)$', name)
if m:
return 'multibox/{:s}/0'.format(m.group(1))
m = re.match(r'fc7_mbox_(loc|conf)$', name)
if m:
return ('multibox/{:s}/1'.format(m.group(1)))
m = re.match(r'conv(\d+)_2_mbox_(loc|conf)$', name)
if m:
i, type_ = int(m.group(1)), m.group(2)
if i >= 6:
return 'multibox/{:s}/{:d}'.format(type_, i - 4)
return name
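# A few illustrative mappings produced by rename(), derived from the rules above:
#     'conv4_3'           -> 'extractor/conv4_3'
#     'conv7_1'           -> 'extractor/conv9_1'
#     'fc7'               -> 'extractor/conv7'
#     'conv4_3_norm'      -> 'extractor/norm4'
#     'fc7_mbox_loc'      -> 'multibox/loc/1'
#     'conv8_2_mbox_conf' -> 'multibox/conf/4'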
class SSDCaffeFunction(caffe.CaffeFunction):
def __init__(self, model_path):
print('loading weights from {:s} ... '.format(model_path))
super(SSDCaffeFunction, self).__init__(model_path)
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, Link):
new_name = rename(name)
if new_name == 'extractor/conv1_1':
# BGR -> RGB
value.W.array[:, ::-1] = value.W.array
print('{:s} -> {:s} (BGR -> RGB)'.format(name, new_name))
elif new_name.startswith('multibox/loc/'):
# xy -> yx
for data in (value.W.array, value.b.array):
data = data.reshape((-1, 4) + data.shape[1:])
data[:, [1, 0, 3, 2]] = data.copy()
print('{:s} -> {:s} (xy -> yx)'.format(name, new_name))
else:
print('{:s} -> {:s}'.format(name, new_name))
else:
new_name = name
super(SSDCaffeFunction, self).__setattr__(new_name, value)
@caffe._layer('Normalize', None)
def _setup_normalize(self, layer):
blobs = layer.blobs
func = Normalize(caffe._get_num(blobs[0]))
func.scale.array[:] = np.array(blobs[0].data)
with self.init_scope():
setattr(self, layer.name, func)
@caffe._layer('AnnotatedData', None)
@caffe._layer('Flatten', None)
@caffe._layer('MultiBoxLoss', None)
@caffe._layer('Permute', None)
@caffe._layer('PriorBox', None)
def _skip_layer(self, _):
pass
def main():
parser = argparse.ArgumentParser()
parser.add_argument('caffemodel')
parser.add_argument('output')
args = parser.parse_args()
model = SSDCaffeFunction(args.caffemodel)
serializers.save_npz(args.output, model)
if __name__ == '__main__':
main()
|
def large_int_format(x):
    """Format a number as a short human-readable string ('k', 'mm', 'b')."""
    num = round_downer(x)
if 1000000000 <= num:
return str(num // 1000000000) + 'b'
elif 1000000 <= num < 1000000000:
return str(num // 1000000) + 'mm'
elif 1000 <= num < 1000000:
return str(num // 1000) + 'k'
else:
return str(num)
def round_downer(x):
    """Round x down to its most significant digit (e.g. 1234 -> 1000)."""
    power_of_ten = 10 ** (len(str(int(x))) - 1)
num = power_of_ten * (x // power_of_ten)
return num
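# Illustrative sanity checks (not part of the original module); the expected
# strings follow from round_downer truncating the input to its leading digit.
if __name__ == '__main__':
    assert large_int_format(1532000000) == '1b'
    assert large_int_format(7800000) == '7mm'
    assert large_int_format(45678) == '40k'
    assert large_int_format(999) == '900'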
|
import errno
import os
import re
import sys
import traceback
from setuptools import setup
INFO_PLIST_TEMPLATE = '''\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleIdentifier</key>
<string>%(name)s</string>
</dict>
</plist>
'''
def fix_virtualenv():
executable_dir = os.path.dirname(sys.executable)
try:
os.mkdir(os.path.join(executable_dir, 'Contents'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(os.path.join(executable_dir, 'Contents', 'Info.plist'), 'w') as f:
f.write(INFO_PLIST_TEMPLATE % {'name': 'rumps'})
with open('README.rst') as f:
readme = f.read()
with open('CHANGES.rst') as f:
changes = f.read()
with open('rumps/__init__.py') as f:
version = re.search(r'__version__ = \'(.*?)\'', f.read()).group(1)
setup(
name='rumps',
version=version,
description='Ridiculously Uncomplicated MacOS Python Statusbar apps.',
author='Jared Suttles',
url='https://github.com/jaredks/rumps',
packages=['rumps', 'rumps.packages'],
package_data={'': ['LICENSE']},
long_description=readme + '\n\n' + changes,
license='BSD License',
install_requires=[
'pyobjc-framework-Cocoa'
],
extras_require={
'dev': [
'pytest>=4.3',
'pytest-mock>=2.0.0',
'tox>=3.8'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: MacOS X',
'Environment :: MacOS X :: Cocoa',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Objective C',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
# if this looks like a virtualenv
if hasattr(sys, 'real_prefix'):
print('=' * 64)
print(
'\n'
'It looks like we are inside a virtualenv. Attempting to apply fix.\n'
)
try:
fix_virtualenv()
except Exception:
traceback.print_exc()
print(
'WARNING: Could not fix virtualenv. UI interaction likely will '
'not function properly.\n'
)
else:
print(
'Applied best-effort fix for virtualenv to support proper UI '
'interaction.\n'
)
print(
'Use of venv is suggested for creating virtual environments:'
'\n\n'
' python3 -m venv env'
'\n'
)
print('=' * 64)
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from filestat import FilestatCollector
##########################################################################
class TestFilestatCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('FilestatCollector', {
'interval': 10
})
self.collector = FilestatCollector(config, None)
def test_import(self):
self.assertTrue(FilestatCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_sys_fs_file_nr(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/sys/fs/file-nr')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
FilestatCollector.PROC = self.getFixturePath('proc_sys_fs_file-nr')
self.collector.collect()
metrics = {
'assigned': 576,
'unused': 0,
'max': 4835852
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import os
from urllib.request import urlopen
# Internal imports
import vcr
def test_nonexistent_directory(tmpdir, httpbin):
"""If we load a cassette in a nonexistent directory, it can save ok"""
# Check to make sure directory doesnt exist
assert not os.path.exists(str(tmpdir.join("nonexistent")))
# Run VCR to create dir and cassette file
with vcr.use_cassette(str(tmpdir.join("nonexistent", "cassette.yml"))):
urlopen(httpbin.url).read()
# This should have made the file and the directory
assert os.path.exists(str(tmpdir.join("nonexistent", "cassette.yml")))
def test_unpatch(tmpdir, httpbin):
"""Ensure that our cassette gets unpatched when we're done"""
with vcr.use_cassette(str(tmpdir.join("unpatch.yaml"))) as cass:
urlopen(httpbin.url).read()
# Make the same request, and assert that we haven't served any more
# requests out of cache
urlopen(httpbin.url).read()
assert cass.play_count == 0
def test_basic_json_use(tmpdir, httpbin):
"""
Ensure you can load a json serialized cassette
"""
test_fixture = str(tmpdir.join("synopsis.json"))
with vcr.use_cassette(test_fixture, serializer="json"):
response = urlopen(httpbin.url).read()
assert b"difficult sometimes" in response
def test_patched_content(tmpdir, httpbin):
"""
Ensure that what you pull from a cassette is what came from the
request
"""
with vcr.use_cassette(str(tmpdir.join("synopsis.yaml"))) as cass:
response = urlopen(httpbin.url).read()
assert cass.play_count == 0
with vcr.use_cassette(str(tmpdir.join("synopsis.yaml"))) as cass:
response2 = urlopen(httpbin.url).read()
assert cass.play_count == 1
cass._save(force=True)
with vcr.use_cassette(str(tmpdir.join("synopsis.yaml"))) as cass:
response3 = urlopen(httpbin.url).read()
assert cass.play_count == 1
assert response == response2
assert response2 == response3
def test_patched_content_json(tmpdir, httpbin):
"""
Ensure that what you pull from a json cassette is what came from the
request
"""
testfile = str(tmpdir.join("synopsis.json"))
with vcr.use_cassette(testfile) as cass:
response = urlopen(httpbin.url).read()
assert cass.play_count == 0
with vcr.use_cassette(testfile) as cass:
response2 = urlopen(httpbin.url).read()
assert cass.play_count == 1
cass._save(force=True)
with vcr.use_cassette(testfile) as cass:
response3 = urlopen(httpbin.url).read()
assert cass.play_count == 1
assert response == response2
assert response2 == response3
|
import pkgutil
from scattertext.viz.BasicHTMLFromScatterplotStructure import D3URLs, ExternalJSUtilts, PackedDataUtils
GRAPH_VIZ_FILE_NAME = 'graph_plot.html'
class GraphStructure(object):
def __init__(self,
scatterplot_structure,
graph_renderer,
scatterplot_width=500,
scatterplot_height=700,
d3_url_struct=None,
protocol='http',
template_file_name=GRAPH_VIZ_FILE_NAME):
        '''
Parameters
----------
scatterplot_structure: ScatterplotStructure
graph_renderer: GraphRenderer
scatterplot_width: int
scatterplot_height: int
d3_url_struct: D3URLs
protocol: str
http or https
template_file_name: file name to use as template
'''
self.graph_renderer = graph_renderer
self.scatterplot_structure = scatterplot_structure
self.d3_url_struct = d3_url_struct if d3_url_struct else D3URLs()
ExternalJSUtilts.ensure_valid_protocol(protocol)
self.protocol = protocol
self.scatterplot_width = scatterplot_width
self.scatterplot_height = scatterplot_height
self.template_file_name = template_file_name
def to_html(self):
'''
Returns
-------
str, the html file representation
'''
javascript_to_insert = self._get_javascript_to_insert()
autocomplete_css = PackedDataUtils.full_content_of_default_autocomplete_css()
html_template = self._get_html_template()
html_content = self._replace_html_template(autocomplete_css, html_template, javascript_to_insert)
return html_content
def _get_javascript_to_insert(self):
return '\n'.join([
PackedDataUtils.full_content_of_javascript_files(),
self.scatterplot_structure._visualization_data.to_javascript(),
self.scatterplot_structure.get_js_to_call_build_scatterplot(),
PackedDataUtils.javascript_post_build_viz('termSearch', 'plotInterface'),
self.graph_renderer.get_javascript()
])
def _replace_html_template(self, autocomplete_css, html_template, javascript_to_insert):
return (html_template
.replace('/***AUTOCOMPLETE CSS***/', autocomplete_css, 1)
.replace('<!-- INSERT SCRIPT -->', javascript_to_insert, 1)
.replace('<!--D3URL-->', self.d3_url_struct.get_d3_url(), 1)
.replace('<!-- INSERT GRAPH -->', self.graph_renderer.get_graph())
.replace('<!--D3SCALECHROMATIC-->', self.d3_url_struct.get_d3_scale_chromatic_url())
.replace('http://', self.protocol + '://')
.replace('{width}', str(self.scatterplot_width))
.replace('{height}', str(self.scatterplot_height))
)
def _get_html_template(self):
return PackedDataUtils.get_packaged_html_template_content(self.template_file_name)
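# Typical usage sketch (illustrative; assumes a ScatterplotStructure and a
# GraphRenderer have already been built elsewhere):
#     html = GraphStructure(scatterplot_structure, graph_renderer).to_html()
#     with open('graph_plot.html', 'w') as fh:
#         fh.write(html)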
|
import json
import logging
import os
import requests
from kalliope.core.NeuronModule import NeuronModule, InvalidParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Uri(NeuronModule):
def __init__(self, **kwargs):
super(Uri, self).__init__(**kwargs)
# input variables
self.url = kwargs.get('url', None)
self.headers = kwargs.get('headers', None)
self.data = kwargs.get('data', None)
self.data_from_file = kwargs.get('data_from_file', None)
self.method = kwargs.get('method', "GET")
self.user = kwargs.get('user', None)
self.password = kwargs.get('password', None)
self.timeout = kwargs.get('timeout', None)
# processing parameters
self.parameters = None
# output variable
self.status_code = None
self.content = None
self.text = None
self.response_header = None
# this is a switch case option
switch_case = {
"GET": self.do_get,
"POST": self.do_post,
"DELETE": self.do_delete,
"PUT": self.do_put,
"HEAD": self.do_head,
"PATCH": self.do_patch,
"OPTIONS": self.do_options
}
# check parameters
if self._is_parameters_ok():
# we get parameters that will be passed to the requests lib
self.parameters = self.get_parameters()
# we call the right method depending of the method selected
switch_case[self.method]()
message = {
"status_code": self.status_code,
"content": self.content,
"response_header": self.response_header
}
self.say(message)
def do_get(self):
logger.debug(self.neuron_name + " do_get method called")
r = requests.get(url=self.url, **self.parameters)
self.post_processing_request(r)
def do_post(self):
logger.debug(self.neuron_name + " do_post method called")
r = requests.post(url=self.url, **self.parameters)
self.post_processing_request(r)
def do_delete(self):
logger.debug(self.neuron_name + " do_delete method called")
r = requests.delete(url=self.url, **self.parameters)
self.post_processing_request(r)
def do_put(self):
logger.debug(self.neuron_name + " do_put method called")
r = requests.put(url=self.url, **self.parameters)
self.post_processing_request(r)
def do_head(self):
logger.debug(self.neuron_name + " do_head method called")
r = requests.head(url=self.url, **self.parameters)
self.post_processing_request(r)
def do_patch(self):
logger.debug(self.neuron_name + " do_patch method called")
r = requests.patch(url=self.url, **self.parameters)
self.post_processing_request(r)
def do_options(self):
logger.debug(self.neuron_name + " do_options method called")
r = requests.options(url=self.url, **self.parameters)
self.post_processing_request(r)
def get_parameters(self):
"""
:return: Dict of parameters usable by the "requests" lib
"""
returned_parameters = dict()
if self.headers is not None:
returned_parameters["headers"] = self.headers
if self.timeout is not None:
returned_parameters["timeout"] = self.timeout
if self.data is not None:
returned_parameters["data"] = self.data
if self.data_from_file is not None:
returned_parameters["data"] = self.data_from_file
if self.user is not None:
# this implicitly means that the password is set too, the check has been done in _is_parameters_ok
returned_parameters["auth"] = self.user, self.password
logger.debug(self.neuron_name + " parameters: %s" % returned_parameters)
return returned_parameters
def post_processing_request(self, r):
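        """
        Store the response's status code, content (decoded as JSON when
        possible), text and headers so they can be returned in the neuron
        message.
        """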
self.status_code = r.status_code
self.content = r.content
        # try to load the content as a JSON object so Kalliope can use it to talk
try:
self.content = json.loads(self.content.decode())
except ValueError:
logger.debug(self.neuron_name + "cannot get a valid json from returned content")
pass
self.text = r.text
self.response_header = r.headers
logger.debug(self.neuron_name + " status_code: %s" % self.status_code)
logger.debug(self.neuron_name + " content: %s" % self.content)
logger.debug(self.neuron_name + " response_header: %s" % self.response_header)
def _is_parameters_ok(self):
"""
Check that all provided parameters in the neurons are valid
:return: True if all check passed
"""
# URL is mandatory
if self.url is None:
raise InvalidParameterException("Uri needs an url")
        # headers can be null, but if provided it must be a dict
        if self.headers is not None:
            if not isinstance(self.headers, dict):
                raise InvalidParameterException("headers must be a dict of key: value pairs")
# timeout in second must be an integer
if self.timeout is not None:
if not isinstance(self.timeout, int):
raise InvalidParameterException("timeout must be an integer")
# data must be loadable with json
if self.data is not None:
try:
json.loads(self.data)
except ValueError as e:
raise InvalidParameterException("error in \"data\" parameter: %s" % e)
# data_from_file path must exist and data inside must be loadable by json
if self.data_from_file is not None:
            # check that the file exists
            if not os.path.exists(self.data_from_file):
                raise InvalidParameterException("error in \"data_from_file\". File does not exist: %s" % self.data_from_file)
            # then try to load the content of the file
            try:
                self.data_from_file = self.readfile(self.data_from_file)
            except ValueError as e:
                raise InvalidParameterException("error in \"data_from_file\" parameter: %s" % e)
# we cannot provide both data and data from file
if self.data is not None and self.data_from_file is not None:
raise InvalidParameterException("URI can be used with data or data_from_file, not both in same time")
# the provided method must exist
allowed_method = ["GET", "POST", "DELETE", "PUT", "HEAD", "PATCH", "OPTIONS"]
if self.method not in allowed_method:
raise InvalidParameterException("method %s not in: %s" % (self.method, allowed_method))
return True
@staticmethod
def readfile(file_path):
"""
return the content of the file <file_path>
:param file_path: File path to read
:return: Str content of the file
"""
        with open(file_path, 'r') as file_to_read:
            return file_to_read.read()
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import STATE_HOME, STATE_NOT_HOME
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a device_tracker."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_not_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
hass.states.async_set("device_tracker.entity", STATE_HOME)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "device_tracker.entity",
"type": "is_home",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "device_tracker.entity",
"type": "is_not_home",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_not_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_home - event - test_event1"
hass.states.async_set("device_tracker.entity", STATE_NOT_HOME)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_not_home - event - test_event2"
|
import getpass
import logging
from yandextank.common.util import SecuredShell
from ...common.interfaces import AbstractPlugin
from ..Phantom import Plugin as PhantomPlugin
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin):
'''Plugin that collects remote system information'''
SECTION = "platform"
@staticmethod
def get_key():
return __file__
def __init__(self, core, cfg, name):
AbstractPlugin.__init__(self, core, cfg, name)
self.hosts = []
self.port = None
self.logfile = None
self.default_target = None
def _echo_wrapper(cmd):
return 'echo "====Executing: {cmd}"; {cmd}'.format(cmd=cmd)
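        # Shell commands whose combined output describes the remote system;
        # each one is wrapped with an echo header so the sections are easy to
        # tell apart in the collected log.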
cmds = {
"dpkg": "dpkg -l",
"uname": "uname -a",
"ulimit": "ulimit -a",
"os_identifier": "cat /etc/issue.net",
"uptime": "uptime",
"cpuinfo": "cat /proc/cpuinfo",
"meminfo": "cat /proc/meminfo",
"free": "free -m",
"mounts": "cat /proc/mounts",
"df": "df -h",
"ifconfig": "ifconfig -a",
"sysctl": "cat /etc/sysctl.conf",
"lsmod": "lsmod"
}
self.cmd = "%s" % ";\n".join(
[_echo_wrapper(cmd) for key, cmd in cmds.items()])
def get_available_options(self):
return ["hosts", "port", "username", "timeout"]
def configure(self):
try:
hosts = self.get_option("hosts", "").strip()
if hosts:
self.hosts = hosts.split(" ")
self.port = int(self.get_option("port", 22))
self.username = self.get_option("username", getpass.getuser())
self.timeout = int(self.get_option("timeout", 3))
except BaseException:
logger.error(
'Exception trying to configure Platform plugin', exc_info=True)
self.logfile = self.core.mkstemp(".log", "platform_")
self.core.add_artifact_file(self.logfile)
def prepare_test(self):
try:
phantom = self.core.get_plugin_of_type(PhantomPlugin)
info = phantom.get_info()
if info:
if info.address and info.address not in self.hosts:
logger.debug(
"Adding platform check of default_target %s",
info.address)
self.hosts.append(info.address)
except KeyError as ex:
logger.debug("Phantom plugin not found: %s", ex)
for host in self.hosts:
self.ssh = SecuredShell(
host, self.port, self.username, self.timeout)
try:
out, errors, err_code = self.ssh.execute(self.cmd)
except Exception:
logger.warning(
"Failed to check remote system information at %s:%s", host,
self.port)
logger.debug(
"Failed to check remote system information at %s:%s",
host,
self.port,
exc_info=True)
else:
# logger.info('Remote system `%s` information: %s', host, out)
with open(self.logfile, 'w') as f:
f.write(out)
if errors:
                    logger.debug("[%s] error: '%s'", host, errors)
def is_test_finished(self):
return -1
|
import contextlib
import itertools
import re
from getpass import getpass
from typing import Match, Pattern, Tuple, Optional, AsyncIterator, Any, Dict, Iterator, List
from urllib.parse import quote_plus
try:
# pylint: disable=import-error
import pymongo.errors
import motor.core
import motor.motor_asyncio
except ModuleNotFoundError:
motor = None
pymongo = None
from .. import errors
from .base import BaseDriver, IdentifierData
__all__ = ["MongoDriver"]
class MongoDriver(BaseDriver):
"""
Subclass of :py:class:`.BaseDriver`.
"""
_conn: Optional["motor.motor_asyncio.AsyncIOMotorClient"] = None
@classmethod
async def initialize(cls, **storage_details) -> None:
if motor is None:
raise errors.MissingExtraRequirements(
"Red must be installed with the [mongo] extra to use the MongoDB driver"
)
uri = storage_details.get("URI", "mongodb")
host = storage_details["HOST"]
port = storage_details["PORT"]
user = storage_details["USERNAME"]
password = storage_details["PASSWORD"]
database = storage_details.get("DB_NAME", "default_db")
        if port == 0:
ports = ""
else:
ports = ":{}".format(port)
if user is not None and password is not None:
url = "{}://{}:{}@{}{}/{}".format(
uri, quote_plus(user), quote_plus(password), host, ports, database
)
else:
url = "{}://{}{}/{}".format(uri, host, ports, database)
cls._conn = motor.motor_asyncio.AsyncIOMotorClient(url, retryWrites=True)
@classmethod
async def teardown(cls) -> None:
if cls._conn is not None:
cls._conn.close()
@staticmethod
def get_config_details():
while True:
uri = input("Enter URI scheme (mongodb or mongodb+srv): ")
if uri is "":
uri = "mongodb"
if uri in ["mongodb", "mongodb+srv"]:
break
else:
print("Invalid URI scheme")
host = input("Enter host address: ")
if uri is "mongodb":
port = int(input("Enter host port: "))
else:
port = 0
admin_uname = input("Enter login username: ")
admin_password = getpass("Enter login password: ")
db_name = input("Enter mongodb database name: ")
if admin_uname == "":
admin_uname = admin_password = None
ret = {
"HOST": host,
"PORT": port,
"USERNAME": admin_uname,
"PASSWORD": admin_password,
"DB_NAME": db_name,
"URI": uri,
}
return ret
@property
def db(self) -> "motor.core.Database":
"""
Gets the mongo database for this cog's name.
:return:
PyMongo Database object.
"""
return self._conn.get_database()
def get_collection(self, category: str) -> "motor.core.Collection":
"""
Gets a specified collection within the PyMongo database for this cog.
Unless you are doing custom stuff ``category`` should be one of the class
attributes of :py:class:`core.config.Config`.
:param str category:
The group identifier of a category.
:return:
PyMongo collection object.
"""
return self.db[self.cog_name][category]
@staticmethod
def get_primary_key(identifier_data: IdentifierData) -> Tuple[str, ...]:
# noinspection PyTypeChecker
return identifier_data.primary_key
async def rebuild_dataset(
self, identifier_data: IdentifierData, cursor: "motor.motor_asyncio.AsyncIOMotorCursor"
):
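        # Reassemble a nested dict from the cursor's documents: primary-key
        # parts not already fixed by identifier_data become nested keys, and
        # each document body is placed at the resulting position.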
ret = {}
async for doc in cursor:
pkeys = doc["_id"]["RED_primary_key"]
del doc["_id"]
doc = self._unescape_dict_keys(doc)
if len(pkeys) == 0:
# Global data
ret.update(**doc)
elif len(pkeys) > 0:
# All other data
partial = ret
for key in pkeys[:-1]:
if key in identifier_data.primary_key:
continue
if key not in partial:
partial[key] = {}
partial = partial[key]
if pkeys[-1] in identifier_data.primary_key:
partial.update(**doc)
else:
partial[pkeys[-1]] = doc
return ret
async def get(self, identifier_data: IdentifierData):
mongo_collection = self.get_collection(identifier_data.category)
pkey_filter = self.generate_primary_key_filter(identifier_data)
escaped_identifiers = list(map(self._escape_key, identifier_data.identifiers))
if len(identifier_data.identifiers) > 0:
proj = {"_id": False, ".".join(escaped_identifiers): True}
partial = await mongo_collection.find_one(filter=pkey_filter, projection=proj)
else:
# The case here is for partial primary keys like all_members()
cursor = mongo_collection.find(filter=pkey_filter)
partial = await self.rebuild_dataset(identifier_data, cursor)
if partial is None:
raise KeyError("No matching document was found and Config expects a KeyError.")
for i in escaped_identifiers:
partial = partial[i]
if isinstance(partial, dict):
return self._unescape_dict_keys(partial)
return partial
async def set(self, identifier_data: IdentifierData, value=None):
uuid = self._escape_key(identifier_data.uuid)
primary_key = list(map(self._escape_key, self.get_primary_key(identifier_data)))
dot_identifiers = ".".join(map(self._escape_key, identifier_data.identifiers))
if isinstance(value, dict):
if len(value) == 0:
await self.clear(identifier_data)
return
value = self._escape_dict_keys(value)
mongo_collection = self.get_collection(identifier_data.category)
num_pkeys = len(primary_key)
if num_pkeys >= identifier_data.primary_key_len:
# We're setting at the document level or below.
dot_identifiers = ".".join(map(self._escape_key, identifier_data.identifiers))
if dot_identifiers:
update_stmt = {"$set": {dot_identifiers: value}}
else:
update_stmt = {"$set": value}
try:
await mongo_collection.update_one(
{"_id": {"RED_uuid": uuid, "RED_primary_key": primary_key}},
update=update_stmt,
upsert=True,
)
except pymongo.errors.WriteError as exc:
if exc.args and exc.args[0].startswith("Cannot create field"):
# There's a bit of a failing edge case here...
# If we accidentally set the sub-field of an array, and the key happens to be a
# digit, it will successfully set the value in the array, and not raise an
# error. This is different to how other drivers would behave, and could lead to
# unexpected behaviour.
raise errors.CannotSetSubfield
else:
# Unhandled driver exception, should expose.
raise
else:
# We're setting above the document level.
# Easiest and most efficient thing to do is delete all documents that we're potentially
# replacing, then insert_many().
# We'll do it in a transaction so we can roll-back in case something goes horribly
# wrong.
pkey_filter = self.generate_primary_key_filter(identifier_data)
async with await self._conn.start_session() as session:
with contextlib.suppress(pymongo.errors.CollectionInvalid):
# Collections must already exist when inserting documents within a transaction
await self.db.create_collection(mongo_collection.full_name)
try:
async with session.start_transaction():
await mongo_collection.delete_many(pkey_filter, session=session)
await mongo_collection.insert_many(
self.generate_documents_to_insert(
uuid, primary_key, value, identifier_data.primary_key_len
),
session=session,
)
except pymongo.errors.OperationFailure:
# This DB version / setup doesn't support transactions, so we'll have to use
# a shittier method.
# The strategy here is to separate the existing documents and the new documents
# into ones to be deleted, ones to be replaced, and new ones to be inserted.
# Then we can do a bulk_write().
# This is our list of (filter, new_document) tuples for replacing existing
# documents. The `new_document` should be taken and removed from `value`, so
# `value` only ends up containing documents which need to be inserted.
to_replace: List[Tuple[Dict, Dict]] = []
# This is our list of primary key filters which need deleting. They should
# simply be all the primary keys which were part of existing documents but are
# not included in the new documents.
to_delete: List[Dict] = []
async for document in mongo_collection.find(pkey_filter, session=session):
pkey = document["_id"]["RED_primary_key"]
new_document = value
try:
for pkey_part in pkey[num_pkeys:-1]:
new_document = new_document[pkey_part]
# This document is being replaced - remove it from `value`.
new_document = new_document.pop(pkey[-1])
except KeyError:
# We've found the primary key of an old document which isn't in the
# updated set of documents - it should be deleted.
to_delete.append({"_id": {"RED_uuid": uuid, "RED_primary_key": pkey}})
else:
_filter = {"_id": {"RED_uuid": uuid, "RED_primary_key": pkey}}
new_document.update(_filter)
to_replace.append((_filter, new_document))
# What's left of `value` should be the new documents needing to be inserted.
to_insert = self.generate_documents_to_insert(
uuid, primary_key, value, identifier_data.primary_key_len
)
requests = list(
itertools.chain(
(pymongo.DeleteOne(f) for f in to_delete),
(pymongo.ReplaceOne(f, d) for f, d in to_replace),
(pymongo.InsertOne(d) for d in to_insert if d),
)
)
# This will pipeline the operations so they all complete quickly. However if
# any of them fail, the rest of them will complete - i.e. this operation is not
# atomic.
await mongo_collection.bulk_write(requests, ordered=False)
def generate_primary_key_filter(self, identifier_data: IdentifierData):
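        # Build the MongoDB filter matching this cog's UUID and, depending on
        # how specific identifier_data is, either the exact primary key, a
        # positional prefix of it, or any document that has a primary key.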
uuid = self._escape_key(identifier_data.uuid)
primary_key = list(map(self._escape_key, self.get_primary_key(identifier_data)))
ret = {"_id.RED_uuid": uuid}
if len(identifier_data.identifiers) > 0:
ret["_id.RED_primary_key"] = primary_key
elif len(identifier_data.primary_key) > 0:
for i, key in enumerate(primary_key):
keyname = f"_id.RED_primary_key.{i}"
ret[keyname] = key
else:
ret["_id.RED_primary_key"] = {"$exists": True}
return ret
@classmethod
def generate_documents_to_insert(
cls, uuid: str, primary_keys: List[str], data: Dict[str, Dict[str, Any]], pkey_len: int
) -> Iterator[Dict[str, Any]]:
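        # Recurse through `data` until each document sits at full primary-key
        # depth, then attach an "_id" holding the cog UUID and the accumulated
        # primary key before yielding it.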
num_missing_pkeys = pkey_len - len(primary_keys)
if num_missing_pkeys == 1:
for pkey, document in data.items():
document["_id"] = {"RED_uuid": uuid, "RED_primary_key": primary_keys + [pkey]}
yield document
else:
for pkey, inner_data in data.items():
for document in cls.generate_documents_to_insert(
uuid, primary_keys + [pkey], inner_data, pkey_len
):
yield document
async def clear(self, identifier_data: IdentifierData):
# There are five cases here:
# 1) We're clearing out a subset of identifiers (aka identifiers is NOT empty)
# 2) We're clearing out full primary key and no identifiers
# 3) We're clearing out partial primary key and no identifiers
# 4) Primary key is empty, should wipe all documents in the collection
# 5) Category is empty, all of this cog's data should be deleted
pkey_filter = self.generate_primary_key_filter(identifier_data)
if identifier_data.identifiers:
# This covers case 1
mongo_collection = self.get_collection(identifier_data.category)
dot_identifiers = ".".join(map(self._escape_key, identifier_data.identifiers))
await mongo_collection.update_one(pkey_filter, update={"$unset": {dot_identifiers: 1}})
elif identifier_data.category:
# This covers cases 2-4
mongo_collection = self.get_collection(identifier_data.category)
await mongo_collection.delete_many(pkey_filter)
else:
# This covers case 5
db = self.db
super_collection = db[self.cog_name]
results = await db.list_collections(
filter={"name": {"$regex": rf"^{super_collection.name}\."}}
)
for result in results:
await db[result["name"]].delete_many(pkey_filter)
@classmethod
async def aiter_cogs(cls) -> AsyncIterator[Tuple[str, str]]:
db = cls._conn.get_database()
for collection_name in await db.list_collection_names():
parts = collection_name.split(".")
            if len(parts) != 2:
continue
cog_name = parts[0]
for cog_id in await db[collection_name].distinct("_id.RED_uuid"):
yield cog_name, cog_id
@classmethod
async def delete_all_data(
cls, *, interactive: bool = False, drop_db: Optional[bool] = None, **kwargs
) -> None:
"""Delete all data being stored by this driver.
Parameters
----------
interactive : bool
Set to ``True`` to allow the method to ask the user for
input from the console, regarding the other unset parameters
for this method.
drop_db : Optional[bool]
Set to ``True`` to drop the entire database for the current
bot's instance. Otherwise, collections which appear to be
storing bot data will be dropped.
"""
if interactive is True and drop_db is None:
print(
"Please choose from one of the following options:\n"
" 1. Drop the entire MongoDB database for this instance, or\n"
" 2. Delete all of Red's data within this database, without dropping the database "
"itself."
)
options = ("1", "2")
while True:
resp = input("> ")
try:
drop_db = bool(options.index(resp))
except ValueError:
print("Please type a number corresponding to one of the options.")
else:
break
db = cls._conn.get_database()
if drop_db is True:
await cls._conn.drop_database(db)
else:
async with await cls._conn.start_session() as session:
async for cog_name, cog_id in cls.aiter_cogs():
await db.drop_collection(db[cog_name], session=session)
@staticmethod
def _escape_key(key: str) -> str:
return _SPECIAL_CHAR_PATTERN.sub(_replace_with_escaped, key)
@staticmethod
def _unescape_key(key: str) -> str:
return _CHAR_ESCAPE_PATTERN.sub(_replace_with_unescaped, key)
@classmethod
def _escape_dict_keys(cls, data: dict) -> dict:
"""Recursively escape all keys in a dict."""
ret = {}
for key, value in data.items():
key = cls._escape_key(key)
if isinstance(value, dict):
value = cls._escape_dict_keys(value)
ret[key] = value
return ret
@classmethod
def _unescape_dict_keys(cls, data: dict) -> dict:
"""Recursively unescape all keys in a dict."""
ret = {}
for key, value in data.items():
key = cls._unescape_key(key)
if isinstance(value, dict):
value = cls._unescape_dict_keys(value)
ret[key] = value
return ret
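# "." and "$" are not allowed in MongoDB field names, so keys are escaped to
# the literal sequences "\U0000002E" and "\U00000024". Keys that already
# contain those literal sequences get an "&" inserted after "\U" so the
# escaping stays reversible.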
_SPECIAL_CHAR_PATTERN: Pattern[str] = re.compile(r"([.$]|\\U0000002E|\\U00000024)")
_SPECIAL_CHARS = {
".": "\\U0000002E",
"$": "\\U00000024",
"\\U0000002E": "\\U&0000002E",
"\\U00000024": "\\U&00000024",
}
def _replace_with_escaped(match: Match[str]) -> str:
return _SPECIAL_CHARS[match[0]]
_CHAR_ESCAPE_PATTERN: Pattern[str] = re.compile(r"(\\U0000002E|\\U00000024)")
_CHAR_ESCAPES = {
"\\U0000002E": ".",
"\\U00000024": "$",
"\\U&0000002E": "\\U0000002E",
"\\U&00000024": "\\U00000024",
}
def _replace_with_unescaped(match: Match[str]) -> str:
return _CHAR_ESCAPES[match[0]]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception
class InceptionTest(tf.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(predictions.op.name.startswith(
'InceptionV4/Logits/Predictions'))
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildWithoutAuxLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_v4(inputs, num_classes,
create_aux_logits=False)
self.assertFalse('AuxLogits' in endpoints)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testAllEndPointsShapes(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v4(inputs, num_classes)
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'Mixed_3a': [batch_size, 73, 73, 160],
'Mixed_4a': [batch_size, 71, 71, 192],
'Mixed_5a': [batch_size, 35, 35, 384],
# 4 x Inception-A blocks
'Mixed_5b': [batch_size, 35, 35, 384],
'Mixed_5c': [batch_size, 35, 35, 384],
'Mixed_5d': [batch_size, 35, 35, 384],
'Mixed_5e': [batch_size, 35, 35, 384],
# Reduction-A block
'Mixed_6a': [batch_size, 17, 17, 1024],
# 7 x Inception-B blocks
'Mixed_6b': [batch_size, 17, 17, 1024],
'Mixed_6c': [batch_size, 17, 17, 1024],
'Mixed_6d': [batch_size, 17, 17, 1024],
'Mixed_6e': [batch_size, 17, 17, 1024],
'Mixed_6f': [batch_size, 17, 17, 1024],
'Mixed_6g': [batch_size, 17, 17, 1024],
'Mixed_6h': [batch_size, 17, 17, 1024],
                        # Reduction-B block
'Mixed_7a': [batch_size, 8, 8, 1536],
# 3 x Inception-C blocks
'Mixed_7b': [batch_size, 8, 8, 1536],
'Mixed_7c': [batch_size, 8, 8, 1536],
'Mixed_7d': [batch_size, 8, 8, 1536],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'PreLogitsFlatten': [batch_size, 1536],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4_base(inputs)
self.assertTrue(net.op.name.startswith(
'InceptionV4/Mixed_7d'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
    for name, op in end_points.items():
self.assertTrue(op.name.startswith('InceptionV4/' + name))
def testBuildOnlyUpToFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
all_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
for index, endpoint in enumerate(all_endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v4_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV4/' + endpoint))
self.assertItemsEqual(all_endpoints[:index+1], end_points)
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_v4(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_v4(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v4(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
if __name__ == '__main__':
tf.test.main()
|
from datetime import timedelta
import logging
from anel_pwrctrl import DeviceMaster
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_PORT_RECV = "port_recv"
CONF_PORT_SEND = "port_send"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PORT_RECV): cv.port,
vol.Required(CONF_PORT_SEND): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up PwrCtrl devices/switches."""
host = config.get(CONF_HOST)
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
port_recv = config[CONF_PORT_RECV]
port_send = config[CONF_PORT_SEND]
try:
master = DeviceMaster(
username=username,
password=password,
read_port=port_send,
write_port=port_recv,
)
master.query(ip_addr=host)
except OSError as ex:
_LOGGER.error("Unable to discover PwrCtrl device: %s", str(ex))
return False
devices = []
for device in master.devices.values():
parent_device = PwrCtrlDevice(device)
devices.extend(
PwrCtrlSwitch(switch, parent_device) for switch in device.switches.values()
)
add_entities(devices)
class PwrCtrlSwitch(SwitchEntity):
"""Representation of a PwrCtrl switch."""
def __init__(self, port, parent_device):
"""Initialize the PwrCtrl switch."""
self._port = port
self._parent_device = parent_device
@property
def unique_id(self):
"""Return the unique ID of the device."""
return f"{self._port.device.host}-{self._port.get_index()}"
@property
def name(self):
"""Return the name of the device."""
return self._port.label
@property
def is_on(self):
"""Return true if the device is on."""
return self._port.get_state()
def update(self):
"""Trigger update for all switches on the parent device."""
self._parent_device.update()
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._port.on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._port.off()
class PwrCtrlDevice:
"""Device representation for per device throttling."""
def __init__(self, device):
"""Initialize the PwrCtrl device."""
self._device = device
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update the device and all its switches."""
self._device.update()
|
from gitless import core
from . import commit_dialog
from . import helpers, pprint
def parser(subparsers, repo):
"""Adds the commit parser to the given subparsers object."""
desc = 'save changes to the local repository'
commit_parser = subparsers.add_parser(
'commit', help=desc, description=(
desc.capitalize() + '. ' +
'By default all tracked modified files are committed. To customize the'
' set of files to be committed use the only, exclude, and include '
'flags'), aliases=['ci'])
commit_parser.add_argument(
'-m', '--message', help='Commit message', dest='m')
commit_parser.add_argument(
'-p', '--partial',
help='Interactively select segments of files to commit', dest='p',
action='store_true')
helpers.oei_flags(commit_parser, repo)
commit_parser.set_defaults(func=main)
def main(args, repo):
commit_files = helpers.oei_fs(args, repo)
if not commit_files:
pprint.err('No files to commit')
pprint.err_exp('use gl track f if you want to track changes to file f')
return False
curr_b = repo.current_branch
total_additions = 0
total_deletions = 0
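  # Tally added and removed lines across the files being committed; files with
  # no diff at HEAD and binary files are skipped.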
for fp in commit_files:
try:
patch = curr_b.diff_file(fp)
except KeyError:
continue
if patch.delta.is_binary:
continue
total_additions += patch.line_stats[1]
total_deletions += patch.line_stats[2]
partials = None
if args.p:
partials = _do_partial_selection(commit_files, curr_b)
if not _author_info_is_ok(repo):
return False
msg = args.m if args.m else commit_dialog.show(commit_files, repo)
if not msg.strip():
if partials:
core.git('reset', 'HEAD', partials)
raise ValueError('Missing commit message')
_auto_track(commit_files, curr_b)
ci = curr_b.create_commit(commit_files, msg, partials=partials)
pprint.ok('Commit on branch {0} succeeded'.format(repo.current_branch))
pprint.blank()
pprint.commit(ci, line_additions=total_additions, line_deletions=total_deletions)
if curr_b.fuse_in_progress:
_op_continue(curr_b.fuse_continue, 'Fuse')
elif curr_b.merge_in_progress:
_op_continue(curr_b.merge_continue, 'Merge')
return True
def _author_info_is_ok(repo):
def show_config_error(key):
pprint.err('Missing {0} for commit author'.format(key))
pprint.err_exp('change the value of git\'s user.{0} setting'.format(key))
def config_is_ok(key):
try:
if not repo.config['user.{0}'.format(key)]:
show_config_error(key)
return False
except KeyError:
show_config_error(key)
return False
return True
return config_is_ok('name') and config_is_ok('email')
def _do_partial_selection(files, curr_b):
partials = []
for fp in files:
f_st = curr_b.status_file(fp)
if not f_st.exists_at_head:
pprint.warn('Can\'t select segments for new file {0}'.format(fp))
continue
if not f_st.exists_in_wd:
pprint.warn('Can\'t select segments for deleted file {0}'.format(fp))
continue
core.git('add', '-p', fp)
# TODO: check that at least one hunk was staged
partials.append(fp)
return partials
def _auto_track(files, curr_b):
"""Tracks those untracked files in the list."""
for fp in files:
f = curr_b.status_file(fp)
if f.type == core.GL_STATUS_UNTRACKED:
curr_b.track_file(f.fp)
def _op_continue(op, fn):
pprint.blank()
try:
op(op_cb=pprint.OP_CB)
pprint.ok('{0} succeeded'.format(fn))
except core.ApplyFailedError as e:
    pprint.err('{0} failed'.format(fn))
raise e
|
from django.db import transaction
from weblate.checks.flags import Flags
from weblate.trans.models import Change, Component, Unit, update_source
from weblate.utils.state import STATE_APPROVED, STATE_FUZZY, STATE_TRANSLATED
EDITABLE_STATES = STATE_FUZZY, STATE_TRANSLATED, STATE_APPROVED
def bulk_perform(
user,
unit_set,
query,
target_state,
add_flags,
remove_flags,
add_labels,
remove_labels,
):
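    # Resolve the matching units and affected components up front, then process
    # each component inside a transaction while holding its lock.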
matching = unit_set.search(query).prefetch()
components = Component.objects.filter(
id__in=matching.values_list("translation__component_id", flat=True)
)
target_state = int(target_state)
add_flags = Flags(add_flags)
remove_flags = Flags(remove_flags)
updated = 0
for component in components:
with transaction.atomic(), component.lock():
component.preload_sources()
component.commit_pending("bulk edit", user)
component_units = matching.filter(
translation__component=component
).select_for_update()
can_edit_source = user is None or user.has_perm("source.edit", component)
update_unit_ids = []
source_units = []
for unit in component_units:
changed = False
source_unit = unit.source_unit
if (
target_state != -1
and (user is None or user.has_perm("unit.edit", unit))
and target_state != unit.state
and unit.state in EDITABLE_STATES
):
                    # Create change object for edit, update is done outside the loop
unit.generate_change(
user, user, Change.ACTION_BULK_EDIT, check_new=False
)
changed = True
update_unit_ids.append(unit.pk)
if unit.is_source:
source_units.append(unit)
if can_edit_source:
if add_flags or remove_flags:
flags = Flags(source_unit.extra_flags)
flags.merge(add_flags)
flags.remove(remove_flags)
new_flags = flags.format()
if source_unit.extra_flags != new_flags:
source_unit.is_bulk_edit = True
source_unit.extra_flags = new_flags
source_unit.save(update_fields=["extra_flags"])
changed = True
if add_labels:
source_unit.is_bulk_edit = True
source_unit.labels.add(*add_labels)
changed = True
if remove_labels:
source_unit.is_bulk_edit = True
source_unit.labels.remove(*remove_labels)
changed = True
if changed:
updated += 1
if target_state != -1:
# Bulk update state
Unit.objects.filter(pk__in=update_unit_ids).update(
pending=True, state=target_state
)
# Fire source_change event in bulk for source units
for unit in source_units:
# The change is already done in the database, we
# need it here to recalculate state of translation
# units
unit.is_bulk_edit = True
unit.pending = True
unit.state = target_state
update_source(Unit, unit)
component.invalidate_stats_deep()
return updated
|
import random
from unittest import TestCase
import numpy as np
from scattertext.ScatterChartExplorer import ScatterChartExplorer
from scattertext.test.test_semioticSquare import get_test_corpus
from scattertext.test.test_termDocMatrixFactory import build_hamlet_jz_corpus, build_hamlet_jz_corpus_with_meta, \
build_hamlet_jz_corpus_with_alt_text
class TestScatterChartExplorer(TestCase):
def test_to_dict(self):
np.random.seed(0)
random.seed(0)
corpus = build_hamlet_jz_corpus()
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0)
.to_dict('hamlet'))
self.assertEqual(set(j.keys()), set(['info', 'data', 'docs']))
self.assertEqual(set(j['info'].keys()),
set(['not_category_name', 'category_name',
'category_terms', 'not_category_internal_names',
'not_category_terms', 'category_internal_name',
'categories', 'neutral_category_name',
'extra_category_name',
'neutral_category_internal_names',
'extra_category_internal_names']))
self.assertEqual(list(j['docs']['labels']),
[0, 0, 0, 0, 1, 1, 1, 1])
self.assertEqual(list(j['docs']['texts']),
["what art thou that usurp'st this time of night,",
'together with that fair and warlike form',
'in which the majesty of buried denmark',
'did sometimes march? by heaven i charge thee, speak!',
'halt! who goes there?', 'it is i sire tone from brooklyn.',
'well, speak up man what is it?',
'news from the east sire! the best of both worlds has returned!'])
expected = {'y': 0.5, 'ncat': 0, 'ncat25k': 0, 'bg': 5,
'cat': 1, 's': 0.5, 'term': 'art', 'os': 0.5192, 'extra': 0, 'extra25k': 0,
'cat25k': 758, 'x': 0.06, 'neut': 0, 'neut25k': 0, 'ox': 5, 'oy': 3}
actual = [t for t in j['data'] if t['term'] == 'art'][0]
        '''
        for var in expected.keys():
            try:
                np.testing.assert_almost_equal(actual[var], expected[var], decimal=1)
            except TypeError:
                self.assertEqual(actual[var], expected[var])
        '''
self.assertEqual(set(expected.keys()), set(actual.keys()))
self.assertEqual(expected['term'], actual['term'])
self.assertEqual(j['docs'].keys(), {'texts', 'labels', 'categories'})
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0)
.inject_term_metadata({'art': {'display': 'blah blah blah', 'color': 'red'}})
.to_dict('hamlet'))
actual = [t for t in j['data'] if t['term'] == 'art'][0]
expected = {'y': 0.5, 'ncat': 0, 'ncat25k': 0, 'bg': 5,
'cat': 1, 's': 0.5, 'term': 'art', 'os': 0.5192, 'extra': 0, 'extra25k': 0,
'cat25k': 758, 'x': 0.06, 'neut': 0, 'neut25k': 0, 'ox': 5, 'oy': 3,
'etc': {'display': 'blah blah blah', 'color': 'red'}}
self.assertEqual(set(actual.keys()), set(expected.keys()))
self.assertEqual(actual['etc'], expected['etc'])
actual = [t for t in j['data'] if t['term'] != 'art'][0]
self.assertEqual(set(actual.keys()), set(expected.keys()))
self.assertEqual(actual['etc'], {})
def test_hide_terms(self):
corpus = build_hamlet_jz_corpus().get_unigram_corpus()
terms_to_hide = ['thou', 'heaven']
sc = (ScatterChartExplorer(corpus, minimum_term_frequency=0).hide_terms(terms_to_hide))
self.assertEquals(type(sc), ScatterChartExplorer)
j = sc.to_dict('hamlet', include_term_category_counts=True)
self.assertTrue(all(['display' in t and t['display'] == False for t in j['data'] if t['term'] in terms_to_hide]))
self.assertTrue(all(['display' not in t for t in j['data'] if t['term'] not in terms_to_hide]))
def test_include_term_category_counts(self):
corpus = build_hamlet_jz_corpus().get_unigram_corpus()
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0)
.to_dict('hamlet', include_term_category_counts=True))
self.assertEqual(set(j.keys()), set(['info', 'data', 'docs', 'termCounts']))
self.assertEqual(len(j['termCounts']), corpus.get_num_categories())
term_idx_set = set()
for cat_counts in j['termCounts']:
term_idx_set |= set(cat_counts.keys())
self.assertTrue(all([freq >= docs for freq, docs in cat_counts.values()]))
self.assertEqual(len(term_idx_set), corpus.get_num_terms())
def test_multi_categories(self):
corpus = get_test_corpus()
j_vs_all = ScatterChartExplorer(corpus=corpus, minimum_term_frequency=0) \
.to_dict('hamlet')
j_vs_swift = ScatterChartExplorer(corpus=corpus, minimum_term_frequency=0) \
.to_dict('hamlet', not_categories=['swift'])
self.assertNotEqual(set(j_vs_all['info']['not_category_internal_names']),
set(j_vs_swift['info']['not_category_internal_names']))
self.assertEqual(list(j_vs_all['docs']['labels']), list(j_vs_swift['docs']['labels']))
self.assertEqual(list(j_vs_all['docs']['categories']), list(j_vs_swift['docs']['categories']))
def test_metadata(self):
corpus = build_hamlet_jz_corpus()
meta = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight']
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0)
.to_dict('hamlet', metadata=meta))
self.maxDiff = None
j['docs']['labels'] = list(j['docs']['labels'])
self.assertEqual(j['docs'],
{'labels': [0, 0, 0, 0, 1, 1, 1, 1],
'categories': ['hamlet', 'jay-z/r. kelly'],
'meta': ['one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight'],
'texts': ["what art thou that usurp'st this time of night,",
'together with that fair and warlike form',
'in which the majesty of buried denmark',
'did sometimes march? by heaven i charge thee, speak!',
'halt! who goes there?',
'it is i sire tone from brooklyn.',
'well, speak up man what is it?',
'news from the east sire! the best of both worlds has returned!']}
)
def test_alternative_text(self):
corpus = build_hamlet_jz_corpus_with_alt_text()
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0)
.to_dict('hamlet', alternative_text_field='alt'))
self.assertEqual(j['docs']['texts'][0], j['docs']['texts'][0].upper())
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0)
.to_dict('hamlet'))
self.assertNotEqual(j['docs']['texts'][0], j['docs']['texts'][0].upper())
def test_extra_features(self):
corpus = build_hamlet_jz_corpus_with_meta()
meta = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight']
j = (ScatterChartExplorer(corpus,
minimum_term_frequency=0,
use_non_text_features=True)
.to_dict('hamlet', metadata=meta))
extras = [{'cat3': 1, 'cat4': 2},
{'cat4': 2},
{'cat3': 2, 'cat5': 1},
{'cat6': 2, 'cat9': 1},
{'cat3': 1, 'cat4': 2},
{'cat1': 2, 'cat2': 1},
{'cat2': 2, 'cat5': 1},
{'cat3': 2, 'cat4': 1}]
extras = [{'cat1': 2}] * 8
self.maxDiff = None
j['docs']['labels'] = list(j['docs']['labels'])
self.assertEqual(j['docs'],
{'labels': [0, 0, 0, 0, 1, 1, 1, 1],
'categories': ['hamlet', 'jay-z/r. kelly'],
'extra': extras,
'meta': ['one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight'],
'texts': ["what art thou that usurp'st this time of night,",
'together with that fair and warlike form',
'in which the majesty of buried denmark',
'did sometimes march? by heaven i charge thee, speak!',
'halt! who goes there?',
'it is i sire tone from brooklyn.',
'well, speak up man what is it?',
'news from the east sire! the best of both worlds has returned!']}
)
|
from functools import partial
import logging
import os
from matrix_client.client import MatrixClient, MatrixRequestError
import voluptuous as vol
from homeassistant.components.notify import ATTR_MESSAGE, ATTR_TARGET
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from .const import DOMAIN, SERVICE_SEND_MESSAGE
_LOGGER = logging.getLogger(__name__)
SESSION_FILE = ".matrix.conf"
CONF_HOMESERVER = "homeserver"
CONF_ROOMS = "rooms"
CONF_COMMANDS = "commands"
CONF_WORD = "word"
CONF_EXPRESSION = "expression"
EVENT_MATRIX_COMMAND = "matrix_command"
COMMAND_SCHEMA = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_WORD, "trigger"): cv.string,
vol.Exclusive(CONF_EXPRESSION, "trigger"): cv.is_regex,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ROOMS, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
),
cv.has_at_least_one_key(CONF_WORD, CONF_EXPRESSION),
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOMESERVER): cv.url,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Required(CONF_USERNAME): cv.matches_regex("@[^:]*:.*"),
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_ROOMS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_COMMANDS, default=[]): [COMMAND_SCHEMA],
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_SEND_MESSAGE = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.string,
vol.Required(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup(hass, config):
"""Set up the Matrix bot component."""
config = config[DOMAIN]
try:
bot = MatrixBot(
hass,
os.path.join(hass.config.path(), SESSION_FILE),
config[CONF_HOMESERVER],
config[CONF_VERIFY_SSL],
config[CONF_USERNAME],
config[CONF_PASSWORD],
config[CONF_ROOMS],
config[CONF_COMMANDS],
)
hass.data[DOMAIN] = bot
except MatrixRequestError as exception:
_LOGGER.error("Matrix failed to log in: %s", str(exception))
return False
hass.services.register(
DOMAIN,
SERVICE_SEND_MESSAGE,
bot.handle_send_message,
schema=SERVICE_SCHEMA_SEND_MESSAGE,
)
return True
class MatrixBot:
"""The Matrix Bot."""
def __init__(
self,
hass,
config_file,
homeserver,
verify_ssl,
username,
password,
listening_rooms,
commands,
):
"""Set up the client."""
self.hass = hass
self._session_filepath = config_file
self._auth_tokens = self._get_auth_tokens()
self._homeserver = homeserver
self._verify_tls = verify_ssl
self._mx_id = username
self._password = password
self._listening_rooms = listening_rooms
# We have to fetch the aliases for every room to make sure we don't
# join it twice by accident. However, fetching aliases is costly,
# so we only do it once per room.
self._aliases_fetched_for = set()
# Word commands are stored dict-of-dict: First dict indexes by room ID
# / alias, second dict indexes by the word
self._word_commands = {}
# Regular expression commands are stored as a list of commands per
# room, i.e., a dict-of-list
self._expression_commands = {}
for command in commands:
if not command.get(CONF_ROOMS):
command[CONF_ROOMS] = listening_rooms
if command.get(CONF_WORD):
for room_id in command[CONF_ROOMS]:
if room_id not in self._word_commands:
self._word_commands[room_id] = {}
self._word_commands[room_id][command[CONF_WORD]] = command
else:
for room_id in command[CONF_ROOMS]:
if room_id not in self._expression_commands:
self._expression_commands[room_id] = []
self._expression_commands[room_id].append(command)
# Log in. This raises a MatrixRequestError if login is unsuccessful
self._client = self._login()
def handle_matrix_exception(exception):
"""Handle exceptions raised inside the Matrix SDK."""
_LOGGER.error("Matrix exception:\n %s", str(exception))
self._client.start_listener_thread(exception_handler=handle_matrix_exception)
def stop_client(_):
"""Run once when Home Assistant stops."""
self._client.stop_listener_thread()
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_client)
# Joining rooms potentially does a lot of I/O, so we defer it
def handle_startup(_):
"""Run once when Home Assistant finished startup."""
self._join_rooms()
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, handle_startup)
def _handle_room_message(self, room_id, room, event):
"""Handle a message sent to a Matrix room."""
if event["content"]["msgtype"] != "m.text":
return
if event["sender"] == self._mx_id:
return
_LOGGER.debug("Handling message: %s", event["content"]["body"])
if event["content"]["body"][0] == "!":
# Could trigger a single-word command
pieces = event["content"]["body"].split(" ")
cmd = pieces[0][1:]
command = self._word_commands.get(room_id, {}).get(cmd)
if command:
event_data = {
"command": command[CONF_NAME],
"sender": event["sender"],
"room": room_id,
"args": pieces[1:],
}
self.hass.bus.fire(EVENT_MATRIX_COMMAND, event_data)
# After single-word commands, check all regex commands in the room
for command in self._expression_commands.get(room_id, []):
match = command[CONF_EXPRESSION].match(event["content"]["body"])
if not match:
continue
event_data = {
"command": command[CONF_NAME],
"sender": event["sender"],
"room": room_id,
"args": match.groupdict(),
}
self.hass.bus.fire(EVENT_MATRIX_COMMAND, event_data)
def _join_or_get_room(self, room_id_or_alias):
"""Join a room or get it, if we are already in the room.
We can't just always call join_room(), since that seems to crash
the client if we're already in the room.
"""
rooms = self._client.get_rooms()
if room_id_or_alias in rooms:
_LOGGER.debug("Already in room %s", room_id_or_alias)
return rooms[room_id_or_alias]
for room in rooms.values():
if room.room_id not in self._aliases_fetched_for:
room.update_aliases()
self._aliases_fetched_for.add(room.room_id)
if room_id_or_alias in room.aliases:
_LOGGER.debug(
"Already in room %s (known as %s)", room.room_id, room_id_or_alias
)
return room
room = self._client.join_room(room_id_or_alias)
_LOGGER.info("Joined room %s (known as %s)", room.room_id, room_id_or_alias)
return room
def _join_rooms(self):
"""Join the Matrix rooms that we listen for commands in."""
for room_id in self._listening_rooms:
try:
room = self._join_or_get_room(room_id)
room.add_listener(
partial(self._handle_room_message, room_id), "m.room.message"
)
except MatrixRequestError as ex:
_LOGGER.error("Could not join room %s: %s", room_id, ex)
def _get_auth_tokens(self):
"""
Read sorted authentication tokens from disk.
Returns the auth_tokens dictionary.
"""
try:
auth_tokens = load_json(self._session_filepath)
return auth_tokens
except HomeAssistantError as ex:
_LOGGER.warning(
"Loading authentication tokens from file '%s' failed: %s",
self._session_filepath,
str(ex),
)
return {}
def _store_auth_token(self, token):
"""Store authentication token to session and persistent storage."""
self._auth_tokens[self._mx_id] = token
save_json(self._session_filepath, self._auth_tokens)
def _login(self):
"""Login to the Matrix homeserver and return the client instance."""
# Attempt to generate a valid client using either of the two possible
# login methods:
client = None
# If we have an authentication token
if self._mx_id in self._auth_tokens:
try:
client = self._login_by_token()
_LOGGER.debug("Logged in using stored token")
except MatrixRequestError as ex:
_LOGGER.warning(
"Login by token failed, falling back to password: %d, %s",
ex.code,
ex.content,
)
# If we still don't have a client try password
if not client:
try:
client = self._login_by_password()
_LOGGER.debug("Logged in using password")
except MatrixRequestError as ex:
_LOGGER.error(
"Login failed, both token and username/password invalid: %d, %s",
ex.code,
ex.content,
)
# Re-raise the error so _setup can catch it
raise
return client
def _login_by_token(self):
"""Login using authentication token and return the client."""
return MatrixClient(
base_url=self._homeserver,
token=self._auth_tokens[self._mx_id],
user_id=self._mx_id,
valid_cert_check=self._verify_tls,
)
def _login_by_password(self):
"""Login using password authentication and return the client."""
_client = MatrixClient(
base_url=self._homeserver, valid_cert_check=self._verify_tls
)
_client.login_with_password(self._mx_id, self._password)
self._store_auth_token(_client.token)
return _client
def _send_message(self, message, target_rooms):
"""Send the message to the Matrix server."""
for target_room in target_rooms:
try:
room = self._join_or_get_room(target_room)
_LOGGER.debug(room.send_text(message))
except MatrixRequestError as ex:
_LOGGER.error(
"Unable to deliver message to room '%s': %d, %s",
target_room,
ex.code,
ex.content,
)
def handle_send_message(self, service):
"""Handle the send_message service."""
self._send_message(service.data[ATTR_MESSAGE], service.data[ATTR_TARGET])
|
import json
from pydexcom import GlucoseReading
from homeassistant.components.dexcom.const import CONF_SERVER, DOMAIN, SERVER_US
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
CONFIG = {
CONF_USERNAME: "test_username",
CONF_PASSWORD: "test_password",
CONF_SERVER: SERVER_US,
}
GLUCOSE_READING = GlucoseReading(json.loads(load_fixture("dexcom_data.json")))
async def init_integration(hass) -> MockConfigEntry:
"""Set up the Dexcom integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
title="test_username",
unique_id="test_username",
data=CONFIG,
options=None,
)
with patch(
"homeassistant.components.dexcom.Dexcom.get_current_glucose_reading",
return_value=GLUCOSE_READING,
), patch(
"homeassistant.components.dexcom.Dexcom.create_session",
return_value="test_session_id",
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
|
import abodepy.helpers.constants as CONST
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
)
from . import AbodeDevice
from .const import DOMAIN
# Sensor types: Name, icon
SENSOR_TYPES = {
CONST.TEMP_STATUS_KEY: ["Temperature", DEVICE_CLASS_TEMPERATURE],
CONST.HUMI_STATUS_KEY: ["Humidity", DEVICE_CLASS_HUMIDITY],
CONST.LUX_STATUS_KEY: ["Lux", DEVICE_CLASS_ILLUMINANCE],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Abode sensor devices."""
data = hass.data[DOMAIN]
entities = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_SENSOR):
for sensor_type in SENSOR_TYPES:
if sensor_type not in device.get_value(CONST.STATUSES_KEY):
continue
entities.append(AbodeSensor(data, device, sensor_type))
async_add_entities(entities)
class AbodeSensor(AbodeDevice):
"""A sensor implementation for Abode devices."""
def __init__(self, data, device, sensor_type):
"""Initialize a sensor for an Abode device."""
super().__init__(data, device)
self._sensor_type = sensor_type
self._name = f"{self._device.name} {SENSOR_TYPES[self._sensor_type][0]}"
self._device_class = SENSOR_TYPES[self._sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def unique_id(self):
"""Return a unique ID to use for this device."""
return f"{self._device.device_uuid}-{self._sensor_type}"
@property
def state(self):
"""Return the state of the sensor."""
if self._sensor_type == CONST.TEMP_STATUS_KEY:
return self._device.temp
if self._sensor_type == CONST.HUMI_STATUS_KEY:
return self._device.humidity
if self._sensor_type == CONST.LUX_STATUS_KEY:
return self._device.lux
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
if self._sensor_type == CONST.TEMP_STATUS_KEY:
return self._device.temp_unit
if self._sensor_type == CONST.HUMI_STATUS_KEY:
return self._device.humidity_unit
if self._sensor_type == CONST.LUX_STATUS_KEY:
return self._device.lux_unit
|
import logging
from pyhomeworks.pyhomeworks import HW_LIGHT_CHANGED
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import CONF_ADDR, CONF_DIMMERS, CONF_RATE, HOMEWORKS_CONTROLLER, HomeworksDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discover_info=None):
"""Set up Homeworks lights."""
if discover_info is None:
return
controller = hass.data[HOMEWORKS_CONTROLLER]
devs = []
for dimmer in discover_info[CONF_DIMMERS]:
dev = HomeworksLight(
controller, dimmer[CONF_ADDR], dimmer[CONF_NAME], dimmer[CONF_RATE]
)
devs.append(dev)
add_entities(devs, True)
class HomeworksLight(HomeworksDevice, LightEntity):
"""Homeworks Light."""
def __init__(self, controller, addr, name, rate):
"""Create device with Addr, name, and rate."""
super().__init__(controller, addr, name)
self._rate = rate
self._level = 0
self._prev_level = 0
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
signal = f"homeworks_entity_{self._addr}"
_LOGGER.debug("connecting %s", signal)
self.async_on_remove(
async_dispatcher_connect(self.hass, signal, self._update_callback)
)
self._controller.request_dimmer_level(self._addr)
@property
def supported_features(self):
"""Supported features."""
return SUPPORT_BRIGHTNESS
def turn_on(self, **kwargs):
"""Turn on the light."""
if ATTR_BRIGHTNESS in kwargs:
new_level = kwargs[ATTR_BRIGHTNESS]
elif self._prev_level == 0:
new_level = 255
else:
new_level = self._prev_level
self._set_brightness(new_level)
def turn_off(self, **kwargs):
"""Turn off the light."""
self._set_brightness(0)
@property
def brightness(self):
"""Control the brightness."""
return self._level
def _set_brightness(self, level):
"""Send the brightness level to the device."""
self._controller.fade_dim(
float((level * 100.0) / 255.0), self._rate, 0, self._addr
)
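    # Worked example of the scaling above: Home Assistant brightness is 0-255
    # while the Homeworks controller expects a percentage, so a requested
    # brightness of 128 is sent as float((128 * 100.0) / 255.0) ~= 50.2.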
@property
def device_state_attributes(self):
"""Supported attributes."""
return {"homeworks_address": self._addr}
@property
def is_on(self):
"""Is the light on/off."""
return self._level != 0
@callback
def _update_callback(self, msg_type, values):
"""Process device specific messages."""
if msg_type == HW_LIGHT_CHANGED:
self._level = int((values[1] * 255.0) / 100.0)
if self._level != 0:
self._prev_level = self._level
self.async_write_ha_state()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import cv2
from utils.det_utils import *
from configs.kitti_config import config
import pickle
class MobileNetDetTest(tf.test.TestCase):
def test_batch_iou_fast(self):
anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
anchors = xywh_to_yxyx(anchors)
bboxes = tf.placeholder(dtype=tf.float32, shape=[None, 4])
bboxes_ = xywh_to_yxyx(bboxes)
ious = batch_iou_fast(anchors, bboxes_)
with self.test_session() as sess:
ious, bboxes_ = sess.run([ious, bboxes],
feed_dict={bboxes: [[599.37, 212.45, 27.62, 25.34],
config.ANCHOR_SHAPE[2]]}
)
print("ious:", ious)
print("max iou idx:", np.argmax(ious, axis=-1))
print("bboxes:", bboxes_)
def test_set_anchors(self):
with self.test_session() as sess:
anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
output = sess.run(anchors)
print(np.shape(output))
self.assertAllEqual(np.shape(output), [config.FEA_HEIGHT * config.FEA_WIDTH * config.NUM_ANCHORS, 4])
print("Anchors:", output)
print("Anchors shape:", np.shape(output))
print("Num of anchors:", config.NUM_ANCHORS)
def test_arg_closest_anchor(self):
with self.test_session() as sess:
bbox_1 = tf.convert_to_tensor([10, 10, 20, 20], dtype=tf.float32)
bbox_2 = tf.convert_to_tensor([110, 110, 30, 30], dtype=tf.float32)
bboxes = tf.stack([bbox_1, bbox_2], axis=0)
anchor_1 = tf.convert_to_tensor([0,0,10,10], dtype=tf.float32)
anchor_2 = tf.convert_to_tensor([100,100,110,110], dtype=tf.float32)
anchors = tf.stack([anchor_1, anchor_2], axis=0)
indices = arg_closest_anchor(bboxes, anchors)
output = sess.run(indices)
print('test_arg_closest_anchor')
print(output)
def test_update_tensor(self):
with self.test_session() as sess:
      ref = tf.placeholder(dtype=tf.int64, shape=[None])
indices = tf.convert_to_tensor([2], dtype=tf.int64)
update = tf.convert_to_tensor([9], dtype=tf.int64)
tensor_updated = update_tensor(ref, indices, update)
output = sess.run(tensor_updated, feed_dict={ref: [1, 2, 3]})
print("test update tensor:")
print("tensor updated", output)
def test_encode_annos(self):
with open("/home/zehao/PycharmProjects/MobileNet/utils/test_data.pkl", "rb") as fin:
test_data = pickle.load(fin)
with self.test_session() as sess:
anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
num_image = len(test_data["test_bbox"])
for i in range(50):
bboxes = tf.convert_to_tensor(test_data["test_bbox"][i][0], dtype=tf.float32)
bboxes = xywh_to_yxyx(bboxes)
labels = tf.convert_to_tensor(test_data["test_label"][i][0])
input_mask, labels_input, box_delta_input, box_input = \
encode_annos(labels, bboxes, anchors, config.NUM_CLASSES)
out_input_mask, out_labels_input, out_box_delta_input, out_box_input, out_anchors = \
sess.run([input_mask, labels_input, box_delta_input, box_input, anchors])
print("num_bbox:", np.shape(test_data["test_bbox"][i][0])[0])
sd_indices = np.where(test_data["test_input_mask"][i][0] > 0)[1]
print("SDet:")
print("indices:", sd_indices)
print("mask:", np.where(test_data["test_input_mask"][i][0] > 0)[1])
print("bbox:", test_data["test_bbox"][i][0])
print("label:", test_data["test_label"][i][0])
print("delta:", test_data["test_input_delta"][i][0][0][sd_indices])
print("first:", sd_indices[0], test_data["test_input_bbox"][i][0][0][sd_indices[0]], test_data["test_input_delta"][i][0][0][sd_indices[0]])
indices = np.where(out_input_mask > 0)[0]
print("Mine:")
print("indices:", indices)
print("mask:", np.where(out_input_mask > 0)[0])
print("bbox:", out_box_input[indices])
print("label:", out_labels_input[indices])
print("delta:", out_box_delta_input[indices])
print("first:", indices[0], out_box_input[indices[0]], out_box_delta_input[indices[0]])
print("\n")
# print("bbox:", out_box_input[indices])
# aidx = np.where(test_data["test_input_mask"][i][0] > 0)[1]
# encode_idx = np.where(out_input_mask > 0)[0]
# flag = False
# if np.shape(aidx)[0] != np.shape(encode_idx)[0]:
# flag = True
# elif not np.alltrue(np.equal(aidx, encode_idx)):
# flag = True
# error_bidx = np.where(aidx != encode_idx)
# true_aidx = aidx[error_bidx]
# error_aidx = encode_idx[error_bidx]
# if flag:
# image = test_data["test_image"][i][0]
# for b in range(np.shape(test_data["test_bbox"][i][0])[0]):
# bboxes = test_data["test_bbox"][i][0]
# bbox = bboxes[b]
# x = bbox[0]
# y = bbox[1]
# w = bbox[2]
# h = bbox[3]
# x1 = x-0.5*w
# y1 = y-0.5*h
# x2 = x+0.5*w
# y2 = y+0.5*h
# color = (255,0,0)
# cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness=2)
# if np.any(error_bidx[0] == b):#b in error_bidx:
# for a in config.ANCHOR_SHAPE[true_aidx]:
# true_a = a
# x = true_a[0]
# y = true_a[1]
# w = true_a[2]
# h = true_a[3]
# x1 = x - 0.5 * w
# y1 = y - 0.5 * h
# x2 = x + 0.5 * w
# y2 = y + 0.5 * h
# color = (0, 255, 255)
# cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness=2)
# for ea in config.ANCHOR_SHAPE[error_aidx]:
# error_a = ea
# x = error_a[0]
# y = error_a[1]
# w = error_a[2]
# h = error_a[3]
# x1 = x - 0.5 * w
# y1 = y - 0.5 * h
# x2 = x + 0.5 * w
# y2 = y + 0.5 * h
# color = (255, 255, 0)
# cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness=2)
# # cv2.imwrite("img" + str(b) + ".jpg", image)
# cv2.imshow("img", image)
# cv2.waitKey(0)
  def test_visualize_anchors(self):
    """Draw every configured anchor on a blank image for visual inspection."""
anchors = config.ANCHOR_SHAPE
image = np.zeros((config.IMG_HEIGHT, config.IMG_WIDTH, 3))
num_anchors = np.shape(anchors)[0]
for i in range(num_anchors):
anchor = anchors[i]
x = anchor[0]
y = anchor[1]
w = anchor[2]
h = anchor[3]
x1 = x - 0.5*w
y1 = y - 0.5*h
x2 = x + 0.5*w
y2 = y + 0.5*h
cv2.rectangle(image,
(int(x1), int(y1)),
(int(x2), int(y2)),
(255,255,255)
)
cv2.rectangle(image,
(int(739.72003), int(181.11)),
(int(770.04), int(204.92)),
(255, 0, 0),
2)
if i == 2313:
cv2.rectangle(image,
(int(x1), int(y1)),
(int(x2), int(y2)),
(0, 255, 255),
2
)
cv2.imshow("anchors", image)
cv2.waitKey(0)
|
import json
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import (
CONF_API_KEY,
CONF_RECIPIENT,
CONF_USERNAME,
CONTENT_TYPE_JSON,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
BASE_API_URL = "https://rest.clicksend.com/v3"
HEADERS = {CONTENT_TYPE: CONTENT_TYPE_JSON}
CONF_LANGUAGE = "language"
CONF_VOICE = "voice"
CONF_CALLER = "caller"
DEFAULT_LANGUAGE = "en-us"
DEFAULT_VOICE = "female"
TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_RECIPIENT): cv.string,
vol.Optional(CONF_LANGUAGE, default=DEFAULT_LANGUAGE): cv.string,
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string,
vol.Optional(CONF_CALLER): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the ClickSend notification service."""
if not _authenticate(config):
_LOGGER.error("You are not authorized to access ClickSend")
return None
return ClicksendNotificationService(config)
class ClicksendNotificationService(BaseNotificationService):
"""Implementation of a notification service for the ClickSend service."""
def __init__(self, config):
"""Initialize the service."""
self.username = config[CONF_USERNAME]
self.api_key = config[CONF_API_KEY]
self.recipient = config[CONF_RECIPIENT]
self.language = config[CONF_LANGUAGE]
self.voice = config[CONF_VOICE]
self.caller = config.get(CONF_CALLER)
if self.caller is None:
self.caller = self.recipient
def send_message(self, message="", **kwargs):
"""Send a voice call to a user."""
data = {
"messages": [
{
"source": "hass.notify",
"from": self.caller,
"to": self.recipient,
"body": message,
"lang": self.language,
"voice": self.voice,
}
]
}
api_url = f"{BASE_API_URL}/voice/send"
resp = requests.post(
api_url,
data=json.dumps(data),
headers=HEADERS,
auth=(self.username, self.api_key),
timeout=TIMEOUT,
)
if resp.status_code == HTTP_OK:
return
obj = json.loads(resp.text)
response_msg = obj["response_msg"]
response_code = obj["response_code"]
_LOGGER.error(
"Error %s : %s (Code %s)", resp.status_code, response_msg, response_code
)
def _authenticate(config):
"""Authenticate with ClickSend."""
api_url = f"{BASE_API_URL}/account"
resp = requests.get(
api_url,
headers=HEADERS,
auth=(config.get(CONF_USERNAME), config.get(CONF_API_KEY)),
timeout=TIMEOUT,
)
if resp.status_code != HTTP_OK:
return False
return True
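# Illustrative configuration.yaml entry matching PLATFORM_SCHEMA above (the
# platform key "clicksend_tts" and all values are placeholders/assumptions):
#
#     notify:
#       - platform: clicksend_tts
#         username: YOUR_CLICKSEND_USERNAME
#         api_key: YOUR_CLICKSEND_API_KEY
#         recipient: "+614XXXXXXXX"
#         language: en-us
#         voice: female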
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
tf.app.flags.DEFINE_integer(
'batch_size', 100, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'max_num_batches', None,
'Max number of batches to evaluate by default use all.')
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'checkpoint_path', '/tmp/tfmodel/',
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'test', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
    'The decay to use for the moving average. '
'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
'eval_image_size', None, 'Eval image size')
tf.app.flags.DEFINE_float('width_multiplier', 1.0,
'Width Multiplier, for MobileNet only.')
FLAGS = tf.app.flags.FLAGS
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
tf_global_step = slim.get_or_create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
####################
# Select the model #
####################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=False,
width_multiplier=FLAGS.width_multiplier)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=False,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Define the model #
####################
logits, _ = network_fn(images)
if FLAGS.moving_average_decay:
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, tf_global_step)
variables_to_restore = variable_averages.variables_to_restore(
slim.get_model_variables())
variables_to_restore[tf_global_step.op.name] = tf_global_step
else:
variables_to_restore = slim.get_variables_to_restore()
predictions = tf.argmax(logits, 1)
labels = tf.squeeze(labels)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'Recall_5': slim.metrics.streaming_recall_at_k(
logits, labels, 5),
})
# Print the summaries to screen.
    for name, value in names_to_values.items():
summary_name = 'eval/%s' % name
op = tf.summary.scalar(summary_name, value, collections=[])
op = tf.Print(op, [value], summary_name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
# TODO(sguada) use num_epochs=1
if FLAGS.max_num_batches:
num_batches = FLAGS.max_num_batches
else:
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Evaluating %s' % checkpoint_path)
slim.evaluation.evaluate_once(
master=FLAGS.master,
checkpoint_path=checkpoint_path,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=list(names_to_updates.values()),
variables_to_restore=variables_to_restore)
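# Illustrative invocation (script file name, paths, and model name are
# placeholders/assumptions, not defaults enforced by this script):
#
#   python eval_image_classifier.py \
#     --checkpoint_path=/tmp/tfmodel/ \
#     --eval_dir=/tmp/tfmodel/eval \
#     --dataset_name=imagenet --dataset_split_name=test \
#     --dataset_dir=/data/imagenet \
#     --model_name=mobilenet --width_multiplier=1.0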
if __name__ == '__main__':
tf.app.run()
|
import unittest
import numpy as np
from pgmpy.factors.distributions import GaussianDistribution as JGD
from pgmpy.sampling import LeapFrog, ModifiedEuler, GradLogPDFGaussian
class TestGradLogPDFGaussian(unittest.TestCase):
def setUp(self):
mean = np.array([1, 2, 3, 4])
covariance = np.array(
[
[1, 0.2, 0.4, 0.7],
[0.2, 2, 0.5, 0.8],
[0.4, 0.5, 3, 0.6],
[0.7, 0.8, 0.6, 4],
]
)
self.test_model = JGD(["x", "y", "z", "t"], mean, covariance)
self.test_gradient = GradLogPDFGaussian([0, 0, 0, 0], self.test_model)
def test_error(self):
with self.assertRaises(TypeError):
GradLogPDFGaussian(1, self.test_model)
with self.assertRaises(ValueError):
GradLogPDFGaussian([1, 1], self.test_model)
def test_gradient(self):
grad, log = self.test_gradient.get_gradient_log_pdf()
np.testing.assert_almost_equal(
grad, np.array([0.05436475, 0.49454937, 0.75465073, 0.77837868])
)
np.testing.assert_almost_equal(log, -3.21046521505)
class TestLeapFrog(unittest.TestCase):
def setUp(self):
mean = np.array([-1, 1, -1])
covariance = np.array([[1, 0.6, 0.5], [0.6, 2, 0.3], [0.5, 0.3, 1]])
self.test_model = JGD(["x", "y", "z"], mean, covariance)
position = [0, 0, 0]
momentum = [-1, -1, -1]
self.test_with_grad_log = LeapFrog(
model=self.test_model,
position=position,
momentum=momentum,
stepsize=0.3,
grad_log_pdf=GradLogPDFGaussian,
grad_log_position=None,
)
grad_log_position, _ = GradLogPDFGaussian(
position, self.test_model
).get_gradient_log_pdf()
self.test_without_grad_log = LeapFrog(
model=self.test_model,
position=position,
momentum=momentum,
stepsize=0.4,
grad_log_pdf=GradLogPDFGaussian,
grad_log_position=grad_log_position,
)
def test_errors(self):
with self.assertRaises(TypeError):
LeapFrog(
model=self.test_model,
position=1,
momentum=[1, 1],
stepsize=0.1,
grad_log_pdf=GradLogPDFGaussian,
)
with self.assertRaises(TypeError):
LeapFrog(
model=self.test_model,
position=[1, 1],
momentum=1,
stepsize=0.1,
grad_log_pdf=GradLogPDFGaussian,
)
with self.assertRaises(ValueError):
LeapFrog(
model=self.test_model,
position=[1, 1],
momentum=[1],
stepsize=0.1,
grad_log_pdf=GradLogPDFGaussian,
)
with self.assertRaises(TypeError):
LeapFrog(
model=self.test_model,
position=[1],
momentum=[1],
stepsize=0.1,
grad_log_pdf=1,
)
with self.assertRaises(ValueError):
LeapFrog(
model=self.test_model,
position=[1, 1],
momentum=[1, 1],
stepsize=0.1,
grad_log_pdf=GradLogPDFGaussian,
)
with self.assertRaises(TypeError):
LeapFrog(
model=self.test_model,
position=[1, 1, 1],
momentum=[1, 1, 1],
stepsize=0.1,
grad_log_pdf=GradLogPDFGaussian,
grad_log_position=1,
)
with self.assertRaises(ValueError):
LeapFrog(
model=self.test_model,
position=[1, 1, 1],
momentum=[1, 1, 1],
stepsize=0.1,
grad_log_pdf=GradLogPDFGaussian,
grad_log_position=[1, 1],
)
def test_leapfrog_methods(self):
new_pos, new_momentum, new_grad = self.test_with_grad_log.get_proposed_values()
np.testing.assert_almost_equal(
new_pos, np.array([-0.35634146, -0.25609756, -0.33])
)
np.testing.assert_almost_equal(
new_momentum, np.array([-1.3396624, -0.70344884, -1.16963415])
)
np.testing.assert_almost_equal(
new_grad, np.array([-1.0123835, 1.00139798, -0.46422764])
)
(
new_pos,
new_momentum,
new_grad,
) = self.test_without_grad_log.get_proposed_values()
np.testing.assert_almost_equal(
new_pos, np.array([-0.5001626, -0.32195122, -0.45333333])
)
np.testing.assert_almost_equal(
new_momentum, np.array([-1.42947981, -0.60709102, -1.21246612])
)
np.testing.assert_almost_equal(
new_grad, np.array([-0.89536651, 0.98893516, -0.39566396])
)
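    # For reference, one leapfrog step with stepsize eps updates position x and
    # momentum p roughly as (standard HMC leapfrog sketch, not copied from
    # pgmpy's implementation):
    #
    #     p_half = p + (eps / 2) * grad_log_pdf(x)
    #     x_new  = x + eps * p_half
    #     p_new  = p_half + (eps / 2) * grad_log_pdf(x_new)
    #
    # The proposed values asserted above should correspond to one such update
    # with the stepsizes given in setUp, assuming pgmpy follows this scheme.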
def tearDown(self):
del self.test_model
del self.test_with_grad_log
del self.test_without_grad_log
class TestModifiedEuler(unittest.TestCase):
def setUp(self):
mean = np.array([0, 0])
covariance = np.array([[-1, 0.8], [0.8, 3]])
self.test_model = JGD(["x", "y"], mean, covariance)
position = [0, 0]
momentum = [-2, 1]
self.test_with_grad_log = ModifiedEuler(
model=self.test_model,
position=position,
momentum=momentum,
stepsize=0.5,
grad_log_pdf=GradLogPDFGaussian,
grad_log_position=None,
)
grad_log_position, _ = GradLogPDFGaussian(
position, self.test_model
).get_gradient_log_pdf()
self.test_without_grad_log = ModifiedEuler(
model=self.test_model,
position=position,
momentum=momentum,
stepsize=0.3,
grad_log_pdf=GradLogPDFGaussian,
grad_log_position=grad_log_position,
)
def test_modified_euler_methods(self):
new_pos, new_momentum, new_grad = self.test_with_grad_log.get_proposed_values()
np.testing.assert_almost_equal(new_pos, np.array([-1.0, 0.5]))
np.testing.assert_almost_equal(new_momentum, np.array([-2.0, 1.0]))
np.testing.assert_almost_equal(new_grad, np.array([-0.93406593, 0.08241758]))
(
new_pos,
new_momentum,
new_grad,
) = self.test_without_grad_log.get_proposed_values()
np.testing.assert_almost_equal(new_pos, np.array([-0.6, 0.3]))
np.testing.assert_almost_equal(new_momentum, np.array([-2.0, 1.0]))
np.testing.assert_almost_equal(new_grad, np.array([-0.56043956, 0.04945055]))
def tearDown(self):
del self.test_model
del self.test_with_grad_log
del self.test_without_grad_log
|
import asyncio
import warnings
from datetime import timedelta
from typing import List, Iterable, Union, TYPE_CHECKING, Dict
import discord
if TYPE_CHECKING:
from .. import Config
from ..bot import Red
from ..commands import Context
async def mass_purge(messages: List[discord.Message], channel: discord.TextChannel):
"""Bulk delete messages from a channel.
If more than 100 messages are supplied, the bot will delete 100 messages at
a time, sleeping between each action.
Note
----
Messages must not be older than 14 days, and the bot must not be a user
account.
Parameters
----------
messages : `list` of `discord.Message`
The messages to bulk delete.
channel : discord.TextChannel
The channel to delete messages from.
Raises
------
discord.Forbidden
You do not have proper permissions to delete the messages or you’re not
using a bot account.
discord.HTTPException
Deleting the messages failed.
"""
while messages:
# discord.NotFound can be raised when `len(messages) == 1` and the message does not exist.
# As a result of this obscure behavior, this error needs to be caught just in case.
try:
await channel.delete_messages(messages[:100])
except discord.errors.HTTPException:
pass
messages = messages[100:]
await asyncio.sleep(1.5)
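# Minimal usage sketch (the channel and history call are illustrative; assumes
# discord.py 1.x, where `history(...).flatten()` is available):
#
#     to_delete = await channel.history(limit=500).flatten()
#     await mass_purge(to_delete, channel)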
async def slow_deletion(messages: Iterable[discord.Message]):
"""Delete a list of messages one at a time.
Any exceptions raised when trying to delete the message will be silenced.
Parameters
----------
messages : `iterable` of `discord.Message`
The messages to delete.
"""
for message in messages:
try:
await message.delete()
except discord.HTTPException:
pass
def get_audit_reason(author: discord.Member, reason: str = None, *, shorten: bool = False):
"""Construct a reason to appear in the audit log.
Parameters
----------
author : discord.Member
The author behind the audit log action.
reason : str
The reason behind the audit log action.
shorten : bool
When set to ``True``, the returned audit reason string will be
shortened to fit the max length allowed by Discord audit logs.
Returns
-------
str
The formatted audit log reason.
"""
audit_reason = (
"Action requested by {} (ID {}). Reason: {}".format(author, author.id, reason)
if reason
else "Action requested by {} (ID {}).".format(author, author.id)
)
if shorten and len(audit_reason) > 512:
audit_reason = f"{audit_reason[:509]}..."
return audit_reason
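# Worked example of the format produced above (the member shown is hypothetical):
#
#     get_audit_reason(member, "Spamming in support channels")
#     -> "Action requested by SomeUser#0001 (ID 123456789). Reason: Spamming in support channels"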
async def is_allowed_by_hierarchy(
bot: "Red", settings: "Config", guild: discord.Guild, mod: discord.Member, user: discord.Member
):
warnings.warn(
"`is_allowed_by_hierarchy()` is deprecated since Red 3.4.1"
" and will be removed in the first minor release after 2020-11-31.",
DeprecationWarning,
stacklevel=2,
)
if not await settings.guild(guild).respect_hierarchy():
return True
is_special = mod == guild.owner or await bot.is_owner(mod)
return mod.top_role.position > user.top_role.position or is_special
async def is_mod_or_superior(
bot: "Red", obj: Union[discord.Message, discord.Member, discord.Role]
):
"""Check if an object has mod or superior permissions.
If a message is passed, its author's permissions are checked. If a role is
passed, it simply checks if it is one of either the admin or mod roles.
Parameters
----------
bot : redbot.core.bot.Red
The bot object.
obj : `discord.Message` or `discord.Member` or `discord.Role`
The object to check permissions for.
Returns
-------
bool
:code:`True` if the object has mod permissions.
Raises
------
TypeError
If the wrong type of ``obj`` was passed.
"""
if isinstance(obj, discord.Message):
user = obj.author
elif isinstance(obj, discord.Member):
user = obj
elif isinstance(obj, discord.Role):
gid = obj.guild.id
        if obj.id in await bot.get_admin_role_ids(gid):
return True
        if obj.id in await bot.get_mod_role_ids(gid):
return True
return False
else:
raise TypeError("Only messages, members or roles may be passed")
if await bot.is_owner(user):
return True
if await bot.is_mod(user):
return True
return False
def strfdelta(delta: timedelta):
"""Format a timedelta object to a message with time units.
Parameters
----------
delta : datetime.timedelta
The duration to parse.
Returns
-------
str
A message representing the timedelta with units.
"""
s = []
if delta.days:
ds = "%i day" % delta.days
if delta.days > 1:
ds += "s"
s.append(ds)
hrs, rem = divmod(delta.seconds, 60 * 60)
if hrs:
hs = "%i hr" % hrs
if hrs > 1:
hs += "s"
s.append(hs)
mins, secs = divmod(rem, 60)
if mins:
s.append("%i min" % mins)
if secs:
s.append("%i sec" % secs)
return " ".join(s)
async def is_admin_or_superior(
bot: "Red", obj: Union[discord.Message, discord.Member, discord.Role]
):
"""Same as `is_mod_or_superior` except for admin permissions.
If a message is passed, its author's permissions are checked. If a role is
passed, it simply checks if it is the admin role.
Parameters
----------
bot : redbot.core.bot.Red
The bot object.
obj : `discord.Message` or `discord.Member` or `discord.Role`
The object to check permissions for.
Returns
-------
bool
:code:`True` if the object has admin permissions.
Raises
------
TypeError
If the wrong type of ``obj`` was passed.
"""
if isinstance(obj, discord.Message):
user = obj.author
elif isinstance(obj, discord.Member):
user = obj
elif isinstance(obj, discord.Role):
return obj.id in await bot.get_admin_role_ids(obj.guild.id)
else:
raise TypeError("Only messages, members or roles may be passed")
if await bot.is_owner(user):
return True
if await bot.is_admin(user):
return True
return False
async def check_permissions(ctx: "Context", perms: Dict[str, bool]) -> bool:
"""Check if the author has required permissions.
This will always return ``True`` if the author is a bot owner, or
has the ``administrator`` permission. If ``perms`` is empty, this
will only check if the user is a bot owner.
Parameters
----------
ctx : Context
The command invocation context to check.
perms : Dict[str, bool]
A dictionary mapping permissions to their required states.
Valid permission names are those listed as properties of
the `discord.Permissions` class.
Returns
-------
bool
``True`` if the author has the required permissions.
"""
if await ctx.bot.is_owner(ctx.author):
return True
elif not perms:
return False
resolved = ctx.channel.permissions_for(ctx.author)
return resolved.administrator or all(
getattr(resolved, name, None) == value for name, value in perms.items()
)
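# Minimal usage sketch inside a command (the permission name follows
# discord.Permissions attributes; the surrounding command is hypothetical):
#
#     if await check_permissions(ctx, {"manage_messages": True}):
#         ...  # proceed with the privileged action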
|
import asyncio
import datetime
import functools
import logging
import ssl
import threading
from aiohttp.test_utils import make_mocked_request
import pytest
import requests_mock as _requests_mock
from homeassistant import core as ha, loader, runner, util
from homeassistant.auth.const import GROUP_ID_ADMIN, GROUP_ID_READ_ONLY
from homeassistant.auth.providers import homeassistant, legacy_api_password
from homeassistant.components import mqtt
from homeassistant.components.websocket_api.auth import (
TYPE_AUTH,
TYPE_AUTH_OK,
TYPE_AUTH_REQUIRED,
)
from homeassistant.components.websocket_api.http import URL
from homeassistant.const import ATTR_NOW, EVENT_TIME_CHANGED
from homeassistant.exceptions import ServiceNotFound
from homeassistant.helpers import event
from homeassistant.setup import async_setup_component
from homeassistant.util import location
from tests.async_mock import MagicMock, Mock, patch
from tests.ignore_uncaught_exceptions import IGNORE_UNCAUGHT_EXCEPTIONS
pytest.register_assert_rewrite("tests.common")
from tests.common import ( # noqa: E402, isort:skip
CLIENT_ID,
INSTANCES,
MockUser,
async_fire_mqtt_message,
async_test_home_assistant,
mock_storage as mock_storage,
)
from tests.test_util.aiohttp import mock_aiohttp_client # noqa: E402, isort:skip
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))
# Disable fixtures overriding our beautiful policy
asyncio.set_event_loop_policy = lambda policy: None
def pytest_configure(config):
"""Register marker for tests that log exceptions."""
config.addinivalue_line(
"markers", "no_fail_on_log_exception: mark test to not fail on logged exception"
)
def check_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
async def guard_func(*args, **kwargs):
real = kwargs.pop("_test_real", None)
if not real:
raise Exception(
                'Forgot to mock or pass "_test_real=True" to %s' % func.__name__
)
return await func(*args, **kwargs)
return guard_func
# Guard a few functions that would make network connections
location.async_detect_location_info = check_real(location.async_detect_location_info)
util.get_local_ip = lambda: "127.0.0.1"
@pytest.fixture(autouse=True)
def verify_cleanup():
"""Verify that the test has cleaned up resources correctly."""
threads_before = frozenset(threading.enumerate())
yield
if len(INSTANCES) >= 2:
count = len(INSTANCES)
for inst in INSTANCES:
inst.stop()
pytest.exit(f"Detected non stopped instances ({count}), aborting test run")
threads = frozenset(threading.enumerate()) - threads_before
assert not threads
@pytest.fixture
def hass_storage():
"""Fixture to mock storage."""
with mock_storage() as stored_data:
yield stored_data
@pytest.fixture
def hass(loop, hass_storage, request):
"""Fixture to provide a test instance of Home Assistant."""
def exc_handle(loop, context):
"""Handle exceptions by rethrowing them, which will fail the test."""
# Most of these contexts will contain an exception, but not all.
# The docs note the key as "optional"
# See https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.call_exception_handler
if "exception" in context:
exceptions.append(context["exception"])
else:
exceptions.append(
Exception(
"Received exception handler without exception, but with message: %s"
% context["message"]
)
)
orig_exception_handler(loop, context)
exceptions = []
hass = loop.run_until_complete(async_test_home_assistant(loop))
orig_exception_handler = loop.get_exception_handler()
loop.set_exception_handler(exc_handle)
yield hass
loop.run_until_complete(hass.async_stop(force=True))
for ex in exceptions:
if (
request.module.__name__,
request.function.__name__,
) in IGNORE_UNCAUGHT_EXCEPTIONS:
continue
if isinstance(ex, ServiceNotFound):
continue
raise ex
@pytest.fixture
async def stop_hass():
"""Make sure all hass are stopped."""
orig_hass = ha.HomeAssistant
created = []
def mock_hass():
hass_inst = orig_hass()
created.append(hass_inst)
return hass_inst
with patch("homeassistant.core.HomeAssistant", mock_hass):
yield
for hass_inst in created:
if hass_inst.state == ha.CoreState.stopped:
continue
with patch.object(hass_inst.loop, "stop"):
await hass_inst.async_block_till_done()
await hass_inst.async_stop(force=True)
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
@pytest.fixture
def mock_device_tracker_conf():
"""Prevent device tracker from reading/writing data."""
devices = []
async def mock_update_config(path, id, entity):
devices.append(entity)
with patch(
"homeassistant.components.device_tracker.legacy"
".DeviceTracker.async_update_config",
side_effect=mock_update_config,
), patch(
"homeassistant.components.device_tracker.legacy.async_load_config",
side_effect=lambda *args: devices,
):
yield devices
@pytest.fixture
def hass_access_token(hass, hass_admin_user):
"""Return an access token to access Home Assistant."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_admin_user, CLIENT_ID)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def hass_owner_user(hass, local_auth):
"""Return a Home Assistant admin user."""
return MockUser(is_owner=True).add_to_hass(hass)
@pytest.fixture
def hass_admin_user(hass, local_auth):
"""Return a Home Assistant admin user."""
admin_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_ADMIN)
)
return MockUser(groups=[admin_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_user(hass, local_auth):
"""Return a Home Assistant read only user."""
read_only_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_READ_ONLY)
)
return MockUser(groups=[read_only_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_access_token(hass, hass_read_only_user):
"""Return a Home Assistant read only user."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_read_only_user, CLIENT_ID)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def legacy_auth(hass):
"""Load legacy API password provider."""
prv = legacy_api_password.LegacyApiPasswordAuthProvider(
hass,
hass.auth._store,
{"type": "legacy_api_password", "api_password": "test-password"},
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def local_auth(hass):
"""Load local auth provider."""
prv = homeassistant.HassAuthProvider(
hass, hass.auth._store, {"type": "homeassistant"}
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def hass_client(hass, aiohttp_client, hass_access_token):
"""Return an authenticated HTTP client."""
async def auth_client():
"""Return an authenticated client."""
return await aiohttp_client(
hass.http.app, headers={"Authorization": f"Bearer {hass_access_token}"}
)
return auth_client
@pytest.fixture
def current_request(hass):
"""Mock current request."""
with patch("homeassistant.helpers.network.current_request") as mock_request_context:
mocked_request = make_mocked_request(
"GET",
"/some/request",
headers={"Host": "example.com"},
sslcontext=ssl.SSLContext(ssl.PROTOCOL_TLS),
)
mock_request_context.get = Mock(return_value=mocked_request)
yield mock_request_context
@pytest.fixture
def hass_ws_client(aiohttp_client, hass_access_token, hass):
"""Websocket client fixture connected to websocket server."""
async def create_client(hass=hass, access_token=hass_access_token):
"""Create a websocket client."""
assert await async_setup_component(hass, "websocket_api", {})
client = await aiohttp_client(hass.http.app)
with patch("homeassistant.components.http.auth.setup_auth"):
websocket = await client.ws_connect(URL)
auth_resp = await websocket.receive_json()
assert auth_resp["type"] == TYPE_AUTH_REQUIRED
if access_token is None:
await websocket.send_json(
{"type": TYPE_AUTH, "access_token": "incorrect"}
)
else:
await websocket.send_json(
{"type": TYPE_AUTH, "access_token": access_token}
)
auth_ok = await websocket.receive_json()
assert auth_ok["type"] == TYPE_AUTH_OK
# wrap in client
websocket.client = client
return websocket
return create_client
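# Illustrative test using the fixture above (the test name is hypothetical; the
# "ping"/"pong" exchange is part of the websocket_api component):
#
#     async def test_ping(hass, hass_ws_client):
#         client = await hass_ws_client(hass)
#         await client.send_json({"id": 5, "type": "ping"})
#         msg = await client.receive_json()
#         assert msg["id"] == 5
#         assert msg["type"] == "pong"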
@pytest.fixture(autouse=True)
def fail_on_log_exception(request, monkeypatch):
"""Fixture to fail if a callback wrapped by catch_log_exception or coroutine wrapped by async_create_catching_coro throws."""
if "no_fail_on_log_exception" in request.keywords:
return
def log_exception(format_err, *args):
raise
monkeypatch.setattr("homeassistant.util.logging.log_exception", log_exception)
@pytest.fixture
def mqtt_config():
"""Fixture to allow overriding MQTT config."""
return None
@pytest.fixture
def mqtt_client_mock(hass):
"""Fixture to mock MQTT client."""
mid = 0
def get_mid():
nonlocal mid
mid += 1
return mid
class FakeInfo:
def __init__(self, mid):
self.mid = mid
self.rc = 0
with patch("paho.mqtt.client.Client") as mock_client:
@ha.callback
def _async_fire_mqtt_message(topic, payload, qos, retain):
async_fire_mqtt_message(hass, topic, payload, qos, retain)
mid = get_mid()
mock_client.on_publish(0, 0, mid)
return FakeInfo(mid)
def _subscribe(topic, qos=0):
mid = get_mid()
mock_client.on_subscribe(0, 0, mid)
return (0, mid)
def _unsubscribe(topic):
mid = get_mid()
mock_client.on_unsubscribe(0, 0, mid)
return (0, mid)
mock_client = mock_client.return_value
mock_client.connect.return_value = 0
mock_client.subscribe.side_effect = _subscribe
mock_client.unsubscribe.side_effect = _unsubscribe
mock_client.publish.side_effect = _async_fire_mqtt_message
yield mock_client
@pytest.fixture
async def mqtt_mock(hass, mqtt_client_mock, mqtt_config):
"""Fixture to mock MQTT component."""
if mqtt_config is None:
mqtt_config = {mqtt.CONF_BROKER: "mock-broker"}
result = await async_setup_component(hass, mqtt.DOMAIN, {mqtt.DOMAIN: mqtt_config})
assert result
await hass.async_block_till_done()
# Workaround: asynctest==0.13 fails on @functools.lru_cache
spec = dir(hass.data["mqtt"])
spec.remove("_matching_subscriptions")
mqtt_component_mock = MagicMock(
return_value=hass.data["mqtt"],
spec_set=spec,
wraps=hass.data["mqtt"],
)
mqtt_component_mock._mqttc = mqtt_client_mock
hass.data["mqtt"] = mqtt_component_mock
component = hass.data["mqtt"]
component.reset_mock()
return component
@pytest.fixture
def mock_zeroconf():
"""Mock zeroconf."""
with patch("homeassistant.components.zeroconf.HaZeroconf") as mock_zc:
yield mock_zc.return_value
@pytest.fixture
def legacy_patchable_time():
"""Allow time to be patchable by using event listeners instead of asyncio loop."""
@ha.callback
@loader.bind_hass
def async_track_point_in_utc_time(hass, action, point_in_time):
"""Add a listener that fires once after a specific point in UTC time."""
# Ensure point_in_time is UTC
point_in_time = event.dt_util.as_utc(point_in_time)
# Since this is called once, we accept a HassJob so we can avoid
# having to figure out how to call the action every time its called.
job = action if isinstance(action, ha.HassJob) else ha.HassJob(action)
@ha.callback
def point_in_time_listener(event):
"""Listen for matching time_changed events."""
now = event.data[ATTR_NOW]
if now < point_in_time or hasattr(point_in_time_listener, "run"):
return
# Set variable so that we will never run twice.
# Because the event bus might have to wait till a thread comes
# available to execute this listener it might occur that the
# listener gets lined up twice to be executed. This will make
# sure the second time it does nothing.
setattr(point_in_time_listener, "run", True)
async_unsub()
hass.async_run_hass_job(job, now)
async_unsub = hass.bus.async_listen(EVENT_TIME_CHANGED, point_in_time_listener)
return async_unsub
@ha.callback
@loader.bind_hass
def async_track_utc_time_change(
hass, action, hour=None, minute=None, second=None, local=False
):
"""Add a listener that will fire if time matches a pattern."""
job = ha.HassJob(action)
# We do not have to wrap the function with time pattern matching logic
# if no pattern given
if all(val is None for val in (hour, minute, second)):
@ha.callback
def time_change_listener(ev) -> None:
"""Fire every time event that comes in."""
hass.async_run_hass_job(job, ev.data[ATTR_NOW])
return hass.bus.async_listen(EVENT_TIME_CHANGED, time_change_listener)
matching_seconds = event.dt_util.parse_time_expression(second, 0, 59)
matching_minutes = event.dt_util.parse_time_expression(minute, 0, 59)
matching_hours = event.dt_util.parse_time_expression(hour, 0, 23)
next_time = None
def calculate_next(now) -> None:
"""Calculate and set the next time the trigger should fire."""
nonlocal next_time
localized_now = event.dt_util.as_local(now) if local else now
next_time = event.dt_util.find_next_time_expression_time(
localized_now, matching_seconds, matching_minutes, matching_hours
)
# Make sure rolling back the clock doesn't prevent the timer from
# triggering.
last_now = None
@ha.callback
def pattern_time_change_listener(ev) -> None:
"""Listen for matching time_changed events."""
nonlocal next_time, last_now
now = ev.data[ATTR_NOW]
if last_now is None or now < last_now:
# Time rolled back or next time not yet calculated
calculate_next(now)
last_now = now
if next_time <= now:
hass.async_run_hass_job(
job, event.dt_util.as_local(now) if local else now
)
calculate_next(now + datetime.timedelta(seconds=1))
# We can't use async_track_point_in_utc_time here because it would
# break in the case that the system time abruptly jumps backwards.
# Our custom last_now logic takes care of resolving that scenario.
return hass.bus.async_listen(EVENT_TIME_CHANGED, pattern_time_change_listener)
with patch(
"homeassistant.helpers.event.async_track_point_in_utc_time",
async_track_point_in_utc_time,
), patch(
"homeassistant.helpers.event.async_track_utc_time_change",
async_track_utc_time_change,
):
yield
|
import copy
import json
import os
from typing import Dict, List, Text
from absl import flags
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import data
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_cluster_parameter_group
from perfkitbenchmarker.providers.aws import aws_cluster_subnet_group
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
VALID_EXIST_STATUSES = ['creating', 'available']
DELETION_STATUSES = ['deleting']
READY_STATUSES = ['available']
ELIMINATE_AUTOMATED_SNAPSHOT_RETENTION = '--automated-snapshot-retention-period=0'
DEFAULT_DATABASE_NAME = 'dev'
BOOTSTRAP_DB = 'sample'
REDSHIFT_JDBC_JAR = 'redshift-jdbc-client-1.0.jar'
def AddTags(resource_arn, region):
"""Adds tags to a Redshift cluster created by PerfKitBenchmarker.
Args:
resource_arn: The arn of AWS resource to operate on.
region: The AWS region resource was created in.
"""
cmd_prefix = util.AWS_PREFIX
tag_cmd = cmd_prefix + ['redshift', 'create-tags', '--region=%s' % region,
'--resource-name', resource_arn, '--tags']
tag_cmd += util.MakeFormattedDefaultTags()
vm_util.IssueCommand(tag_cmd)
def GetDefaultRegion():
"""Utility method to supply the default region."""
cmd_prefix = util.AWS_PREFIX
default_region_cmd = cmd_prefix + ['configure', 'get', 'region']
stdout, _, _ = vm_util.IssueCommand(default_region_cmd)
return stdout
def GetRedshiftClientInterface(database: str, user: str,
password: str) -> edw_service.EdwClientInterface:
"""Builds and Returns the requested Redshift client Interface.
Args:
database: Name of the database to run queries against.
user: Redshift username for authentication.
password: Redshift password for authentication.
Returns:
A concrete Client Interface object.
Raises:
RuntimeError: if an unsupported redshift_client_interface is requested
"""
if FLAGS.redshift_client_interface == 'CLI':
return CliClientInterface(database, user, password)
if FLAGS.redshift_client_interface == 'JDBC':
return JdbcClientInterface(database, user, password)
raise RuntimeError('Unknown Redshift Client Interface requested.')
class CliClientInterface(edw_service.EdwClientInterface):
"""Command Line Client Interface class for Redshift.
Uses the native Redshift client that ships with pgbench.
https://docs.aws.amazon.com/cli/latest/reference/redshift/index.html
Attributes:
host: Host endpoint to be used for interacting with the cluster.
database: Name of the database to run queries against.
user: Redshift username for authentication.
password: Redshift password for authentication.
"""
def __init__(self, database: str, user: str, password: str):
self.database = database
self.user = user
self.password = password
def SetProvisionedAttributes(self, bm_spec: benchmark_spec.BenchmarkSpec):
"""Sets any attributes that were unknown during initialization."""
super(CliClientInterface, self).SetProvisionedAttributes(bm_spec)
self.host = bm_spec.edw_service.endpoint
def Prepare(self, package_name: str) -> None:
"""Prepares the client vm to execute query.
Installs the redshift tool dependencies.
Args:
package_name: String name of the package defining the preprovisioned data
(certificates, etc.) to extract and use during client vm preparation.
"""
self.client_vm.Install('pip')
self.client_vm.RemoteCommand('sudo pip install absl-py')
self.client_vm.Install('pgbench')
# Push the framework to execute a sql query and gather performance details
service_specific_dir = os.path.join('edw', Redshift.SERVICE_TYPE)
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir, 'script_runner.sh')))
runner_permission_update_cmd = 'chmod 755 {}'.format('script_runner.sh')
self.client_vm.RemoteCommand(runner_permission_update_cmd)
self.client_vm.PushFile(
data.ResourcePath(os.path.join('edw', 'script_driver.py')))
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir,
'provider_specific_script_driver.py')))
def ExecuteQuery(self, query_name: Text) -> (float, Dict[str, str]):
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute
Returns:
A tuple of (execution_time, execution details)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
performance_details: A dictionary of query execution attributes eg. job_id
"""
query_command = ('python script_driver.py --script={} --host={} '
'--database={} --user={} --password={}').format(
query_name, self.host, self.database, self.user,
self.password)
stdout, _ = self.client_vm.RemoteCommand(query_command)
performance = json.loads(stdout)
details = copy.copy(self.GetMetadata())
details['job_id'] = performance[query_name]['job_id']
return float(performance[query_name]['execution_time']), details
def GetMetadata(self) -> Dict[str, str]:
"""Gets the Metadata attributes for the Client Interface."""
return {'client': FLAGS.redshift_client_interface}
class JdbcClientInterface(edw_service.EdwClientInterface):
"""Native JDBC Client Interface class for Redshift.
https://docs.aws.amazon.com/redshift/latest/mgmt/jdbc20-install.html
Attributes:
host: Host endpoint to be used for interacting with the cluster.
database: Name of the database to run queries against.
user: Redshift username for authentication.
password: Redshift password for authentication.
"""
def __init__(self, database: str, user: str, password: str):
self.database = database
# Use the default port.
self.port = '5439'
self.user = user
self.password = password
def SetProvisionedAttributes(self, bm_spec: benchmark_spec.BenchmarkSpec):
"""Sets any attributes that were unknown during initialization."""
super(JdbcClientInterface, self).SetProvisionedAttributes(bm_spec)
endpoint = bm_spec.edw_service.endpoint
self.host = f'jdbc:redshift://{endpoint}:{self.port}/{self.database}'
def Prepare(self, package_name: str) -> None:
"""Prepares the client vm to execute query.
Installs the redshift tool dependencies.
Args:
package_name: String name of the package defining the preprovisioned data
(certificates, etc.) to extract and use during client vm preparation.
"""
self.client_vm.Install('openjdk')
# Push the executable jar to the working directory on client vm
self.client_vm.InstallPreprovisionedPackageData(package_name,
[REDSHIFT_JDBC_JAR], '')
def ExecuteQuery(self, query_name: Text) -> (float, Dict[str, str]):
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute.
Returns:
A tuple of (execution_time, execution details)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
performance_details: A dictionary of query execution attributes eg. job_id
"""
query_command = ('java -cp {} com.google.cloud.performance.edw.Single '
'--endpoint {} --query_file {}').format(
REDSHIFT_JDBC_JAR, self.host, query_name)
stdout, _ = self.client_vm.RemoteCommand(query_command)
performance = json.loads(stdout)
details = copy.copy(self.GetMetadata())
if 'failure_reason' in performance:
details.update({'failure_reason': performance['failure_reason']})
else:
details.update(performance['details'])
return performance['query_wall_time_in_secs'], details
def ExecuteSimultaneous(self, submission_interval: int,
queries: List[str]) -> str:
"""Executes queries simultaneously on client and return performance details.
Simultaneous app expects queries as white space separated query file names.
Args:
submission_interval: Simultaneous query submission interval in
milliseconds.
queries: List of strings (names) of queries to execute.
Returns:
A serialized dictionary of execution details.
"""
cmd = ('java -cp {} com.google.cloud.performance.edw.Simultaneous '
'--endpoint {} --submission_interval {} --query_files {}'.format(
REDSHIFT_JDBC_JAR, self.host, submission_interval,
' '.join(queries)))
stdout, _ = self.client_vm.RemoteCommand(cmd)
return stdout
def ExecuteThroughput(self, concurrency_streams: List[List[str]]) -> str:
"""Executes a throughput test and returns performance details.
Args:
concurrency_streams: List of streams to execute simultaneously, each of
which is a list of string names of queries.
Returns:
A serialized dictionary of execution details.
"""
cmd = ('java -cp {} com.google.cloud.performance.edw.Throughput '
'--endpoint {} --query_streams {}'.format(
REDSHIFT_JDBC_JAR, self.host,
' '.join([','.join(stream) for stream in concurrency_streams])))
stdout, _ = self.client_vm.RemoteCommand(cmd)
return stdout
def GetMetadata(self) -> Dict[str, str]:
"""Gets the Metadata attributes for the Client Interface."""
return {'client': FLAGS.redshift_client_interface}
class Redshift(edw_service.EdwService):
"""Object representing a Redshift cluster.
Attributes:
cluster_id: ID of the cluster.
project: ID of the project.
"""
CLOUD = aws.CLOUD
SERVICE_TYPE = 'redshift'
READY_TIMEOUT = 7200
def __init__(self, edw_service_spec):
super(Redshift, self).__init__(edw_service_spec)
# pkb setup attribute
self.project = None
self.cmd_prefix = list(util.AWS_PREFIX)
if FLAGS.zones:
self.zone = FLAGS.zones[0]
self.region = util.GetRegionFromZone(self.zone)
else:
self.region = GetDefaultRegion()
self.cmd_prefix += ['--region', self.region]
# Redshift specific attribute (see if they can be set)
self.cluster_subnet_group = None
self.cluster_parameter_group = None
self.arn = ''
self.cluster_subnet_group = aws_cluster_subnet_group.RedshiftClusterSubnetGroup(
self.cmd_prefix)
self.cluster_parameter_group = aws_cluster_parameter_group.RedshiftClusterParameterGroup(
edw_service_spec.concurrency, self.cmd_prefix)
if self.db is None:
self.db = DEFAULT_DATABASE_NAME
self.client_interface = GetRedshiftClientInterface(self.db, self.user,
self.password)
def _CreateDependencies(self):
self.cluster_subnet_group.Create()
self.cluster_parameter_group.Create()
def _Create(self):
"""Create the redshift cluster resource."""
if self.snapshot:
self.Restore(self.snapshot, self.cluster_identifier)
else:
self.Initialize(self.cluster_identifier, self.node_type, self.node_count,
self.user, self.password, self.cluster_parameter_group,
self.cluster_subnet_group)
def Initialize(self, cluster_identifier, node_type, node_count, user,
password, cluster_parameter_group, cluster_subnet_group):
"""Method to initialize a Redshift cluster from an configuration parameters.
The cluster is initialized in the EC2-VPC platform, that runs it in a
virtual private cloud (VPC). This allows control access to the cluster by
associating one or more VPC security groups with the cluster.
To create a cluster in a VPC, first create an Amazon Redshift cluster subnet
group by providing subnet information of the VPC, and then provide the
subnet group when launching the cluster.
Args:
cluster_identifier: A unique identifier for the cluster.
node_type: The node type to be provisioned for the cluster.
        Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge |
        dc2.large | dc2.8xlarge
node_count: The number of compute nodes in the cluster.
user: The user name associated with the master user account for the
cluster that is being created.
password: The password associated with the master user account for the
cluster that is being created.
cluster_parameter_group: Cluster Parameter Group associated with the
cluster.
cluster_subnet_group: Cluster Subnet Group associated with the cluster.
Returns:
None
Raises:
MissingOption: If any of the required parameters is missing.
"""
if not (cluster_identifier and node_type and user and password):
      raise errors.MissingOption('Need cluster_identifier, node_type, user '
                                 'and password set for creating a cluster.')
prefix = [
'redshift', 'create-cluster', '--cluster-identifier', cluster_identifier
]
if node_count == 1:
worker_count_cmd = ['--cluster-type', 'single-node']
else:
worker_count_cmd = ['--number-of-nodes', str(node_count)]
postfix = [
'--node-type', node_type, '--master-username', user,
'--master-user-password', password, '--cluster-parameter-group-name',
cluster_parameter_group.name, '--cluster-subnet-group-name',
cluster_subnet_group.name, '--publicly-accessible',
ELIMINATE_AUTOMATED_SNAPSHOT_RETENTION
]
cmd = self.cmd_prefix + prefix + worker_count_cmd + postfix
stdout, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
if not stdout:
raise errors.Resource.CreationError('Cluster creation failure: '
'{}'.format(stderr))
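  # For reference, with two dc2.large nodes the command assembled above
  # resolves to roughly the following AWS CLI call (identifiers and region are
  # placeholders):
  #
  #   aws --region us-east-1 redshift create-cluster \
  #     --cluster-identifier pkb-cluster \
  #     --number-of-nodes 2 --node-type dc2.large \
  #     --master-username masteruser --master-user-password <password> \
  #     --cluster-parameter-group-name <parameter-group> \
  #     --cluster-subnet-group-name <subnet-group> \
  #     --publicly-accessible --automated-snapshot-retention-period=0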
def _ValidateSnapshot(self, snapshot_identifier):
"""Validate the presence of a cluster snapshot based on its metadata."""
cmd = self.cmd_prefix + ['redshift', 'describe-cluster-snapshots',
'--snapshot-identifier', snapshot_identifier]
stdout, _, _ = vm_util.IssueCommand(cmd)
if not stdout:
raise errors.Config.InvalidValue('Cluster snapshot indicated by '
'edw_service_cluster_snapshot does not'
' exist: {}.'
.format(snapshot_identifier))
result = json.loads(stdout)
return result['Snapshots'][0]['Status'] == 'available'
def _SnapshotDetails(self, snapshot_identifier):
"""Delete a redshift cluster and disallow creation of a snapshot."""
cmd = self.cmd_prefix + ['redshift', 'describe-cluster-snapshots',
'--snapshot-identifier', snapshot_identifier]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
node_type = result['Snapshots'][0]['NodeType']
node_count = result['Snapshots'][0]['NumberOfNodes']
return node_type, node_count
def Restore(self, snapshot_identifier, cluster_identifier):
"""Method to restore a Redshift cluster from an existing snapshot.
    A snapshot of a cluster in a VPC can be restored only into a VPC.
    Therefore, the subnet group name where the cluster is to be restored must
    be provided. vpc-security-group-ids are not specified at the time of
    restoration; it is expected that the default VPC security group associated
    with the cluster has the appropriate ingress and egress rules.
Ref: http://docs.aws.amazon.com/cli/latest/reference/
redshift/restore-from-cluster-snapshot.html
Args:
snapshot_identifier: Identifier of the snapshot to restore
cluster_identifier: Identifier of the restored cluster
Returns:
None
"""
if not (self.user and self.password and self.db):
raise errors.MissingOption('Need the db, user and password set for '
'restoring a cluster')
if self._ValidateSnapshot(snapshot_identifier):
node_type, node_count = self._SnapshotDetails(snapshot_identifier)
# For a restored cluster update the cluster shape and size based on the
# snapshot's configuration
self.node_type = node_type
self.node_count = node_count
cmd = self.cmd_prefix + ['redshift', 'restore-from-cluster-snapshot',
'--cluster-identifier', cluster_identifier,
'--snapshot-identifier', snapshot_identifier,
'--cluster-subnet-group-name',
self.cluster_subnet_group.name,
'--cluster-parameter-group-name',
self.cluster_parameter_group.name,
'--publicly-accessible',
'--automated-snapshot-retention-period=1']
stdout, stderr, _ = vm_util.IssueCommand(cmd)
if not stdout:
raise errors.Resource.CreationError('Cluster creation failure: '
'{}'.format(stderr))
def __DescribeCluster(self):
"""Describe a redshift cluster."""
cmd = self.cmd_prefix + ['redshift', 'describe-clusters',
'--cluster-identifier', self.cluster_identifier]
return vm_util.IssueCommand(cmd, raise_on_failure=False)
def _Exists(self):
"""Method to validate the existence of a redshift cluster.
    Provision pipeline: returns True while the cluster is provisioning (status
    in 'creating' or 'available') to prevent a retry of creation.
    Deletion pipeline: returns True while the cluster is deleting (status in
    'deleting'), which causes a retry of deletion, an idempotent operation.
TODO(saksena): handle the deletion step more cleanly, and spin till deletion
Returns:
Boolean value indicating the existence of a cluster.
"""
stdout, _, _ = self.__DescribeCluster()
if (not stdout or (json.loads(stdout)['Clusters'][0]['ClusterStatus'] not in
VALID_EXIST_STATUSES)):
return False
else:
return True
def _IsReady(self):
"""Method to return if the cluster is ready to handle queries."""
stdout, _, _ = self.__DescribeCluster()
return json.loads(stdout)['Clusters'][0]['ClusterStatus'] in READY_STATUSES
def _PostCreate(self):
"""Perform general post create operations on the cluster.
Get the endpoint to be used for interacting with the cluster and apply
tags on the cluster.
"""
stdout, _, _ = self.__DescribeCluster()
self.endpoint = json.loads(stdout)['Clusters'][0]['Endpoint']['Address']
account = util.GetAccount()
    self.arn = 'arn:aws:redshift:{}:{}:cluster:{}'.format(
        self.region, account, self.cluster_identifier)
AddTags(self.arn, self.region)
def _Delete(self):
"""Delete a redshift cluster and disallow creation of a snapshot."""
cmd = self.cmd_prefix + ['redshift', 'delete-cluster',
'--cluster-identifier', self.cluster_identifier,
'--skip-final-cluster-snapshot']
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _IsDeleting(self):
"""Method to check if the cluster is being deleting."""
stdout, _, _ = self.__DescribeCluster()
if not stdout:
return False
else:
return (json.loads(stdout)['Clusters'][0]['ClusterStatus'] in
DELETION_STATUSES)
def _DeleteDependencies(self):
"""Delete dependencies of a redshift cluster."""
self.cluster_subnet_group.Delete()
self.cluster_parameter_group.Delete()
def GetMetadata(self):
"""Return a dictionary of the metadata for this cluster."""
basic_data = super(Redshift, self).GetMetadata()
basic_data['edw_cluster_concurrency'] = self.concurrency
basic_data['region'] = self.region
if self.snapshot is not None:
basic_data['snapshot'] = self.snapshot
basic_data.update(self.client_interface.GetMetadata())
return basic_data
|
from django.conf import settings
from weblate.machinery import MACHINE_TRANSLATION_SERVICES
from weblate.trans.models import (
Component,
ContributorAgreement,
Project,
Translation,
Unit,
)
from weblate.utils.stats import ProjectLanguage
SPECIALS = {}
def register_perm(*perms):
def wrap_perm(function):
for perm in perms:
SPECIALS[perm] = function
return function
return wrap_perm
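# register_perm stores the decorated function in SPECIALS under each given permission
# codename, e.g. @register_perm("unit.edit", "suggestion.accept") registers a single
# handler for both codenames (see the decorated checks below).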
def check_global_permission(user, permission, obj):
"""Generic permission check for base classes."""
if user.is_superuser:
return True
return user.groups.filter(roles__permissions__codename=permission).exists()
def check_permission(user, permission, obj):
"""Generic permission check for base classes."""
if user.is_superuser:
return True
if isinstance(obj, ProjectLanguage):
obj = obj.project
if isinstance(obj, Project):
return any(
permission in permissions
for permissions, _langs in user.project_permissions[obj.pk]
)
if isinstance(obj, Component):
return (
not obj.restricted
and any(
permission in permissions
for permissions, _langs in user.project_permissions[obj.project_id]
)
) or any(
permission in permissions
for permissions, _langs in user.component_permissions[obj.pk]
)
if isinstance(obj, Translation):
lang = obj.language_id
return (
not obj.component.restricted
and any(
permission in permissions and lang in langs
for permissions, langs in user.project_permissions[
obj.component.project_id
]
)
) or any(
permission in permissions and lang in langs
for permissions, langs in user.component_permissions[obj.component_id]
)
raise ValueError(
f"Not supported type for permission check: {obj.__class__.__name__}"
)
@register_perm("comment.delete", "suggestion.delete")
def check_delete_own(user, permission, obj):
if user.is_authenticated and obj.user == user:
return True
return check_permission(user, permission, obj.unit.translation)
@register_perm("unit.check")
def check_ignore_check(user, permission, check):
if check.is_enforced():
return False
return check_permission(user, permission, check.unit.translation)
def check_can_edit(user, permission, obj, is_vote=False):
translation = component = None
if isinstance(obj, Translation):
translation = obj
component = obj.component
project = component.project
elif isinstance(obj, Component):
component = obj
project = component.project
elif isinstance(obj, Project):
project = obj
else:
raise ValueError("Uknown object for permission check!")
# Email is needed for user to be able to edit
if user.is_authenticated and not user.email:
return False
if component:
# Check component lock
if component.locked:
return False
# Check contributor agreement
if component.agreement and not ContributorAgreement.objects.has_agreed(
user, component
):
return False
# Perform usual permission check
if not check_permission(user, permission, obj):
return False
# Special check for source strings (templates)
if (
translation
and translation.is_template
and not check_permission(user, "unit.template", obj)
):
return False
# Special checks for voting
if is_vote and component and not component.suggestion_voting:
return False
if (
not is_vote
and translation
and component.suggestion_voting
and component.suggestion_autoaccept > 0
and not check_permission(user, "unit.override", obj)
):
return False
# Billing limits
if not project.paid:
return False
return True
@register_perm("unit.review")
def check_unit_review(user, permission, obj, skip_enabled=False):
if not skip_enabled:
if isinstance(obj, Translation):
if not obj.enable_review:
return False
else:
if isinstance(obj, Component):
project = obj.project
else:
project = obj
if not project.source_review and not project.translation_review:
return False
return check_can_edit(user, permission, obj)
@register_perm("unit.edit", "suggestion.accept")
def check_edit_approved(user, permission, obj):
if isinstance(obj, Unit):
unit = obj
obj = unit.translation
# Read only check is unconditional as there is another one
# in PluralTextarea.render
if unit.readonly or (
unit.approved
and not check_unit_review(user, "unit.review", obj, skip_enabled=True)
):
return False
if isinstance(obj, Translation) and obj.is_readonly:
return False
return check_can_edit(user, permission, obj)
@register_perm("unit.delete")
def check_unit_delete(user, permission, obj):
if isinstance(obj, Unit):
obj = obj.translation
if not obj.is_source or obj.is_readonly:
return False
return check_can_edit(user, permission, obj)
@register_perm("unit.add")
def check_unit_add(user, permission, translation):
if not translation.is_source or translation.is_readonly:
return False
if not translation.component.file_format_cls.can_add_unit:
return False
return check_can_edit(user, permission, translation)
@register_perm("translation.add")
def check_component_locked(user, permission, component):
if component.locked:
return True
return check_permission(user, permission, component)
@register_perm("translation.auto")
def check_autotranslate(user, permission, translation):
if isinstance(translation, Translation) and (
(translation.is_source and not translation.component.intermediate)
or translation.is_readonly
):
return False
return check_can_edit(user, permission, translation)
@register_perm("suggestion.vote")
def check_suggestion_vote(user, permission, obj):
if isinstance(obj, Unit):
obj = obj.translation
return check_can_edit(user, permission, obj, is_vote=True)
@register_perm("suggestion.add")
def check_suggestion_add(user, permission, obj):
if isinstance(obj, Unit):
obj = obj.translation
if not obj.component.enable_suggestions:
return False
# Check contributor agreement
if obj.component.agreement and not ContributorAgreement.objects.has_agreed(
user, obj.component
):
return False
return check_permission(user, permission, obj)
@register_perm("upload.perform")
def check_contribute(user, permission, translation):
# Bilingual source translations
if translation.is_source and not translation.is_template:
return (
translation.is_source
and not translation.component.template
and hasattr(translation.component.file_format_cls, "update_bilingual")
and user.has_perm("source.edit", translation)
)
return check_can_edit(user, permission, translation) and (
check_edit_approved(user, "unit.edit", translation)
or check_suggestion_add(user, "suggestion.add", translation)
)
@register_perm("machinery.view")
def check_machinery(user, permission, obj):
# No permission in case there are no machinery services enabled
if not MACHINE_TRANSLATION_SERVICES.exists():
return False
# No machinery for source without intermediate language
if (
isinstance(obj, Translation)
and obj.is_source
and not obj.component.intermediate
):
return False
# Check the actual machinery.view permission
if not check_permission(user, permission, obj):
return False
# Only show machinery to users allowed to translate or suggest
return check_edit_approved(user, "unit.edit", obj) or check_suggestion_add(
user, "suggestion.add", obj
)
@register_perm("translation.delete")
def check_translation_delete(user, permission, obj):
if obj.is_source:
return False
return check_permission(user, permission, obj)
@register_perm("meta:vcs.status")
def check_repository_status(user, permission, obj):
return (
check_permission(user, "vcs.push", obj)
or check_permission(user, "vcs.commit", obj)
or check_permission(user, "vcs.reset", obj)
or check_permission(user, "vcs.update", obj)
)
@register_perm("billing.view")
def check_billing_view(user, permission, obj):
if hasattr(obj, "all_projects"):
if user.is_superuser or obj.owners.filter(pk=user.pk).exists():
return True
# This is a billing object
return any(check_permission(user, permission, prj) for prj in obj.all_projects)
return check_permission(user, permission, obj)
@register_perm("billing:project.permissions")
def check_billing(user, permission, obj):
if "weblate.billing" in settings.INSTALLED_APPS:
if not any(billing.plan.change_access_control for billing in obj.billings):
return False
return check_permission(user, "project.permissions", obj)
|
from Plugwise_Smile.Smile import Smile
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.plugwise.const import (
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_SCAN_INTERVAL
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
TEST_HOST = "1.1.1.1"
TEST_HOSTNAME = "smileabcdef"
TEST_PASSWORD = "test_password"
TEST_PORT = 81
TEST_DISCOVERY = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME}.local.",
"server": f"{TEST_HOSTNAME}.local.",
"properties": {
"product": "smile",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME}.local.",
},
}
@pytest.fixture(name="mock_smile")
def mock_smile():
"""Create a Mock Smile for testing exceptions."""
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock:
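        # Expose the real Smile exception classes on the mock so tests can
        # raise them through side_effect.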
smile_mock.PlugwiseError = Smile.PlugwiseError
smile_mock.InvalidAuthentication = Smile.InvalidAuthentication
smile_mock.ConnectionFailedError = Smile.ConnectionFailedError
smile_mock.return_value.connect.return_value = True
yield smile_mock.return_value
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": TEST_HOST, "password": TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
"host": TEST_HOST,
"password": TEST_PASSWORD,
"port": DEFAULT_PORT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"password": TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
"host": TEST_HOST,
"password": TEST_PASSWORD,
"port": DEFAULT_PORT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result3["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
{"password": TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result4["type"] == "abort"
assert result4["reason"] == "already_configured"
async def test_form_invalid_auth(hass, mock_smile):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_smile.connect.side_effect = Smile.InvalidAuthentication
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": TEST_HOST, "password": TEST_PASSWORD},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass, mock_smile):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_smile.connect.side_effect = Smile.ConnectionFailedError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": TEST_HOST, "password": TEST_PASSWORD},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_cannot_connect_port(hass, mock_smile):
"""Test we handle cannot connect to port error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_smile.connect.side_effect = Smile.ConnectionFailedError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": TEST_HOST, "password": TEST_PASSWORD, "port": TEST_PORT},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_other_problem(hass, mock_smile):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_smile.connect.side_effect = TimeoutError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": TEST_HOST, "password": TEST_PASSWORD},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_options_flow_power(hass, mock_smile) -> None:
"""Test config flow options DSMR environments."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
hass.data[DOMAIN] = {entry.entry_id: {"api": MagicMock(smile_type="power")}}
entry.add_to_hass(hass)
with patch(
"homeassistant.components.plugwise.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 10}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SCAN_INTERVAL: 10,
}
async def test_options_flow_thermo(hass, mock_smile) -> None:
"""Test config flow options for thermostatic environments."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
hass.data[DOMAIN] = {entry.entry_id: {"api": MagicMock(smile_type="thermostat")}}
entry.add_to_hass(hass)
with patch(
"homeassistant.components.plugwise.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 60}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SCAN_INTERVAL: 60,
}
|
import json
import os
import socket
import time
from typing import List
from typing import Tuple
import a_sync
import mock
import yaml
from behave import given
from behave import then
from behave import when
from itest_utils import clear_mesos_tools_cache
from requests import HTTPError
from paasta_tools import drain_lib
from paasta_tools import mesos_tools
from paasta_tools.adhoc_tools import AdhocJobConfig
from paasta_tools.frameworks.adhoc_scheduler import AdhocScheduler
from paasta_tools.frameworks.native_scheduler import create_driver
from paasta_tools.frameworks.native_scheduler import LIVE_TASK_STATES
from paasta_tools.frameworks.native_scheduler import NativeScheduler
from paasta_tools.frameworks.native_scheduler import TASK_RUNNING
from paasta_tools.frameworks.native_service_config import NativeServiceConfig
from paasta_tools.native_mesos_scheduler import main
from paasta_tools.native_mesos_scheduler import paasta_native_services_running_here
from paasta_tools.utils import load_system_paasta_config
@given("a new adhoc config to be deployed")
def new_adhoc_config(context):
context.cluster = "fake_cluster"
context.instance = "fake_instance"
context.service = "fake_service"
context.new_config = AdhocJobConfig(
cluster=context.cluster,
instance=context.instance,
service=context.service,
config_dict={"cpus": 0.1, "mem": 50},
branch_dict={
"docker_image": "busybox",
"desired_state": "start",
"force_bounce": None,
},
)
@given("a new paasta_native config to be deployed, with {num} instances")
def new_paasta_native_config(context, num):
context.cluster = "fake_cluster"
context.instance = "fake_instance"
context.service = "fake_service"
context.new_config = NativeServiceConfig(
cluster=context.cluster,
instance=context.instance,
service=context.service,
config_dict={
"cpus": 0.1,
"mem": 50,
"instances": int(num),
"cmd": "sleep 50",
"drain_method": "test",
},
branch_dict={
"docker_image": "busybox",
"desired_state": "start",
"force_bounce": None,
},
soa_dir="/fake/etc/services",
service_namespace_config=None,
)
@when(
"we start a {scheduler} scheduler with reconcile_backoff {reconcile_backoff} and name {framework_name}"
)
def start_paasta_native_framework(
context, scheduler, reconcile_backoff, framework_name
):
clear_mesos_tools_cache()
system_paasta_config = load_system_paasta_config()
system_paasta_config.config_dict[
"docker_registry"
] = "docker.io" # so busybox runs.
if scheduler == "paasta_native":
scheduler_class = NativeScheduler
elif scheduler == "adhoc":
scheduler_class = AdhocScheduler
else:
raise Exception("unknown scheduler: %s" % scheduler)
context.framework_name = framework_name
context.scheduler = scheduler_class(
service_name=context.service,
instance_name=context.instance,
cluster=context.cluster,
staging_timeout=30,
system_paasta_config=system_paasta_config,
service_config=context.new_config,
reconcile_backoff=int(reconcile_backoff),
)
context.driver = create_driver(
framework_name=framework_name,
scheduler=context.scheduler,
system_paasta_config=system_paasta_config,
)
context.driver.start()
if not hasattr(context, "framework_ids"):
context.framework_ids = []
for _ in range(10):
if context.scheduler.framework_id:
context.framework_ids.append(context.scheduler.framework_id)
break
time.sleep(1)
else:
raise Exception("Expected scheduler to successfully register before timeout")
@then("it should eventually start {num} tasks")
def should_eventually_start_num_tasks(context, num):
num = int(num)
for _ in range(20):
actual_num = len(
[
p
for p in context.scheduler.task_store.get_all_tasks().values()
if p.mesos_task_state == TASK_RUNNING
]
)
if actual_num >= num:
return
time.sleep(1)
raise Exception("Expected %d tasks before timeout, saw %d" % (num, actual_num))
@given("a fresh soa_dir")
def fresh_soa_dir(context):
soa_dir = "/nail/etc/services/"
context.soa_dir = soa_dir
@given(
"paasta_native-cluster.yaml and deployments.json files for service {service} with instance {instance}"
)
def write_paasta_native_cluster_yaml_files(context, service, instance):
if not os.path.exists(os.path.join(context.soa_dir, service)):
os.makedirs(os.path.join(context.soa_dir, service))
with open(
os.path.join(
context.soa_dir, service, "paasta_native-%s.yaml" % context.cluster
),
"w",
) as f:
f.write(
yaml.safe_dump(
{
instance: {
"cmd": 'echo "Taking a nap..." && sleep 1m && echo "Nap time over, back to work"',
"mem": 100,
"cpus": 0.1,
"instances": 1,
}
}
)
)
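    # Write deployments.json covering both the "v1" and "v2" layouts.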
with open(os.path.join(context.soa_dir, service, "deployments.json"), "w") as f:
json.dump(
{
"v1": {
f"{service}:paasta-{context.cluster}.{instance}": {
"docker_image": "busybox",
"desired_state": "start",
"force_bounce": None,
}
},
"v2": {
"deployments": {
f"{context.cluster}.{instance}": {
"docker_image": "busybox",
"git_sha": "deadbeef",
}
},
"controls": {
f"{service}:{context.cluster}.{instance}": {
"desired_state": "start",
"force_bounce": None,
}
},
},
},
f,
)
@when("we run native_mesos_scheduler.main()")
def run_native_mesos_scheduler_main(context):
clear_mesos_tools_cache()
context.main_schedulers = main(
[
"--soa-dir",
context.soa_dir,
"--stay-alive-seconds",
"10",
"--periodic-interval",
"1",
]
)
@then("there should be a framework registered with name {name}")
def should_be_framework_with_id(context, name):
clear_mesos_tools_cache()
assert name in [
f.name for f in a_sync.block(mesos_tools.get_all_frameworks, active_only=True)
]
@then("there should not be a framework registered with name {name}")
def should_not_be_framework_with_name(context, name):
clear_mesos_tools_cache()
assert name not in [
f.name for f in a_sync.block(mesos_tools.get_all_frameworks, active_only=True)
]
@when("we terminate that framework")
def terminate_that_framework(context):
try:
print("terminating framework %s" % context.scheduler.framework_id)
mesos_tools.terminate_framework(context.scheduler.framework_id)
except HTTPError as e:
raise Exception(e.response.text)
@when("we stop that framework without terminating")
def stop_that_framework(context):
context.driver.stop(True)
context.driver.join()
@then("it should have the same ID as before")
def should_have_same_id(context):
assert context.framework_ids[-2] == context.framework_ids[-1]
@then("it should have a different ID than before")
def should_have_different_id(context):
assert context.framework_ids[-2] != context.framework_ids[-1]
@when("we sleep {wait} seconds")
def we_sleep_wait_seconds(context, wait):
time.sleep(int(wait))
@when("we change force_bounce")
def we_change_the_config(context):
branch_dict = context.scheduler.service_config.branch_dict
context.old_force_bounce = branch_dict["force_bounce"]
branch_dict["force_bounce"] = str(int(branch_dict["force_bounce"] or 0) + 1)
@when("we change force_bounce back")
def we_change_force_bounce_back(context):
branch_dict = context.scheduler.service_config.branch_dict
branch_dict["force_bounce"] = context.old_force_bounce
@then("it should eventually drain {num} tasks")
def it_should_drain_num_tasks(context, num):
num = int(num)
for _ in range(10):
if len(drain_lib.TestDrainMethod.downed_task_ids) >= num:
# set() to make a copy.
context.drained_tasks = set(drain_lib.TestDrainMethod.downed_task_ids)
return
time.sleep(1)
else:
raise Exception(
"Expected %d tasks to drain before timeout, saw %d"
% (num, len(drain_lib.TestDrainMethod.downed_task_ids))
)
@then(
"it should undrain {num_undrain_expected} tasks and drain {num_drain_expected} more"
)
def it_should_undrain_and_drain(context, num_undrain_expected, num_drain_expected):
num_undrain_expected = int(num_undrain_expected)
num_drain_expected = int(num_drain_expected)
for _ in range(10):
print("currently drained: %r" % drain_lib.TestDrainMethod.downed_task_ids)
print("drained previously: %r" % context.drained_tasks)
num_drained = len(
drain_lib.TestDrainMethod.downed_task_ids - context.drained_tasks
)
num_undrained = len(
context.drained_tasks - drain_lib.TestDrainMethod.downed_task_ids
)
if num_drained >= num_drain_expected and num_undrained >= num_undrain_expected:
return
time.sleep(1)
else:
raise Exception(
"Expected %d tasks to drain and %d to undrain, saw %d and %d"
% (num_drain_expected, num_undrain_expected, num_drained, num_undrained)
)
@then("it should eventually have only {num} tasks")
def it_should_eventually_have_only_num_tasks(context, num):
num = int(num)
for _ in range(60):
actual_num = len(
[
p
for p in context.scheduler.task_store.get_all_tasks().values()
if p.mesos_task_state == TASK_RUNNING
]
)
if actual_num <= num:
return
time.sleep(1)
raise Exception("Expected <= %d tasks before timeout, saw %d" % (num, actual_num))
@when("we call periodic")
def we_call_periodic(context):
with mock.patch.object(context.scheduler, "load_config"):
context.scheduler.periodic(context.driver)
@when("we change instances to {num}")
def we_change_instances_to_num(context, num):
num = int(num)
context.scheduler.service_config.config_dict["instances"] = num
@then("it should not start tasks for {num} seconds")
def should_not_start_tasks_for_num_seconds(context, num):
time.sleep(int(num))
assert [] == [
p
for p in context.scheduler.task_store.get_all_tasks().values()
if (p.mesos_task_state in LIVE_TASK_STATES)
]
@then("periodic() should eventually be called")
def periodic_should_eventually_be_called(context):
for _ in range(30):
for scheduler in context.main_schedulers:
if hasattr(scheduler, "periodic_was_called"):
return
else:
raise Exception("periodic() not called on all schedulers")
@then(
"our service should show up in paasta_native_services_running_here {expected_num:d} times on any of our slaves"
)
def service_should_show_up_in_pnsrh_n_times(context, expected_num):
mesosslave_ips = {x[4][0] for x in socket.getaddrinfo("mesosslave", 5051)}
results: List[Tuple[str, str, int]] = []
for mesosslave_ip in mesosslave_ips:
results.extend(
paasta_native_services_running_here(
hostname=mesosslave_ip,
framework_id=context.scheduler.framework_id, # Ignore anything from other itests.
)
)
matching_results = [
res for res in results if res == (context.service, context.instance, mock.ANY)
]
assert (
len(matching_results) == expected_num
), f"matching results {matching_results!r}, all results {results!r}"
|
import os
import os.path
from django.conf import settings
from django.db import models, transaction
from django.db.models import Count, Value
from django.db.models.functions import Replace
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy
from weblate.lang.models import Language
from weblate.memory.tasks import import_memory
from weblate.trans.defines import PROJECT_NAME_LENGTH
from weblate.trans.mixins import CacheKeyMixin, PathMixin, URLMixin
from weblate.utils.data import data_dir
from weblate.utils.db import FastDeleteModelMixin, FastDeleteQuerySetMixin
from weblate.utils.site import get_site_url
from weblate.utils.stats import ProjectStats
from weblate.utils.validators import validate_language_aliases, validate_slug
class ProjectQuerySet(FastDeleteQuerySetMixin, models.QuerySet):
def order(self):
return self.order_by("name")
def prefetch_project_flags(projects):
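    """Pre-populate cached has_alerts and locked flags with two aggregate queries."""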
lookup = {project.id: project for project in projects}
if lookup:
for alert in projects.values("id").annotate(Count("component__alert")):
lookup[alert["id"]].__dict__["has_alerts"] = bool(
alert["component__alert__count"]
)
for locks in (
projects.filter(component__locked=False)
.values("id")
.distinct()
.annotate(Count("component__id"))
):
lookup[locks["id"]].__dict__["locked"] = locks["component__id__count"] == 0
return projects
class Project(FastDeleteModelMixin, models.Model, URLMixin, PathMixin, CacheKeyMixin):
ACCESS_PUBLIC = 0
ACCESS_PROTECTED = 1
ACCESS_PRIVATE = 100
ACCESS_CUSTOM = 200
ACCESS_CHOICES = (
(ACCESS_PUBLIC, gettext_lazy("Public")),
(ACCESS_PROTECTED, gettext_lazy("Protected")),
(ACCESS_PRIVATE, gettext_lazy("Private")),
(ACCESS_CUSTOM, gettext_lazy("Custom")),
)
name = models.CharField(
verbose_name=gettext_lazy("Project name"),
max_length=PROJECT_NAME_LENGTH,
unique=True,
help_text=gettext_lazy("Display name"),
)
slug = models.SlugField(
verbose_name=gettext_lazy("URL slug"),
unique=True,
max_length=PROJECT_NAME_LENGTH,
help_text=gettext_lazy("Name used in URLs and filenames."),
validators=[validate_slug],
)
web = models.URLField(
verbose_name=gettext_lazy("Project website"),
help_text=gettext_lazy("Main website of translated project."),
)
mail = models.EmailField(
verbose_name=gettext_lazy("Mailing list"),
blank=True,
max_length=254,
help_text=gettext_lazy("Mailing list for translators."),
)
instructions = models.TextField(
verbose_name=gettext_lazy("Translation instructions"),
blank=True,
help_text=gettext_lazy("You can use Markdown and mention users by @username."),
)
set_language_team = models.BooleanField(
verbose_name=gettext_lazy('Set "Language-Team" header'),
default=True,
help_text=gettext_lazy(
'Lets Weblate update the "Language-Team" file header ' "of your project."
),
)
use_shared_tm = models.BooleanField(
verbose_name=gettext_lazy("Use shared translation memory"),
default=settings.DEFAULT_SHARED_TM,
help_text=gettext_lazy(
"Uses the pool of shared translations between projects."
),
)
contribute_shared_tm = models.BooleanField(
verbose_name=gettext_lazy("Contribute to shared translation memory"),
default=settings.DEFAULT_SHARED_TM,
help_text=gettext_lazy(
"Contributes to the pool of shared translations between projects."
),
)
access_control = models.IntegerField(
default=settings.DEFAULT_ACCESS_CONTROL,
choices=ACCESS_CHOICES,
verbose_name=gettext_lazy("Access control"),
help_text=gettext_lazy(
"How to restrict access to this project is detailed "
"in the documentation."
),
)
translation_review = models.BooleanField(
verbose_name=gettext_lazy("Enable reviews"),
default=False,
help_text=gettext_lazy("Requires dedicated reviewers to approve translations."),
)
source_review = models.BooleanField(
verbose_name=gettext_lazy("Enable source reviews"),
default=False,
help_text=gettext_lazy(
"Requires dedicated reviewers to approve source strings."
),
)
enable_hooks = models.BooleanField(
verbose_name=gettext_lazy("Enable hooks"),
default=True,
help_text=gettext_lazy(
"Whether to allow updating this repository by remote hooks."
),
)
language_aliases = models.CharField(
max_length=200,
verbose_name=gettext_lazy("Language aliases"),
default="",
blank=True,
help_text=gettext_lazy(
"Comma-separated list of language code mappings, "
"for example: en_GB:en,en_US:en"
),
validators=[validate_language_aliases],
)
is_lockable = True
_reverse_url_name = "project"
objects = ProjectQuerySet.as_manager()
class Meta:
app_label = "trans"
verbose_name = gettext_lazy("Project")
verbose_name_plural = gettext_lazy("Projects")
def __str__(self):
return self.name
def save(self, *args, **kwargs):
from weblate.trans.tasks import component_alerts
update_tm = self.contribute_shared_tm
# Renaming detection
old = None
if self.id:
old = Project.objects.get(pk=self.id)
# Generate change entries for changes
self.generate_changes(old)
# Detect slug changes and rename directory
self.check_rename(old)
# Rename linked repos
if old.slug != self.slug:
for component in old.component_set.iterator():
new_component = self.component_set.get(pk=component.pk)
new_component.project = self
component.linked_childs.update(
repo=new_component.get_repo_link_url()
)
update_tm = self.contribute_shared_tm and not old.contribute_shared_tm
self.create_path()
super().save(*args, **kwargs)
if old is not None:
# Update alerts if needed
if old.web != self.web:
component_alerts.delay(
list(self.component_set.values_list("id", flat=True))
)
# Update glossaries if needed
if old.name != self.name:
self.glossary_set.filter(name__contains=old.name).update(
name=Replace("name", Value(old.name), Value(self.name))
)
# Update translation memory on enabled sharing
if update_tm:
transaction.on_commit(lambda: import_memory.delay(self.id))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.old_access_control = self.access_control
self.stats = ProjectStats(self)
self.acting_user = None
def generate_changes(self, old):
from weblate.trans.models.change import Change
tracked = (("slug", Change.ACTION_RENAME_PROJECT),)
for attribute, action in tracked:
old_value = getattr(old, attribute)
current_value = getattr(self, attribute)
if old_value != current_value:
Change.objects.create(
action=action,
old=old_value,
target=current_value,
project=self,
user=self.acting_user,
)
@cached_property
def language_aliases_dict(self):
if not self.language_aliases:
return {}
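        # For example, "en_GB:en,en_US:en" becomes {"en_GB": "en", "en_US": "en"}.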
return dict(part.split(":") for part in self.language_aliases.split(","))
def get_language_alias(self, code):
if code in self.language_aliases_dict:
return self.language_aliases_dict[code]
if code in ("source", "src", "default"):
return self.source_language.code
return code
def get_group(self, group):
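        # Internal per-project groups are named "<project name><suffix>", e.g.
        # get_group("@Translate") for a project named "Foo" looks up "Foo@Translate".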
return self.group_set.get(name=f"{self.name}{group}")
def add_user(self, user, group=None):
"""Add user based on username or email address."""
if group is None:
if self.access_control != self.ACCESS_PUBLIC:
group = "@Translate"
else:
group = "@Administration"
group = self.get_group(group)
user.groups.add(group)
user.profile.watched.add(self)
def remove_user(self, user, group=None):
"""Add user based on username or email address."""
if group is None:
groups = self.group_set.filter(internal=True, name__contains="@")
user.groups.remove(*groups)
else:
group = self.get_group(group)
user.groups.remove(group)
def get_reverse_url_kwargs(self):
"""Return kwargs for URL reversing."""
return {"project": self.slug}
def get_widgets_url(self):
"""Return absolute URL for widgets."""
return get_site_url(reverse("widgets", kwargs={"project": self.slug}))
def get_share_url(self):
"""Return absolute URL usable for sharing."""
return get_site_url(reverse("engage", kwargs={"project": self.slug}))
@cached_property
def locked(self):
return self.component_set.filter(locked=False).count() == 0
def _get_path(self):
return os.path.join(data_dir("vcs"), self.slug)
@cached_property
def languages(self):
"""Return list of all languages used in project."""
return (
Language.objects.filter(translation__component__project=self)
.distinct()
.order()
)
@property
def count_pending_units(self):
"""Check whether there are any uncommitted changes."""
from weblate.trans.models import Unit
return Unit.objects.filter(
translation__component__project=self, pending=True
).count()
def needs_commit(self):
"""Check whether there are some not committed changes."""
return self.count_pending_units > 0
def on_repo_components(self, default, call, *args, **kwargs):
"""Wrapper for operations on repository."""
ret = default
for component in self.all_repo_components:
res = getattr(component, call)(*args, **kwargs)
if default:
ret = ret & res
else:
ret = ret | res
return ret
def commit_pending(self, reason, user):
"""Commit any pending changes."""
return self.on_repo_components(True, "commit_pending", reason, user)
def repo_needs_merge(self):
return self.on_repo_components(False, "repo_needs_merge")
def repo_needs_push(self):
return self.on_repo_components(False, "repo_needs_push")
def do_update(self, request=None, method=None):
"""Update all Git repos."""
return self.on_repo_components(True, "do_update", request, method=method)
def do_push(self, request=None):
"""Push all Git repos."""
return self.on_repo_components(True, "do_push", request)
def do_reset(self, request=None):
"""Push all Git repos."""
return self.on_repo_components(True, "do_reset", request)
def do_cleanup(self, request=None):
"""Push all Git repos."""
return self.on_repo_components(True, "do_cleanup", request)
def can_push(self):
"""Check whether any suprojects can push."""
return self.on_repo_components(False, "can_push")
@cached_property
def all_repo_components(self):
"""Return list of all unique VCS components."""
result = list(self.component_set.with_repo())
included = {component.get_repo_link_url() for component in result}
linked = self.component_set.filter(repo__startswith="weblate:")
for other in linked:
if other.repo in included:
continue
included.add(other.repo)
result.append(other)
return result
@cached_property
def billings(self):
if "weblate.billing" not in settings.INSTALLED_APPS:
return []
return self.billing_set.all()
@property
def billing(self):
return self.billings[0]
@cached_property
def paid(self):
return not self.billings or any(billing.paid for billing in self.billings)
@cached_property
def is_trial(self):
return any(billing.is_trial for billing in self.billings)
@cached_property
def is_libre_trial(self):
return any(billing.is_libre_trial for billing in self.billings)
def post_create(self, user, billing=None):
from weblate.trans.models import Change
if billing:
billing.projects.add(self)
if billing.plan.change_access_control:
self.access_control = Project.ACCESS_PRIVATE
else:
self.access_control = Project.ACCESS_PUBLIC
self.save()
if not user.is_superuser:
self.add_user(user, "@Administration")
Change.objects.create(
action=Change.ACTION_CREATE_PROJECT, project=self, user=user, author=user
)
@cached_property
def all_alerts(self):
from weblate.trans.models import Alert
result = Alert.objects.filter(component__project=self, dismissed=False)
list(result)
return result
@cached_property
def has_alerts(self):
return self.all_alerts.exists()
@cached_property
def all_admins(self):
from weblate.auth.models import User
return User.objects.all_admins(self).select_related("profile")
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import MagicMock
from mock import patch
from mock import call
from diamond.collector import Collector
from mongodb import MongoDBCollector
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
##########################################################################
def run_only_if_pymongo_is_available(func):
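    # Skip the decorated test via run_only when pymongo cannot be imported.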
try:
import pymongo
except ImportError:
pymongo = None
pred = lambda: pymongo is not None
return run_only(func, pred)
class TestMongoDBCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MongoDBCollector', {
'host': 'localhost:27017',
'databases': '^db'
})
self.collector = MongoDBCollector(config, None)
self.connection = MagicMock()
def test_import(self):
self.assertTrue(MongoDBCollector)
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_server_stats(self,
publish_mock,
connector_mock):
data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_once_with('serverStatus')
self.assertPublishedMany(publish_mock, {
'more_keys.nested_key': 1,
'key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_db_stats(self,
publish_mock,
connector_mock):
data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection['db1'].command.assert_called_once_with('dbStats')
metrics = {
'db_keys.db_nested_key': 1,
'dbkey': 2
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_stats_with_long_type(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_once_with('serverStatus')
self.assertPublishedMany(publish_mock, {
'more_keys': 1,
'key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_databases(self,
publish_mock,
connector_mock):
self._annotate_connection(connector_mock, {})
self.collector.collect()
assert call('baddb') not in self.connection.__getitem__.call_args_list
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_collections(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.connection['db1'].collection_names.return_value = ['collection1',
'tmp.mr.tmp1']
self.connection['db1'].command.return_value = {'key': 2,
'string': 'str'}
self.collector.collect()
self.connection.db.command.assert_called_once_with('serverStatus')
self.connection['db1'].collection_names.assert_called_once_with()
self.connection['db1'].command.assert_any_call('dbStats')
self.connection['db1'].command.assert_any_call('collstats',
'collection1')
assert call('collstats', 'tmp.mr.tmp1') not in self.connection['db1'].command.call_args_list # NOQA
metrics = {
'databases.db1.collection1.key': 2,
}
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_ignore_replset_status_if_disabled(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
        assert call('replSetGetStatus') not in \
self.connection.admin.command.method_calls
def _annotate_connection(self, connector_mock, data):
connector_mock.return_value = self.connection
self.connection.db.command.return_value = data
self.connection.database_names.return_value = ['db1', 'baddb']
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_keys_from_real_server_stats(self,
publish_mock,
connector_mock):
data = json.load(self.getFixture('real_serverStatus_response.json'))
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_with('serverStatus')
# check for multiple datapoints per metric
        # should not happen, but it did (once), so let's check it
datapoints_per_metric = defaultdict(int)
for c in publish_mock.call_args_list:
m = c[0][0]
datapoints_per_metric[m] += 1
dupes = [m for m, n in datapoints_per_metric.iteritems() if n > 1]
self.assertEqual(len(dupes), 0,
'BUG: 1+ point for same metric received: %s' %
', '.join(dupes))
# just a few samples
expected_calls = [
call('opcounters.query', 125030709),
call('opcountersRepl.insert', 7465),
call('extra_info.heap_usage_bytes', 801236248),
call('metrics.document.returned', 536691431),
call('metrics.commands.saslContinue.total', 1400470),
call('wiredTiger.thread.yield.page_acquire_time_sleeping_(usecs)',
3022511),
call('opcounters_per_sec.query', 0, instance=None,
metric_type='COUNTER', precision=0, raw_value=125030709),
]
publish_mock.assert_has_calls(expected_calls, any_order=True)
class TestMongoMultiHostDBCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MongoDBCollector', {
'hosts': ['localhost:27017', 'localhost:27057'],
'databases': '^db',
})
self.collector = MongoDBCollector(config, None)
self.connection = MagicMock()
def test_import(self):
self.assertTrue(MongoDBCollector)
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_server_stats(self,
publish_mock,
connector_mock):
data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_with('serverStatus')
self.assertPublishedMany(publish_mock, {
'localhost_27017.more_keys.nested_key': 1,
'localhost_27057.more_keys.nested_key': 1,
'localhost_27017.key': 2,
'localhost_27057.key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_db_stats(self,
publish_mock,
connector_mock):
data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection['db1'].command.assert_called_with('dbStats')
metrics = {
'localhost_27017.db_keys.db_nested_key': 1,
'localhost_27057.db_keys.db_nested_key': 1,
'localhost_27017.dbkey': 2,
'localhost_27057.dbkey': 2
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_stats_with_long_type(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_with('serverStatus')
self.assertPublishedMany(publish_mock, {
'localhost_27017.more_keys': 1,
'localhost_27057.more_keys': 1,
'localhost_27017.key': 2,
'localhost_27057.key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_databases(self,
publish_mock,
connector_mock):
self._annotate_connection(connector_mock, {})
self.collector.collect()
assert call('baddb') not in self.connection.__getitem__.call_args_list
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_collections(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.connection['db1'].collection_names.return_value = ['collection1',
'tmp.mr.tmp1']
self.connection['db1'].command.return_value = {'key': 2,
'string': 'str'}
self.collector.collect()
self.connection.db.command.assert_called_with('serverStatus')
self.connection['db1'].collection_names.assert_called_with()
self.connection['db1'].command.assert_any_call('dbStats')
self.connection['db1'].command.assert_any_call('collstats',
'collection1')
assert call('collstats', 'tmp.mr.tmp1') not in self.connection['db1'].command.call_args_list # NOQA
metrics = {
'localhost_27017.databases.db1.collection1.key': 2,
'localhost_27057.databases.db1.collection1.key': 2,
}
self.assertPublishedMany(publish_mock, metrics)
def _annotate_connection(self, connector_mock, data):
connector_mock.return_value = self.connection
self.connection.db.command.return_value = data
self.connection.database_names.return_value = ['db1', 'baddb']
class TestMongoDBCollectorWithReplica(CollectorTestCase):
def setUp(self):
config = get_collector_config('MongoDBCollector', {
'host': 'localhost:27017',
'databases': '^db',
'replica': True
})
self.collector = MongoDBCollector(config, None)
self.connection = MagicMock()
def test_import(self):
self.assertTrue(MongoDBCollector)
@run_only_if_pymongo_is_available
@patch('pymongo.MongoClient')
@patch.object(Collector, 'publish')
def test_should_publish_replset_status_if_enabled(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.admin.command.assert_called_once_with(
'replSetGetStatus')
def _annotate_connection(self, connector_mock, data):
connector_mock.return_value = self.connection
self.connection.db.command.return_value = data
self.connection.database_names.return_value = ['db1', 'baddb']
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import os.path
import re
import sys
def bump(path, pattern, repl, check=True):
with open(path) as fin:
contents = fin.read()
new_contents = pattern.sub(repl, contents)
if check and new_contents == contents:
print('*' * 79)
print('WARNING: contents of %r unchanged after version bump' % path)
print('*' * 79)
with open(path, 'w') as fout:
fout.write(new_contents)
def bump_setup_py(root, previous_version, new_version):
path = os.path.join(root, 'setup.py')
pattern = re.compile("^ version='%s',$" % previous_version, re.MULTILINE)
repl = " version='%s'," % new_version
bump(path, pattern, repl)
def bump_docs_src_conf_py(root, previous_version, new_version):
path = os.path.join(root, 'docs', 'src', 'conf.py')
short_previous_version = '.'.join(previous_version.split('.')[:2])
short_new_version = new_version # '.'.join(new_version.split('.')[:2])
pattern = re.compile("^version = '%s'$" % short_previous_version, re.MULTILINE)
repl = "version = '%s'" % short_new_version
bump(path, pattern, repl, check=False) # short version won't always change
pattern = re.compile("^release = '%s'$" % previous_version, re.MULTILINE)
repl = "release = '%s'" % new_version
bump(path, pattern, repl)
def bump_gensim_init_py(root, previous_version, new_version):
path = os.path.join(root, 'gensim', '__init__.py')
pattern = re.compile("__version__ = '%s'$" % previous_version, re.MULTILINE)
repl = "__version__ = '%s'" % new_version
bump(path, pattern, repl)
def main():
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
previous_version, new_version = sys.argv[1:3]
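    # Usage: python <this script> <previous_version> <new_version>, e.g.
    # "3.8.0" and "3.8.1" (illustrative version numbers).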
bump_setup_py(root, previous_version, new_version)
bump_docs_src_conf_py(root, previous_version, new_version)
bump_gensim_init_py(root, previous_version, new_version)
if __name__ == '__main__':
main()
|
import logging
from .const import DOMAIN
def service_signal(service, *args):
"""Encode signal."""
return "_".join([DOMAIN, service, *args])
def log_update_error(logger, action, name, entity_type, error, level=logging.ERROR):
"""Log an update error."""
logger.log(
level,
"Could not %s %s %s due to error: %s",
action,
name,
entity_type,
error.__class__.__name__,
)
|
import keras
import tensorflow as tf
from .knrm import KNRM
from matchzoo.engine.param import Param
class ConvKNRM(KNRM):
"""
ConvKNRM model.
Examples:
>>> model = ConvKNRM()
>>> model.params['embedding_input_dim'] = 10000
>>> model.params['embedding_output_dim'] = 300
>>> model.params['embedding_trainable'] = True
>>> model.params['filters'] = 128
>>> model.params['conv_activation_func'] = 'tanh'
>>> model.params['max_ngram'] = 3
>>> model.params['use_crossmatch'] = True
>>> model.params['kernel_num'] = 11
>>> model.params['sigma'] = 0.1
>>> model.params['exact_sigma'] = 0.001
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
    @classmethod
    def get_default_params(cls):
"""Get default parameters."""
params = super().get_default_params()
params.add(Param(name='filters', value=128,
desc="The filter size in the convolution"
" layer."))
params.add(Param(name='conv_activation_func', value='relu',
desc="The activation function in the "
"convolution layer."))
params.add(Param(name='max_ngram', value=3,
desc="The maximum length of n-grams for the "
"convolution layer."))
params.add(Param(name='use_crossmatch', value=True,
desc="Whether to match left n-grams and right "
"n-grams of different lengths"))
return params
def build(self):
"""Build model."""
query, doc = self._make_inputs()
embedding = self._make_embedding_layer()
q_embed = embedding(query)
d_embed = embedding(doc)
q_convs = []
d_convs = []
for i in range(self._params['max_ngram']):
c = keras.layers.Conv1D(
self._params['filters'], i + 1,
activation=self._params['conv_activation_func'],
padding='same'
)
q_convs.append(c(q_embed))
d_convs.append(c(d_embed))
KM = []
for qi in range(self._params['max_ngram']):
for di in range(self._params['max_ngram']):
                # skip n-gram pairs of different lengths unless crossmatch is enabled
if not self._params['use_crossmatch'] and qi != di:
continue
q_ngram = q_convs[qi]
d_ngram = d_convs[di]
mm = keras.layers.Dot(axes=[2, 2],
normalize=True)([q_ngram, d_ngram])
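                # Kernel pooling: kernel centers (mu) are evenly spaced; any
                # center that lands above 1.0 is clamped to 1.0 and paired with
                # the narrower exact_sigma so that kernel captures exact matches.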
for i in range(self._params['kernel_num']):
mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
self._params['kernel_num'] - 1) - 1.0
sigma = self._params['sigma']
if mu > 1.0:
sigma = self._params['exact_sigma']
mu = 1.0
mm_exp = self._kernel_layer(mu, sigma)(mm)
mm_doc_sum = keras.layers.Lambda(
lambda x: tf.reduce_sum(x, 2))(
mm_exp)
mm_log = keras.layers.Activation(tf.math.log1p)(mm_doc_sum)
mm_sum = keras.layers.Lambda(
lambda x: tf.reduce_sum(x, 1))(mm_log)
KM.append(mm_sum)
phi = keras.layers.Lambda(lambda x: tf.stack(x, 1))(KM)
out = self._make_output_layer()(phi)
self._backend = keras.Model(inputs=[query, doc], outputs=[out])
|
from mock import ANY
from mock import MagicMock
from mock import patch
from pytest import raises
from requests.exceptions import RequestException
from requests.exceptions import SSLError
from paasta_tools.cli.cli import parse_args
from paasta_tools.cli.cmds.push_to_registry import build_command
from paasta_tools.cli.cmds.push_to_registry import is_docker_image_already_in_registry
from paasta_tools.cli.cmds.push_to_registry import paasta_push_to_registry
@patch("paasta_tools.cli.cmds.push_to_registry.build_docker_tag", autospec=True)
def test_build_command(mock_build_docker_tag):
mock_build_docker_tag.return_value = "my-docker-registry/services-foo:paasta-asdf"
expected = "docker push my-docker-registry/services-foo:paasta-asdf"
actual = build_command("foo", "bar")
assert actual == expected
@patch(
"paasta_tools.cli.cmds.push_to_registry.is_docker_image_already_in_registry",
autospec=True,
)
@patch("paasta_tools.cli.cmds.push_to_registry.build_command", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._run", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log_audit", autospec=True)
def test_push_to_registry_run_fail(
mock_log_audit,
mock_log,
mock_run,
mock_validate_service_name,
mock_build_command,
mock_is_docker_image_already_in_registry,
):
mock_build_command.return_value = (
"docker push my-docker-registry/services-foo:paasta-asdf"
)
mock_is_docker_image_already_in_registry.return_value = False
mock_run.return_value = (1, "Bad")
args = MagicMock()
assert paasta_push_to_registry(args) == 1
assert not mock_log_audit.called
@patch(
"paasta_tools.cli.cmds.push_to_registry.is_docker_image_already_in_registry",
autospec=True,
)
@patch("paasta_tools.cli.cmds.push_to_registry.build_command", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._run", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log_audit", autospec=True)
def test_push_to_registry_success(
mock_log_audit,
mock_log,
mock_run,
mock_validate_service_name,
mock_build_command,
mock_is_docker_image_already_in_registry,
):
args, _ = parse_args(["push-to-registry", "-s", "foo", "-c", "abcd" * 10])
mock_build_command.return_value = (
"docker push my-docker-registry/services-foo:paasta-asdf"
)
mock_run.return_value = (0, "Success")
mock_is_docker_image_already_in_registry.return_value = False
assert paasta_push_to_registry(args) == 0
assert mock_build_command.called
assert mock_run.called
mock_log_audit.assert_called_once_with(
action="push-to-registry", action_details={"commit": "abcd" * 10}, service="foo"
)
@patch(
"paasta_tools.cli.cmds.push_to_registry.is_docker_image_already_in_registry",
autospec=True,
)
@patch("paasta_tools.cli.cmds.push_to_registry.build_command", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._run", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log_audit", autospec=True)
def test_push_to_registry_force(
mock_log_audit,
mock_log,
mock_run,
mock_validate_service_name,
mock_build_command,
mock_is_docker_image_already_in_registry,
):
args, _ = parse_args(
["push-to-registry", "-s", "foo", "-c", "abcd" * 10, "--force"]
)
mock_build_command.return_value = (
"docker push fake_registry/services-foo:paasta-abcd"
)
mock_run.return_value = (0, "Success")
assert paasta_push_to_registry(args) == 0
assert not mock_is_docker_image_already_in_registry.called
mock_run.assert_called_once_with(
"docker push fake_registry/services-foo:" "paasta-abcd",
component="build",
log=True,
loglevel="debug",
service="foo",
timeout=3600,
)
mock_log_audit.assert_called_once_with(
action="push-to-registry", action_details={"commit": "abcd" * 10}, service="foo"
)
@patch(
"paasta_tools.cli.cmds.push_to_registry.is_docker_image_already_in_registry",
autospec=True,
)
@patch("paasta_tools.cli.cmds.push_to_registry.build_command", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._run", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log_audit", autospec=True)
def test_push_to_registry_does_not_override_existing_image(
mock_log_audit,
mock_log,
mock_run,
mock_validate_service_name,
mock_build_command,
mock_is_docker_image_already_in_registry,
):
args, _ = parse_args(["push-to-registry", "-s", "foo", "-c", "abcd" * 10])
mock_run.return_value = (0, "Success")
mock_is_docker_image_already_in_registry.return_value = True
assert paasta_push_to_registry(args) == 0
assert not mock_build_command.called
assert not mock_run.called
assert not mock_log_audit.called
@patch("paasta_tools.utils.load_system_paasta_config", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.is_docker_image_already_in_registry",
autospec=True,
)
@patch("paasta_tools.cli.cmds.push_to_registry.build_command", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._run", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log_audit", autospec=True)
def test_push_to_registry_does_not_override_when_cant_check_status(
mock_log_audit,
mock_log,
mock_run,
mock_validate_service_name,
mock_build_command,
mock_is_docker_image_already_in_registry,
mock_load_system_paasta_config,
):
args, _ = parse_args(["push-to-registry", "-s", "foo", "-c", "abcd" * 10])
mock_run.return_value = (0, "Success")
mock_is_docker_image_already_in_registry.side_effect = RequestException()
assert paasta_push_to_registry(args) == 1
assert not mock_build_command.called
assert not mock_run.called
assert not mock_log_audit.called
@patch(
"paasta_tools.cli.cmds.push_to_registry.is_docker_image_already_in_registry",
autospec=True,
)
@patch("paasta_tools.cli.cmds.push_to_registry.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._run", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.push_to_registry.build_command", autospec=True)
def test_push_to_registry_works_when_service_name_starts_with_services_dash(
mock_build_command,
mock_log_audit,
mock_log,
mock_run,
mock_validate_service_name,
mock_is_docker_image_already_in_registry,
):
args, _ = parse_args(["push-to-registry", "-s", "foo", "-c", "abcd" * 10])
mock_run.return_value = (0, "Success")
mock_is_docker_image_already_in_registry.return_value = False
assert paasta_push_to_registry(args) == 0
mock_build_command.assert_called_once_with("foo", "abcd" * 10)
mock_log_audit.assert_called_once_with(
action="push-to-registry", action_details={"commit": "abcd" * 10}, service="foo"
)
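# The registry tests below exercise is_docker_image_already_in_registry, which
# issues an HTTP HEAD against /v2/services-<service>/manifests/paasta-<sha> on
# the service's registry and treats 200 as "already pushed" and 404 as missing.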
@patch(
"paasta_tools.cli.cmds.push_to_registry.get_service_docker_registry", autospec=True
)
@patch("paasta_tools.cli.cmds.push_to_registry.requests.Session.head", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.read_docker_registry_creds", autospec=True
)
def test_is_docker_image_already_in_registry_success(
mock_read_docker_registry_creds, mock_request_head, mock_get_service_docker_registry
):
mock_read_docker_registry_creds.return_value = (None, None)
mock_get_service_docker_registry.return_value = "registry"
mock_request_head.return_value = MagicMock(status_code=200)
assert is_docker_image_already_in_registry(
"fake_service", "fake_soa_dir", "fake_sha"
)
mock_request_head.assert_called_with(
ANY,
"https://registry/v2/services-fake_service/manifests/paasta-fake_sha",
timeout=30,
)
@patch(
"paasta_tools.cli.cmds.push_to_registry.get_service_docker_registry", autospec=True
)
@patch("paasta_tools.cli.cmds.push_to_registry.requests.Session.head", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.read_docker_registry_creds", autospec=True
)
def test_is_docker_image_already_in_registry_success_with_registry_credentials(
mock_read_docker_registry_creds, mock_request_head, mock_get_service_docker_registry
):
auth = ("username", "password")
mock_read_docker_registry_creds.return_value = auth
mock_get_service_docker_registry.return_value = "registry"
mock_request_head.return_value = MagicMock(status_code=200)
assert is_docker_image_already_in_registry(
"fake_service", "fake_soa_dir", "fake_sha"
)
mock_request_head.assert_called_with(
ANY,
"https://registry/v2/services-fake_service/manifests/paasta-fake_sha",
auth=auth,
timeout=30,
)
@patch(
"paasta_tools.cli.cmds.push_to_registry.get_service_docker_registry", autospec=True
)
@patch("paasta_tools.cli.cmds.push_to_registry.requests.Session.head", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.read_docker_registry_creds", autospec=True
)
def test_is_docker_image_already_in_registry_404_no_such_service_yet(
mock_read_docker_registry_creds, mock_request_head, mock_get_service_docker_registry
):
mock_read_docker_registry_creds.return_value = (None, None)
mock_get_service_docker_registry.return_value = "registry"
mock_request_head.return_value = MagicMock(
status_code=404
) # No Such Repository Error
assert not is_docker_image_already_in_registry(
"fake_service", "fake_soa_dir", "fake_sha"
)
mock_request_head.assert_called_with(
ANY,
"https://registry/v2/services-fake_service/manifests/paasta-fake_sha",
timeout=30,
)
@patch(
"paasta_tools.cli.cmds.push_to_registry.get_service_docker_registry", autospec=True
)
@patch("paasta_tools.cli.cmds.push_to_registry.requests.Session.head", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.read_docker_registry_creds", autospec=True
)
def test_is_docker_image_already_when_image_does_not_exist(
mock_read_docker_registry_creds, mock_request_head, mock_get_service_docker_registry
):
mock_read_docker_registry_creds.return_value = (None, None)
mock_get_service_docker_registry.return_value = "registry"
mock_request_head.return_value = MagicMock(status_code=404)
assert not is_docker_image_already_in_registry(
"fake_service", "fake_soa_dir", "fake_sha"
)
mock_request_head.assert_called_with(
ANY,
"https://registry/v2/services-fake_service/manifests/paasta-fake_sha",
timeout=30,
)
@patch(
"paasta_tools.cli.cmds.push_to_registry.get_service_docker_registry", autospec=True
)
@patch("paasta_tools.cli.cmds.push_to_registry.requests.Session.head", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.read_docker_registry_creds", autospec=True
)
def test_is_docker_image_already_in_registry_401_unauthorized(
mock_read_docker_registry_creds, mock_request_head, mock_get_service_docker_registry
):
mock_read_docker_registry_creds.return_value = (None, None)
mock_request_head.side_effect = RequestException()
with raises(RequestException):
is_docker_image_already_in_registry("fake_service", "fake_soa_dir", "fake_sha")
@patch(
"paasta_tools.cli.cmds.push_to_registry.get_service_docker_registry", autospec=True
)
@patch("paasta_tools.cli.cmds.push_to_registry.requests.Session.head", autospec=True)
@patch(
"paasta_tools.cli.cmds.push_to_registry.read_docker_registry_creds", autospec=True
)
def test_is_docker_image_already_in_registry_http_when_image_does_not_exist(
mock_read_docker_registry_creds, mock_request_head, mock_get_service_docker_registry
):
def mock_head(session, url, timeout):
if url.startswith("https"):
raise SSLError("Uh oh")
return MagicMock(status_code=404)
mock_get_service_docker_registry.return_value = "registry"
mock_request_head.side_effect = mock_head
mock_read_docker_registry_creds.return_value = (None, None)
assert not is_docker_image_already_in_registry(
"fake_service", "fake_soa_dir", "fake_sha"
)
mock_request_head.assert_called_with(
ANY,
"http://registry/v2/services-fake_service/manifests/paasta-fake_sha",
timeout=30,
)
|
import logging
from teslajsonpy import Controller as TeslaAPI, TeslaException
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_TOKEN,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import (
CONF_WAKE_ON_START,
DEFAULT_SCAN_INTERVAL,
DEFAULT_WAKE_ON_START,
DOMAIN,
MIN_SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
@callback
def configured_instances(hass):
"""Return a set of configured Tesla instances."""
return {entry.title for entry in hass.config_entries.async_entries(DOMAIN)}
class TeslaConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Tesla."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={},
description_placeholders={},
)
if user_input[CONF_USERNAME] in configured_instances(self.hass):
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={CONF_USERNAME: "already_configured"},
description_placeholders={},
)
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "cannot_connect"},
description_placeholders={},
)
except InvalidAuth:
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_auth"},
description_placeholders={},
)
return self.async_create_entry(title=user_input[CONF_USERNAME], data=info)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Tesla."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): vol.All(cv.positive_int, vol.Clamp(min=MIN_SCAN_INTERVAL)),
vol.Optional(
CONF_WAKE_ON_START,
default=self.config_entry.options.get(
CONF_WAKE_ON_START, DEFAULT_WAKE_ON_START
),
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
config = {}
websession = aiohttp_client.async_get_clientsession(hass)
try:
controller = TeslaAPI(
websession,
email=data[CONF_USERNAME],
password=data[CONF_PASSWORD],
update_interval=DEFAULT_SCAN_INTERVAL,
)
(config[CONF_TOKEN], config[CONF_ACCESS_TOKEN]) = await controller.connect(
test_login=True
)
except TeslaException as ex:
if ex.code == HTTP_UNAUTHORIZED:
_LOGGER.error("Invalid credentials: %s", ex)
raise InvalidAuth() from ex
_LOGGER.error("Unable to communicate with Tesla API: %s", ex)
raise CannotConnect() from ex
_LOGGER.debug("Credentials successfully connected to the Tesla API")
return config
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
import threading
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf
from six.moves import range
flags.DEFINE_integer('num_connections', 1,
'Number of connections between each pair of vms.')
flags.DEFINE_integer('num_iterations', 1,
'Number of iterations for each run.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'mesh_network'
BENCHMARK_CONFIG = """
mesh_network:
description: >
Measures VM to VM cross section bandwidth in
a mesh network. Specify the number of VMs in the network
with --num_vms.
vm_groups:
default:
vm_spec: *default_single_core
"""
NETPERF_BENCHMARKS = ['TCP_RR', 'TCP_STREAM']
VALUE_INDEX = 1
RESULT_LOCK = threading.Lock()
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = FLAGS.num_vms
if FLAGS.num_vms < 2: # Needs at least 2 vms to run the benchmark.
config['vm_groups']['default']['vm_count'] = 2
return config
def PrepareVM(vm):
"""Prepare netperf on a single VM.
Args:
vm: The VM that needs to install netperf package.
"""
vm.RemoteCommand('./netserver')
def Prepare(benchmark_spec):
"""Install vms with necessary softwares.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
logging.info('Preparing netperf on %s', vms[0])
vms[0].Install('netperf')
for vm in vms:
vms[0].MoveFile(vm, netperf.NETPERF_PATH)
vms[0].MoveFile(vm, netperf.NETSERVER_PATH)
vm_util.RunThreaded(PrepareVM, vms, len(vms))
def RunNetperf(vm, benchmark_name, servers, result):
"""Spawns netperf on a remote VM, parses results.
Args:
vm: The VM running netperf.
benchmark_name: The netperf benchmark to run.
servers: VMs running netserver.
result: The result variable shared by all threads.
"""
cmd = ''
if FLAGS.duration_in_seconds:
cmd_duration_suffix = '-l %s' % FLAGS.duration_in_seconds
else:
cmd_duration_suffix = ''
for server in servers:
if vm != server:
cmd += ('./netperf -t '
'{benchmark_name} -H {server_ip} -i {iterations} '
'{cmd_suffix} & ').format(
benchmark_name=benchmark_name,
server_ip=server.internal_ip,
iterations=FLAGS.num_iterations,
cmd_suffix=cmd_duration_suffix)
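# Launch one backgrounded netperf per (remote server, connection) pair and
# block on 'wait' until every instance has finished.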
netperf_cmd = ''
for _ in range(FLAGS.num_connections):
netperf_cmd += cmd
netperf_cmd += 'wait'
output, _ = vm.RemoteCommand(netperf_cmd)
logging.info(output)
match = re.findall(r'(\d+\.\d+)\s+\n', output)
value = 0
expected_num_match = (len(servers) - 1) * FLAGS.num_connections
if len(match) != expected_num_match:
raise errors.Benchmarks.RunError(
'Netserver not reachable. Expecting %s results, got %s.' %
(expected_num_match, len(match)))
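# Aggregate per-run results: TCP_RR reports transactions/sec, which is
# converted to an average round-trip latency in milliseconds (1000 / rate);
# TCP_STREAM throughput in Mbits/sec is summed directly.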
for res in match:
if benchmark_name == 'TCP_RR':
value += 1.0 / float(res) * 1000.0
else:
value += float(res)
with RESULT_LOCK:
result[VALUE_INDEX] += value
def Run(benchmark_spec):
"""Run netperf on target vms.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Total throughput, average latency in the form of tuple. The tuple contains
the sample metric (string), value (float), unit (string).
"""
vms = benchmark_spec.vms
num_vms = len(vms)
results = []
for netperf_benchmark in NETPERF_BENCHMARKS:
args = []
metadata = {
'number_machines': num_vms,
'number_connections': FLAGS.num_connections
}
if netperf_benchmark == 'TCP_STREAM':
metric = 'TCP_STREAM_Total_Throughput'
unit = 'Mbits/sec'
value = 0.0
else:
metric = 'TCP_RR_Average_Latency'
unit = 'ms'
value = 0.0
result = [metric, value, unit, metadata]
args = [((source, netperf_benchmark, vms, result), {}) for source in vms]
vm_util.RunThreaded(RunNetperf, args, num_vms)
result = sample.Sample(*result)
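# TCP_RR latencies were summed across every directed VM pair and connection;
# divide by that count to report a mean latency.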
if netperf_benchmark == 'TCP_RR':
denom = ((num_vms - 1) *
num_vms *
FLAGS.num_connections)
result = result._replace(value=result.value / denom)
results.append(result)
logging.info(results)
return results
def Cleanup(benchmark_spec):
"""Cleanup netperf on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
for vm in vms:
logging.info('uninstalling netperf on %s', vm)
vm.RemoteCommand('pkill -9 netserver')
vm.RemoteCommand('rm netserver')
vm.RemoteCommand('rm netperf')
|
import logging
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.browser import history
from qutebrowser.utils import urlutils, usertypes
from qutebrowser.api import cmdutils
from qutebrowser.misc import sql, objects
@pytest.fixture(autouse=True)
def prerequisites(config_stub, fake_save_manager, init_sql, fake_args):
"""Make sure everything is ready to initialize a WebHistory."""
config_stub.data = {'general': {'private-browsing': False}}
class TestSpecialMethods:
def test_iter(self, web_history):
urlstr = 'http://www.example.com/'
url = QUrl(urlstr)
web_history.add_url(url, atime=12345)
assert list(web_history) == [(urlstr, '', 12345, False)]
def test_len(self, web_history):
assert len(web_history) == 0
url = QUrl('http://www.example.com/')
web_history.add_url(url)
assert len(web_history) == 1
def test_contains(self, web_history):
web_history.add_url(QUrl('http://www.example.com/'),
title='Title', atime=12345)
assert 'http://www.example.com/' in web_history
assert 'www.example.com' not in web_history
assert 'Title' not in web_history
assert 12345 not in web_history
class TestGetting:
def test_get_recent(self, web_history):
web_history.add_url(QUrl('http://www.qutebrowser.org/'), atime=67890)
web_history.add_url(QUrl('http://example.com/'), atime=12345)
assert list(web_history.get_recent()) == [
('http://www.qutebrowser.org/', '', 67890, False),
('http://example.com/', '', 12345, False),
]
def test_entries_between(self, web_history):
web_history.add_url(QUrl('http://www.example.com/1'), atime=12345)
web_history.add_url(QUrl('http://www.example.com/2'), atime=12346)
web_history.add_url(QUrl('http://www.example.com/3'), atime=12347)
web_history.add_url(QUrl('http://www.example.com/4'), atime=12348)
web_history.add_url(QUrl('http://www.example.com/5'), atime=12348)
web_history.add_url(QUrl('http://www.example.com/6'), atime=12349)
web_history.add_url(QUrl('http://www.example.com/7'), atime=12350)
times = [x.atime for x in web_history.entries_between(12346, 12349)]
assert times == [12349, 12348, 12348, 12347]
def test_entries_before(self, web_history):
web_history.add_url(QUrl('http://www.example.com/1'), atime=12346)
web_history.add_url(QUrl('http://www.example.com/2'), atime=12346)
web_history.add_url(QUrl('http://www.example.com/3'), atime=12347)
web_history.add_url(QUrl('http://www.example.com/4'), atime=12348)
web_history.add_url(QUrl('http://www.example.com/5'), atime=12348)
web_history.add_url(QUrl('http://www.example.com/6'), atime=12348)
web_history.add_url(QUrl('http://www.example.com/7'), atime=12349)
web_history.add_url(QUrl('http://www.example.com/8'), atime=12349)
times = [x.atime for x in
web_history.entries_before(12348, limit=3, offset=2)]
assert times == [12348, 12347, 12346]
class TestDelete:
def test_clear(self, qtbot, tmpdir, web_history, mocker):
web_history.add_url(QUrl('http://example.com/'))
web_history.add_url(QUrl('http://www.qutebrowser.org/'))
m = mocker.patch('qutebrowser.browser.history.message.confirm_async',
new=mocker.Mock, spec=[])
history.history_clear()
assert m.called
def test_clear_force(self, qtbot, tmpdir, web_history):
web_history.add_url(QUrl('http://example.com/'))
web_history.add_url(QUrl('http://www.qutebrowser.org/'))
history.history_clear(force=True)
assert not len(web_history)
assert not len(web_history.completion)
@pytest.mark.parametrize('raw, escaped', [
('http://example.com/1', 'http://example.com/1'),
('http://example.com/1 2', 'http://example.com/1%202'),
])
def test_delete_url(self, web_history, raw, escaped):
web_history.add_url(QUrl('http://example.com/'), atime=0)
web_history.add_url(QUrl(escaped), atime=0)
web_history.add_url(QUrl('http://example.com/2'), atime=0)
before = set(web_history)
completion_before = set(web_history.completion)
web_history.delete_url(QUrl(raw))
diff = before.difference(set(web_history))
assert diff == {(escaped, '', 0, False)}
completion_diff = completion_before.difference(
set(web_history.completion))
assert completion_diff == {(raw, '', 0)}
class TestAdd:
@pytest.fixture()
def mock_time(self, mocker):
m = mocker.patch('qutebrowser.browser.history.time')
m.time.return_value = 12345
return 12345
@pytest.mark.parametrize(
'url, atime, title, redirect, history_url, completion_url', [
('http://www.example.com', 12346, 'the title', False,
'http://www.example.com', 'http://www.example.com'),
('http://www.example.com', 12346, 'the title', True,
'http://www.example.com', None),
('http://www.example.com/sp ce', 12346, 'the title', False,
'http://www.example.com/sp%20ce', 'http://www.example.com/sp ce'),
('https://user:[email protected]', 12346, 'the title', False,
'https://[email protected]', 'https://[email protected]'),
]
)
def test_add_url(self, qtbot, web_history,
url, atime, title, redirect, history_url, completion_url):
web_history.add_url(QUrl(url), atime=atime, title=title,
redirect=redirect)
assert list(web_history) == [(history_url, title, atime, redirect)]
if completion_url is None:
assert not len(web_history.completion)
else:
expected = [(completion_url, title, atime)]
assert list(web_history.completion) == expected
def test_no_sql_web_history(self, web_history, monkeypatch):
monkeypatch.setattr(objects, 'debug_flags', {'no-sql-history'})
web_history.add_url(QUrl('https://www.example.com/'), atime=12346,
title='Hello World', redirect=False)
assert not list(web_history)
def test_invalid(self, qtbot, web_history, caplog):
with caplog.at_level(logging.WARNING):
web_history.add_url(QUrl())
assert not list(web_history)
assert not list(web_history.completion)
@pytest.mark.parametrize('known_error', [True, False])
@pytest.mark.parametrize('completion', [True, False])
def test_error(self, monkeypatch, web_history, message_mock, caplog,
known_error, completion):
def raise_error(url, replace=False):
if known_error:
raise sql.KnownError("Error message")
raise sql.BugError("Error message")
if completion:
monkeypatch.setattr(web_history.completion, 'insert', raise_error)
else:
monkeypatch.setattr(web_history, 'insert', raise_error)
if known_error:
with caplog.at_level(logging.ERROR):
web_history.add_url(QUrl('https://www.example.org/'))
msg = message_mock.getmsg(usertypes.MessageLevel.error)
assert msg.text == "Failed to write history: Error message"
else:
with pytest.raises(sql.BugError):
web_history.add_url(QUrl('https://www.example.org/'))
@pytest.mark.parametrize('level, url, req_url, expected', [
(logging.DEBUG, 'a.com', 'a.com', [('a.com', 'title', 12345, False)]),
(logging.DEBUG, 'a.com', 'b.com', [('a.com', 'title', 12345, False),
('b.com', 'title', 12345, True)]),
(logging.WARNING, 'a.com', '', [('a.com', 'title', 12345, False)]),
(logging.WARNING, '', '', []),
(logging.WARNING, 'data:foo', '', []),
(logging.WARNING, 'a.com', 'data:foo', []),
])
def test_from_tab(self, web_history, caplog, mock_time,
level, url, req_url, expected):
with caplog.at_level(level):
web_history.add_from_tab(QUrl(url), QUrl(req_url), 'title')
assert set(web_history) == set(expected)
def test_exclude(self, web_history, config_stub):
"""Excluded URLs should be in the history but not completion."""
config_stub.val.completion.web_history.exclude = ['*.example.org']
url = QUrl('http://www.example.org/')
web_history.add_from_tab(url, url, 'title')
assert list(web_history)
assert not list(web_history.completion)
def test_no_immediate_duplicates(self, web_history, mock_time):
url = QUrl("http://example.com")
url2 = QUrl("http://example2.com")
web_history.add_from_tab(QUrl(url), QUrl(url), 'title')
hist = list(web_history)
assert hist
web_history.add_from_tab(QUrl(url), QUrl(url), 'title')
assert list(web_history) == hist
web_history.add_from_tab(QUrl(url2), QUrl(url2), 'title')
assert list(web_history) != hist
def test_delete_add_tab(self, web_history, mock_time):
url = QUrl("http://example.com")
web_history.add_from_tab(QUrl(url), QUrl(url), 'title')
hist = list(web_history)
assert hist
web_history.delete_url(QUrl(url))
assert len(web_history) == 0
web_history.add_from_tab(QUrl(url), QUrl(url), 'title')
assert list(web_history) == hist
def test_clear_add_tab(self, web_history, mock_time):
url = QUrl("http://example.com")
web_history.add_from_tab(QUrl(url), QUrl(url), 'title')
hist = list(web_history)
assert hist
history.history_clear(force=True)
assert len(web_history) == 0
web_history.add_from_tab(QUrl(url), QUrl(url), 'title')
assert list(web_history) == hist
class TestHistoryInterface:
@pytest.fixture
def hist_interface(self, web_history):
# pylint: disable=invalid-name
QtWebKit = pytest.importorskip('PyQt5.QtWebKit')
from qutebrowser.browser.webkit import webkithistory
QWebHistoryInterface = QtWebKit.QWebHistoryInterface
# pylint: enable=invalid-name
web_history.add_url(url=QUrl('http://www.example.com/'),
title='example')
interface = webkithistory.WebHistoryInterface(web_history)
QWebHistoryInterface.setDefaultInterface(interface)
yield
QWebHistoryInterface.setDefaultInterface(None)
def test_history_interface(self, qtbot, webview, hist_interface):
html = b"<a href='about:blank'>foo</a>"
url = urlutils.data_url('text/html', html)
with qtbot.waitSignal(webview.loadFinished):
webview.load(url)
class TestInit:
@pytest.fixture
def cleanup_init(self):
# prevent test_init from leaking state
yield
if history.web_history is not None:
history.web_history.setParent(None)
history.web_history = None
try:
from PyQt5.QtWebKit import QWebHistoryInterface
QWebHistoryInterface.setDefaultInterface(None)
except ImportError:
pass
@pytest.mark.parametrize('backend', [usertypes.Backend.QtWebEngine,
usertypes.Backend.QtWebKit])
def test_init(self, backend, qapp, tmpdir, monkeypatch, cleanup_init):
if backend == usertypes.Backend.QtWebKit:
pytest.importorskip('PyQt5.QtWebKitWidgets')
else:
assert backend == usertypes.Backend.QtWebEngine
monkeypatch.setattr(history.objects, 'backend', backend)
history.init(qapp)
assert history.web_history.parent() is qapp
try:
from PyQt5.QtWebKit import QWebHistoryInterface
except ImportError:
QWebHistoryInterface = None
if backend == usertypes.Backend.QtWebKit:
default_interface = QWebHistoryInterface.defaultInterface()
assert default_interface._history is history.web_history
else:
assert backend == usertypes.Backend.QtWebEngine
if QWebHistoryInterface is None:
default_interface = None
else:
default_interface = QWebHistoryInterface.defaultInterface()
# For this to work, nothing can ever have called
# setDefaultInterface before (so we need to test webengine before
# webkit)
assert default_interface is None
class TestDump:
def test_debug_dump_history(self, web_history, tmpdir):
web_history.add_url(QUrl('http://example.com/1'),
title="Title1", atime=12345)
web_history.add_url(QUrl('http://example.com/2'),
title="Title2", atime=12346)
web_history.add_url(QUrl('http://example.com/3'),
title="Title3", atime=12347)
web_history.add_url(QUrl('http://example.com/4'),
title="Title4", atime=12348, redirect=True)
histfile = tmpdir / 'history'
history.debug_dump_history(str(histfile))
expected = ['12345 http://example.com/1 Title1',
'12346 http://example.com/2 Title2',
'12347 http://example.com/3 Title3',
'12348-r http://example.com/4 Title4']
assert histfile.read() == '\n'.join(expected)
def test_nonexistent(self, web_history, tmpdir):
histfile = tmpdir / 'nonexistent' / 'history'
with pytest.raises(cmdutils.CommandError):
history.debug_dump_history(str(histfile))
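# The rebuild tests below check that the completion table is regenerated from
# the history table only when it is empty, when _USER_VERSION changes, when the
# force_rebuild metainfo flag is set, or when the exclusion patterns change.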
class TestRebuild:
def test_delete(self, web_history, stubs):
web_history.insert({'url': 'example.com/1', 'title': 'example1',
'redirect': False, 'atime': 1})
web_history.insert({'url': 'example.com/1', 'title': 'example1',
'redirect': False, 'atime': 2})
web_history.insert({'url': 'example.com/2%203', 'title': 'example2',
'redirect': False, 'atime': 3})
web_history.insert({'url': 'example.com/3', 'title': 'example3',
'redirect': True, 'atime': 4})
web_history.insert({'url': 'example.com/2 3', 'title': 'example2',
'redirect': False, 'atime': 5})
web_history.completion.delete_all()
hist2 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist2.completion) == [
('example.com/1', 'example1', 2),
('example.com/2 3', 'example2', 5),
]
def test_no_rebuild(self, web_history, stubs):
"""Ensure that completion is not regenerated unless empty."""
web_history.add_url(QUrl('example.com/1'), redirect=False, atime=1)
web_history.add_url(QUrl('example.com/2'), redirect=False, atime=2)
web_history.completion.delete('url', 'example.com/2')
hist2 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist2.completion) == [('example.com/1', '', 1)]
def test_user_version(self, web_history, stubs, monkeypatch):
"""Ensure that completion is regenerated if user_version changes."""
web_history.add_url(QUrl('example.com/1'), redirect=False, atime=1)
web_history.add_url(QUrl('example.com/2'), redirect=False, atime=2)
web_history.completion.delete('url', 'example.com/2')
hist2 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist2.completion) == [('example.com/1', '', 1)]
monkeypatch.setattr(history, '_USER_VERSION',
history._USER_VERSION + 1)
hist3 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist3.completion) == [
('example.com/1', '', 1),
('example.com/2', '', 2),
]
def test_force_rebuild(self, web_history, stubs):
"""Ensure that completion is regenerated if we force a rebuild."""
web_history.add_url(QUrl('example.com/1'), redirect=False, atime=1)
web_history.add_url(QUrl('example.com/2'), redirect=False, atime=2)
web_history.completion.delete('url', 'example.com/2')
hist2 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist2.completion) == [('example.com/1', '', 1)]
hist2.metainfo['force_rebuild'] = True
hist3 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist3.completion) == [
('example.com/1', '', 1),
('example.com/2', '', 2),
]
assert not hist3.metainfo['force_rebuild']
def test_exclude(self, config_stub, web_history, stubs):
"""Ensure that patterns in completion.web_history.exclude are ignored.
This setting should only be used for the completion.
"""
config_stub.val.completion.web_history.exclude = ['*.example.org']
assert web_history.metainfo['force_rebuild']
web_history.add_url(QUrl('http://example.com'),
redirect=False, atime=1)
web_history.add_url(QUrl('http://example.org'),
redirect=False, atime=2)
hist2 = history.WebHistory(progress=stubs.FakeHistoryProgress())
assert list(hist2.completion) == [('http://example.com', '', 1)]
def test_unrelated_config_change(self, config_stub, web_history):
config_stub.val.history_gap_interval = 1234
assert not web_history.metainfo['force_rebuild']
@pytest.mark.parametrize('patch_threshold', [True, False])
def test_progress(self, web_history, config_stub, monkeypatch, stubs,
patch_threshold):
web_history.add_url(QUrl('example.com/1'), redirect=False, atime=1)
web_history.add_url(QUrl('example.com/2'), redirect=False, atime=2)
web_history.metainfo['force_rebuild'] = True
if patch_threshold:
monkeypatch.setattr(history.WebHistory, '_PROGRESS_THRESHOLD', 1)
progress = stubs.FakeHistoryProgress()
history.WebHistory(progress=progress)
assert progress._value == 2
assert progress._finished
assert progress._started == patch_threshold
class TestCompletionMetaInfo:
@pytest.fixture
def metainfo(self):
return history.CompletionMetaInfo()
def test_contains_keyerror(self, metainfo):
with pytest.raises(KeyError):
'does_not_exist' in metainfo # pylint: disable=pointless-statement
def test_getitem_keyerror(self, metainfo):
with pytest.raises(KeyError):
metainfo['does_not_exist'] # pylint: disable=pointless-statement
def test_setitem_keyerror(self, metainfo):
with pytest.raises(KeyError):
metainfo['does_not_exist'] = 42
def test_contains(self, metainfo):
assert 'force_rebuild' in metainfo
def test_modify(self, metainfo):
assert not metainfo['force_rebuild']
metainfo['force_rebuild'] = True
assert metainfo['force_rebuild']
class TestHistoryProgress:
@pytest.fixture
def progress(self):
return history.HistoryProgress()
def test_no_start(self, progress):
"""Test calling tick/finish without start."""
progress.tick()
progress.finish()
assert progress._progress is None
assert progress._value == 1
def test_gui(self, qtbot, progress):
progress.start("Hello World", 42)
dialog = progress._progress
qtbot.add_widget(dialog)
progress.tick()
assert dialog.isVisible()
assert dialog.labelText() == "Hello World"
assert dialog.minimum() == 0
assert dialog.maximum() == 42
assert dialog.value() == 1
assert dialog.minimumDuration() == 500
progress.finish()
assert not dialog.isVisible()
|
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, DEVICE_CLASS_POWER
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_registry import async_get_registry
from .const import (
ATTRIBUTION,
DOMAIN,
MDI_ICONS,
SENSE_DATA,
SENSE_DEVICE_UPDATE,
SENSE_DEVICES_DATA,
SENSE_DISCOVERED_DEVICES_DATA,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Sense binary sensor."""
data = hass.data[DOMAIN][config_entry.entry_id][SENSE_DATA]
sense_devices_data = hass.data[DOMAIN][config_entry.entry_id][SENSE_DEVICES_DATA]
sense_monitor_id = data.sense_monitor_id
sense_devices = hass.data[DOMAIN][config_entry.entry_id][
SENSE_DISCOVERED_DEVICES_DATA
]
devices = [
SenseDevice(sense_devices_data, device, sense_monitor_id)
for device in sense_devices
if device["tags"]["DeviceListAllowed"] == "true"
]
await _migrate_old_unique_ids(hass, devices)
async_add_entities(devices)
async def _migrate_old_unique_ids(hass, devices):
registry = await async_get_registry(hass)
for device in devices:
# Migrate old, not-so-unique ids to the new unique_id format
old_entity_id = registry.async_get_entity_id(
"binary_sensor", DOMAIN, device.old_unique_id
)
if old_entity_id is not None:
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
device.old_unique_id,
device.unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=device.unique_id)
def sense_to_mdi(sense_icon):
"""Convert sense icon to mdi icon."""
return "mdi:{}".format(MDI_ICONS.get(sense_icon, "power-plug"))
class SenseDevice(BinarySensorEntity):
"""Implementation of a Sense energy device binary sensor."""
def __init__(self, sense_devices_data, device, sense_monitor_id):
"""Initialize the Sense binary sensor."""
self._name = device["name"]
self._id = device["id"]
self._sense_monitor_id = sense_monitor_id
self._unique_id = f"{sense_monitor_id}-{self._id}"
self._icon = sense_to_mdi(device["icon"])
self._sense_devices_data = sense_devices_data
self._state = None
self._available = False
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def available(self):
"""Return the availability of the binary sensor."""
return self._available
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique id of the binary sensor."""
return self._unique_id
@property
def old_unique_id(self):
"""Return the old not so unique id of the binary sensor."""
return self._id
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def icon(self):
"""Return the icon of the binary sensor."""
return self._icon
@property
def device_class(self):
"""Return the device class of the binary sensor."""
return DEVICE_CLASS_POWER
@property
def should_poll(self):
"""Return the deviceshould not poll for updates."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}",
self._async_update_from_data,
)
)
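# State changes are pushed via the dispatcher signal registered above;
# combined with should_poll returning False, the entity never polls.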
@callback
def _async_update_from_data(self):
"""Get the latest data, update state. Must not do I/O."""
new_state = bool(self._sense_devices_data.get_device_by_id(self._id))
if self._available and self._state == new_state:
return
self._available = True
self._state = new_state
self.async_write_ha_state()
|
import functools
import operator
import os
import pathlib
from contextlib import suppress
import numpy as np
from .. import coding
from ..coding.variables import pop_to
from ..core import indexing
from ..core.utils import FrozenDict, close_on_error, is_remote_uri
from ..core.variable import Variable
from .common import (
BackendArray,
BackendEntrypoint,
WritableCFDataStore,
find_root_and_group,
robust_getitem,
)
from .file_manager import CachingFileManager, DummyFileManager
from .locks import HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock, get_write_lock
from .netcdf3 import encode_nc3_attr_value, encode_nc3_variable
from .store import open_backend_dataset_store
# This lookup table maps from dtype.byteorder to a readable endian
# string used by netCDF4.
_endian_lookup = {"=": "native", ">": "big", "<": "little", "|": "native"}
NETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK])
class BaseNetCDF4Array(BackendArray):
__slots__ = ("datastore", "dtype", "shape", "variable_name")
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
if dtype is str:
# use object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype("O")
self.dtype = dtype
def __setitem__(self, key, value):
with self.datastore.lock:
data = self.get_array(needs_lock=False)
data[key] = value
if self.datastore.autoclose:
self.datastore.close(needs_lock=False)
def get_array(self, needs_lock=True):
raise NotImplementedError("Virtual Method")
class NetCDF4ArrayWrapper(BaseNetCDF4Array):
__slots__ = ()
def get_array(self, needs_lock=True):
ds = self.datastore._acquire(needs_lock)
variable = ds.variables[self.variable_name]
variable.set_auto_maskandscale(False)
# only added in netCDF4-python v1.2.8
with suppress(AttributeError):
variable.set_auto_chartostring(False)
return variable
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _getitem(self, key):
if self.datastore.is_remote: # pragma: no cover
getitem = functools.partial(robust_getitem, catch=RuntimeError)
else:
getitem = operator.getitem
try:
with self.datastore.lock:
original_array = self.get_array(needs_lock=False)
array = getitem(original_array, key)
except IndexError:
# Catch IndexError in netCDF4 and return a more informative
# error message. This is most often called when an unsorted
# indexer is used before the data is loaded from disk.
msg = (
"The indexing operation you are attempting to perform "
"is not valid on netCDF4.Variable object. Try loading "
"your data into memory first by calling .load()."
)
raise IndexError(msg)
return array
def _encode_nc4_variable(var):
for coder in [
coding.strings.EncodedStringCoder(allows_unicode=True),
coding.strings.CharacterArrayCoder(),
]:
var = coder.encode(var)
return var
def _check_encoding_dtype_is_vlen_string(dtype):
if dtype is not str:
raise AssertionError( # pragma: no cover
"unexpected dtype encoding %r. This shouldn't happen: please "
"file a bug report at github.com/pydata/xarray" % dtype
)
def _get_datatype(var, nc_format="NETCDF4", raise_on_invalid_encoding=False):
if nc_format == "NETCDF4":
datatype = _nc4_dtype(var)
else:
if "dtype" in var.encoding:
encoded_dtype = var.encoding["dtype"]
_check_encoding_dtype_is_vlen_string(encoded_dtype)
if raise_on_invalid_encoding:
raise ValueError(
"encoding dtype=str for vlen strings is only supported "
"with format='NETCDF4'."
)
datatype = var.dtype
return datatype
def _nc4_dtype(var):
if "dtype" in var.encoding:
dtype = var.encoding.pop("dtype")
_check_encoding_dtype_is_vlen_string(dtype)
elif coding.strings.is_unicode_dtype(var.dtype):
dtype = str
elif var.dtype.kind in ["i", "u", "f", "c", "S"]:
dtype = var.dtype
else:
raise ValueError(f"unsupported dtype for netCDF4 variable: {var.dtype}")
return dtype
def _netcdf4_create_group(dataset, name):
return dataset.createGroup(name)
def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
if group in {None, "", "/"}:
# use the root group
return ds
else:
# make sure it's a string
if not isinstance(group, str):
raise ValueError("group must be a string or None")
# support path-like syntax
path = group.strip("/").split("/")
for key in path:
try:
ds = ds.groups[key]
except KeyError as e:
if mode != "r":
ds = create_group(ds, key)
else:
# wrap error to provide slightly more helpful message
raise OSError("group not found: %s" % key, e)
return ds
def _ensure_fill_value_valid(data, attributes):
# work around for netCDF4/scipy issue where _FillValue has the wrong type:
# https://github.com/Unidata/netcdf4-python/issues/271
if data.dtype.kind == "S" and "_FillValue" in attributes:
attributes["_FillValue"] = np.string_(attributes["_FillValue"])
def _force_native_endianness(var):
# possible values for byteorder are:
# = native
# < little-endian
# > big-endian
# | not applicable
# Below we check if the data type is not native or NA
if var.dtype.byteorder not in ["=", "|"]:
# if endianness is specified explicitly, convert to the native type
data = var.data.astype(var.dtype.newbyteorder("="))
var = Variable(var.dims, data, var.attrs, var.encoding)
# if endian exists, remove it from the encoding.
var.encoding.pop("endian", None)
# if the encoding still specifies an endianness, it must be 'native'
if var.encoding.get("endian", "native") != "native":
raise NotImplementedError(
"Attempt to write non-native endian type, "
"this is not supported by the netCDF4 "
"python library."
)
return var
def _extract_nc4_variable_encoding(
variable,
raise_on_invalid=False,
lsd_okay=True,
h5py_okay=False,
backend="netCDF4",
unlimited_dims=None,
):
if unlimited_dims is None:
unlimited_dims = ()
encoding = variable.encoding.copy()
safe_to_drop = {"source", "original_shape"}
valid_encodings = {
"zlib",
"complevel",
"fletcher32",
"contiguous",
"chunksizes",
"shuffle",
"_FillValue",
"dtype",
}
if lsd_okay:
valid_encodings.add("least_significant_digit")
if h5py_okay:
valid_encodings.add("compression")
valid_encodings.add("compression_opts")
if not raise_on_invalid and encoding.get("chunksizes") is not None:
# It's possible to get encoded chunksizes larger than a dimension size
# if the original file had an unlimited dimension. This is problematic
# if the new file no longer has an unlimited dimension.
chunksizes = encoding["chunksizes"]
chunks_too_big = any(
c > d and dim not in unlimited_dims
for c, d, dim in zip(chunksizes, variable.shape, variable.dims)
)
has_original_shape = "original_shape" in encoding
changed_shape = (
has_original_shape and encoding.get("original_shape") != variable.shape
)
if chunks_too_big or changed_shape:
del encoding["chunksizes"]
var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims)
if not raise_on_invalid and var_has_unlim_dim and "contiguous" in encoding.keys():
del encoding["contiguous"]
for k in safe_to_drop:
if k in encoding:
del encoding[k]
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError(
"unexpected encoding parameters for %r backend: %r. Valid "
"encodings are: %r" % (backend, invalid, valid_encodings)
)
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
return encoding
def _is_list_of_strings(value):
if np.asarray(value).dtype.kind in ["U", "S"] and np.asarray(value).size > 1:
return True
else:
return False
class NetCDF4DataStore(WritableCFDataStore):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
__slots__ = (
"autoclose",
"format",
"is_remote",
"lock",
"_filename",
"_group",
"_manager",
"_mode",
)
def __init__(
self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False
):
import netCDF4
if isinstance(manager, netCDF4.Dataset):
if group is None:
root, group = find_root_and_group(manager)
else:
if type(manager) is not netCDF4.Dataset:
raise ValueError(
"must supply a root netCDF4.Dataset if the group "
"argument is provided"
)
root = manager
manager = DummyFileManager(root)
self._manager = manager
self._group = group
self._mode = mode
self.format = self.ds.data_model
self._filename = self.ds.filepath()
self.is_remote = is_remote_uri(self._filename)
self.lock = ensure_lock(lock)
self.autoclose = autoclose
@classmethod
def open(
cls,
filename,
mode="r",
format="NETCDF4",
group=None,
clobber=True,
diskless=False,
persist=False,
lock=None,
lock_maker=None,
autoclose=False,
):
import netCDF4
if isinstance(filename, pathlib.Path):
filename = os.fspath(filename)
if not isinstance(filename, str):
raise ValueError(
"can only read bytes or file-like objects "
"with engine='scipy' or 'h5netcdf'"
)
if format is None:
format = "NETCDF4"
if lock is None:
if mode == "r":
if is_remote_uri(filename):
lock = NETCDFC_LOCK
else:
lock = NETCDF4_PYTHON_LOCK
else:
if format is None or format.startswith("NETCDF4"):
base_lock = NETCDF4_PYTHON_LOCK
else:
base_lock = NETCDFC_LOCK
lock = combine_locks([base_lock, get_write_lock(filename)])
kwargs = dict(
clobber=clobber, diskless=diskless, persist=persist, format=format
)
manager = CachingFileManager(
netCDF4.Dataset, filename, mode=mode, kwargs=kwargs
)
return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)
def _acquire(self, needs_lock=True):
with self._manager.acquire_context(needs_lock) as root:
ds = _nc4_require_group(root, self._group, self._mode)
return ds
@property
def ds(self):
return self._acquire()
def open_store_variable(self, name, var):
dimensions = var.dimensions
data = indexing.LazilyOuterIndexedArray(NetCDF4ArrayWrapper(name, self))
attributes = {k: var.getncattr(k) for k in var.ncattrs()}
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == "contiguous":
encoding["contiguous"] = True
encoding["chunksizes"] = None
else:
encoding["contiguous"] = False
encoding["chunksizes"] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
pop_to(attributes, encoding, "least_significant_digit")
# save source so __repr__ can detect if it's local or not
encoding["source"] = self._filename
encoding["original_shape"] = var.shape
encoding["dtype"] = var.dtype
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
dsvars = FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
return dsvars
def get_attrs(self):
attrs = FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs())
return attrs
def get_dimensions(self):
dims = FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())
return dims
def get_encoding(self):
encoding = {}
encoding["unlimited_dims"] = {
k for k, v in self.ds.dimensions.items() if v.isunlimited()
}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
dim_length = length if not is_unlimited else None
self.ds.createDimension(name, size=dim_length)
def set_attribute(self, key, value):
if self.format != "NETCDF4":
value = encode_nc3_attr_value(value)
if _is_list_of_strings(value):
# encode as NC_STRING if attr is list of strings
self.ds.setncattr_string(key, value)
else:
self.ds.setncattr(key, value)
def encode_variable(self, variable):
variable = _force_native_endianness(variable)
if self.format == "NETCDF4":
variable = _encode_nc4_variable(variable)
else:
variable = encode_nc3_variable(variable)
return variable
def prepare_variable(
self, name, variable, check_encoding=False, unlimited_dims=None
):
datatype = _get_datatype(
variable, self.format, raise_on_invalid_encoding=check_encoding
)
attrs = variable.attrs.copy()
fill_value = attrs.pop("_FillValue", None)
if datatype is str and fill_value is not None:
raise NotImplementedError(
"netCDF4 does not yet support setting a fill value for "
"variable-length strings "
"(https://github.com/Unidata/netcdf4-python/issues/730). "
"Either remove '_FillValue' from encoding on variable %r "
"or set {'dtype': 'S1'} in encoding to use the fixed width "
"NC_CHAR type." % name
)
encoding = _extract_nc4_variable_encoding(
variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims
)
if name in self.ds.variables:
nc4_var = self.ds.variables[name]
else:
nc4_var = self.ds.createVariable(
varname=name,
datatype=datatype,
dimensions=variable.dims,
zlib=encoding.get("zlib", False),
complevel=encoding.get("complevel", 4),
shuffle=encoding.get("shuffle", True),
fletcher32=encoding.get("fletcher32", False),
contiguous=encoding.get("contiguous", False),
chunksizes=encoding.get("chunksizes"),
endian="native",
least_significant_digit=encoding.get("least_significant_digit"),
fill_value=fill_value,
)
nc4_var.setncatts(attrs)
target = NetCDF4ArrayWrapper(name, self)
return target, variable.data
def sync(self):
self.ds.sync()
def close(self, **kwargs):
self._manager.close(**kwargs)
def guess_can_open_netcdf4(store_spec):
if isinstance(store_spec, str) and is_remote_uri(store_spec):
return True
try:
_, ext = os.path.splitext(store_spec)
except TypeError:
return False
return ext in {".nc", ".nc4", ".cdf"}
def open_backend_dataset_netcdf4(
filename_or_obj,
mask_and_scale=True,
decode_times=None,
concat_characters=None,
decode_coords=None,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
group=None,
mode="r",
format="NETCDF4",
clobber=True,
diskless=False,
persist=False,
lock=None,
autoclose=False,
):
store = NetCDF4DataStore.open(
filename_or_obj,
mode=mode,
format=format,
group=group,
clobber=clobber,
diskless=diskless,
persist=persist,
lock=lock,
autoclose=autoclose,
)
with close_on_error(store):
ds = open_backend_dataset_store(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
netcdf4_backend = BackendEntrypoint(
open_dataset=open_backend_dataset_netcdf4, guess_can_open=guess_can_open_netcdf4
)
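# Illustrative note: this entrypoint is typically reached through
# xarray.open_dataset(path, engine="netcdf4"), which dispatches to
# open_backend_dataset_netcdf4 above.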
|
import contextlib
import logging
import warnings
logger = logging.getLogger(__name__)
# AWS Lambda environments do not support multiprocessing.Queue or multiprocessing.Pool.
# However, they do support threads, and therefore concurrent.futures' ThreadPoolExecutor.
# This flag allows Python 2 backward compatibility, where concurrent.futures doesn't exist.
_CONCURRENT_FUTURES = False
try:
import concurrent.futures
_CONCURRENT_FUTURES = True
except ImportError:
warnings.warn("concurrent.futures could not be imported and won't be used")
# Multiprocessing is unavailable in App Engine (and possibly other sandboxes).
# The only method currently relying on it is iter_bucket, which is instructed
# whether to use it by the MULTIPROCESSING flag.
_MULTIPROCESSING = False
try:
import multiprocessing.pool
_MULTIPROCESSING = True
except ImportError:
warnings.warn("multiprocessing could not be imported and won't be used")
class DummyPool(object):
"""A class that mimics multiprocessing.pool.Pool for our purposes."""
def imap_unordered(self, function, items):
return map(function, items)
def terminate(self):
pass
class ConcurrentFuturesPool(object):
"""A class that mimics multiprocessing.pool.Pool but uses concurrent futures instead of processes."""
def __init__(self, max_workers):
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers)
def imap_unordered(self, function, items):
futures = [self.executor.submit(function, item) for item in items]
for future in concurrent.futures.as_completed(futures):
yield future.result()
def terminate(self):
self.executor.shutdown(wait=True)
@contextlib.contextmanager
def create_pool(processes=1):
if _MULTIPROCESSING and processes:
logger.info("creating multiprocessing pool with %i workers", processes)
pool = multiprocessing.pool.Pool(processes=processes)
elif _CONCURRENT_FUTURES and processes:
logger.info("creating concurrent futures pool with %i workers", processes)
pool = ConcurrentFuturesPool(max_workers=processes)
else:
logger.info("creating dummy pool")
pool = DummyPool()
yield pool
pool.terminate()
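# Illustrative usage sketch (the inputs below are only examples): iterate
# results from whichever pool implementation create_pool selects.
#
#     with create_pool(processes=4) as pool:
#         for length in pool.imap_unordered(len, ['a', 'bb', 'ccc']):
#             print(length)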
|
from typing import Any, Callable, List, Optional, Tuple
import pyvera as veraApi
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
DOMAIN as PLATFORM_DOMAIN,
ENTITY_ID_FORMAT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
import homeassistant.util.color as color_util
from . import VeraDevice
from .common import ControllerData, get_controller_data
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
    """Set up the light config entry."""
controller_data = get_controller_data(hass, entry)
async_add_entities(
[
VeraLight(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
]
)
class VeraLight(VeraDevice[veraApi.VeraDimmer], LightEntity):
"""Representation of a Vera Light, including dimmable."""
def __init__(
self, vera_device: veraApi.VeraDimmer, controller_data: ControllerData
):
"""Initialize the light."""
self._state = False
self._color = None
self._brightness = None
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of the light."""
return self._brightness
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
"""Return the color of the light."""
return self._color
@property
def supported_features(self) -> int:
"""Flag supported features."""
if self._color:
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
return SUPPORT_BRIGHTNESS
def turn_on(self, **kwargs: Any) -> None:
"""Turn the light on."""
if ATTR_HS_COLOR in kwargs and self._color:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self.vera_device.set_color(rgb)
elif ATTR_BRIGHTNESS in kwargs and self.vera_device.is_dimmable:
self.vera_device.set_brightness(kwargs[ATTR_BRIGHTNESS])
else:
self.vera_device.switch_on()
self._state = True
self.schedule_update_ha_state(True)
    def turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
self.vera_device.switch_off()
self._state = False
self.schedule_update_ha_state()
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._state
def update(self) -> None:
"""Call to update state."""
self._state = self.vera_device.is_switched_on()
if self.vera_device.is_dimmable:
            # If the device is dimmable, both getters exist. get_color() returns
            # None when color is not supported.
self._brightness = self.vera_device.get_brightness()
rgb = self.vera_device.get_color()
self._color = color_util.color_RGB_to_hs(*rgb) if rgb else None
|
import pytest
from unittest.mock import Mock, call, patch
from kombu.asynchronous.http.curl import READ, WRITE, CurlClient
import t.skip
pytest.importorskip('pycurl')
@t.skip.if_pypy
@pytest.mark.usefixtures('hub')
class test_CurlClient:
class Client(CurlClient):
Curl = Mock(name='Curl')
def test_when_pycurl_missing(self, patching):
patching('kombu.asynchronous.http.curl.pycurl', None)
with pytest.raises(ImportError):
self.Client()
def test_max_clients_set(self):
x = self.Client(max_clients=303)
assert x.max_clients == 303
def test_init(self):
with patch('kombu.asynchronous.http.curl.pycurl') as _pycurl:
x = self.Client()
assert x._multi is not None
assert x._pending is not None
assert x._free_list is not None
assert x._fds is not None
assert x._socket_action == x._multi.socket_action
assert len(x._curls) == x.max_clients
assert x._timeout_check_tref
x._multi.setopt.assert_has_calls([
call(_pycurl.M_TIMERFUNCTION, x._set_timeout),
call(_pycurl.M_SOCKETFUNCTION, x._handle_socket),
])
def test_close(self):
with patch('kombu.asynchronous.http.curl.pycurl'):
x = self.Client()
x._timeout_check_tref = Mock(name='timeout_check_tref')
x.close()
x._timeout_check_tref.cancel.assert_called_with()
for _curl in x._curls:
_curl.close.assert_called_with()
x._multi.close.assert_called_with()
def test_add_request(self):
with patch('kombu.asynchronous.http.curl.pycurl'):
x = self.Client()
x._process_queue = Mock(name='_process_queue')
x._set_timeout = Mock(name='_set_timeout')
request = Mock(name='request')
x.add_request(request)
assert request in x._pending
x._process_queue.assert_called_with()
x._set_timeout.assert_called_with(0)
def test_handle_socket(self):
with patch('kombu.asynchronous.http.curl.pycurl') as _pycurl:
x = self.Client()
fd = Mock(name='fd1')
# POLL_REMOVE
x._fds[fd] = fd
x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)
assert fd not in x._fds
x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)
# POLL_IN
fds = [fd, Mock(name='fd2'), Mock(name='fd3')]
x._fds = {f: f for f in fds}
x._handle_socket(_pycurl.POLL_IN, fd, x._multi, None, _pycurl)
assert x._fds[fd] == READ
# POLL_OUT
x._handle_socket(_pycurl.POLL_OUT, fd, x._multi, None, _pycurl)
assert x._fds[fd] == WRITE
# POLL_INOUT
x._handle_socket(_pycurl.POLL_INOUT, fd, x._multi, None, _pycurl)
assert x._fds[fd] == READ | WRITE
# UNKNOWN EVENT
x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)
# FD NOT IN FDS
x._fds.clear()
x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)
def test_set_timeout(self):
x = self.Client()
x._set_timeout(100)
def test_timeout_check(self):
with patch('kombu.asynchronous.http.curl.pycurl') as _pycurl:
hub = Mock(name='hub')
x = self.Client(hub)
fd1, fd2 = Mock(name='fd1'), Mock(name='fd2')
x._fds = {fd1: READ}
x._process_pending_requests = Mock(name='process_pending')
def _side_effect():
x._fds = {fd2: WRITE}
return 333, 1
x._multi.socket_all.side_effect = _side_effect
_pycurl.error = KeyError
x._timeout_check(_pycurl=_pycurl)
hub.remove.assert_called_with(fd1)
hub.add_writer.assert_called_with(fd2, x.on_writable, fd2)
x._multi.socket_all.return_value = None
x._multi.socket_all.side_effect = _pycurl.error(333)
x._timeout_check(_pycurl=_pycurl)
def test_on_readable_on_writeable(self):
with patch('kombu.asynchronous.http.curl.pycurl') as _pycurl:
x = self.Client()
x._on_event = Mock(name='on_event')
fd = Mock(name='fd')
x.on_readable(fd, _pycurl=_pycurl)
x._on_event.assert_called_with(fd, _pycurl.CSELECT_IN)
x.on_writable(fd, _pycurl=_pycurl)
x._on_event.assert_called_with(fd, _pycurl.CSELECT_OUT)
|
from lark import Lark, UnexpectedInput
parser = Lark.open('multiples.lark', parser='lalr')
def is_in_grammar(data):
try:
parser.parse(data)
except UnexpectedInput:
return False
return True
for n_dec in range(100):
n_bin = bin(n_dec)[2:]
assert is_in_grammar('2:{}'.format(n_bin)) == (n_dec % 2 == 0)
assert is_in_grammar('3:{}'.format(n_bin)) == (n_dec % 3 == 0)
|
from typing import List
from arris_tg2492lg import ConnectBox, Device
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD
import homeassistant.helpers.config_validation as cv
DEFAULT_HOST = "192.168.178.1"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
}
)
def get_scanner(hass, config):
"""Return the Arris device scanner."""
conf = config[DOMAIN]
url = f"http://{conf[CONF_HOST]}"
connect_box = ConnectBox(url, conf[CONF_PASSWORD])
return ArrisDeviceScanner(connect_box)
class ArrisDeviceScanner(DeviceScanner):
    """This class queries an Arris TG2492LG router for connected devices."""
def __init__(self, connect_box: ConnectBox):
"""Initialize the scanner."""
self.connect_box = connect_box
self.last_results: List[Device] = []
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
name = next(
(result.hostname for result in self.last_results if result.mac == device),
None,
)
return name
def _update_info(self):
"""Ensure the information from the Arris TG2492LG router is up to date."""
result = self.connect_box.get_connected_devices()
last_results = []
mac_addresses = set()
for device in result:
if device.online and device.mac not in mac_addresses:
last_results.append(device)
mac_addresses.add(device.mac)
self.last_results = last_results
|
import weakref
import inspect
from ._loop import this_is_js
from ._action import BaseDescriptor
from ._dict import Dict
from . import logger
window = None
console = logger
def looks_like_method(func):
if hasattr(func, '__func__'):
return False # this is a bound method
try:
return list(inspect.signature(func).parameters)[0] in ('self', 'this')
except (TypeError, IndexError, ValueError):
return False
def reaction(*connection_strings, mode='normal'):
""" Decorator to turn a method of a Component into a
:class:`Reaction <flexx.event.Reaction>`.
A reaction can be connected to multiple event types. Each connection
string represents an event type to connect to.
Also see the
:func:`Component.reaction() <flexx.event.Component.reaction>` method.
.. code-block:: py
class MyObject(event.Component):
@event.reaction('first_name', 'last_name')
def greet(self, *events):
print('hello %s %s' % (self.first_name, self.last_name))
A reaction can operate in a few different modes. By not specifying any
connection strings, the mode is "auto": the reaction will automatically
trigger when any of the properties used in the function changes.
See :func:`get_mode() <flexx.event.Reaction.get_mode>` for details.
    Connection strings follow these syntax rules:
* Connection strings consist of parts separated by dots, thus forming a path.
If an element on the path is a property, the connection will automatically
reset when that property changes (a.k.a. dynamism, more on this below).
* Each part can end with one star ('*'), indicating that the part is a list
and that a connection should be made for each item in the list.
* With two stars, the connection is made *recursively*, e.g. "children**"
connects to "children" and the children's children, etc.
* Stripped of '*', each part must be a valid identifier (ASCII).
* The total string optionally has a label suffix separated by a colon. The
label itself may consist of any characters.
* The string can have a "!" at the very start to suppress warnings for
connections to event types that Flexx is not aware of at initialization
time (i.e. not corresponding to a property or emitter).
An extreme example could be ``"!foo.children**.text:mylabel"``, which connects
to the "text" event of the children (and their children, and their children's
children etc.) of the ``foo`` attribute. The "!" is common in cases like
this to suppress warnings if not all children have a ``text`` event/property.
"""
if (not connection_strings):
raise TypeError('reaction() needs one or more arguments.')
# Validate mode parameter
mode = mode or 'normal' # i.e. allow None
if not isinstance(mode, str):
raise TypeError('Reaction mode must be a string.')
mode = mode.lower()
if mode not in ('normal', 'greedy', 'auto'):
        raise TypeError('Reaction mode must be "normal", "greedy" or "auto".')
# Extract function if we can
func = None
if len(connection_strings) == 1 and callable(connection_strings[0]):
func = connection_strings[0]
connection_strings = []
for s in connection_strings:
if not (isinstance(s, str) and len(s) > 0):
            raise TypeError('Connection strings must be nonempty strings.')
def _connect(func):
if not callable(func):
raise TypeError('reaction() decorator requires a callable.')
if not looks_like_method(func):
raise TypeError('reaction() decorator requires a method '
'(first arg must be self).')
return ReactionDescriptor(func, mode, connection_strings)
if func is not None:
return _connect(func)
else:
return _connect
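# Hedged illustration (not part of the original module): besides explicit
# connection strings, the decorator can be applied bare, in which case the
# descriptor below falls back to the "auto" mode. The MyComponent class and
# its properties are hypothetical:
#
#     class MyComponent(Component):
#
#         @reaction('children**.text:mylabel')
#         def on_text(self, *events):
#             ...
#
#         @reaction  # no connection strings -> mode 'auto'
#         def track(self):
#             ...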
class ReactionDescriptor(BaseDescriptor):
""" Class descriptor for reactions.
"""
def __init__(self, func, mode, connection_strings, ob=None):
self._name = func.__name__
self._func = func
self._mode = mode
if len(connection_strings) == 0:
self._mode = 'auto'
self._connection_strings = connection_strings
self._ob = None if ob is None else weakref.ref(ob)
self.__doc__ = self._format_doc('reaction', self._name, func.__doc__)
def __get__(self, instance, owner):
if instance is None:
return self
private_name = '_' + self._name + '_reaction'
try:
reaction = getattr(instance, private_name)
except AttributeError:
reaction = Reaction(instance if self._ob is None else self._ob(),
(self._func, instance),
self._mode,
self._connection_strings)
setattr(instance, private_name, reaction)
# Make the reaction use *our* func one time. In most situations
# this is the same function that the reaction has, but not when
# using super(); i.e. this allows a reaction to call the same
# reaction of its super class.
reaction._use_once(self._func)
return reaction
@property
def local_connection_strings(self):
""" List of connection strings that are local to the object.
"""
# This is used in e.g. flexx.app
return [s for s in self._connection_strings if '.' not in s]
class Reaction:
""" Reaction objects are wrappers around Component methods. They connect
to one or more events. This class should not be instantiated directly;
use ``event.reaction()`` or ``Component.reaction()`` instead.
"""
_count = 0
def __init__(self, ob, func, mode, connection_strings):
Reaction._count += 1
self._id = 'r%i' % Reaction._count # to ensure a consistent event order
# Store objects using a weakref.
# - ob1 is the Component object of which the connect() method was called
# to create the reaction. Connection strings are relative to this object.
# - ob2 is the object to be passed to func (if it is a method). Is often
        # the same as ob1, but not per se. Can be None.
self._ob1 = weakref.ref(ob)
        # Get the unbound version of bound methods.
        self._ob2 = None  # if None, it's regarded as a regular function
if isinstance(func, tuple):
self._ob2 = weakref.ref(func[1])
func = func[0]
if getattr(func, '__self__', None) is not None: # builtin funcs have __self__
if getattr(func, '__func__', None) is not None:
self._ob2 = weakref.ref(func.__self__)
func = func.__func__
# Store func, name, and docstring (e.g. for sphinx docs)
assert callable(func)
assert mode in ('normal', 'greedy', 'auto')
self._func = func
self._func_once = func
self._mode = mode
self._name = func.__name__
self.__doc__ = BaseDescriptor._format_doc('reaction', self._name, func.__doc__)
self._init(connection_strings)
def _init(self, connection_strings):
""" Init of this reaction that is compatible with PScript.
"""
ichars = '0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Init explicit connections: (connection-object, type) tuples
self._connections = []
# Init implicit connections: (component, type) tuples
self._implicit_connections = []
# Notes on connection strings:
# * The string can have a "!" at the start to suppress warnings for
# connections to unknown event types.
# * The string can have a label suffix separated by a colon. The
# label may consist of any chars.
# * Connection strings consist of parts separated by dots.
# * Each part can end with one star ('*'), indicating that connections
# should be made for each item in the list, or two stars, indicating
# that connections should be made *recursively* for each item in the
# list (a.k.a. a deep connector).
# * Stripped of '*', each part must be a valid identifier.
# * An extreme example: "!foo.bar*.spam.eggs**:meh"
for ic in range(len(connection_strings)):
fullname = connection_strings[ic]
# Separate label and exclamation mark from the string path
force = fullname.startswith('!')
s, _, label = fullname.lstrip('!').partition(':')
s0 = s
            # Backwards compat: "foo.*.bar" becomes "foo*.bar"
if '.*.' in s + '.':
s = s.replace('.*', '*')
console.warn('Connection string syntax "foo.*.bar" is deprecated, '
                             'use "%s" instead of "%s".' % (s, s0))
# Help put exclamation at the start
if '!' in s:
s = s.replace('!', '')
force = True
console.warn('Exclamation marks in connection strings must come at '
'the very start, use "!%s" instead of "%s".' % (s, s0))
# Check that all parts are identifiers
parts = s.split('.')
for ipart in range(len(parts)):
part = parts[ipart].rstrip('*')
is_identifier = len(part) > 0
for i in range(len(part)):
is_identifier = is_identifier and (part[i] in ichars)
if is_identifier is False:
raise ValueError('Connection string %r contains '
'non-identifier part %r' % (s, part))
# Init connection
d = Dict() # don't do Dict(foo=x) bc PScript only supports that for dict
self._connections.append(d)
d.fullname = fullname # original, used in logs, so is searchable
d.parts = parts
d.type = parts[-1].rstrip('*') + ':' + (label or self._name)
d.force = force
d.objects = []
# Connect
for ic in range(len(self._connections)):
self.reconnect(ic)
def __repr__(self):
c = '+'.join([str(len(c.objects)) for c in self._connections])
cname = self.__class__.__name__
t = '<%s %r (%s) with %s connections at 0x%x>'
return t % (cname, self._name, self._mode, c, id(self))
def get_mode(self):
""" Get the mode for this reaction:
* 'normal': events are handled in the order that they were emitted.
Consequently, there can be multiple calls per event loop iteration
if other reactions were triggered as well.
* 'greedy': this reaction receives all its events (since the last event
loop iteration) in a single call (even if this breaks the order of
events with respect to other reactions). Use this when multiple related
          events must be handled simultaneously (e.g. when syncing properties).
* 'auto': this reaction tracks what properties it uses, and is
automatically triggered when any of these properties changes. Like
'greedy' there is at most one call per event loop iteration.
Reactions with zero connection strings always have mode 'auto'.
The 'normal' mode generally offers the most consistent behaviour.
The 'greedy' mode allows the event system to make some optimizations.
Combined with the fact that there is at most one call per event loop
iteration, this can provide higher performance where it matters.
Reactions with mode 'auto' can be a convenient way to connect things
up. Although it allows the event system to make the same optimizations
as 'greedy', it also needs to reconnect the reaction after each time
        it is called, which can degrade performance, especially if many
properties are accessed by the reaction.
"""
return self._mode
def get_name(self):
""" Get the name of this reaction, usually corresponding to the name
of the function that this reaction wraps.
"""
return self._name
def get_connection_info(self):
""" Get a list of tuples (name, connection_names), where
connection_names is a list of type names (including label) for
the made connections.
"""
return [(c.fullname, [u[1] for u in c.objects])
for c in self._connections]
## Calling / handling
def _use_once(self, func):
self._func_once = func
def __call__(self, *events):
""" Call the reaction function.
"""
func = self._func_once
self._func_once = self._func
if self._ob2 is not None:
if self._ob2() is not None:
res = func(self._ob2(), *events)
else: # pragma: no cover
                # We detected that the object that wants the events no longer exists
self.dispose()
return
else:
res = func(*events)
return res
## Connecting
def dispose(self):
""" Disconnect all connections so that there are no more references
to components.
"""
if len(self._connections) == 0 and len(self._implicit_connections) == 0:
return
if not this_is_js():
self._ob1 = lambda: None
logger.debug('Disposing reaction %r ' % self)
while len(self._implicit_connections):
ob, type = self._implicit_connections.pop(0)
ob.disconnect(type, self)
for ic in range(len(self._connections)):
connection = self._connections[ic]
while len(connection.objects) > 0:
ob, type = connection.objects.pop(0)
ob.disconnect(type, self)
self._connections = []
def _update_implicit_connections(self, connections):
""" Update the list of implicit (i.e. automatic) connections.
Used by the loop.
"""
# Init - each connection is a (component, type) tuple
old_conns = self._implicit_connections
new_conns = connections
self._implicit_connections = new_conns
# Reconnect in a smart way
self._connect_and_disconnect(old_conns, new_conns)
def _clear_component_refs(self, ob):
""" Clear all references to the given Component instance. This is
        called from a Component's dispose() method. This reaction remains
        working, but won't receive events from that object anymore.
"""
for i in range(len(self._implicit_connections)-1, -1, -1):
if self._implicit_connections[i][0] is ob:
self._implicit_connections.pop(i)
for ic in range(len(self._connections)):
connection = self._connections[ic]
for i in range(len(connection.objects)-1, -1, -1):
if connection.objects[i][0] is ob:
connection.objects.pop(i)
def reconnect(self, index):
""" (re)connect the index'th connection.
"""
connection = self._connections[index]
# Prepare disconnecting
old_objects = connection.objects # (ob, type) tuples
connection.objects = []
# Obtain root object and setup connections
ob = self._ob1()
if ob is not None:
self._seek_event_object(index, connection.parts, ob)
new_objects = connection.objects
# Verify
if len(new_objects) == 0:
raise RuntimeError('Could not connect to %r' % connection.fullname)
# Reconnect in a smart way
self._connect_and_disconnect(old_objects, new_objects, connection.force)
def _connect_and_disconnect(self, old_objects, new_objects, force=False):
""" Update connections by disconnecting old and connecting new,
but try to keep connections that do not change.
"""
# Keep track of what connections we skip, i.e. which we should not remove.
# Otherwise we may remove duplicate objects. See issue #460.
should_stay = {}
# Skip common objects from the start
i1 = 0
while (i1 < len(new_objects) and i1 < len(old_objects) and
new_objects[i1][0] is old_objects[i1][0] and
new_objects[i1][1] == old_objects[i1][1]):
should_stay[new_objects[i1][0].id + '-' + new_objects[i1][1]] = True
i1 += 1
# Skip common objects from the end
i2, i3 = len(new_objects) - 1, len(old_objects) - 1
while (i2 >= i1 and i3 >= i1 and
new_objects[i2][0] is old_objects[i3][0] and
new_objects[i2][1] == old_objects[i3][1]):
should_stay[new_objects[i2][0].id + '-' + new_objects[i2][1]] = True
i2 -= 1
i3 -= 1
# Disconnect remaining old
for i in range(i1, i3+1):
ob, type = old_objects[i]
if should_stay.get(ob.id + '-' + type, False) is False:
ob.disconnect(type, self)
# Connect remaining new
for i in range(i1, i2+1):
ob, type = new_objects[i]
ob._register_reaction(type, self, force)
def _seek_event_object(self, index, path, ob):
""" Seek an event object based on the name (PScript compatible).
The path is a list: the path to the event, the last element being the
event type.
"""
connection = self._connections[index]
# Should we make connection or stop?
if ob is None or len(path) == 0:
return # We cannot seek further
if len(path) == 1:
# Path only consists of event type now: make connection
# connection.type consists of event type name (no stars) plus a label
if hasattr(ob, '_IS_COMPONENT'):
connection.objects.append((ob, connection.type))
# Reached end or continue?
if not path[0].endswith('**'):
return
# Resolve name
obname_full, path = path[0], path[1:]
obname = obname_full.rstrip('*')
selector = obname_full[len(obname):]
# Internally, 3-star notation is used for optional selectors
if selector == '***':
self._seek_event_object(index, path, ob)
# Select object
if hasattr(ob, '_IS_COMPONENT') and obname in ob.__properties__:
name_label = obname + ':reconnect_' + str(index)
connection.objects.append((ob, name_label))
new_ob = getattr(ob, obname, None)
else:
new_ob = getattr(ob, obname, None)
# Look inside?
if len(selector) and selector in '***' and isinstance(new_ob, (tuple, list)):
if len(selector) > 1:
path = [obname + '***'] + path # recurse (avoid insert for space)
for isub in range(len(new_ob)):
self._seek_event_object(index, path, new_ob[isub])
return
elif selector == '*': # "**" is recursive, so allow more
t = "Invalid connection {name_full} because {name} is not a tuple/list."
raise RuntimeError(t.replace("{name_full}", obname_full)
.replace("{name}", obname))
else:
return self._seek_event_object(index, path, new_ob)
|
from itertools import izip
try:
import redis
from redis.sentinel import Sentinel
except ImportError:
redis = None
import diamond.collector
class SidekiqCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(SidekiqCollector,
self).get_default_config_help()
config_help.update({
'host': 'Redis hostname',
'ports': 'Redis ports',
'password': 'Redis Auth password',
'databases': 'how many database instances to collect',
'sentinel_ports': 'Redis sentinel ports',
'sentinel_name': 'Redis sentinel name',
'cluster_prefix': 'Redis cluster name prefix'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SidekiqCollector, self).get_default_config()
config.update({
'path': 'sidekiq',
'host': 'localhost',
'ports': '6379',
'password': None,
'databases': 16,
'sentinel_ports': None,
'sentinel_name': None,
'cluster_prefix': None
})
return config
def get_master(self, host, port, sentinel_port, sentinel_name):
"""
:param host: Redis host to send request
:param port: Redis port to send request
:param sentinel_port: sentinel_port optional
:param sentinel_name: sentinel_name optional
:return: master ip and port
"""
if sentinel_port and sentinel_name:
master = Sentinel([(host, sentinel_port)], socket_timeout=1)\
.discover_master(sentinel_name)
return master
return host, port
def get_redis_client(self):
"""
:return: Redis client
"""
host = self.config['host']
ports = self.config['ports']
sentinel_ports = self.config['sentinel_ports']
sentinel_name = self.config['sentinel_name']
password = self.config['password']
databases = self.config['databases']
if not isinstance(ports, list):
ports = [ports]
if not isinstance(sentinel_ports, list):
sentinel_ports = [sentinel_ports]
if sentinel_ports:
assert len(sentinel_ports) == len(ports)
else:
sentinel_ports = [None for _ in xrange(len(ports))]
for port, sentinel_port in izip(ports, sentinel_ports):
for db in xrange(0, int(databases)):
master = self.get_master(
host, port, sentinel_port, sentinel_name
)
pool = redis.ConnectionPool(
host=master[0], port=int(master[1]),
password=password, db=db
)
yield redis.Redis(connection_pool=pool), port, db
def collect(self):
"""
Collect Sidekiq metrics
:return:
"""
if redis is None:
self.log.error('Unable to import module redis')
return {}
try:
for redis_client, port, db in self.get_redis_client():
try:
self.publish_queue_length(redis_client, port, db)
self.publish_schedule_length(redis_client, port, db)
self.publish_retry_length(redis_client, port, db)
                except Exception as exception:
                    self.log.error(exception)
        except Exception as exception:
            self.log.error(exception)
def publish_schedule_length(self, redis_client, port, db):
"""
:param redis_client: Redis client
:param db: Redis Database index
:param port: Redis port
:return: Redis schedule length
"""
schedule_length = redis_client.zcard('schedule')
self.__publish(port, db, 'schedule', schedule_length)
def publish_retry_length(self, redis_client, port, db):
"""
:param redis_client: Redis client
:param db: Redis Database index
:param port: Redis port
        :return: Redis retry length
"""
retry_length = redis_client.zcard('retry')
self.__publish(port, db, 'retry', retry_length)
def publish_queue_length(self, redis_client, port, db):
"""
:param redis_client: Redis client
:param db: Redis Database index
:param port: Redis port
:return: Redis queue length
"""
for queue in redis_client.smembers('queues'):
queue_length = redis_client.llen('queue:%s' % queue)
self.__publish(port, db, queue, queue_length)
def __publish(self, port, db, queue, queue_length):
"""
:param port: Redis port
:param db: Redis db index to report
:param queue: Queue name to report
:param queue_length: Queue length to report
:return:
"""
        metric_name_segments = ['queue']
        cluster = self.config['cluster_prefix']
        if cluster:
            metric_name_segments.append(cluster)
        metric_name_segments.append(port)
        metric_name_segments.append(str(db))
        metric_name_segments.append(queue)
        self.publish_gauge(
            name='.'.join(metric_name_segments), value=queue_length
)
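# Hedged illustration (not part of the original collector) of the metric names
# produced by __publish() above; the port, db and queue values are hypothetical:
#
#   cluster_prefix = "prod", port = "6379", db = 0, queue = "default"
#       -> gauge "queue.prod.6379.0.default" = <queue length>
#   cluster_prefix = None
#       -> gauge "queue.6379.0.default" = <queue length>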
|
import logging
import ambiclimate
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.network import get_url
from .const import (
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
DOMAIN,
STORAGE_KEY,
STORAGE_VERSION,
)
DATA_AMBICLIMATE_IMPL = "ambiclimate_flow_implementation"
_LOGGER = logging.getLogger(__name__)
@callback
def register_flow_implementation(hass, client_id, client_secret):
    """Register an ambiclimate implementation.
client_id: Client id.
client_secret: Client secret.
"""
hass.data.setdefault(DATA_AMBICLIMATE_IMPL, {})
hass.data[DATA_AMBICLIMATE_IMPL] = {
CONF_CLIENT_ID: client_id,
CONF_CLIENT_SECRET: client_secret,
}
@config_entries.HANDLERS.register("ambiclimate")
class AmbiclimateFlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize flow."""
self._registered_view = False
self._oauth = None
async def async_step_user(self, user_input=None):
"""Handle external yaml configuration."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_configured")
config = self.hass.data.get(DATA_AMBICLIMATE_IMPL, {})
if not config:
_LOGGER.debug("No config")
return self.async_abort(reason="missing_configuration")
return await self.async_step_auth()
async def async_step_auth(self, user_input=None):
"""Handle a flow start."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_configured")
errors = {}
if user_input is not None:
errors["base"] = "follow_link"
if not self._registered_view:
self._generate_view()
return self.async_show_form(
step_id="auth",
description_placeholders={
"authorization_url": await self._get_authorize_url(),
"cb_url": self._cb_url(),
},
errors=errors,
)
async def async_step_code(self, code=None):
"""Received code for authentication."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_configured")
token_info = await self._get_token_info(code)
if token_info is None:
return self.async_abort(reason="access_token")
config = self.hass.data[DATA_AMBICLIMATE_IMPL].copy()
config["callback_url"] = self._cb_url()
return self.async_create_entry(title="Ambiclimate", data=config)
async def _get_token_info(self, code):
oauth = self._generate_oauth()
try:
token_info = await oauth.get_access_token(code)
except ambiclimate.AmbiclimateOauthError:
_LOGGER.error("Failed to get access token", exc_info=True)
return None
store = self.hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
await store.async_save(token_info)
return token_info
def _generate_view(self):
self.hass.http.register_view(AmbiclimateAuthCallbackView())
self._registered_view = True
def _generate_oauth(self):
config = self.hass.data[DATA_AMBICLIMATE_IMPL]
clientsession = async_get_clientsession(self.hass)
callback_url = self._cb_url()
return ambiclimate.AmbiclimateOAuth(
config.get(CONF_CLIENT_ID),
config.get(CONF_CLIENT_SECRET),
callback_url,
clientsession,
)
def _cb_url(self):
return f"{get_url(self.hass)}{AUTH_CALLBACK_PATH}"
async def _get_authorize_url(self):
oauth = self._generate_oauth()
return oauth.get_authorize_url()
class AmbiclimateAuthCallbackView(HomeAssistantView):
"""Ambiclimate Authorization Callback View."""
requires_auth = False
url = AUTH_CALLBACK_PATH
name = AUTH_CALLBACK_NAME
async def get(self, request):
"""Receive authorization token."""
code = request.query.get("code")
if code is None:
return "No code"
hass = request.app["hass"]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": "code"}, data=code
)
)
return "OK!"
|
import os
import tempfile
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from django.conf import settings
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from sentry_sdk import add_breadcrumb
from weblate_language_data.countries import DEFAULT_LANGS
from weblate.utils.hash import calculate_hash
EXPAND_LANGS = {
code[:2]: "{}_{}".format(code[:2], code[3:].upper()) for code in DEFAULT_LANGS
}
class UnitNotFound(Exception):
def __str__(self):
args = list(self.args)
if "" in args:
args.remove("")
return "Unit not found: {}".format(", ".join(args))
class UpdateError(Exception):
def __init__(self, cmd, output):
super().__init__(output)
self.cmd = cmd
self.output = output
class TranslationUnit:
"""Wrapper for translate-toolkit unit.
It handles ID/template based translations and other API differences.
"""
def __init__(self, parent, unit, template=None):
"""Create wrapper object."""
self.unit = unit
self.template = template
self.parent = parent
if template is not None:
self.mainunit = template
else:
self.mainunit = unit
def _invalidate_target(self):
"""Invalidate target cache."""
if "target" in self.__dict__:
del self.__dict__["target"]
@cached_property
def locations(self):
"""Return comma separated list of locations."""
return ""
@cached_property
def flags(self):
"""Return flags or typecomments from units."""
return ""
@cached_property
def notes(self):
"""Return notes from units."""
return ""
@cached_property
def source(self):
"""Return source string from a ttkit unit."""
raise NotImplementedError()
@cached_property
def target(self):
"""Return target string from a ttkit unit."""
raise NotImplementedError()
@cached_property
def context(self):
"""Return context of message.
In some cases we have to use ID here to make all backends consistent.
"""
raise NotImplementedError()
@cached_property
def previous_source(self):
"""Return previous message source if there was any."""
return ""
@cached_property
def id_hash(self):
"""Return hash of source string, used for quick lookup.
We use siphash as it is fast and works well for our purpose.
"""
if self.template is None:
return calculate_hash(self.source, self.context)
return calculate_hash(self.context)
def is_translated(self):
"""Check whether unit is translated."""
return bool(self.target)
    def is_approved(self, fallback=False):
        """Check whether unit is approved."""
return fallback
def is_fuzzy(self, fallback=False):
"""Check whether unit needs edit."""
return fallback
def has_content(self):
"""Check whether unit has content."""
return True
def is_readonly(self):
"""Check whether unit is read only."""
return False
def set_target(self, target):
"""Set translation unit target."""
raise NotImplementedError()
def mark_fuzzy(self, fuzzy):
"""Set fuzzy flag on translated unit."""
raise NotImplementedError()
def mark_approved(self, value):
"""Set approved flag on translated unit."""
raise NotImplementedError()
class TranslationFormat:
"""Generic object defining file format loader."""
name: str = ""
format_id: str = ""
monolingual: Optional[bool] = None
check_flags: Tuple[str, ...] = ()
unit_class: Type[TranslationUnit] = TranslationUnit
autoload: Tuple[str, ...] = ()
can_add_unit: bool = True
language_format: str = "posix"
simple_filename: bool = True
new_translation: Optional[Union[str, bytes]] = None
autoaddon: Dict[str, Dict[str, str]] = {}
@classmethod
def get_identifier(cls):
return cls.format_id
@classmethod
def parse(
cls, storefile, template_store=None, language_code=None, is_template=False
    ):
        """Parse the store and return a TranslationFormat instance.
        This wrapper is needed for AutodetectFormat to be able to return an
        instance of a different class.
"""
return cls(storefile, template_store, language_code, is_template)
def __init__(
self, storefile, template_store=None, language_code=None, is_template=False
):
"""Create file format object, wrapping up translate-toolkit's store."""
if not isinstance(storefile, str) and not hasattr(storefile, "mode"):
storefile.mode = "r"
self.storefile = storefile
# Load store
self.store = self.load(storefile, template_store)
# Remember template
self.template_store = template_store
self.is_template = is_template
self.add_breadcrumb(
"Loaded translation file {}".format(
getattr(storefile, "filename", storefile)
),
template_store=str(template_store),
is_template=is_template,
)
def check_valid(self):
"""Check store validity."""
if not self.is_valid():
raise ValueError(
_("Failed to load strings from the file, try choosing other format.")
)
def get_filenames(self):
if isinstance(self.storefile, str):
return [self.storefile]
return [self.storefile.name]
@classmethod
def load(cls, storefile, template_store):
raise NotImplementedError()
def get_plural(self, language):
"""Return matching plural object."""
return language.plural
@cached_property
def has_template(self):
"""Check whether class is using template."""
return (
self.monolingual or self.monolingual is None
) and self.template_store is not None
@cached_property
def _context_index(self):
"""ID based index for units."""
return {unit.context: unit for unit in self.mono_units}
def find_unit_mono(self, context: str) -> Optional[Any]:
try:
# The mono units always have only template set
return self._context_index[context].template
except KeyError:
return None
def _find_unit_template(self, context: str) -> Tuple[Any, bool]:
# Need to create new unit based on template
template_ttkit_unit = self.template_store.find_unit_mono(context)
if template_ttkit_unit is None:
raise UnitNotFound(context)
# We search by ID when using template
ttkit_unit = self.find_unit_mono(context)
# We always need new unit to translate
if ttkit_unit is None:
ttkit_unit = deepcopy(template_ttkit_unit)
add = True
else:
add = False
return (self.unit_class(self, ttkit_unit, template_ttkit_unit), add)
@cached_property
def _source_index(self):
"""Context and source based index for units."""
return {(unit.context, unit.source): unit for unit in self.all_units}
def _find_unit_bilingual(self, context: str, source: str) -> Tuple[Any, bool]:
try:
return (self._source_index[context, source], False)
except KeyError:
raise UnitNotFound(context, source)
def find_unit(self, context: str, source: Optional[str] = None) -> Tuple[Any, bool]:
"""Find unit by context and source.
        Returns a tuple (ttkit_unit, created) indicating whether the returned unit is new.
"""
if self.has_template:
return self._find_unit_template(context)
return self._find_unit_bilingual(context, source)
    def add_unit(self, ttkit_unit):
        """Add a new unit to the underlying store."""
raise NotImplementedError()
def update_header(self, **kwargs):
"""Update store header if available."""
return
def save_atomic(self, filename, callback):
dirname, basename = os.path.split(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
temp = tempfile.NamedTemporaryFile(prefix=basename, dir=dirname, delete=False)
try:
callback(temp)
temp.close()
os.replace(temp.name, filename)
finally:
if os.path.exists(temp.name):
os.unlink(temp.name)
    def save(self):
        """Save the underlying store to disk."""
raise NotImplementedError()
@property
def all_store_units(self):
"""Wrapper for all store units for possible filtering."""
return self.store.units
@cached_property
def mono_units(self):
return [self.unit_class(self, None, unit) for unit in self.all_store_units]
@cached_property
def all_units(self):
"""List of all units."""
if not self.has_template:
return [self.unit_class(self, unit) for unit in self.all_store_units]
return [
self.unit_class(self, self.find_unit_mono(unit.context), unit.template)
for unit in self.template_store.mono_units
]
@property
def content_units(self):
yield from (unit for unit in self.all_units if unit.has_content())
@staticmethod
def mimetype():
"""Return most common mime type for format."""
return "text/plain"
@staticmethod
def extension():
"""Return most common file extension for format."""
return "txt"
def is_valid(self):
"""Check whether store seems to be valid."""
return True
@classmethod
def is_valid_base_for_new(cls, base, monolingual, errors: Optional[List] = None):
"""Check whether base is valid."""
raise NotImplementedError()
@classmethod
def get_language_code(cls, code: str, language_format: Optional[str] = None) -> str:
"""Do any possible formatting needed for language code."""
if not language_format:
language_format = cls.language_format
return getattr(cls, f"get_language_{language_format}")(code)
@staticmethod
def get_language_posix(code: str) -> str:
return code
@staticmethod
def get_language_bcp(code: str) -> str:
return code.replace("_", "-")
@staticmethod
def get_language_posix_long(code: str) -> str:
if code in EXPAND_LANGS:
return EXPAND_LANGS[code]
return code
@classmethod
def get_language_bcp_long(cls, code: str) -> str:
return cls.get_language_posix_long(code).replace("_", "-")
@staticmethod
def get_language_android(code: str) -> str:
# Android doesn't use Hans/Hant, but rather TW/CN variants
if code == "zh_Hans":
return "zh-rCN"
if code == "zh_Hant":
return "zh-rTW"
sanitized = code.replace("-", "_")
if "_" in sanitized and len(sanitized.split("_")[1]) > 2:
return "b+{}".format(sanitized.replace("_", "+"))
return sanitized.replace("_", "-r")
@classmethod
def get_language_java(cls, code: str) -> str:
# Java doesn't use Hans/Hant, but rather TW/CN variants
if code == "zh_Hans":
return "zh-CN"
if code == "zh_Hant":
return "zh-TW"
if code == "zh_Hans_SG":
return "zh-SG"
if code == "zh_Hant_HK":
return "zh-HK"
return cls.get_language_bcp(code)
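    # Hedged illustration (not part of the original module) of the language
    # code formatters above; the input codes are hypothetical examples:
    #   get_language_bcp("pt_BR")       -> "pt-BR"
    #   get_language_android("pt_BR")   -> "pt-rBR"
    #   get_language_android("sr_Latn") -> "b+sr+Latn"
    #   get_language_java("zh_Hans")    -> "zh-CN"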
@classmethod
def get_language_filename(cls, mask: str, code: str) -> str:
"""
        Return the full filename of a language file,
        calculated from the given filemask and language code.
"""
return mask.replace("*", code)
@classmethod
def add_language(cls, filename, language, base):
"""Add new language file."""
# Create directory for a translation
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
cls.create_new_file(filename, language, base)
@classmethod
def create_new_file(cls, filename, language, base):
"""Handle creation of new translation file."""
raise NotImplementedError()
def iterate_merge(self, fuzzy):
"""Iterate over units for merging.
Note: This can change fuzzy state of units!
"""
for unit in self.content_units:
# Skip fuzzy (if asked for that)
if unit.is_fuzzy():
if not fuzzy:
continue
elif not unit.is_translated():
continue
# Unmark unit as fuzzy (to allow merge)
set_fuzzy = False
if fuzzy and unit.is_fuzzy():
unit.mark_fuzzy(False)
if fuzzy != "approve":
set_fuzzy = True
yield set_fuzzy, unit
def create_unit(self, key: str, source: Union[str, List[str]]):
raise NotImplementedError()
def new_unit(self, key: str, source: Union[str, List[str]]):
"""Add new unit to monolingual store."""
unit = self.create_unit(key, source)
self.add_unit(unit)
self.save()
@classmethod
def get_class(cls):
raise NotImplementedError()
@classmethod
def add_breadcrumb(cls, message, **data):
if settings.SENTRY_DSN:
add_breadcrumb(category="storage", message=message, data=data, level="info")
def delete_unit(self, ttkit_unit) -> Optional[str]:
raise NotImplementedError()
    def cleanup_unused(self) -> List[str]:
        """Remove unused strings, returning a list of additional changed files."""
existing = {unit.context for unit in self.template_store.mono_units}
changed = False
result = []
for ttkit_unit in self.all_store_units:
if self.unit_class(self, ttkit_unit, ttkit_unit).context not in existing:
item = self.delete_unit(ttkit_unit)
if item is not None:
result.append(item)
else:
changed = True
if changed:
self.save()
return result
def cleanup_blank(self) -> List[str]:
"""
        Remove strings without translations,
        returning a list of additional changed files.
"""
changed = False
result = []
for ttkit_unit in self.all_store_units:
target = self.unit_class(self, ttkit_unit, ttkit_unit).target
if not target or (isinstance(target, list) and not any(target)):
item = self.delete_unit(ttkit_unit)
if item is not None:
result.append(item)
else:
changed = True
if changed:
self.save()
return result
def remove_unit(self, ttkit_unit) -> List[str]:
"""High level wrapper for unit removal."""
changed = False
result = []
item = self.delete_unit(ttkit_unit)
if item is not None:
result.append(item)
else:
changed = True
if changed:
self.save()
return result
class EmptyFormat(TranslationFormat):
"""For testing purposes."""
@classmethod
def load(cls, storefile, template_store):
return type("", (object,), {"units": []})()
def save(self):
return
class BilingualUpdateMixin:
@classmethod
def do_bilingual_update(cls, in_file: str, out_file: str, template: str, **kwargs):
raise NotImplementedError()
@classmethod
def update_bilingual(cls, filename: str, template: str, **kwargs):
temp = tempfile.NamedTemporaryFile(
prefix=filename, dir=os.path.dirname(filename), delete=False
)
temp.close()
try:
cls.do_bilingual_update(filename, temp.name, template, **kwargs)
os.replace(temp.name, filename)
finally:
if os.path.exists(temp.name):
os.unlink(temp.name)
|
from __future__ import division
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.links.model.ssd import Multibox
from chainercv.links.model.ssd import Normalize
from chainercv.links.model.ssd import SSD
from chainercv import utils
# RGB, (C, 1, 1) format
_imagenet_mean = np.array((123, 117, 104)).reshape((-1, 1, 1))
class VGG16(chainer.Chain):
"""An extended VGG-16 model for SSD300 and SSD512.
This is an extended VGG-16 model proposed in [#]_.
    The differences from the original VGG-16 [#]_ are shown below.
    * :obj:`conv5_1`, :obj:`conv5_2` and :obj:`conv5_3` are changed from \
      :class:`~chainer.links.Convolution2D` to \
      :class:`~chainer.links.DilatedConvolution2D`.
* :class:`~chainercv.links.model.ssd.Normalize` is \
inserted after :obj:`conv4_3`.
* The parameters of max pooling after :obj:`conv5_3` are changed.
* :obj:`fc6` and :obj:`fc7` are converted to :obj:`conv6` and :obj:`conv7`.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan,
Christian Szegedy, Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
.. [#] Karen Simonyan, Andrew Zisserman.
Very Deep Convolutional Networks for Large-Scale Image Recognition.
ICLR 2015.
"""
def __init__(self):
super(VGG16, self).__init__()
with self.init_scope():
self.conv1_1 = L.Convolution2D(64, 3, pad=1)
self.conv1_2 = L.Convolution2D(64, 3, pad=1)
self.conv2_1 = L.Convolution2D(128, 3, pad=1)
self.conv2_2 = L.Convolution2D(128, 3, pad=1)
self.conv3_1 = L.Convolution2D(256, 3, pad=1)
self.conv3_2 = L.Convolution2D(256, 3, pad=1)
self.conv3_3 = L.Convolution2D(256, 3, pad=1)
self.conv4_1 = L.Convolution2D(512, 3, pad=1)
self.conv4_2 = L.Convolution2D(512, 3, pad=1)
self.conv4_3 = L.Convolution2D(512, 3, pad=1)
self.norm4 = Normalize(512, initial=initializers.Constant(20))
self.conv5_1 = L.DilatedConvolution2D(512, 3, pad=1)
self.conv5_2 = L.DilatedConvolution2D(512, 3, pad=1)
self.conv5_3 = L.DilatedConvolution2D(512, 3, pad=1)
self.conv6 = L.DilatedConvolution2D(1024, 3, pad=6, dilate=6)
self.conv7 = L.Convolution2D(1024, 1)
def forward(self, x):
ys = []
h = F.relu(self.conv1_1(x))
h = F.relu(self.conv1_2(h))
h = F.max_pooling_2d(h, 2)
h = F.relu(self.conv2_1(h))
h = F.relu(self.conv2_2(h))
h = F.max_pooling_2d(h, 2)
h = F.relu(self.conv3_1(h))
h = F.relu(self.conv3_2(h))
h = F.relu(self.conv3_3(h))
h = F.max_pooling_2d(h, 2)
h = F.relu(self.conv4_1(h))
h = F.relu(self.conv4_2(h))
h = F.relu(self.conv4_3(h))
ys.append(self.norm4(h))
h = F.max_pooling_2d(h, 2)
h = F.relu(self.conv5_1(h))
h = F.relu(self.conv5_2(h))
h = F.relu(self.conv5_3(h))
h = F.max_pooling_2d(h, 3, stride=1, pad=1)
h = F.relu(self.conv6(h))
h = F.relu(self.conv7(h))
ys.append(h)
return ys
class VGG16Extractor300(VGG16):
"""A VGG-16 based feature extractor for SSD300.
This is a feature extractor for :class:`~chainercv.links.model.ssd.SSD300`.
This extractor is based on :class:`~chainercv.links.model.ssd.VGG16`.
"""
insize = 300
grids = (38, 19, 10, 5, 3, 1)
def __init__(self):
init = {
'initialW': initializers.LeCunUniform(),
'initial_bias': initializers.Zero(),
}
super(VGG16Extractor300, self).__init__()
with self.init_scope():
self.conv8_1 = L.Convolution2D(256, 1, **init)
self.conv8_2 = L.Convolution2D(512, 3, stride=2, pad=1, **init)
self.conv9_1 = L.Convolution2D(128, 1, **init)
self.conv9_2 = L.Convolution2D(256, 3, stride=2, pad=1, **init)
self.conv10_1 = L.Convolution2D(128, 1, **init)
self.conv10_2 = L.Convolution2D(256, 3, **init)
self.conv11_1 = L.Convolution2D(128, 1, **init)
self.conv11_2 = L.Convolution2D(256, 3, **init)
def forward(self, x):
"""Compute feature maps from a batch of images.
This method extracts feature maps from
:obj:`conv4_3`, :obj:`conv7`, :obj:`conv8_2`,
:obj:`conv9_2`, :obj:`conv10_2`, and :obj:`conv11_2`.
Args:
x (ndarray): An array holding a batch of images.
The images should be resized to :math:`300\\times 300`.
Returns:
list of Variable:
Each variable contains a feature map.
"""
ys = super(VGG16Extractor300, self).forward(x)
for i in range(8, 11 + 1):
h = ys[-1]
h = F.relu(self['conv{:d}_1'.format(i)](h))
h = F.relu(self['conv{:d}_2'.format(i)](h))
ys.append(h)
return ys
class VGG16Extractor512(VGG16):
"""A VGG-16 based feature extractor for SSD512.
This is a feature extractor for :class:`~chainercv.links.model.ssd.SSD512`.
This extractor is based on :class:`~chainercv.links.model.ssd.VGG16`.
"""
insize = 512
grids = (64, 32, 16, 8, 4, 2, 1)
def __init__(self):
init = {
'initialW': initializers.LeCunUniform(),
'initial_bias': initializers.Zero(),
}
super(VGG16Extractor512, self).__init__()
with self.init_scope():
self.conv8_1 = L.Convolution2D(256, 1, **init)
self.conv8_2 = L.Convolution2D(512, 3, stride=2, pad=1, **init)
self.conv9_1 = L.Convolution2D(128, 1, **init)
self.conv9_2 = L.Convolution2D(256, 3, stride=2, pad=1, **init)
self.conv10_1 = L.Convolution2D(128, 1, **init)
self.conv10_2 = L.Convolution2D(256, 3, stride=2, pad=1, **init)
self.conv11_1 = L.Convolution2D(128, 1, **init)
self.conv11_2 = L.Convolution2D(256, 3, stride=2, pad=1, **init)
self.conv12_1 = L.Convolution2D(128, 1, **init)
self.conv12_2 = L.Convolution2D(256, 4, pad=1, **init)
def forward(self, x):
"""Compute feature maps from a batch of images.
This method extracts feature maps from
:obj:`conv4_3`, :obj:`conv7`, :obj:`conv8_2`,
:obj:`conv9_2`, :obj:`conv10_2`, :obj:`conv11_2`, and :obj:`conv12_2`.
Args:
x (ndarray): An array holding a batch of images.
The images should be resized to :math:`512\\times 512`.
Returns:
list of Variable:
Each variable contains a feature map.
"""
ys = super(VGG16Extractor512, self).forward(x)
for i in range(8, 12 + 1):
h = ys[-1]
h = F.relu(self['conv{:d}_1'.format(i)](h))
h = F.relu(self['conv{:d}_2'.format(i)](h))
ys.append(h)
return ys
class SSD300(SSD):
"""Single Shot Multibox Detector with 300x300 inputs.
This is a model of Single Shot Multibox Detector [#]_.
This model uses :class:`~chainercv.links.model.ssd.VGG16Extractor300` as
its feature extractor.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
n_fg_class (int): The number of classes excluding the background.
pretrained_model (string): The weight file to be loaded.
This can take :obj:`'voc0712'`, `filepath` or :obj:`None`.
The default value is :obj:`None`.
* :obj:`'voc0712'`: Load weights trained on trainval split of \
PASCAL VOC 2007 and 2012. \
The weight file is downloaded and cached automatically. \
:obj:`n_fg_class` must be :obj:`20` or :obj:`None`. \
These weights were converted from the Caffe model provided by \
`the original implementation \
<https://github.com/weiliu89/caffe/tree/ssd>`_. \
The conversion code is `chainercv/examples/ssd/caffe2npz.py`.
* :obj:`'imagenet'`: Load weights of VGG-16 trained on ImageNet. \
The weight file is downloaded and cached automatically. \
                This option initializes weights partially and the rest are \
initialized randomly. In this case, :obj:`n_fg_class` \
can be set to any number.
* `filepath`: A path of npz file. In this case, :obj:`n_fg_class` \
must be specified properly.
* :obj:`None`: Do not load weights.
"""
_models = {
'voc0712': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'ssd300_voc0712_converted_2017_06_06.npz',
'cv2': True
},
'imagenet': {
'url': 'https://chainercv-models.preferred.jp/'
'ssd_vgg16_imagenet_converted_2017_06_09.npz',
'cv2': True
},
}
def __init__(self, n_fg_class=None, pretrained_model=None):
param, path = utils.prepare_pretrained_model(
{'n_fg_class': n_fg_class}, pretrained_model, self._models)
super(SSD300, self).__init__(
extractor=VGG16Extractor300(),
multibox=Multibox(
n_class=param['n_fg_class'] + 1,
aspect_ratios=((2,), (2, 3), (2, 3), (2, 3), (2,), (2,))),
steps=(8, 16, 32, 64, 100, 300),
sizes=(30, 60, 111, 162, 213, 264, 315),
mean=_imagenet_mean)
if path:
chainer.serializers.load_npz(path, self, strict=False)
class SSD512(SSD):
"""Single Shot Multibox Detector with 512x512 inputs.
This is a model of Single Shot Multibox Detector [#]_.
This model uses :class:`~chainercv.links.model.ssd.VGG16Extractor512` as
its feature extractor.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
n_fg_class (int): The number of classes excluding the background.
pretrained_model (string): The weight file to be loaded.
This can take :obj:`'voc0712'`, `filepath` or :obj:`None`.
The default value is :obj:`None`.
* :obj:`'voc0712'`: Load weights trained on trainval split of \
PASCAL VOC 2007 and 2012. \
The weight file is downloaded and cached automatically. \
:obj:`n_fg_class` must be :obj:`20` or :obj:`None`. \
These weights were converted from the Caffe model provided by \
`the original implementation \
<https://github.com/weiliu89/caffe/tree/ssd>`_. \
The conversion code is `chainercv/examples/ssd/caffe2npz.py`.
* :obj:`'imagenet'`: Load weights of VGG-16 trained on ImageNet. \
The weight file is downloaded and cached automatically. \
                This option initializes weights partially and the rest are \
initialized randomly. In this case, :obj:`n_fg_class` \
can be set to any number.
* `filepath`: A path of npz file. In this case, :obj:`n_fg_class` \
must be specified properly.
* :obj:`None`: Do not load weights.
"""
_models = {
'voc0712': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'ssd512_voc0712_converted_2017_06_06.npz',
'cv2': True
},
'imagenet': {
'url': 'https://chainercv-models.preferred.jp/'
'ssd_vgg16_imagenet_converted_2017_06_09.npz',
'cv2': True
},
}
def __init__(self, n_fg_class=None, pretrained_model=None):
param, path = utils.prepare_pretrained_model(
{'n_fg_class': n_fg_class}, pretrained_model, self._models)
super(SSD512, self).__init__(
extractor=VGG16Extractor512(),
multibox=Multibox(
n_class=param['n_fg_class'] + 1,
aspect_ratios=(
(2,), (2, 3), (2, 3), (2, 3), (2, 3), (2,), (2,))),
steps=(8, 16, 32, 64, 128, 256, 512),
sizes=(35.84, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6),
mean=_imagenet_mean)
if path:
chainer.serializers.load_npz(path, self, strict=False)
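# Hedged usage sketch (not part of the original module): constructing SSD300
# with the converted VOC weights and running detection on a single image. The
# image path below is a hypothetical example.
if __name__ == '__main__':
    model = SSD300(pretrained_model='voc0712')
    img = utils.read_image('sample.jpg', color=True)
    bboxes, labels, scores = model.predict([img])
    print(bboxes[0], labels[0], scores[0])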
|
from homeassistant.const import CONF_UNIT_OF_MEASUREMENT, CONF_USERNAME
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import COORDINATOR, DOMAIN, GLUCOSE_TREND_ICON, GLUCOSE_VALUE_ICON, MG_DL
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Dexcom sensors."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
username = config_entry.data[CONF_USERNAME]
unit_of_measurement = config_entry.options[CONF_UNIT_OF_MEASUREMENT]
sensors = []
sensors.append(DexcomGlucoseTrendSensor(coordinator, username))
sensors.append(DexcomGlucoseValueSensor(coordinator, username, unit_of_measurement))
async_add_entities(sensors, False)
class DexcomGlucoseValueSensor(CoordinatorEntity):
"""Representation of a Dexcom glucose value sensor."""
def __init__(self, coordinator, username, unit_of_measurement):
"""Initialize the sensor."""
super().__init__(coordinator)
self._state = None
self._unit_of_measurement = unit_of_measurement
self._attribute_unit_of_measurement = (
"mg_dl" if unit_of_measurement == MG_DL else "mmol_l"
)
self._name = f"{DOMAIN}_{username}_glucose_value"
self._unique_id = f"{username}-value"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return GLUCOSE_VALUE_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the device."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
if self.coordinator.data:
return getattr(self.coordinator.data, self._attribute_unit_of_measurement)
return None
@property
def unique_id(self):
"""Device unique id."""
return self._unique_id
class DexcomGlucoseTrendSensor(CoordinatorEntity):
"""Representation of a Dexcom glucose trend sensor."""
def __init__(self, coordinator, username):
"""Initialize the sensor."""
super().__init__(coordinator)
self._state = None
self._name = f"{DOMAIN}_{username}_glucose_trend"
self._unique_id = f"{username}-trend"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
if self.coordinator.data:
return GLUCOSE_TREND_ICON[self.coordinator.data.trend]
return GLUCOSE_TREND_ICON[0]
@property
def state(self):
"""Return the state of the sensor."""
if self.coordinator.data:
return self.coordinator.data.trend_description
return None
@property
def unique_id(self):
"""Device unique id."""
return self._unique_id
|
import os.path
import pytest_bdd as bdd
from helpers import utils
bdd.scenarios('urlmarks.feature')
def _check_marks(quteproc, quickmarks, expected, contains):
"""Make sure the given line does (not) exist in the bookmarks.
Args:
quickmarks: True to check the quickmarks file instead of bookmarks.
expected: The line to search for.
contains: True if the line should be there, False otherwise.
"""
if quickmarks:
mark_file = os.path.join(quteproc.basedir, 'config', 'quickmarks')
else:
mark_file = os.path.join(quteproc.basedir, 'config', 'bookmarks',
'urls')
quteproc.clear_data() # So we don't match old messages
quteproc.send_cmd(':save')
quteproc.wait_for(message='Saved to {}'.format(mark_file))
with open(mark_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
matched_line = any(
utils.pattern_match(pattern=expected, value=line.rstrip('\n'))
for line in lines)
assert matched_line == contains, lines
@bdd.then(bdd.parsers.parse('the bookmark file should contain "{line}"'))
def bookmark_file_contains(quteproc, line):
_check_marks(quteproc, quickmarks=False, expected=line, contains=True)
@bdd.then(bdd.parsers.parse('the bookmark file should not contain "{line}"'))
def bookmark_file_does_not_contain(quteproc, line):
_check_marks(quteproc, quickmarks=False, expected=line, contains=False)
@bdd.then(bdd.parsers.parse('the quickmark file should contain "{line}"'))
def quickmark_file_contains(quteproc, line):
_check_marks(quteproc, quickmarks=True, expected=line, contains=True)
@bdd.then(bdd.parsers.parse('the quickmark file should not contain "{line}"'))
def quickmark_file_does_not_contain(quteproc, line):
_check_marks(quteproc, quickmarks=True, expected=line, contains=False)
|
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
state as state_trigger,
)
from homeassistant.const import (
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_TYPE,
PERCENTAGE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN, const
TRIGGER_TYPES = {
"current_temperature_changed",
"current_humidity_changed",
"hvac_mode_changed",
}
HVAC_MODE_TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): "hvac_mode_changed",
vol.Required(state_trigger.CONF_TO): vol.In(const.HVAC_MODES),
}
)
CURRENT_TRIGGER_SCHEMA = vol.All(
TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(
["current_temperature_changed", "current_humidity_changed"]
),
vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
TRIGGER_SCHEMA = vol.Any(HVAC_MODE_TRIGGER_SCHEMA, CURRENT_TRIGGER_SCHEMA)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Climate devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
# Add triggers for each entity that belongs to this integration
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "hvac_mode_changed",
}
)
if state and const.ATTR_CURRENT_TEMPERATURE in state.attributes:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "current_temperature_changed",
}
)
if state and const.ATTR_CURRENT_HUMIDITY in state.attributes:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "current_humidity_changed",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
trigger_type = config[CONF_TYPE]
if trigger_type == "hvac_mode_changed":
state_config = {
state_trigger.CONF_PLATFORM: "state",
state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_TO: config[state_trigger.CONF_TO],
state_trigger.CONF_FROM: [
mode
for mode in const.HVAC_MODES
if mode != config[state_trigger.CONF_TO]
],
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = state_trigger.TRIGGER_SCHEMA(state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
numeric_state_config = {
numeric_state_trigger.CONF_PLATFORM: "numeric_state",
numeric_state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
}
if trigger_type == "current_temperature_changed":
numeric_state_config[
numeric_state_trigger.CONF_VALUE_TEMPLATE
] = "{{ state.attributes.current_temperature }}"
else:
numeric_state_config[
numeric_state_trigger.CONF_VALUE_TEMPLATE
] = "{{ state.attributes.current_humidity }}"
if CONF_ABOVE in config:
numeric_state_config[CONF_ABOVE] = config[CONF_ABOVE]
if CONF_BELOW in config:
numeric_state_config[CONF_BELOW] = config[CONF_BELOW]
if CONF_FOR in config:
numeric_state_config[CONF_FOR] = config[CONF_FOR]
numeric_state_config = numeric_state_trigger.TRIGGER_SCHEMA(numeric_state_config)
return await numeric_state_trigger.async_attach_trigger(
hass, numeric_state_config, action, automation_info, platform_type="device"
)
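# Illustrative example (hypothetical entity and values): a device trigger
# config such as
#     {"platform": "device", "domain": DOMAIN, "device_id": "abc123",
#      "entity_id": "climate.living_room",
#      "type": "current_temperature_changed",
#      "above": 23, "for": {"minutes": 5}}
# is rewritten by async_attach_trigger above into a numeric_state trigger
# roughly equivalent to
#     {"platform": "numeric_state", "entity_id": "climate.living_room",
#      "value_template": "{{ state.attributes.current_temperature }}",
#      "above": 23, "for": {"minutes": 5}}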
async def async_get_trigger_capabilities(hass: HomeAssistant, config):
"""List trigger capabilities."""
trigger_type = config[CONF_TYPE]
if trigger_type == "hvac_action_changed":
return None
if trigger_type == "hvac_mode_changed":
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
if trigger_type == "current_temperature_changed":
unit_of_measurement = hass.config.units.temperature_unit
else:
unit_of_measurement = PERCENTAGE
return {
"extra_fields": vol.Schema(
{
vol.Optional(
CONF_ABOVE, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(
CONF_BELOW, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
}
|
import unittest
import numpy as np
from chainer import testing
from chainer.datasets import TupleDataset
from chainercv.datasets import SiameseDataset
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
N = 15
@testing.parameterize(
# Positive and negative samples
{'labels_0': np.arange(N, dtype=np.int32) % 3,
'labels_1': np.arange(N, dtype=np.int32) % 3,
'pos_exist': True, 'neg_exist': True,
},
# No positive
{'labels_0': np.zeros(N, dtype=np.int32),
'labels_1': np.ones(N, dtype=np.int32),
'pos_exist': False, 'neg_exist': True
},
# No negative
{'labels_0': np.ones(N, dtype=np.int32),
'labels_1': np.ones(N, dtype=np.int32),
'pos_exist': True, 'neg_exist': False},
)
class TestSiameseDataset(unittest.TestCase):
img_shape = (3, 32, 48)
def setUp(self):
np.random.shuffle(self.labels_0)
np.random.shuffle(self.labels_1)
self.dataset_0 = TupleDataset(
np.random.uniform(size=(N,) + self.img_shape), self.labels_0)
self.dataset_1 = TupleDataset(
np.random.uniform(size=(N,) + self.img_shape), self.labels_1)
self.n_class = np.max((self.labels_0, self.labels_1)) + 1
def _check_example(self, example):
assert_is_image(example[0])
self.assertEqual(example[0].shape, self.img_shape)
assert_is_image(example[2])
self.assertEqual(example[2].shape, self.img_shape)
self.assertIsInstance(example[1], np.int32)
self.assertEqual(example[1].ndim, 0)
self.assertTrue(example[1] >= 0 and example[1] < self.n_class)
self.assertIsInstance(example[3], np.int32)
self.assertEqual(example[3].ndim, 0)
        self.assertTrue(example[3] >= 0 and example[3] < self.n_class)
def test_no_pos_ratio(self):
dataset = SiameseDataset(self.dataset_0, self.dataset_1)
for i in range(10):
example = dataset[i]
self._check_example(example)
self.assertEqual(len(dataset), N)
def test_pos_ratio(self):
if self.pos_exist and self.neg_exist:
dataset = SiameseDataset(self.dataset_0, self.dataset_1, 0.5,
labels_0=self.labels_0,
labels_1=self.labels_1)
for i in range(10):
example = dataset[i]
self._check_example(example)
self.assertEqual(len(dataset), N)
else:
with self.assertRaises(ValueError):
dataset = SiameseDataset(self.dataset_0, self.dataset_1, 0.5,
labels_0=self.labels_0,
labels_1=self.labels_1)
def test_pos_ratio_equals_0(self):
if self.neg_exist:
dataset = SiameseDataset(self.dataset_0, self.dataset_1, 0)
for i in range(10):
example = dataset[i]
self._check_example(example)
if self.neg_exist:
self.assertNotEqual(example[1], example[3])
self.assertEqual(len(dataset), N)
else:
with self.assertRaises(ValueError):
dataset = SiameseDataset(self.dataset_0, self.dataset_1, 0)
def test_pos_ratio_equals_1(self):
if self.pos_exist:
dataset = SiameseDataset(self.dataset_0, self.dataset_1, 1)
for i in range(10):
example = dataset[i]
self._check_example(example)
if self.pos_exist:
self.assertEqual(example[1], example[3])
self.assertEqual(len(dataset), N)
else:
with self.assertRaises(ValueError):
dataset = SiameseDataset(self.dataset_0, self.dataset_1, 1)
def test_length_manual(self):
dataset = SiameseDataset(self.dataset_0, self.dataset_1, length=100)
self.assertEqual(len(dataset), 100)
testing.run_module(__name__, __file__)
|
from . import helpers, pprint
VOWELS = ('a', 'e', 'i', 'o', 'u')
def parser(help_msg, subcmd, subcmd_aliases=[]):
def f(subparsers, repo):
p = subparsers.add_parser(
subcmd, help=help_msg, description=help_msg.capitalize(), aliases=subcmd_aliases)
p.add_argument(
'files', nargs='+', help='the file(s) to {0}'.format(subcmd),
action=helpers.PathProcessor, repo=repo,
skip_dir_test=repo and repo.current_branch.path_is_ignored,
skip_dir_cb=lambda path: pprint.warn(
'Skipped files under directory {0} since they are all '
'ignored'.format(path)))
p.set_defaults(func=main(subcmd))
return f
def main(subcmd):
def f(args, repo):
curr_b = repo.current_branch
success = True
for fp in args.files:
try:
getattr(curr_b, subcmd + '_file')(fp)
pprint.ok(
'File {0} is now a{1} {2}{3}d file'.format(
fp, 'n' if subcmd.startswith(VOWELS) else '', subcmd,
'' if subcmd.endswith('e') else 'e'))
except KeyError:
pprint.err('Can\'t {0} non-existent file {1}'.format(subcmd, fp))
success = False
except ValueError as e:
pprint.err(e)
success = False
return success
return f
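# Illustrative output of the success message above (hypothetical file name):
# for subcmd 'track' it reads "File foo.py is now a tracked file", while for
# subcmd 'ignore' it reads "File foo.py is now an ignored file"; a leading
# vowel selects "an" and a trailing 'e' suppresses the extra 'e' before 'd'.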
|
import os
import sh
from molecule import logger
from molecule import util
from molecule.provisioner.lint import base
LOG = logger.get_logger(__name__)
class AnsibleLint(base.Base):
"""
`Ansible Lint`_ is the default role linter.
    `Ansible Lint`_ checks playbooks for practices and behaviour that could
potentially be improved.
Additional options can be passed to `ansible-lint` through the options
dict. Any option set in this section will override the defaults.
.. code-block:: yaml
provisioner:
name: ansible
lint:
name: ansible-lint
options:
exclude:
- path/exclude1
- path/exclude2
x: ["ANSIBLE0011,ANSIBLE0012"]
force-color: True
The `x` option has to be passed like this due to a `bug`_ in Ansible Lint.
The role linting can be disabled by setting `enabled` to False.
.. code-block:: yaml
provisioner:
name: ansible
lint:
name: ansible-lint
enabled: False
Environment variables can be passed to lint.
.. code-block:: yaml
provisioner:
name: ansible
lint:
name: ansible-lint
env:
FOO: bar
.. _`Ansible Lint`: https://github.com/ansible/ansible-lint
.. _`bug`: https://github.com/ansible/ansible-lint/issues/279
"""
def __init__(self, config):
"""
Sets up the requirements to execute `ansible-lint` and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(AnsibleLint, self).__init__(config)
self._ansible_lint_command = None
@property
def default_options(self):
d = {
'default_exclude': [self._config.scenario.ephemeral_directory],
'exclude': [],
'x': [],
}
if self._config.debug:
d['v'] = True
return d
@property
def default_env(self):
env = util.merge_dicts(os.environ.copy(), self._config.env)
env = util.merge_dicts(env, self._config.provisioner.env)
return env
def bake(self):
"""
        Bake an `ansible-lint` command so it's ready to execute and return
        None.
:return: None
"""
options = self.options
default_exclude_list = options.pop('default_exclude')
options_exclude_list = options.pop('exclude')
excludes = default_exclude_list + options_exclude_list
x_list = options.pop('x')
exclude_args = ['--exclude={}'.format(exclude) for exclude in excludes]
x_args = tuple(('-x', x) for x in x_list)
self._ansible_lint_command = sh.ansible_lint.bake(
options,
exclude_args,
sum(x_args, ()),
self._config.provisioner.playbooks.converge,
_env=self.env,
_out=LOG.out,
_err=LOG.error)
def execute(self):
if not self.enabled:
msg = 'Skipping, lint is disabled.'
LOG.warn(msg)
return
if self._ansible_lint_command is None:
self.bake()
msg = 'Executing Ansible Lint on {}...'.format(
self._config.provisioner.playbooks.converge)
LOG.info(msg)
try:
util.run_command(
self._ansible_lint_command, debug=self._config.debug)
msg = 'Lint completed successfully.'
LOG.success(msg)
except sh.ErrorReturnCode as e:
util.sysexit(e.exit_code)
|
from unittest import TestCase
import numpy as np
from scattertext.domain.CombineDocsIntoDomains import CombineDocsIntoDomains
from scattertext.test.test_TermDocMat import get_hamlet_term_doc_matrix
class TestCombineDocsIntoDomains(TestCase):
def test_get_new_term_doc_mat(self):
hamlet = get_hamlet_term_doc_matrix()
domains = np.arange(hamlet.get_num_docs()) % 3
tdm = CombineDocsIntoDomains(hamlet).get_new_term_doc_mat(domains)
        self.assertEqual(tdm.shape, (3, hamlet.get_num_terms()))
        self.assertEqual(tdm.sum(), hamlet.get_term_doc_mat().sum())
|
import collections.abc
from typing import Any, Dict, Hashable, Iterable, Mapping, Optional, Tuple, Union
import numpy as np
import pandas as pd
from . import formatting
from .utils import is_scalar
from .variable import Variable
def remove_unused_levels_categories(index: pd.Index) -> pd.Index:
"""
Remove unused levels from MultiIndex and unused categories from CategoricalIndex
"""
if isinstance(index, pd.MultiIndex):
index = index.remove_unused_levels()
# if it contains CategoricalIndex, we need to remove unused categories
# manually. See https://github.com/pandas-dev/pandas/issues/30846
if any(isinstance(lev, pd.CategoricalIndex) for lev in index.levels):
levels = []
for i, level in enumerate(index.levels):
if isinstance(level, pd.CategoricalIndex):
level = level[index.codes[i]].remove_unused_categories()
else:
level = level[index.codes[i]]
levels.append(level)
# TODO: calling from_array() reorders MultiIndex levels. It would
# be best to avoid this, if possible, e.g., by using
# MultiIndex.remove_unused_levels() (which does not reorder) on the
# part of the MultiIndex that is not categorical, or by fixing this
# upstream in pandas.
index = pd.MultiIndex.from_arrays(levels, names=index.names)
elif isinstance(index, pd.CategoricalIndex):
index = index.remove_unused_categories()
return index
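# A small illustration (hypothetical data, not part of xarray): the helper
# above drops both unused MultiIndex level values and unused categories.
def _example_remove_unused_categories():
    cat = pd.Categorical(["a", "a"], categories=["a", "b"])
    idx = pd.MultiIndex.from_arrays([cat, [0, 1]], names=["letter", "num"])
    # The returned MultiIndex keeps only the categories that actually occur.
    return remove_unused_levels_categories(idx)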
class Indexes(collections.abc.Mapping):
"""Immutable proxy for Dataset or DataArrary indexes."""
__slots__ = ("_indexes",)
def __init__(self, indexes):
"""Not for public consumption.
Parameters
----------
indexes : Dict[Any, pandas.Index]
Indexes held by this object.
"""
self._indexes = indexes
def __iter__(self):
return iter(self._indexes)
def __len__(self):
return len(self._indexes)
def __contains__(self, key):
return key in self._indexes
def __getitem__(self, key):
return self._indexes[key]
def __repr__(self):
return formatting.indexes_repr(self)
def default_indexes(
coords: Mapping[Any, Variable], dims: Iterable
) -> Dict[Hashable, pd.Index]:
"""Default indexes for a Dataset/DataArray.
Parameters
----------
coords : Mapping[Any, xarray.Variable]
Coordinate variables from which to draw default indexes.
dims : iterable
Iterable of dimension names.
Returns
-------
Mapping from indexing keys (levels/dimension names) to indexes used for
indexing along that dimension.
"""
return {key: coords[key].to_index() for key in dims if key in coords}
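# Illustration (hypothetical names): given a coordinate for dimension "x" and
# none for "y",
#     default_indexes({"x": Variable(("x",), [10, 20])}, ["x", "y"])
# returns {"x": pd.Index([10, 20])}; only dimensions with a matching
# coordinate receive a default index.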
def isel_variable_and_index(
name: Hashable,
variable: Variable,
index: pd.Index,
indexers: Mapping[Hashable, Union[int, slice, np.ndarray, Variable]],
) -> Tuple[Variable, Optional[pd.Index]]:
"""Index a Variable and pandas.Index together."""
if not indexers:
# nothing to index
return variable.copy(deep=False), index
if len(variable.dims) > 1:
raise NotImplementedError(
"indexing multi-dimensional variable with indexes is not supported yet"
)
new_variable = variable.isel(indexers)
if new_variable.dims != (name,):
        # can't preserve an index if result has new dimensions
return new_variable, None
# we need to compute the new index
(dim,) = variable.dims
indexer = indexers[dim]
if isinstance(indexer, Variable):
indexer = indexer.data
new_index = index[indexer]
return new_variable, new_index
def roll_index(index: pd.Index, count: int, axis: int = 0) -> pd.Index:
"""Roll an pandas.Index."""
count %= index.shape[0]
if count != 0:
return index[-count:].append(index[:-count])
else:
return index[:]
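# Small illustration: roll_index(pd.Index([1, 2, 3, 4]), 1) yields
# Index([4, 1, 2, 3]); the last `count` labels move to the front, and a count
# that is a multiple of the index length leaves it unchanged.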
def propagate_indexes(
indexes: Optional[Dict[Hashable, pd.Index]], exclude: Optional[Any] = None
) -> Optional[Dict[Hashable, pd.Index]]:
"""Creates new indexes dict from existing dict optionally excluding some dimensions."""
if exclude is None:
exclude = ()
if is_scalar(exclude):
exclude = (exclude,)
if indexes is not None:
new_indexes = {k: v for k, v in indexes.items() if k not in exclude}
else:
new_indexes = None # type: ignore
return new_indexes
|
import json
import os
import mock
import pytest
import requests
from paasta_tools.envoy_tools import are_services_up_in_pod
from paasta_tools.envoy_tools import get_backends
from paasta_tools.envoy_tools import get_casper_endpoints
from paasta_tools.envoy_tools import match_backends_and_pods
from paasta_tools.envoy_tools import match_backends_and_tasks
def test_get_backends():
testdir = os.path.dirname(os.path.realpath(__file__))
testdata = os.path.join(testdir, "envoy_admin_clusters_snapshot.txt")
with open(testdata, "r") as fd:
mock_envoy_admin_clusters_data = json.load(fd)
mock_response = mock.Mock()
mock_response.json.return_value = mock_envoy_admin_clusters_data
mock_get = mock.Mock(return_value=(mock_response))
hosts = {
"10.46.6.90": ("host2.two.com", None, None),
"10.46.6.88": ("host3.three.com", None, None),
"10.46.6.103": ("host4.four.com", None, None),
}
with mock.patch.object(requests.Session, "get", mock_get):
with mock.patch(
"socket.gethostbyaddr", side_effect=lambda x: hosts[x], autospec=True,
):
expected = {
"service1.main": [
(
{
"address": "10.46.6.88",
"port_value": 13833,
"hostname": "host3",
"eds_health_status": "HEALTHY",
"weight": 1,
},
False,
),
(
{
"address": "10.46.6.90",
"port_value": 13833,
"hostname": "host2",
"eds_health_status": "HEALTHY",
"weight": 1,
},
False,
),
]
}
assert expected == get_backends("service1.main", "host", 123, "something")
def test_get_casper_endpoints():
testdir = os.path.dirname(os.path.realpath(__file__))
testdata = os.path.join(testdir, "envoy_admin_clusters_snapshot.txt")
with open(testdata, "r") as fd:
mock_envoy_admin_clusters_data = json.load(fd)
expected = frozenset([("10.46.6.106", 13819)])
assert expected == get_casper_endpoints(mock_envoy_admin_clusters_data)
@pytest.fixture
def mock_backends():
return [
{
"address": "10.50.2.4",
"port_value": 31000,
"eds_health_status": "HEALTHY",
"weight": 1,
"has_associated_task": False,
},
{
"address": "10.50.2.5",
"port_value": 31001,
"eds_health_status": "HEALTHY",
"weight": 1,
"has_associated_task": False,
},
{
"address": "10.50.2.6",
"port_value": 31001,
"eds_health_status": "HEALTHY",
"weight": 1,
"has_associated_task": False,
},
{
"address": "10.50.2.6",
"port_value": 31002,
"eds_health_status": "HEALTHY",
"weight": 1,
"has_associated_task": False,
},
{
"address": "10.50.2.8",
"port_value": 31000,
"eds_health_status": "HEALTHY",
"weight": 1,
"has_associated_task": False,
},
]
def test_match_backends_and_tasks(mock_backends):
backends = mock_backends
good_task1 = mock.Mock(host="box4", ports=[31000])
good_task2 = mock.Mock(host="box5", ports=[31001])
bad_task = mock.Mock(host="box7", ports=[31000])
tasks = [good_task1, good_task2, bad_task]
hostnames = {
"box4": "10.50.2.4",
"box5": "10.50.2.5",
"box6": "10.50.2.6",
"box7": "10.50.2.7",
"box8": "10.50.2.8",
}
with mock.patch(
"paasta_tools.envoy_tools.socket.gethostbyname",
side_effect=lambda x: hostnames[x],
autospec=True,
):
expected = [
(backends[0], good_task1),
(backends[1], good_task2),
(None, bad_task),
(backends[2], None),
(backends[3], None),
(backends[4], None),
]
actual = match_backends_and_tasks(backends, tasks)
def keyfunc(t):
return tuple(sorted((t[0] or {}).items())), t[1]
assert sorted(actual, key=keyfunc) == sorted(expected, key=keyfunc)
def test_match_backends_and_pods(mock_backends):
backends = mock_backends
good_pod_1 = mock.Mock(status=mock.Mock(pod_ip="10.50.2.4"))
good_pod_2 = mock.Mock(status=mock.Mock(pod_ip="10.50.2.5"))
bad_pod_1 = mock.Mock(status=mock.Mock(pod_ip="10.50.2.10"))
pods = [good_pod_1, good_pod_2, bad_pod_1]
expected = [
(backends[0], good_pod_1),
(backends[1], good_pod_2),
(None, bad_pod_1),
(backends[2], None),
(backends[3], None),
(backends[4], None),
]
actual = match_backends_and_pods(backends, pods)
def keyfunc(t):
sorted_backend = tuple(sorted((t[0] or {}).items()))
pod_ip = t[1].status.pod_ip if t[1] else ""
return sorted_backend, pod_ip
assert sorted(actual, key=keyfunc) == sorted(expected, key=keyfunc)
class TestServicesUpInPod:
pod_ip = "10.40.1.1"
pod_port = 8888
@pytest.fixture
def cluster(self):
def _make_cluster(health, ip=self.pod_ip):
return (
{
"eds_health_status": health,
"address": ip,
"port_value": self.pod_port,
},
False,
)
return _make_cluster
@pytest.fixture
def mock_get_multiple_backends(self):
with mock.patch(
"paasta_tools.envoy_tools.get_multiple_backends", autospec=True
) as mock_get_multiple_backends:
yield mock_get_multiple_backends
def test_are_services_up_on_port_no_clusters(self, mock_get_multiple_backends):
mock_get_multiple_backends.return_value = {}
assert not are_services_up_in_pod(
envoy_host="1.2.3.4",
envoy_admin_port=3212,
envoy_admin_endpoint_format="http://{bla}:{more_bla}",
registrations=["service1.instance1", "service1.instance2"],
pod_ip=self.pod_ip,
pod_port=self.pod_port,
)
def test_are_services_up_on_port_all_backends_healthy(
self, mock_get_multiple_backends, cluster
):
mock_get_multiple_backends.side_effect = [
{
"service1.instance1": [
cluster("HEALTHY"),
cluster("HEALTHY"),
cluster("HEALTHY"),
]
},
{
"service1.instance2": [
cluster("HEALTHY"),
cluster("HEALTHY"),
cluster("HEALTHY"),
]
},
]
assert are_services_up_in_pod(
envoy_host="1.2.3.4",
envoy_admin_port=3212,
envoy_admin_endpoint_format="http://{bla}:{more_bla}",
registrations=["service1.instance1", "service1.instance2"],
pod_ip=self.pod_ip,
pod_port=self.pod_port,
)
def test_are_services_up_on_port_unhealthy_service(
self, mock_get_multiple_backends, cluster
):
mock_get_multiple_backends.side_effect = [
{
"service1.instance1": [
cluster("HEALTHY"),
cluster("HEALTHY"),
cluster("HEALTHY"),
]
},
{
"service1.instance2": [
cluster("UNHEALTHY"),
cluster("UNHEALTHY"),
cluster("UNHEALTHY"),
]
},
]
assert not are_services_up_in_pod(
envoy_host="1.2.3.4",
envoy_admin_port=3212,
envoy_admin_endpoint_format="http://{bla}:{more_bla}",
registrations=["service1.instance1", "service1.instance2"],
pod_ip=self.pod_ip,
pod_port=self.pod_port,
)
def test_are_services_up_on_port_partial_health_backend(
self, mock_get_multiple_backends, cluster
):
mock_get_multiple_backends.return_value = [
cluster("HEALTHY"),
cluster("HEALTHY"),
cluster("UNHEALTHY"),
]
mock_get_multiple_backends.side_effect = [
{
"service1.instance1": [
cluster("HEALTHY"),
cluster("HEALTHY"),
cluster("UNHEALTHY"),
]
},
{
"service1.instance2": [
cluster("HEALTHY"),
cluster("UNHEALTHY"),
cluster("HEALTHY"),
]
},
]
assert are_services_up_in_pod(
envoy_host="1.2.3.4",
envoy_admin_port=3212,
envoy_admin_endpoint_format="http://{bla}:{more_bla}",
registrations=["service1.instance1", "service1.instance2"],
pod_ip=self.pod_ip,
pod_port=self.pod_port,
)
def test_are_services_up_on_port_missing_backend(
self, mock_get_multiple_backends, cluster
):
mock_get_multiple_backends.side_effect = [
[cluster("HEALTHY"), cluster("HEALTHY"), cluster("HEALTHY")],
[cluster("HEALTHY"), cluster("HEALTHY"), cluster("HEALTHY")],
[],
]
mock_get_multiple_backends.side_effect = [
{
"service1.instance1": [
cluster("HEALTHY"),
cluster("HEALTHY"),
cluster("HEALTHY"),
]
},
{
"service1.instance2": [
cluster("HEALTHY"),
cluster("UNHEALTHY"),
cluster("HEALTHY"),
]
},
{},
]
# all up and present but service1.instance3 not present
assert not are_services_up_in_pod(
envoy_host="1.2.3.4",
envoy_admin_port=3212,
envoy_admin_endpoint_format="http://{bla}:{more_bla}",
registrations=[
"service1.instance1",
"service1.instance2",
"service1.instance3",
],
pod_ip=self.pod_ip,
pod_port=self.pod_port,
)
|
import copy
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange, BytesIO)
TEXT = "some ASCII text"
UTEXT = u"some klingon: \F8D2"
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
@nochange
def bench_iter_children(self, root):
for child in root:
pass
@nochange
def bench_iter_children_reversed(self, root):
for child in reversed(root):
pass
@nochange
def bench_first_child(self, root):
for i in self.repeat1000:
child = root[0]
@nochange
def bench_last_child(self, root):
for i in self.repeat1000:
child = root[-1]
@nochange
def bench_middle_child(self, root):
pos = len(root) // 2
for i in self.repeat1000:
child = root[pos]
@nochange
@with_attributes(False)
@with_text(text=True)
def bench_tostring_text_ascii(self, root):
self.etree.tostring(root, method="text")
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
def bench_tostring_text_unicode(self, root):
self.etree.tostring(root, method="text", encoding='unicode')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
def bench_tostring_text_utf16(self, root):
self.etree.tostring(root, method="text", encoding='UTF-16')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
@children
def bench_tostring_text_utf8_with_tail(self, children):
for child in children:
self.etree.tostring(child, method="text",
encoding='UTF-8', with_tail=True)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8(self, root):
self.etree.tostring(root, encoding='UTF-8')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf16(self, root):
self.etree.tostring(root, encoding='UTF-16')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8_unicode_XML(self, root):
xml = self.etree.tostring(root, encoding='UTF-8').decode('UTF-8')
self.etree.XML(xml)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_write_utf8_parse_bytesIO(self, root):
f = BytesIO()
self.etree.ElementTree(root).write(f, encoding='UTF-8')
f.seek(0)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_parse_bytesIO(self, root_xml):
f = BytesIO(root_xml)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_XML(self, root_xml):
self.etree.XML(root_xml)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_bytesIO(self, root_xml):
f = BytesIO(root_xml)
for event, element in self.etree.iterparse(f):
pass
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_bytesIO_clear(self, root_xml):
f = BytesIO(root_xml)
for event, element in self.etree.iterparse(f):
element.clear()
def bench_append_from_document(self, root1, root2):
# == "1,2 2,3 1,3 3,1 3,2 2,1" # trees 1 and 2, or 2 and 3, or ...
for el in root2:
root1.append(el)
def bench_insert_from_document(self, root1, root2):
pos = len(root1)//2
for el in root2:
root1.insert(pos, el)
pos = pos + 1
def bench_rotate_children(self, root):
# == "1 2 3" # runs on any single tree independently
for i in range(100):
el = root[0]
del root[0]
root.append(el)
def bench_reorder(self, root):
for i in range(1,len(root)//2):
el = root[0]
del root[0]
root[-i:-i] = [ el ]
def bench_reorder_slice(self, root):
for i in range(1,len(root)//2):
els = root[0:1]
del root[0]
root[-i:-i] = els
def bench_clear(self, root):
root.clear()
@nochange
@children
def bench_has_children(self, children):
for child in children:
if child and child and child and child and child:
pass
@nochange
@children
def bench_len(self, children):
for child in children:
            list(map(len, repeat(child, 20)))  # force evaluation so len() is actually called
@children
def bench_create_subelements(self, children):
SubElement = self.etree.SubElement
for child in children:
SubElement(child, '{test}test')
def bench_append_elements(self, root):
Element = self.etree.Element
for child in root:
el = Element('{test}test')
child.append(el)
@nochange
@children
def bench_makeelement(self, children):
empty_attrib = {}
for child in children:
child.makeelement('{test}test', empty_attrib)
@nochange
@children
def bench_create_elements(self, children):
Element = self.etree.Element
for child in children:
Element('{test}test')
@children
def bench_replace_children_element(self, children):
Element = self.etree.Element
for child in children:
el = Element('{test}test')
child[:] = [el]
@children
def bench_replace_children(self, children):
els = [ self.etree.Element("newchild") ]
for child in children:
child[:] = els
def bench_remove_children(self, root):
for child in root:
root.remove(child)
def bench_remove_children_reversed(self, root):
for child in reversed(root):
root.remove(child)
@children
def bench_set_attributes(self, children):
for child in children:
child.set('a', 'bla')
@with_attributes(True)
@children
@nochange
def bench_get_attributes(self, children):
for child in children:
child.get('bla1')
child.get('{attr}test1')
@children
def bench_setget_attributes(self, children):
for child in children:
child.set('a', 'bla')
for child in children:
child.get('a')
@nochange
def bench_root_getchildren(self, root):
root.getchildren()
@nochange
def bench_root_list_children(self, root):
list(root)
@nochange
@children
def bench_getchildren(self, children):
for child in children:
child.getchildren()
@nochange
@children
def bench_get_children_slice(self, children):
for child in children:
child[:]
@nochange
@children
def bench_get_children_slice_2x(self, children):
for child in children:
child[:]
child[:]
@nochange
@children
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy(self, children):
for child in children:
copy.deepcopy(child)
@nochange
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy_all(self, root):
copy.deepcopy(root)
@nochange
@children
def bench_tag(self, children):
for child in children:
child.tag
@nochange
@children
def bench_tag_repeat(self, children):
for child in children:
for i in self.repeat100:
child.tag
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text(self, children):
for child in children:
child.text
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text_repeat(self, children):
for child in children:
for i in self.repeat500:
child.text
@children
def bench_set_text(self, children):
text = TEXT
for child in children:
child.text = text
@children
def bench_set_utext(self, children):
text = UTEXT
for child in children:
child.text = text
@nochange
@onlylib('lxe')
def bench_index(self, root):
for child in root:
root.index(child)
@nochange
@onlylib('lxe')
def bench_index_slice(self, root):
for child in root[5:100]:
root.index(child, 5, 100)
@nochange
@onlylib('lxe')
def bench_index_slice_neg(self, root):
for child in root[-100:-5]:
root.index(child, start=-100, stop=-5)
@nochange
def bench_iter_all(self, root):
list(root.iter())
@nochange
def bench_iter_one_at_a_time(self, root):
list(islice(root.iter(), 2**30, None))
@nochange
def bench_iter_islice(self, root):
list(islice(root.iter(), 10, 110))
@nochange
def bench_iter_tag(self, root):
list(islice(root.iter(self.SEARCH_TAG), 3, 10))
@nochange
def bench_iter_tag_all(self, root):
list(root.iter(self.SEARCH_TAG))
@nochange
def bench_iter_tag_one_at_a_time(self, root):
list(islice(root.iter(self.SEARCH_TAG), 2**30, None))
@nochange
def bench_iter_tag_none(self, root):
list(root.iter("{ThisShould}NeverExist"))
@nochange
def bench_iter_tag_text(self, root):
[ e.text for e in root.iter(self.SEARCH_TAG) ]
@nochange
def bench_findall(self, root):
root.findall(".//*")
@nochange
def bench_findall_child(self, root):
root.findall(".//*/" + self.SEARCH_TAG)
@nochange
def bench_findall_tag(self, root):
root.findall(".//" + self.SEARCH_TAG)
@nochange
def bench_findall_path(self, root):
root.findall(".//*[%s]/./%s/./*" % (self.SEARCH_TAG, self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_xpath_path(self, root):
ns, tag = self.SEARCH_TAG[1:].split('}')
root.xpath(".//*[p:%s]/./p:%s/./*" % (tag,tag),
namespaces = {'p':ns})
@nochange
def bench_iterfind(self, root):
list(root.iterfind(".//*"))
@nochange
def bench_iterfind_tag(self, root):
list(root.iterfind(".//" + self.SEARCH_TAG))
@nochange
def bench_iterfind_islice(self, root):
list(islice(root.iterfind(".//*"), 10, 110))
_bench_xpath_single_xpath = None
@nochange
@onlylib('lxe')
def bench_xpath_single(self, root):
xpath = self._bench_xpath_single_xpath
if xpath is None:
ns, tag = self.SEARCH_TAG[1:].split('}')
xpath = self._bench_xpath_single_xpath = self.etree.XPath(
'.//p:%s[1]' % tag, namespaces={'p': ns})
xpath(root)
@nochange
def bench_find_single(self, root):
root.find(".//%s" % self.SEARCH_TAG)
@nochange
def bench_iter_single(self, root):
next(root.iter(self.SEARCH_TAG))
_bench_xpath_two_xpath = None
@nochange
@onlylib('lxe')
def bench_xpath_two(self, root):
xpath = self._bench_xpath_two_xpath
if xpath is None:
ns, tag = self.SEARCH_TAG[1:].split('}')
xpath = self._bench_xpath_two_xpath = self.etree.XPath(
'.//p:%s[position() < 3]' % tag, namespaces={'p': ns})
xpath(root)
@nochange
def bench_iterfind_two(self, root):
it = root.iterfind(".//%s" % self.SEARCH_TAG)
next(it)
next(it)
@nochange
def bench_iter_two(self, root):
it = root.iter(self.SEARCH_TAG)
next(it)
next(it)
if __name__ == '__main__':
benchbase.main(BenchMark)
|
import asyncio
import pytest
from hangups import event
def coroutine_test(coro):
    """Decorator to create a coroutine that starts and stops its own loop."""
    def wrapper(*args, **kwargs):
        future = coro(*args, **kwargs)
        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(future)
        finally:
            loop.close()
    return wrapper
@coroutine_test
async def test_event():
e = event.Event('MyEvent')
res = []
async def a(arg):
res.append('a' + arg)
async def b(arg):
res.append('b' + arg)
e.add_observer(a)
await e.fire('1')
e.add_observer(b)
await e.fire('2')
e.remove_observer(a)
await e.fire('3')
e.remove_observer(b)
await e.fire('4')
assert res == ['a1', 'a2', 'b2', 'b3']
@coroutine_test
async def test_function_observer():
e = event.Event('MyEvent')
res = []
e.add_observer(lambda arg: res.append('a' + arg))
await e.fire('1')
assert res == ['a1']
@coroutine_test
async def test_coroutine_observer():
e = event.Event('MyEvent')
res = []
async def a(arg):
res.append('a' + arg)
e.add_observer(a)
await e.fire('1')
assert res == ['a1']
def test_already_added():
def a(arg):
print('A: got {}'.format(arg))
e = event.Event('MyEvent')
e.add_observer(a)
with pytest.raises(ValueError):
e.add_observer(a)
def test_remove_nonexistant():
e = event.Event('MyEvent')
with pytest.raises(ValueError):
e.remove_observer(lambda a: print('A: got {}'.format(a)))
|
from functools import wraps
from . import filters
from .asyncsupport import auto_aiter
from .asyncsupport import auto_await
async def auto_to_seq(value):
seq = []
if hasattr(value, "__aiter__"):
async for item in value:
seq.append(item)
else:
for item in value:
seq.append(item)
return seq
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
yield item
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
if getattr(normal_filter, "environmentfilter", False) is True:
def is_async(args):
return args[0].is_async
wrap_evalctx = False
else:
has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
def is_async(args):
return args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
b = is_async(args)
if wrap_evalctx:
args = args[1:]
if b:
return async_filter(*args, **kwargs)
return normal_filter(*args, **kwargs)
if wrap_evalctx:
wrapper.evalcontextfilter = True
wrapper.asyncfiltervariant = True
return wrapper
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
return decorator
@asyncfiltervariant(filters.do_first)
async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
return environment.undefined("No first item, sequence was empty.")
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
return [
filters._GroupTuple(key, await auto_to_seq(values))
for key, values in filters.groupby(
sorted(await auto_to_seq(value), key=expr), expr
)
]
@asyncfiltervariant(filters.do_join)
async def do_join(eval_ctx, value, d="", attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@asyncfiltervariant(filters.do_list)
async def do_list(value):
return await auto_to_seq(value)
@asyncfiltervariant(filters.do_reject)
async def do_reject(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, False)
@asyncfiltervariant(filters.do_rejectattr)
async def do_rejectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, True)
@asyncfiltervariant(filters.do_select)
async def do_select(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, False)
@asyncfiltervariant(filters.do_selectattr)
async def do_selectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, True)
@asyncfiltervariant(filters.do_map)
async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
yield await auto_await(func(item))
@asyncfiltervariant(filters.do_sum)
async def do_sum(environment, iterable, attribute=None, start=0):
rv = start
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
def func(x):
return x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@asyncfiltervariant(filters.do_slice)
async def do_slice(value, slices, fill_with=None):
return filters.do_slice(await auto_to_seq(value), slices, fill_with)
ASYNC_FILTERS = {
"first": do_first,
"groupby": do_groupby,
"join": do_join,
"list": do_list,
# we intentionally do not support do_last because it may not be safe in async
"reject": do_reject,
"rejectattr": do_rejectattr,
"map": do_map,
"select": do_select,
"selectattr": do_selectattr,
"sum": do_sum,
"slice": do_slice,
}
|
from homeassistant.components.abode import (
DOMAIN as ABODE_DOMAIN,
SERVICE_CAPTURE_IMAGE,
SERVICE_SETTINGS,
SERVICE_TRIGGER_AUTOMATION,
)
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM_DOMAIN
from .common import setup_platform
from tests.async_mock import patch
async def test_change_settings(hass):
"""Test change_setting service."""
await setup_platform(hass, ALARM_DOMAIN)
with patch("abodepy.Abode.set_setting") as mock_set_setting:
await hass.services.async_call(
ABODE_DOMAIN,
SERVICE_SETTINGS,
{"setting": "confirm_snd", "value": "loud"},
blocking=True,
)
await hass.async_block_till_done()
mock_set_setting.assert_called_once()
async def test_unload_entry(hass):
"""Test unloading the Abode entry."""
mock_entry = await setup_platform(hass, ALARM_DOMAIN)
with patch("abodepy.Abode.logout") as mock_logout, patch(
"abodepy.event_controller.AbodeEventController.stop"
) as mock_events_stop:
assert await hass.config_entries.async_unload(mock_entry.entry_id)
mock_logout.assert_called_once()
mock_events_stop.assert_called_once()
assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_SETTINGS)
assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_CAPTURE_IMAGE)
assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_TRIGGER_AUTOMATION)
|
import os
from babelfish import Language
import pytest
from vcr import VCR
from subliminal.providers.napiprojekt import NapiProjektProvider, NapiProjektSubtitle
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'napiprojekt')))
def test_get_matches(movies):
subtitle = NapiProjektSubtitle(Language('pol'), '6303e7ee6a835e9fcede9fb2fb00cb36')
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'hash'}
def test_get_matches_no_match(episodes):
subtitle = NapiProjektSubtitle(Language('pol'), 'de2e9caa58dd53a6ab9d241e6b251234')
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == set()
@pytest.mark.integration
@vcr.use_cassette
def test_query(movies):
language = Language('pol')
video = movies['man_of_steel']
with NapiProjektProvider() as provider:
subtitle = provider.query(language, video.hashes['napiprojekt'])
assert subtitle.language == language
assert subtitle.content is not None
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_hash():
with NapiProjektProvider() as provider:
subtitle = provider.query(Language('pol'), 'abcdabdcabcd1234abcd1234abcd123')
assert subtitle is None
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles(episodes):
video = episodes['bbt_s07e05']
languages = {Language('pol')}
with NapiProjektProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert len(subtitles) == 1
assert {subtitle.language for subtitle in subtitles} == languages
assert subtitles[0].content is not None
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan import utils
from compare_gan.architectures import arch_ops
from compare_gan.architectures import resnet_biggan
import gin
import numpy as np
import tensorflow as tf
def guess_initializer(var, graph=None):
"""Helper function to guess the initializer of a variable.
The function looks at the operations in the initializer name space for the
variable (e.g. my_scope/my_var_name/Initializer/*). The TF core initializers
have characteristic sets of operations that can be used to determine the
initializer.
Args:
var: `tf.Variable`. The function will use the name to look for initializer
operations in the same scope.
graph: Optional `tf.Graph` that contains the variable. If None the default
graph is used.
Returns:
    Name of the guessed initializer as a string.
"""
if graph is None:
graph = tf.get_default_graph()
prefix = var.op.name + "/Initializer"
ops = [op for op in graph.get_operations()
if op.name.startswith(prefix)]
assert ops, "No operations found for prefix {}".format(prefix)
op_names = [op.name[len(prefix) + 1:] for op in ops]
if len(op_names) == 1:
if op_names[0] == "Const":
value = ops[0].get_attr("value").float_val[0]
if value == 0.0:
return "zeros"
if np.isclose(value, 1.0):
return "ones"
return "constant"
return op_names[0] # ones or zeros
if "Qr" in op_names and "DiagPart" in op_names:
return "orthogonal"
if "random_uniform" in op_names:
return "glorot_uniform"
stddev_ops = [op for op in ops if op.name.endswith("stddev")]
if stddev_ops:
assert len(stddev_ops) == 1
stddev = stddev_ops[0].get_attr("value").float_val[0]
else:
stddev = None
if "random_normal" in op_names:
return "random_normal"
if "truncated_normal" in op_names:
if len(str(stddev)) > 5:
return "glorot_normal"
return "truncated_normal"
class ResNet5BigGanTest(tf.test.TestCase):
def setUp(self):
super(ResNet5BigGanTest, self).setUp()
gin.clear_config()
def testNumberOfParameters(self):
with tf.Graph().as_default():
batch_size = 16
z = tf.zeros((batch_size, 120))
y = tf.one_hot(tf.ones((batch_size,), dtype=tf.int32), 1000)
generator = resnet_biggan.Generator(
image_shape=(128, 128, 3),
batch_norm_fn=arch_ops.conditional_batch_norm)
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [batch_size, 128, 128, 3])
discriminator = resnet_biggan.Discriminator()
predictions = discriminator(fake_images, y, is_training=True)
self.assertLen(predictions, 3)
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if "generator" in var.name]
d_vars = [var for var in t_vars if "discriminator" in var.name]
g_param_overview = utils.get_parameter_overview(g_vars, limit=None)
d_param_overview = utils.get_parameter_overview(d_vars, limit=None)
logging.info("Generator variables:\n%s", g_param_overview)
logging.info("Discriminator variables:\n%s", d_param_overview)
for v in g_vars:
parts = v.op.name.split("/")
layer, var_name = parts[-2], parts[-1]
layers_with_bias = {"fc_noise", "up_conv_shortcut", "up_conv1",
"same_conv2", "final_conv"}
# No biases in conditional BN or self-attention.
if layer not in layers_with_bias:
self.assertNotEqual(var_name, "bias", msg=str(v))
# Batch norm variables.
if parts[-3] == "condition":
if parts[-4] == "final_bn":
self.assertEqual(var_name, "kernel", msg=str(v))
self.assertEqual(v.shape.as_list(), [1, 1, 1, 96], msg=str(v))
else:
self.assertEqual(var_name, "kernel", msg=str(v))
self.assertEqual(v.shape[0].value, 148, msg=str(v))
# Embedding layer.
if layer == "embed_y":
self.assertEqual(var_name, "kernel", msg=str(v))
self.assertAllEqual(v.shape.as_list(), [1000, 128], msg=str(v))
# Shortcut connections use 1x1 convolution.
if layer == "up_conv_shortcut" and var_name == "kernel":
self.assertEqual(v.shape.as_list()[:2], [1, 1], msg=str(v))
g_num_weights = sum([v.get_shape().num_elements() for v in g_vars])
self.assertEqual(g_num_weights, 70433988)
for v in d_vars:
parts = v.op.name.split("/")
layer, var_name = parts[-2], parts[-1]
layers_with_bias = {"down_conv_shortcut", "same_conv1", "down_conv2",
"same_conv_shortcut", "same_conv2", "final_fc"}
# No biases in conditional BN or self-attention.
if layer not in layers_with_bias:
self.assertNotEqual(var_name, "bias", msg=str(v))
# no Shortcut in last block.
if parts[-3] == "B6":
self.assertNotEqual(layer, "same_shortcut", msg=str(v))
d_num_weights = sum([v.get_shape().num_elements() for v in d_vars])
self.assertEqual(d_num_weights, 87982370)
def testInitializers(self):
gin.bind_parameter("weights.initializer", "orthogonal")
with tf.Graph().as_default():
z = tf.zeros((8, 120))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 1000)
generator = resnet_biggan.Generator(
image_shape=(128, 128, 3),
batch_norm_fn=arch_ops.conditional_batch_norm)
fake_images = generator(z, y=y, is_training=True, reuse=False)
discriminator = resnet_biggan.Discriminator()
discriminator(fake_images, y, is_training=True)
for v in tf.trainable_variables():
parts = v.op.name.split("/")
layer, var_name = parts[-2], parts[-1]
initializer_name = guess_initializer(v)
logging.info("%s => %s", v.op.name, initializer_name)
if layer == "embedding_fc" and var_name == "kernel":
self.assertEqual(initializer_name, "glorot_normal")
elif layer == "non_local_block" and var_name == "sigma":
self.assertEqual(initializer_name, "zeros")
elif layer == "final_norm" and var_name == "gamma":
self.assertEqual(initializer_name, "ones")
elif layer == "final_norm" and var_name == "beta":
self.assertEqual(initializer_name, "zeros")
elif var_name == "kernel":
self.assertEqual(initializer_name, "orthogonal")
elif var_name == "bias":
self.assertEqual(initializer_name, "zeros")
else:
self.fail("Unknown variables {}".format(v))
if __name__ == "__main__":
tf.test.main()
|
import zigpy.zcl.clusters.measurement as measurement
from .. import registries
from ..const import (
REPORT_CONFIG_DEFAULT,
REPORT_CONFIG_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
)
from .base import ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.FlowMeasurement.cluster_id)
class FlowMeasurement(ZigbeeChannel):
"""Flow Measurement channel."""
REPORT_CONFIG = [{"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
measurement.IlluminanceLevelSensing.cluster_id
)
class IlluminanceLevelSensing(ZigbeeChannel):
"""Illuminance Level Sensing channel."""
REPORT_CONFIG = [{"attr": "level_status", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
measurement.IlluminanceMeasurement.cluster_id
)
class IlluminanceMeasurement(ZigbeeChannel):
"""Illuminance Measurement channel."""
REPORT_CONFIG = [{"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.BINARY_SENSOR_CLUSTERS.register(measurement.OccupancySensing.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.OccupancySensing.cluster_id)
class OccupancySensing(ZigbeeChannel):
"""Occupancy Sensing channel."""
REPORT_CONFIG = [{"attr": "occupancy", "config": REPORT_CONFIG_IMMEDIATE}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.PressureMeasurement.cluster_id)
class PressureMeasurement(ZigbeeChannel):
"""Pressure measurement channel."""
REPORT_CONFIG = [{"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.RelativeHumidity.cluster_id)
class RelativeHumidity(ZigbeeChannel):
"""Relative Humidity measurement channel."""
REPORT_CONFIG = [
{
"attr": "measured_value",
"config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50),
}
]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
measurement.TemperatureMeasurement.cluster_id
)
class TemperatureMeasurement(ZigbeeChannel):
"""Temperature measurement channel."""
REPORT_CONFIG = [
{
"attr": "measured_value",
"config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50),
}
]
|
import unittest
from homeassistant.components.kira import sensor as kira
from tests.async_mock import MagicMock
from tests.common import get_test_home_assistant
TEST_CONFIG = {kira.DOMAIN: {"sensors": [{"host": "127.0.0.1", "port": 17324}]}}
DISCOVERY_INFO = {"name": "kira", "device": "kira"}
class TestKiraSensor(unittest.TestCase):
"""Tests the Kira Sensor platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_entities(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
mock_kira = MagicMock()
self.hass.data[kira.DOMAIN] = {kira.CONF_SENSOR: {}}
self.hass.data[kira.DOMAIN][kira.CONF_SENSOR]["kira"] = mock_kira
self.addCleanup(self.hass.stop)
# pylint: disable=protected-access
def test_kira_sensor_callback(self):
"""Ensure Kira sensor properly updates its attributes from callback."""
kira.setup_platform(self.hass, TEST_CONFIG, self.add_entities, DISCOVERY_INFO)
assert len(self.DEVICES) == 1
sensor = self.DEVICES[0]
assert sensor.name == "kira"
sensor.hass = self.hass
codeName = "FAKE_CODE"
deviceName = "FAKE_DEVICE"
codeTuple = (codeName, deviceName)
sensor._update_callback(codeTuple)
assert sensor.state == codeName
assert sensor.device_state_attributes == {kira.CONF_DEVICE: deviceName}
|
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import DATA_CHARGING, DATA_LEAF, DATA_PLUGGED_IN, LeafEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up of a Nissan Leaf binary sensor."""
if discovery_info is None:
return
devices = []
for vin, datastore in hass.data[DATA_LEAF].items():
_LOGGER.debug("Adding binary_sensors for vin=%s", vin)
devices.append(LeafPluggedInSensor(datastore))
devices.append(LeafChargingSensor(datastore))
add_entities(devices, True)
class LeafPluggedInSensor(LeafEntity, BinarySensorEntity):
"""Plugged In Sensor class."""
@property
def name(self):
"""Sensor name."""
return f"{self.car.leaf.nickname} Plug Status"
@property
def is_on(self):
"""Return true if plugged in."""
return self.car.data[DATA_PLUGGED_IN]
@property
def icon(self):
"""Icon handling."""
if self.car.data[DATA_PLUGGED_IN]:
return "mdi:power-plug"
return "mdi:power-plug-off"
class LeafChargingSensor(LeafEntity, BinarySensorEntity):
"""Charging Sensor class."""
@property
def name(self):
"""Sensor name."""
return f"{self.car.leaf.nickname} Charging Status"
@property
def is_on(self):
"""Return true if charging."""
return self.car.data[DATA_CHARGING]
@property
def icon(self):
"""Icon handling."""
if self.car.data[DATA_CHARGING]:
return "mdi:flash"
return "mdi:flash-off"
|
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import CONF_UNIT_SYSTEM, CONF_UNIT_SYSTEM_IMPERIAL
from homeassistant.util import dt as dt_util, location
from tests.async_mock import patch
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
async def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
with patch.object(config, "SECTIONS", ["core"]):
assert await async_setup_component(hass, "config", {})
return await hass_ws_client(hass)
async def test_validate_config_ok(hass, hass_client):
"""Test checking config."""
with patch.object(config, "SECTIONS", ["core"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
with patch(
"homeassistant.components.config.core.async_check_ha_config_file",
return_value=None,
):
resp = await client.post("/api/config/core/check_config")
assert resp.status == 200
result = await resp.json()
assert result["result"] == "valid"
assert result["errors"] is None
with patch(
"homeassistant.components.config.core.async_check_ha_config_file",
return_value="beer",
):
resp = await client.post("/api/config/core/check_config")
assert resp.status == 200
result = await resp.json()
assert result["result"] == "invalid"
assert result["errors"] == "beer"
async def test_websocket_core_update(hass, client):
"""Test core config update websocket command."""
assert hass.config.latitude != 60
assert hass.config.longitude != 50
assert hass.config.elevation != 25
assert hass.config.location_name != "Huis"
assert hass.config.units.name != CONF_UNIT_SYSTEM_IMPERIAL
assert hass.config.time_zone.zone != "America/New_York"
assert hass.config.external_url != "https://www.example.com"
assert hass.config.internal_url != "http://example.com"
with patch("homeassistant.util.dt.set_default_time_zone") as mock_set_tz:
await client.send_json(
{
"id": 5,
"type": "config/core/update",
"latitude": 60,
"longitude": 50,
"elevation": 25,
"location_name": "Huis",
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_IMPERIAL,
"time_zone": "America/New_York",
"external_url": "https://www.example.com",
"internal_url": "http://example.local",
}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert hass.config.latitude == 60
assert hass.config.longitude == 50
assert hass.config.elevation == 25
assert hass.config.location_name == "Huis"
assert hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL
assert hass.config.external_url == "https://www.example.com"
assert hass.config.internal_url == "http://example.local"
assert len(mock_set_tz.mock_calls) == 1
assert mock_set_tz.mock_calls[0][1][0].zone == "America/New_York"
async def test_websocket_core_update_not_admin(hass, hass_ws_client, hass_admin_user):
"""Test core config fails for non admin."""
hass_admin_user.groups = []
with patch.object(config, "SECTIONS", ["core"]):
await async_setup_component(hass, "config", {})
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": "config/core/update", "latitude": 23})
msg = await client.receive_json()
assert msg["id"] == 6
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == "unauthorized"
async def test_websocket_bad_core_update(hass, client):
"""Test core config update fails with bad parameters."""
await client.send_json({"id": 7, "type": "config/core/update", "latituude": 23})
msg = await client.receive_json()
assert msg["id"] == 7
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == "invalid_format"
async def test_detect_config(hass, client):
"""Test detect config."""
with patch(
"homeassistant.util.location.async_detect_location_info",
return_value=None,
):
await client.send_json({"id": 1, "type": "config/core/detect"})
msg = await client.receive_json()
assert msg["success"] is True
assert msg["result"] == {}
async def test_detect_config_fail(hass, client):
"""Test detect config."""
with patch(
"homeassistant.util.location.async_detect_location_info",
return_value=location.LocationInfo(
ip=None,
country_code=None,
country_name=None,
region_code=None,
region_name=None,
city=None,
zip_code=None,
latitude=None,
longitude=None,
use_metric=True,
time_zone="Europe/Amsterdam",
),
):
await client.send_json({"id": 1, "type": "config/core/detect"})
msg = await client.receive_json()
assert msg["success"] is True
assert msg["result"] == {"unit_system": "metric", "time_zone": "Europe/Amsterdam"}
|
try:
from collections.abc import MutableSet
except ImportError:
from collections import MutableSet
class SetMixin(MutableSet):
"""
    Mix-in for sets. Subclasses must define __iter__, add and remove.
"""
def __len__(self):
length = 0
for item in self:
length += 1
return length
def __contains__(self, item):
for has_item in self:
if item == has_item:
return True
return False
issubset = MutableSet.__le__
issuperset = MutableSet.__ge__
union = MutableSet.__or__
intersection = MutableSet.__and__
difference = MutableSet.__sub__
symmetric_difference = MutableSet.__xor__
def copy(self):
return set(self)
def update(self, other):
self |= other
def intersection_update(self, other):
self &= other
def difference_update(self, other):
self -= other
def symmetric_difference_update(self, other):
self ^= other
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
@classmethod
def _from_iterable(cls, it):
return set(it)
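# Illustrative sketch (not part of the original module): a minimal concrete set
# built on SetMixin, showing the three operations a subclass is expected to
# define; everything else (len, contains, the set algebra) comes from the mixin.
class _ExampleListSet(SetMixin):
    def __init__(self, items=()):
        # Store members in insertion order; good enough for a demonstration.
        self._items = []
        for item in items:
            self.add(item)
    def __iter__(self):
        return iter(self._items)
    def add(self, item):
        if item not in self._items:
            self._items.append(item)
    def remove(self, item):
        try:
            self._items.remove(item)
        except ValueError:
            # SetMixin.discard expects KeyError for missing members.
            raise KeyError(item)
# For example, _ExampleListSet("aab") | _ExampleListSet("bc") yields the plain
# set {'a', 'b', 'c'}, because _from_iterable above builds a builtin set.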
|
from box.box import Box
class SBox(Box):
"""
    ShorthandBox (SBox) allows for property access of the
    `dict`, `json`, `yaml` and `toml` representations.
"""
_protected_keys = dir({}) + [
"to_dict",
"to_json",
"to_yaml",
"json",
"yaml",
"from_yaml",
"from_json",
"dict",
"toml",
"from_toml",
"to_toml",
]
@property
def dict(self):
return self.to_dict()
@property
def json(self):
return self.to_json()
@property
def yaml(self):
return self.to_yaml()
@property
def toml(self):
return self.to_toml()
def __repr__(self):
return "<ShorthandBox: {0}>".format(str(self.to_dict()))
def copy(self):
return SBox(super(SBox, self).copy())
def __copy__(self):
return SBox(super(SBox, self).copy())
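# Illustrative usage sketch (hypothetical data; the yaml/toml properties assume
# the optional YAML and toml dependencies are installed):
#   sb = SBox(hello="world")
#   sb.dict   # plain dict: {'hello': 'world'}
#   sb.json   # JSON string for the same data
#   sb.yaml   # YAML string for the same data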
|
from sandman2 import AutomapModel
class User(AutomapModel):
"""A user of the blogging application."""
__tablename__ = 'user'
def __str__(self):
return self.name
__unicode__ = __str__
class Blog(AutomapModel):
"""An online weblog."""
__tablename__ = 'blog'
def __str__(self):
return self.name
__unicode__ = __str__
class Post(AutomapModel):
"""An individual blog post."""
__tablename__ = 'post'
def __str__(self):
return self.title
__unicode__ = __str__
|
import logging
from lmnotify import LaMetricManager
import voluptuous as vol
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "lametric"
LAMETRIC_DEVICES = "LAMETRIC_DEVICES"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the LaMetricManager."""
_LOGGER.debug("Setting up LaMetric platform")
conf = config[DOMAIN]
hlmn = HassLaMetricManager(
client_id=conf[CONF_CLIENT_ID], client_secret=conf[CONF_CLIENT_SECRET]
)
devices = hlmn.manager.get_devices()
if not devices:
_LOGGER.error("No LaMetric devices found")
return False
hass.data[DOMAIN] = hlmn
for dev in devices:
_LOGGER.debug("Discovered LaMetric device: %s", dev)
return True
class HassLaMetricManager:
"""A class that encapsulated requests to the LaMetric manager."""
def __init__(self, client_id, client_secret):
"""Initialize HassLaMetricManager and connect to LaMetric."""
_LOGGER.debug("Connecting to LaMetric")
self.manager = LaMetricManager(client_id, client_secret)
self._client_id = client_id
self._client_secret = client_secret
|
import ipaddress
import re
from brother import Brother, SnmpError, UnsupportedModel
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_HOST, CONF_TYPE
from .const import DOMAIN, PRINTER_TYPES # pylint:disable=unused-import
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST, default=""): str,
vol.Optional(CONF_TYPE, default="laser"): vol.In(PRINTER_TYPES),
}
)
def host_valid(host):
"""Return True if hostname or IP address is valid."""
try:
        if ipaddress.ip_address(host).version in (4, 6):
return True
except ValueError:
disallowed = re.compile(r"[^a-zA-Z\d\-]")
return all(x and not disallowed.search(x) for x in host.split("."))
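# Expected behaviour, illustratively (hypothetical hosts): host_valid("192.168.0.10")
# and host_valid("printer.local") are truthy, while a host containing characters
# other than letters, digits, dots and hyphens is not.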
class BrotherConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Brother Printer."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self.brother = None
self.host = None
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
if not host_valid(user_input[CONF_HOST]):
raise InvalidHost()
brother = Brother(user_input[CONF_HOST])
await brother.async_update()
brother.shutdown()
await self.async_set_unique_id(brother.serial.lower())
self._abort_if_unique_id_configured()
title = f"{brother.model} {brother.serial}"
return self.async_create_entry(title=title, data=user_input)
except InvalidHost:
errors[CONF_HOST] = "wrong_host"
except ConnectionError:
errors["base"] = "cannot_connect"
except SnmpError:
errors["base"] = "snmp_error"
except UnsupportedModel:
return self.async_abort(reason="unsupported_model")
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_zeroconf(self, discovery_info):
"""Handle zeroconf discovery."""
if discovery_info is None:
return self.async_abort(reason="cannot_connect")
if not discovery_info.get("name") or not discovery_info["name"].startswith(
"Brother"
):
return self.async_abort(reason="not_brother_printer")
        # Hostname is in the format: brother.local.
self.host = discovery_info["hostname"].rstrip(".")
self.brother = Brother(self.host)
try:
await self.brother.async_update()
except (ConnectionError, SnmpError, UnsupportedModel):
return self.async_abort(reason="cannot_connect")
# Check if already configured
await self.async_set_unique_id(self.brother.serial.lower())
self._abort_if_unique_id_configured()
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context.update(
{
"title_placeholders": {
"serial_number": self.brother.serial,
"model": self.brother.model,
}
}
)
return await self.async_step_zeroconf_confirm()
async def async_step_zeroconf_confirm(self, user_input=None):
"""Handle a flow initiated by zeroconf."""
if user_input is not None:
title = f"{self.brother.model} {self.brother.serial}"
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
return self.async_create_entry(
title=title,
data={CONF_HOST: self.host, CONF_TYPE: user_input[CONF_TYPE]},
)
return self.async_show_form(
step_id="zeroconf_confirm",
data_schema=vol.Schema(
{vol.Optional(CONF_TYPE, default="laser"): vol.In(PRINTER_TYPES)}
),
description_placeholders={
"serial_number": self.brother.serial,
"model": self.brother.model,
},
)
class InvalidHost(exceptions.HomeAssistantError):
"""Error to indicate that hostname/IP address is invalid."""
|
import hangups
from common import run_example
MAX_REQUESTS = 3
MAX_EVENTS = 5
async def get_events(client, args):
_, conversation_list = await hangups.build_user_conversation_list(client)
try:
conversation = conversation_list.get(args.conversation_id)
except KeyError:
print('conversation {!r} not found'.format(args.conversation_id))
return
# Load events from the server
all_events = await _get_events(conversation)
# Load events cached in the conversation
all_events_cached = await _get_events(conversation)
assert (
[event.timestamp for event in all_events] ==
[event.timestamp for event in all_events_cached]
)
# Print the events oldest to newest
for event in all_events:
print('{} {} {!r}'.format(
event.timestamp.strftime('%c'), event.__class__.__name__,
            getattr(event, 'text', None)
))
async def _get_events(conversation):
all_events = [] # newest-first
event_id = None
for _ in range(MAX_REQUESTS):
events = await conversation.get_events(
event_id=event_id, max_events=MAX_EVENTS
)
event_id = events[0].id_ # oldest event
all_events.extend(reversed(events))
return list(reversed(all_events)) # oldest-first
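# Note added for clarity (based on the calls above): each get_events call is
# assumed to return up to MAX_EVENTS events preceding event_id, ordered oldest
# to newest, so feeding back the oldest id pages further into history; the
# final reversal restores oldest-first order for printing.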
if __name__ == '__main__':
run_example(get_events, '--conversation-id')
|
import tensornetwork as tn
import pytest
import numpy as np
import tensornetwork.linalg
import tensornetwork.linalg.node_linalg
def test_split_node(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right, _ = tn.split_node(a, left_edges, right_edges)
tn.check_correct({left, right})
np.testing.assert_allclose(left.tensor, np.zeros((2, 3, 4, 24)))
np.testing.assert_allclose(right.tensor, np.zeros((24, 5, 6)))
def test_split_node_mixed_order(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in [0, 2, 4]:
left_edges.append(a[i])
right_edges = []
for i in [1, 3]:
right_edges.append(a[i])
left, right, _ = tn.split_node(a, left_edges, right_edges)
tn.check_correct({left, right})
np.testing.assert_allclose(left.tensor, np.zeros((2, 4, 6, 15)))
np.testing.assert_allclose(right.tensor, np.zeros((15, 3, 5)))
def test_split_node_full_svd(backend):
unitary1 = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2.0)
unitary2 = np.array([[0.0, 1.0], [1.0, 0.0]])
singular_values = np.array([9.1, 7.5], dtype=np.float32)
val = np.dot(unitary1, np.dot(np.diag(singular_values), (unitary2.T)))
a = tn.Node(val, backend=backend)
e1 = a[0]
e2 = a[1]
  _, s, _, _ = tn.split_node_full_svd(a, [e1], [e2])
tn.check_correct(tn.reachable(s))
np.testing.assert_allclose(s.tensor, np.diag([9.1, 7.5]), rtol=1e-5)
def test_svd_consistency(backend):
if backend == "pytorch":
pytest.skip("Complex numbers currently not supported in PyTorch")
original_tensor = np.array(
[[1.0, 2.0j, 3.0, 4.0], [5.0, 6.0 + 1.0j, 3.0j, 2.0 + 1.0j]],
dtype=np.complex64)
node = tn.Node(original_tensor, backend=backend)
u, vh, _ = tn.split_node(node, [node[0]], [node[1]])
final_node = tn.contract_between(u, vh)
np.testing.assert_allclose(final_node.tensor, original_tensor, rtol=1e-6)
def test_svd_consistency_symmetric_real_matrix(backend):
original_tensor = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 3.0, 2.0]],
dtype=np.float64)
node = tn.Node(original_tensor, backend=backend)
u, vh, _ = tn.split_node(node, [node[0]], [node[1]])
final_node = tn.contract_between(u, vh)
np.testing.assert_allclose(final_node.tensor, original_tensor, rtol=1e-6)
def test_split_node_full_svd_names(backend):
a = tn.Node(np.random.rand(10, 10), backend=backend)
e1 = a[0]
e2 = a[1]
  left, s, right, _ = tn.split_node_full_svd(
a, [e1], [e2],
left_name='left',
middle_name='center',
right_name='right',
left_edge_name='left_edge',
right_edge_name='right_edge')
assert left.name == 'left'
assert s.name == 'center'
assert right.name == 'right'
assert left.edges[-1].name == 'left_edge'
assert s[0].name == 'left_edge'
assert s[1].name == 'right_edge'
assert right.edges[0].name == 'right_edge'
def test_split_node_rq_names(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_rq(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
def test_split_node_qr_names(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_qr(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
def test_split_node_names(backend):
a = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right, _ = tn.split_node(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
def test_split_node_rq(backend):
a = tn.Node(np.random.rand(2, 3, 4, 5, 6), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, _ = tn.split_node_rq(a, left_edges, right_edges)
tn.check_correct(tn.reachable(left))
np.testing.assert_allclose(a.tensor, tn.contract(left[3]).tensor)
def test_split_node_qr(backend):
a = tn.Node(np.random.rand(2, 3, 4, 5, 6), backend=backend)
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, _ = tn.split_node_qr(a, left_edges, right_edges)
tn.check_correct(tn.reachable(left))
np.testing.assert_allclose(a.tensor, tn.contract(left[3]).tensor)
def test_split_node_rq_unitarity_complex(backend):
if backend == "pytorch":
pytest.skip("Complex numbers currently not supported in PyTorch")
if backend == "jax":
pytest.skip("Complex QR crashes jax")
a = tn.Node(np.random.rand(3, 3) + 1j * np.random.rand(3, 3), backend=backend)
_, q = tn.split_node_rq(a, [a[0]], [a[1]])
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.linalg.node_linalg.conj(q)
n1[1] ^ n2[1]
u1 = tn.contract_between(n1, n2)
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.linalg.node_linalg.conj(q)
n2[0] ^ n1[0]
u2 = tn.contract_between(n1, n2)
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_rq_unitarity_float(backend):
a = tn.Node(np.random.rand(3, 3), backend=backend)
_, q = tn.split_node_rq(a, [a[0]], [a[1]])
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.linalg.node_linalg.conj(q)
n1[1] ^ n2[1]
u1 = tn.contract_between(n1, n2)
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.Node(q.tensor, backend=backend)
n2[0] ^ n1[0]
u2 = tn.contract_between(n1, n2)
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_qr_unitarity_complex(backend):
if backend == "pytorch":
pytest.skip("Complex numbers currently not supported in PyTorch")
if backend == "jax":
pytest.skip("Complex QR crashes jax")
a = tn.Node(np.random.rand(3, 3) + 1j * np.random.rand(3, 3), backend=backend)
q, _ = tn.split_node_qr(a, [a[0]], [a[1]])
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.linalg.node_linalg.conj(q)
n1[1] ^ n2[1]
u1 = tn.contract_between(n1, n2)
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.linalg.node_linalg.conj(q)
n2[0] ^ n1[0]
u2 = tn.contract_between(n1, n2)
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
def test_split_node_qr_unitarity_float(backend):
a = tn.Node(np.random.rand(3, 3), backend=backend)
q, _ = tn.split_node_qr(a, [a[0]], [a[1]])
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.linalg.node_linalg.conj(q)
n1[1] ^ n2[1]
u1 = tn.contract_between(n1, n2)
n1 = tn.Node(q.tensor, backend=backend)
n2 = tn.Node(q.tensor, backend=backend)
n2[0] ^ n1[0]
u2 = tn.contract_between(n1, n2)
np.testing.assert_almost_equal(u1.tensor, np.eye(3))
np.testing.assert_almost_equal(u2.tensor, np.eye(3))
|
from flexx import flx
class Tester(flx.Widget):
def init(self):
super().init()
with flx.VBox():
flx.Label(text='You should see 5 pairs of buttons')
with flx.HFix(): # Use minsize in CSS of button widget
with flx.GroupWidget(title='asdas'):
with flx.HFix():
flx.Button(text='foo')
flx.Button(text='bar')
with flx.HFix(minsize=50): # Set minsize prop on container
flx.Button(text='foo')
flx.Button(text='bar')
with flx.HFix(): # Set minsize prop on widget
flx.Button(text='foo', minsize=50)
flx.Button(text='bar')
with flx.HFix(): # Old school setting of style
flx.Button(text='foo', style='min-height:50px;')
                flx.Button(text='bar')
with flx.Widget(): # Singleton widgets (e.g. custom classes)
with flx.HFix():
flx.Button(text='foo')
flx.Button(text='bar')
flx.Widget(flex=1, style='background:#f99;') # spacer
if __name__ == '__main__':
m = flx.launch(Tester, 'firefox')
flx.run()
|
from pygal.graph.graph import Graph
from pygal.view import HorizontalLogView, HorizontalView
class HorizontalGraph(Graph):
"""Horizontal graph mixin"""
def __init__(self, *args, **kwargs):
"""Set the horizontal flag to True"""
self.horizontal = True
super(HorizontalGraph, self).__init__(*args, **kwargs)
def _post_compute(self):
"""After computations transpose labels"""
self._x_labels, self._y_labels = self._y_labels, self._x_labels
self._x_labels_major, self._y_labels_major = (
self._y_labels_major, self._x_labels_major
)
self._x_2nd_labels, self._y_2nd_labels = (
self._y_2nd_labels, self._x_2nd_labels
)
self.show_y_guides, self.show_x_guides = (
self.show_x_guides, self.show_y_guides
)
def _axes(self):
"""Set the _force_vertical flag when rendering axes"""
self.view._force_vertical = True
super(HorizontalGraph, self)._axes()
self.view._force_vertical = False
def _set_view(self):
"""Assign a horizontal view to current graph"""
if self.logarithmic:
view_class = HorizontalLogView
else:
view_class = HorizontalView
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
)
def _get_x_label(self, i):
"""Convenience function to get the x_label of a value index"""
if not self.x_labels or not self._y_labels or len(self._y_labels) <= i:
return
return self._y_labels[i][0]
|
from typing import Any, Dict
from dynalite_devices_lib import const as dyn_const
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, CONF_ROOM, CONF_TYPE
from .const import (
ACTIVE_INIT,
ACTIVE_OFF,
ACTIVE_ON,
CONF_ACTIVE,
CONF_AREA,
CONF_AUTO_DISCOVER,
CONF_CHANNEL,
CONF_CHANNEL_COVER,
CONF_CLOSE_PRESET,
CONF_DEFAULT,
CONF_DEVICE_CLASS,
CONF_DURATION,
CONF_FADE,
CONF_LEVEL,
CONF_NO_DEFAULT,
CONF_OPEN_PRESET,
CONF_POLL_TIMER,
CONF_PRESET,
CONF_ROOM_OFF,
CONF_ROOM_ON,
CONF_STOP_PRESET,
CONF_TEMPLATE,
CONF_TILT_TIME,
CONF_TIME_COVER,
)
ACTIVE_MAP = {
ACTIVE_INIT: dyn_const.ACTIVE_INIT,
False: dyn_const.ACTIVE_OFF,
ACTIVE_OFF: dyn_const.ACTIVE_OFF,
ACTIVE_ON: dyn_const.ACTIVE_ON,
True: dyn_const.ACTIVE_ON,
}
TEMPLATE_MAP = {
CONF_ROOM: dyn_const.CONF_ROOM,
CONF_TIME_COVER: dyn_const.CONF_TIME_COVER,
}
def convert_with_map(config, conf_map):
"""Create the initial converted map with just the basic key:value pairs updated."""
result = {}
for conf in conf_map:
if conf in config:
result[conf_map[conf]] = config[conf]
return result
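# Illustrative example (hypothetical keys): with conf_map={"name": "lib_name"} and
# config={"name": "Office", "unrelated": 1}, convert_with_map returns
# {"lib_name": "Office"} - only keys listed in conf_map are carried over, renamed.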
def convert_channel(config: Dict[str, Any]) -> Dict[str, Any]:
"""Convert the config for a channel."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_FADE: dyn_const.CONF_FADE,
CONF_TYPE: dyn_const.CONF_CHANNEL_TYPE,
}
return convert_with_map(config, my_map)
def convert_preset(config: Dict[str, Any]) -> Dict[str, Any]:
"""Convert the config for a preset."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_FADE: dyn_const.CONF_FADE,
CONF_LEVEL: dyn_const.CONF_LEVEL,
}
return convert_with_map(config, my_map)
def convert_area(config: Dict[str, Any]) -> Dict[str, Any]:
"""Convert the config for an area."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_FADE: dyn_const.CONF_FADE,
CONF_NO_DEFAULT: dyn_const.CONF_NO_DEFAULT,
CONF_ROOM_ON: dyn_const.CONF_ROOM_ON,
CONF_ROOM_OFF: dyn_const.CONF_ROOM_OFF,
CONF_CHANNEL_COVER: dyn_const.CONF_CHANNEL_COVER,
CONF_DEVICE_CLASS: dyn_const.CONF_DEVICE_CLASS,
CONF_OPEN_PRESET: dyn_const.CONF_OPEN_PRESET,
CONF_CLOSE_PRESET: dyn_const.CONF_CLOSE_PRESET,
CONF_STOP_PRESET: dyn_const.CONF_STOP_PRESET,
CONF_DURATION: dyn_const.CONF_DURATION,
CONF_TILT_TIME: dyn_const.CONF_TILT_TIME,
}
result = convert_with_map(config, my_map)
if CONF_CHANNEL in config:
result[dyn_const.CONF_CHANNEL] = {
channel: convert_channel(channel_conf)
for (channel, channel_conf) in config[CONF_CHANNEL].items()
}
if CONF_PRESET in config:
result[dyn_const.CONF_PRESET] = {
preset: convert_preset(preset_conf)
for (preset, preset_conf) in config[CONF_PRESET].items()
}
if CONF_TEMPLATE in config:
result[dyn_const.CONF_TEMPLATE] = TEMPLATE_MAP[config[CONF_TEMPLATE]]
return result
def convert_default(config: Dict[str, Any]) -> Dict[str, Any]:
"""Convert the config for the platform defaults."""
return convert_with_map(config, {CONF_FADE: dyn_const.CONF_FADE})
def convert_template(config: Dict[str, Any]) -> Dict[str, Any]:
"""Convert the config for a template."""
my_map = {
CONF_ROOM_ON: dyn_const.CONF_ROOM_ON,
CONF_ROOM_OFF: dyn_const.CONF_ROOM_OFF,
CONF_CHANNEL_COVER: dyn_const.CONF_CHANNEL_COVER,
CONF_DEVICE_CLASS: dyn_const.CONF_DEVICE_CLASS,
CONF_OPEN_PRESET: dyn_const.CONF_OPEN_PRESET,
CONF_CLOSE_PRESET: dyn_const.CONF_CLOSE_PRESET,
CONF_STOP_PRESET: dyn_const.CONF_STOP_PRESET,
CONF_DURATION: dyn_const.CONF_DURATION,
CONF_TILT_TIME: dyn_const.CONF_TILT_TIME,
}
return convert_with_map(config, my_map)
def convert_config(config: Dict[str, Any]) -> Dict[str, Any]:
"""Convert a config dict by replacing component consts with library consts."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_HOST: dyn_const.CONF_HOST,
CONF_PORT: dyn_const.CONF_PORT,
CONF_AUTO_DISCOVER: dyn_const.CONF_AUTO_DISCOVER,
CONF_POLL_TIMER: dyn_const.CONF_POLL_TIMER,
}
result = convert_with_map(config, my_map)
if CONF_AREA in config:
result[dyn_const.CONF_AREA] = {
area: convert_area(area_conf)
for (area, area_conf) in config[CONF_AREA].items()
}
if CONF_DEFAULT in config:
result[dyn_const.CONF_DEFAULT] = convert_default(config[CONF_DEFAULT])
if CONF_ACTIVE in config:
result[dyn_const.CONF_ACTIVE] = ACTIVE_MAP[config[CONF_ACTIVE]]
if CONF_PRESET in config:
result[dyn_const.CONF_PRESET] = {
preset: convert_preset(preset_conf)
for (preset, preset_conf) in config[CONF_PRESET].items()
}
if CONF_TEMPLATE in config:
result[dyn_const.CONF_TEMPLATE] = {
TEMPLATE_MAP[template]: convert_template(template_conf)
for (template, template_conf) in config[CONF_TEMPLATE].items()
}
return result
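# Illustrative end-to-end sketch (hypothetical values): convert_config(
#     {CONF_HOST: "192.168.0.101", CONF_AREA: {"1": {CONF_NAME: "Hall"}}})
# yields the same structure keyed by the dyn_const.* names, with each nested
# area/preset/channel dict converted by the helpers above.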
|
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_CODE,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from . import ATTR_CODE_ARM_REQUIRED, DOMAIN
from .const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
ACTION_TYPES = {"arm_away", "arm_home", "arm_night", "disarm", "trigger"}
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(ACTION_TYPES),
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
vol.Optional(CONF_CODE): cv.string,
}
)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions for Alarm control panel devices."""
registry = await entity_registry.async_get_registry(hass)
actions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
        # We need a state or else we can't read the supported features.
if state is None:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
# Add actions for each entity that belongs to this integration
if supported_features & SUPPORT_ALARM_ARM_AWAY:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "arm_away",
}
)
if supported_features & SUPPORT_ALARM_ARM_HOME:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "arm_home",
}
)
if supported_features & SUPPORT_ALARM_ARM_NIGHT:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "arm_night",
}
)
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "disarm",
}
)
if supported_features & SUPPORT_ALARM_TRIGGER:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "trigger",
}
)
return actions
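# Illustrative shape of one returned action (entity id is hypothetical):
#   {CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN,
#    CONF_ENTITY_ID: "alarm_control_panel.front_door", CONF_TYPE: "arm_away"}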
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
) -> None:
"""Execute a device action."""
config = ACTION_SCHEMA(config)
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
if CONF_CODE in config:
service_data[ATTR_CODE] = config[CONF_CODE]
if config[CONF_TYPE] == "arm_away":
service = SERVICE_ALARM_ARM_AWAY
elif config[CONF_TYPE] == "arm_home":
service = SERVICE_ALARM_ARM_HOME
elif config[CONF_TYPE] == "arm_night":
service = SERVICE_ALARM_ARM_NIGHT
elif config[CONF_TYPE] == "disarm":
service = SERVICE_ALARM_DISARM
elif config[CONF_TYPE] == "trigger":
service = SERVICE_ALARM_TRIGGER
await hass.services.async_call(
DOMAIN, service, service_data, blocking=True, context=context
)
async def async_get_action_capabilities(hass, config):
"""List action capabilities."""
state = hass.states.get(config[CONF_ENTITY_ID])
code_required = state.attributes.get(ATTR_CODE_ARM_REQUIRED) if state else False
if config[CONF_TYPE] == "trigger" or (
config[CONF_TYPE] != "disarm" and not code_required
):
return {}
return {"extra_fields": vol.Schema({vol.Optional(CONF_CODE): str})}
|