from datetime import timedelta
import logging
from typing import Optional
from geojson_client.generic_feed import GenericFeedManager
import voluptuous as vol
from homeassistant.components.geo_location import PLATFORM_SCHEMA, GeolocationEvent
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_URL,
EVENT_HOMEASSISTANT_START,
LENGTH_KILOMETERS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
ATTR_EXTERNAL_ID = "external_id"
DEFAULT_RADIUS_IN_KM = 20.0
SCAN_INTERVAL = timedelta(minutes=5)
SOURCE = "geo_json_events"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
}
)
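# A minimal sketch of the YAML this platform schema accepts (the feed URL and
# coordinates below are placeholders, not a real configuration):
#
#   geo_location:
#     - platform: geo_json_events
#       url: https://example.com/earthquakes.geojson
#       radius: 25.0
#       latitude: -33.86
#       longitude: 151.21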
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GeoJSON Events platform."""
url = config[CONF_URL]
scan_interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
coordinates = (
config.get(CONF_LATITUDE, hass.config.latitude),
config.get(CONF_LONGITUDE, hass.config.longitude),
)
radius_in_km = config[CONF_RADIUS]
# Initialize the entity manager.
feed = GeoJsonFeedEntityManager(
hass, add_entities, scan_interval, coordinates, url, radius_in_km
)
def start_feed_manager(event):
"""Start feed manager."""
feed.startup()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_feed_manager)
class GeoJsonFeedEntityManager:
"""Feed Entity Manager for GeoJSON feeds."""
def __init__(
self, hass, add_entities, scan_interval, coordinates, url, radius_in_km
):
"""Initialize the GeoJSON Feed Manager."""
self._hass = hass
self._feed_manager = GenericFeedManager(
self._generate_entity,
self._update_entity,
self._remove_entity,
coordinates,
url,
filter_radius=radius_in_km,
)
self._add_entities = add_entities
self._scan_interval = scan_interval
def startup(self):
"""Start up this manager."""
self._feed_manager.update()
self._init_regular_updates()
def _init_regular_updates(self):
"""Schedule regular updates at the specified interval."""
track_time_interval(
self._hass, lambda now: self._feed_manager.update(), self._scan_interval
)
def get_entry(self, external_id):
"""Get feed entry by external id."""
return self._feed_manager.feed_entries.get(external_id)
def _generate_entity(self, external_id):
"""Generate new entity."""
new_entity = GeoJsonLocationEvent(self, external_id)
# Add new entities to HA.
self._add_entities([new_entity], True)
def _update_entity(self, external_id):
"""Update entity."""
dispatcher_send(self._hass, f"geo_json_events_update_{external_id}")
def _remove_entity(self, external_id):
"""Remove entity."""
dispatcher_send(self._hass, f"geo_json_events_delete_{external_id}")
class GeoJsonLocationEvent(GeolocationEvent):
"""This represents an external event with GeoJSON data."""
def __init__(self, feed_manager, external_id):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._external_id = external_id
self._name = None
self._distance = None
self._latitude = None
self._longitude = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass,
f"geo_json_events_delete_{self._external_id}",
self._delete_callback,
)
self._remove_signal_update = async_dispatcher_connect(
self.hass,
f"geo_json_events_update_{self._external_id}",
self._update_callback,
)
@callback
def _delete_callback(self):
"""Remove this entity."""
self._remove_signal_delete()
self._remove_signal_update()
self.hass.async_create_task(self.async_remove())
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for GeoJSON location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
self._name = feed_entry.title
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return self._name
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return LENGTH_KILOMETERS
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if not self._external_id:
return {}
return {ATTR_EXTERNAL_ID: self._external_id}
|
import sys
from xmlrpc.client import (
loads as xmlrpc_loads, dumps as xmlrpc_dumps,
Fault as XMLRPCFault
)
import cherrypy
from cherrypy._cpcompat import ntob
def process_body():
"""Return (params, method) from request body."""
try:
return xmlrpc_loads(cherrypy.request.body.read())
except Exception:
return ('ERROR PARAMS', ), 'ERRORMETHOD'
def patched_path(path):
"""Return 'path', doctored for RPC."""
if not path.endswith('/'):
path += '/'
if path.startswith('/RPC2/'):
# strip the first /rpc2
path = path[5:]
return path
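# Worked examples for patched_path (values derived from the logic above, not
# from the CherryPy test suite):
#   patched_path('/RPC2')     -> '/'       (trailing '/' added, '/RPC2' stripped)
#   patched_path('/RPC2/foo') -> '/foo/'
#   patched_path('/other')    -> '/other/' (non-RPC2 paths only gain the slash)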
def _set_response(body):
"""Set up HTTP status, headers and body within CherryPy."""
# The XML-RPC spec (http://www.xmlrpc.com/spec) says:
# "Unless there's a lower-level error, always return 200 OK."
# Since Python's xmlrpc_client interprets a non-200 response
# as a "Protocol Error", we'll just return 200 every time.
response = cherrypy.response
response.status = '200 OK'
response.body = ntob(body, 'utf-8')
response.headers['Content-Type'] = 'text/xml'
response.headers['Content-Length'] = len(body)
def respond(body, encoding='utf-8', allow_none=0):
"""Construct HTTP response body."""
if not isinstance(body, XMLRPCFault):
body = (body,)
_set_response(
xmlrpc_dumps(
body, methodresponse=1,
encoding=encoding,
allow_none=allow_none
)
)
def on_error(*args, **kwargs):
"""Construct HTTP response body for an error response."""
body = str(sys.exc_info()[1])
_set_response(xmlrpc_dumps(XMLRPCFault(1, body)))
|
import logging
import re
from weakref import WeakValueDictionary
import six
__all__ = ['get_arctic_lib']
logger = logging.getLogger(__name__)
# Application environment variables
arctic_cache = WeakValueDictionary()
CONNECTION_STR = re.compile(r"(^\w+\.?\w+)@([^\s:]+:?\w+)$")
def get_arctic_lib(connection_string, **kwargs):
"""
Returns a mongo library for the given connection string
Parameters
----------
connection_string: `str`
Format must be one of the following:
library@trading for known mongo servers
library@hostname:port
Returns
-------
Arctic library
"""
m = CONNECTION_STR.match(connection_string)
if not m:
raise ValueError("connection string incorrectly formed: %s" % connection_string)
library, host = m.group(1), m.group(2)
return _get_arctic(host, **kwargs)[library]
def _get_arctic(instance, **kwargs):
# Consider any kwargs passed to the Arctic as discriminators for the cache
key = instance, frozenset(six.iteritems(kwargs))
# Don't create lots of Arctic instances
arctic = arctic_cache.get(key, None)
if not arctic:
# Create the instance. Note that Arctic connects
# lazily, so this doesn't open a connection on creation.
from .arctic import Arctic
arctic = Arctic(instance, **kwargs)
arctic_cache[key] = arctic
return arctic
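# Usage sketch for get_arctic_lib (assumes a reachable mongo host named
# "research" and an existing library "jblackburn.stuff"; both names are
# illustrative):
#
#   lib = get_arctic_lib('jblackburn.stuff@research')
#   lib = get_arctic_lib('jblackburn.stuff@localhost:27017')
#
# Both strings match CONNECTION_STR: group(1) is the library name and
# group(2) is the host, optionally with a port.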
|
import os
import posixpath
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
AWS_CREDENTIAL_LOCATION = '.aws'
DEFAULT_AWS_REGION = 'us-east-1'
class S3Service(object_storage_service.ObjectStorageService):
"""Interface to Amazon S3."""
STORAGE_NAME = aws.CLOUD
def PrepareService(self, location):
self.region = location or DEFAULT_AWS_REGION
def MakeBucket(self, bucket_name, raise_on_failure=True):
command = [
'aws', 's3', 'mb',
's3://%s' % bucket_name,
'--region=%s' % self.region
]
_, stderr, ret_code = vm_util.IssueCommand(command, raise_on_failure=False)
if ret_code and raise_on_failure:
raise errors.Benchmarks.BucketCreationError(stderr)
# Tag the bucket with the persistent timeout flag so that buckets can
# optionally stick around after PKB runs.
default_tags = util.MakeFormattedDefaultTags(
timeout_minutes=max(FLAGS.timeout_minutes,
FLAGS.persistent_timeout_minutes))
tag_set = ','.join('{%s}' % tag for tag in default_tags)
vm_util.IssueRetryableCommand(
['aws', 's3api', 'put-bucket-tagging',
'--bucket', bucket_name,
'--tagging', 'TagSet=[%s]' % tag_set,
'--region=%s' % self.region])
def Copy(self, src_url, dst_url, recursive=False):
"""See base class."""
cmd = ['aws', 's3', 'cp', '--region', self.region]
if recursive:
cmd.append('--recursive')
# Fix cp to mimic gsutil behavior
dst_url = os.path.join(dst_url, os.path.basename(src_url))
cmd += [src_url, dst_url]
vm_util.IssueCommand(cmd)
def CopyToBucket(self, src_path, bucket, object_path):
"""See base class."""
dst_url = self.MakeRemoteCliDownloadUrl(bucket, object_path)
vm_util.IssueCommand(['aws', 's3', 'cp', src_path, dst_url,
'--region', self.region])
def MakeRemoteCliDownloadUrl(self, bucket, object_path):
"""See base class."""
path = posixpath.join(bucket, object_path)
return 's3://' + path
def GenerateCliDownloadFileCommand(self, src_url, local_path):
"""See base class."""
return 'aws s3 cp "%s" "%s" --region=%s' % (
src_url, local_path, self.region)
def List(self, bucket):
"""See base class."""
stdout, _, _ = vm_util.IssueCommand(
['aws', 's3', 'ls', bucket, '--region', self.region])
return stdout
def ListTopLevelSubfolders(self, bucket):
"""Lists the top level folders (not files) in a bucket.
Each result that is a folder has "PRE" in front of the name (meaning
prefix), eg. "PRE customer/", so that part is removed from each line. When
there's more than one result, splitting on the newline returns a final blank
row, so blank values are skipped.
Args:
bucket: Name of the bucket to list the top level subfolders of.
Returns:
A list of top level subfolder names. Can be empty if there are no folders.
"""
return [
obj.split('PRE ')[1].strip().replace('/', '')
for obj in self.List(bucket).split('\n')
if obj and obj.endswith('/')
]
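# Worked example of the parsing above (the `aws s3 ls` output lines are
# illustrative):
#   "                           PRE customer/"   -> "customer"
#   "                           PRE logs/"       -> "logs"
#   "2020-01-01 00:00:00   123 report.csv"       -> skipped (no trailing '/')
#   ""                                           -> skipped (blank final row)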
@vm_util.Retry()
def DeleteBucket(self, bucket):
"""See base class."""
def _suppress_failure(stdout, stderr, retcode):
"""Suppresses failure when bucket does not exist."""
del stdout # unused
if retcode and 'NoSuchBucket' in stderr:
return True
return False
vm_util.IssueCommand(
['aws', 's3', 'rb',
's3://%s' % bucket,
'--region', self.region,
'--force'], # --force deletes even if bucket contains objects.
suppress_failure=_suppress_failure)
def EmptyBucket(self, bucket):
vm_util.IssueCommand(
['aws', 's3', 'rm',
's3://%s' % bucket,
'--region', self.region,
'--recursive'])
def PrepareVM(self, vm):
vm.Install('awscli')
vm.Install('boto3')
vm.PushFile(
object_storage_service.FindCredentialFile('~/' +
AWS_CREDENTIAL_LOCATION),
AWS_CREDENTIAL_LOCATION)
vm.PushFile(object_storage_service.FindBotoFile(),
object_storage_service.DEFAULT_BOTO_LOCATION)
def CleanupVM(self, vm):
vm.Uninstall('awscli')
def CLIUploadDirectory(self, vm, directory, file_names, bucket):
return vm.RemoteCommand(
'time aws s3 sync %s s3://%s/' % (directory, bucket))
def CLIDownloadBucket(self, vm, bucket, objects, dest):
return vm.RemoteCommand(
'time aws s3 sync s3://%s/ %s' % (bucket, dest))
def Metadata(self, vm):
return {
object_storage_service.BOTO_LIB_VERSION:
linux_packages.GetPipPackageVersion(vm, 'boto3')
}
def APIScriptArgs(self):
return ['--region=' + self.region]
@classmethod
def APIScriptFiles(cls):
return ['s3.py']
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db, compare_type=True)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
if __name__ == '__main__':
manager.run()
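# Typical Flask-Script/Flask-Migrate workflow for the manager defined above
# (assumes this module is saved as app.py; a sketch, not project documentation):
#
#   python app.py db init      # create the migrations/ directory
#   python app.py db migrate   # autogenerate a revision from the User model
#   python app.py db upgrade   # apply it to sqlite:///app.db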
|
import logging
import requests
from homeassistant.components import ios
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import HTTP_CREATED, HTTP_TOO_MANY_REQUESTS
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
PUSH_URL = "https://ios-push.home-assistant.io/push"
# pylint: disable=invalid-name
def log_rate_limits(hass, target, resp, level=20):
"""Output rate limit log line at given level."""
rate_limits = resp["rateLimits"]
resetsAt = dt_util.parse_datetime(rate_limits["resetsAt"])
resetsAtTime = resetsAt - dt_util.utcnow()
rate_limit_msg = (
"iOS push notification rate limits for %s: "
"%d sent, %d allowed, %d errors, "
"resets in %s"
)
_LOGGER.log(
level,
rate_limit_msg,
ios.device_name_for_push_id(hass, target),
rate_limits["successful"],
rate_limits["maximum"],
rate_limits["errors"],
str(resetsAtTime).split(".")[0],
)
def get_service(hass, config, discovery_info=None):
"""Get the iOS notification service."""
if "notify.ios" not in hass.config.components:
# Need this to enable requirements checking in the app.
hass.config.components.add("notify.ios")
if not ios.devices_with_push(hass):
return None
return iOSNotificationService()
class iOSNotificationService(BaseNotificationService):
"""Implement the notification service for iOS."""
def __init__(self):
"""Initialize the service."""
@property
def targets(self):
"""Return a dictionary of registered targets."""
return ios.devices_with_push(self.hass)
def send_message(self, message="", **kwargs):
"""Send a message to the Lambda APNS gateway."""
data = {ATTR_MESSAGE: message}
if kwargs.get(ATTR_TITLE) is not None:
# Remove default title from notifications.
if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT:
data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = ios.enabled_push_ids(self.hass)
if kwargs.get(ATTR_DATA) is not None:
data[ATTR_DATA] = kwargs.get(ATTR_DATA)
for target in targets:
if target not in ios.enabled_push_ids(self.hass):
_LOGGER.error("The target (%s) does not exist in .ios.conf", targets)
return
data[ATTR_TARGET] = target
req = requests.post(PUSH_URL, json=data, timeout=10)
if req.status_code != HTTP_CREATED:
fallback_error = req.json().get("errorMessage", "Unknown error")
fallback_message = (
f"Internal server error, please try again later: {fallback_error}"
)
message = req.json().get("message", fallback_message)
if req.status_code == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(message)
log_rate_limits(self.hass, target, req.json(), 30)
else:
_LOGGER.error(message)
else:
log_rate_limits(self.hass, target, req.json())
|
import re
from coverage import env
class TempliteSyntaxError(ValueError):
"""Raised when a template has a syntax error."""
pass
class TempliteValueError(ValueError):
"""Raised when an expression won't evaluate in a template."""
pass
class CodeBuilder(object):
"""Build source code conveniently."""
def __init__(self, indent=0):
self.code = []
self.indent_level = indent
def __str__(self):
return "".join(str(c) for c in self.code)
def add_line(self, line):
"""Add a line of source to the code.
Indentation and newline will be added for you, don't provide them.
"""
self.code.extend([" " * self.indent_level, line, "\n"])
def add_section(self):
"""Add a section, a sub-CodeBuilder."""
section = CodeBuilder(self.indent_level)
self.code.append(section)
return section
INDENT_STEP = 4 # PEP8 says so!
def indent(self):
"""Increase the current indent for following lines."""
self.indent_level += self.INDENT_STEP
def dedent(self):
"""Decrease the current indent for following lines."""
self.indent_level -= self.INDENT_STEP
def get_globals(self):
"""Execute the code, and return a dict of globals it defines."""
# A check that the caller really finished all the blocks they started.
assert self.indent_level == 0
# Get the Python source as a single string.
python_source = str(self)
# Execute the source, defining globals, and return them.
global_namespace = {}
exec(python_source, global_namespace)
return global_namespace
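# Tiny worked example of CodeBuilder (illustrative, not from the coverage.py
# documentation):
#
#   code = CodeBuilder()
#   code.add_line("def hello(name):")
#   code.indent()
#   code.add_line("return 'Hello ' + name")
#   code.dedent()
#   hello = code.get_globals()['hello']
#   hello('world')   # -> 'Hello world'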
class Templite(object):
"""A simple template renderer, for a nano-subset of Django syntax.
Supported constructs are extended variable access::
{{var.modifier.modifier|filter|filter}}
loops::
{% for var in list %}...{% endfor %}
and ifs::
{% if var %}...{% endif %}
Comments are within curly-hash markers::
{# This will be ignored #}
Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped
and joined. Be careful, this could join words together!
Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
which will collapse the whitespace following the tag.
Construct a Templite with the template text, then use `render` against a
dictionary context to create a finished string::
templite = Templite('''
<h1>Hello {{name|upper}}!</h1>
{% for topic in topics %}
<p>You are interested in {{topic}}.</p>
{% endfor %}
''',
{'upper': str.upper},
)
text = templite.render({
'name': "Ned",
'topics': ['Python', 'Geometry', 'Juggling'],
})
"""
def __init__(self, text, *contexts):
"""Construct a Templite with the given `text`.
`contexts` are dictionaries of values to use for future renderings.
These are good for filters and global values.
"""
self.context = {}
for context in contexts:
self.context.update(context)
self.all_vars = set()
self.loop_vars = set()
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
code = CodeBuilder()
code.add_line("def render_function(context, do_dots):")
code.indent()
vars_code = code.add_section()
code.add_line("result = []")
code.add_line("append_result = result.append")
code.add_line("extend_result = result.extend")
if env.PY2:
code.add_line("to_str = unicode")
else:
code.add_line("to_str = str")
buffered = []
def flush_output():
"""Force `buffered` to the code builder."""
if len(buffered) == 1:
code.add_line("append_result(%s)" % buffered[0])
elif len(buffered) > 1:
code.add_line("extend_result([%s])" % ", ".join(buffered))
del buffered[:]
ops_stack = []
# Split the text to form a list of tokens.
tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
squash = in_joined = False
for token in tokens:
if token.startswith('{'):
start, end = 2, -2
squash = (token[-3] == '-')
if squash:
end = -3
if token.startswith('{#'):
# Comment: ignore it and move on.
continue
elif token.startswith('{{'):
# An expression to evaluate.
expr = self._expr_code(token[start:end].strip())
buffered.append("to_str(%s)" % expr)
else:
# token.startswith('{%')
# Action tag: split into words and parse further.
flush_output()
words = token[start:end].strip().split()
if words[0] == 'if':
# An if statement: evaluate the expression to determine if.
if len(words) != 2:
self._syntax_error("Don't understand if", token)
ops_stack.append('if')
code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
elif words[0] == 'for':
# A loop: iterate over expression result.
if len(words) != 4 or words[2] != 'in':
self._syntax_error("Don't understand for", token)
ops_stack.append('for')
self._variable(words[1], self.loop_vars)
code.add_line(
"for c_%s in %s:" % (
words[1],
self._expr_code(words[3])
)
)
code.indent()
elif words[0] == 'joined':
ops_stack.append('joined')
in_joined = True
elif words[0].startswith('end'):
# Endsomething. Pop the ops stack.
if len(words) != 1:
self._syntax_error("Don't understand end", token)
end_what = words[0][3:]
if not ops_stack:
self._syntax_error("Too many ends", token)
start_what = ops_stack.pop()
if start_what != end_what:
self._syntax_error("Mismatched end tag", end_what)
if end_what == 'joined':
in_joined = False
else:
code.dedent()
else:
self._syntax_error("Don't understand tag", words[0])
else:
# Literal content. If it isn't empty, output it.
if in_joined:
token = re.sub(r"\s*\n\s*", "", token.strip())
elif squash:
token = token.lstrip()
if token:
buffered.append(repr(token))
if ops_stack:
self._syntax_error("Unmatched action tag", ops_stack[-1])
flush_output()
for var_name in self.all_vars - self.loop_vars:
vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
code.add_line('return "".join(result)')
code.dedent()
self._render_function = code.get_globals()['render_function']
def _expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
code = self._expr_code(pipes[0])
for func in pipes[1:]:
self._variable(func, self.all_vars)
code = "c_%s(%s)" % (func, code)
elif "." in expr:
dots = expr.split(".")
code = self._expr_code(dots[0])
args = ", ".join(repr(d) for d in dots[1:])
code = "do_dots(%s, %s)" % (code, args)
else:
self._variable(expr, self.all_vars)
code = "c_%s" % expr
return code
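# Illustrative translations performed by _expr_code (derived from the code
# above rather than from external documentation):
#   "name"            -> "c_name"
#   "name|upper"      -> "c_upper(c_name)"
#   "user.name.first" -> "do_dots(c_user, 'name', 'first')"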
def _syntax_error(self, msg, thing):
"""Raise a syntax error using `msg`, and showing `thing`."""
raise TempliteSyntaxError("%s: %r" % (msg, thing))
def _variable(self, name, vars_set):
"""Track that `name` is used as a variable.
Adds the name to `vars_set`, a set of variable names.
Raises a syntax error if `name` is not a valid name.
"""
if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
self._syntax_error("Not a valid name", name)
vars_set.add(name)
def render(self, context=None):
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
"""
# Make the complete context we'll use.
render_context = dict(self.context)
if context:
render_context.update(context)
return self._render_function(render_context, self._do_dots)
def _do_dots(self, value, *dots):
"""Evaluate dotted expressions at run-time."""
for dot in dots:
try:
value = getattr(value, dot)
except AttributeError:
try:
value = value[dot]
except (TypeError, KeyError):
raise TempliteValueError(
"Couldn't evaluate %r.%s" % (value, dot)
)
if callable(value):
value = value()
return value
|
import operator
import enum
from typing import TYPE_CHECKING, Any, Optional, Sequence, TypeVar, Union
import attr
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QTimer
from PyQt5.QtCore import QUrl
from qutebrowser.utils import log, qtutils, utils
if TYPE_CHECKING:
# Protocol was added in Python 3.8
from typing import Protocol
class SupportsLessThan(Protocol):
"""Protocol for the _T TypeVar below."""
def __lt__(self, other: Any) -> bool:
...
_T = TypeVar('_T', bound='SupportsLessThan')
class Unset:
"""Class for an unset object."""
__slots__ = ()
def __repr__(self) -> str:
return '<UNSET>'
UNSET = Unset()
class NeighborList(Sequence[_T]):
"""A list of items which saves its current position.
Class attributes:
Modes: Different modes, see constructor documentation.
Attributes:
fuzzyval: The value which is currently set but not in the list.
_idx: The current position in the list.
_items: A list of all items, accessed through item property.
_mode: The current mode.
"""
class Modes(enum.Enum):
"""Behavior for the 'mode' argument."""
edge = enum.auto()
exception = enum.auto()
def __init__(self, items: Sequence[_T] = None,
default: Union[_T, Unset] = UNSET,
mode: Modes = Modes.exception) -> None:
"""Constructor.
Args:
items: The list of items to iterate in.
default: The initially selected value.
mode: Behavior when the first/last item is reached.
Modes.edge: Go to the first/last item
Modes.exception: Raise an IndexError.
"""
if not isinstance(mode, self.Modes):
raise TypeError("Mode {} is not a Modes member!".format(mode))
if items is None:
self._items: Sequence[_T] = []
else:
self._items = list(items)
self._default = default
if not isinstance(default, Unset):
idx = self._items.index(default)
self._idx: Optional[int] = idx
else:
self._idx = None
self._mode = mode
self.fuzzyval: Optional[int] = None
def __getitem__(self, key: int) -> _T: # type: ignore[override]
return self._items[key]
def __len__(self) -> int:
return len(self._items)
def __repr__(self) -> str:
return utils.get_repr(self, items=self._items, mode=self._mode,
idx=self._idx, fuzzyval=self.fuzzyval)
def _snap_in(self, offset: int) -> bool:
"""Set the current item to the closest item to self.fuzzyval.
Args:
offset: negative to get the next smaller item, positive for the
next bigger one.
Return:
True if the value snapped in (changed),
False when the value already was in the list.
"""
assert isinstance(self.fuzzyval, (int, float)), self.fuzzyval
op = operator.le if offset < 0 else operator.ge
items = [(idx, e) for (idx, e) in enumerate(self._items)
if op(e, self.fuzzyval)]
if items:
item = min(
items,
key=lambda tpl:
abs(self.fuzzyval - tpl[1])) # type: ignore[operator]
else:
sorted_items = sorted(enumerate(self.items), key=lambda e: e[1])
idx = 0 if offset < 0 else -1
item = sorted_items[idx]
self._idx = item[0]
return self.fuzzyval not in self._items
def _get_new_item(self, offset: int) -> _T:
"""Logic for getitem to get the item at offset.
Args:
offset: The offset of the current item, relative to the last one.
Return:
The new item.
"""
assert self._idx is not None
try:
if self._idx + offset >= 0:
new = self._items[self._idx + offset]
else:
raise IndexError
except IndexError:
if self._mode == self.Modes.edge:
assert offset != 0
if offset > 0:
new = self.lastitem()
else:
new = self.firstitem()
elif self._mode == self.Modes.exception: # pragma: no branch
raise
else:
self._idx += offset
return new
@property
def items(self) -> Sequence[_T]:
"""Getter for items, which should not be set."""
return self._items
def getitem(self, offset: int) -> _T:
"""Get the item with a relative position.
Args:
offset: The offset of the current item, relative to the last one.
Return:
The new item.
"""
log.misc.debug("{} items, idx {}, offset {}".format(
len(self._items), self._idx, offset))
if not self._items:
raise IndexError("No items found!")
if self.fuzzyval is not None:
# Value has been set to something not in the list, so we snap in to
# the closest value in the right direction and count this as one
# step towards offset.
snapped = self._snap_in(offset)
if snapped and offset > 0:
offset -= 1
elif snapped:
offset += 1
self.fuzzyval = None
return self._get_new_item(offset)
def curitem(self) -> _T:
"""Get the current item in the list."""
if self._idx is not None:
return self._items[self._idx]
else:
raise IndexError("No current item!")
def nextitem(self) -> _T:
"""Get the next item in the list."""
return self.getitem(1)
def previtem(self) -> _T:
"""Get the previous item in the list."""
return self.getitem(-1)
def firstitem(self) -> _T:
"""Get the first item in the list."""
if not self._items:
raise IndexError("No items found!")
self._idx = 0
return self.curitem()
def lastitem(self) -> _T:
"""Get the last item in the list."""
if not self._items:
raise IndexError("No items found!")
self._idx = len(self._items) - 1
return self.curitem()
def reset(self) -> _T:
"""Reset the position to the default."""
if self._default is UNSET:
raise ValueError("No default set!")
self._idx = self._items.index(self._default)
return self.curitem()
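# Minimal behaviour sketch for NeighborList (values chosen for illustration):
#
#   nl = NeighborList([20, 50, 100], default=50, mode=NeighborList.Modes.edge)
#   nl.nextitem()    # -> 100 (moves on from the default, 50)
#   nl.nextitem()    # -> 100 (edge mode clamps at the last item)
#   nl.previtem()    # -> 50
#   nl.fuzzyval = 70
#   nl.nextitem()    # -> 100 (snaps to the closest larger value first)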
class PromptMode(enum.Enum):
"""The mode of a Question."""
yesno = enum.auto()
text = enum.auto()
user_pwd = enum.auto()
alert = enum.auto()
download = enum.auto()
class ClickTarget(enum.Enum):
"""How to open a clicked link."""
normal = enum.auto() #: Open the link in the current tab
tab = enum.auto() #: Open the link in a new foreground tab
tab_bg = enum.auto() #: Open the link in a new background tab
window = enum.auto() #: Open the link in a new window
hover = enum.auto() #: Only hover over the link
class KeyMode(enum.Enum):
"""Key input modes."""
normal = enum.auto() #: Normal mode (no mode was entered)
hint = enum.auto() #: Hint mode (showing labels for links)
command = enum.auto() #: Command mode (after pressing the colon key)
yesno = enum.auto() #: Yes/No prompts
prompt = enum.auto() #: Text prompts
insert = enum.auto() #: Insert mode (passing through most keys)
passthrough = enum.auto() #: Passthrough mode (passing through all keys)
caret = enum.auto() #: Caret mode (moving cursor with keys)
set_mark = enum.auto()
jump_mark = enum.auto()
record_macro = enum.auto()
run_macro = enum.auto()
# 'register' is a bit of an oddball here: It's not really a "real" mode,
# but it's used in the config for common bindings for
# set_mark/jump_mark/record_macro/run_macro.
register = enum.auto()
class Exit(enum.IntEnum):
"""Exit statuses for errors. Needs to be an int for sys.exit."""
ok = 0
reserved = 1
exception = 2
err_ipc = 3
err_init = 4
class LoadStatus(enum.Enum):
"""Load status of a tab."""
none = enum.auto()
success = enum.auto()
success_https = enum.auto()
error = enum.auto()
warn = enum.auto()
loading = enum.auto()
class Backend(enum.Enum):
"""The backend being used (usertypes.backend)."""
QtWebKit = enum.auto()
QtWebEngine = enum.auto()
class JsWorld(enum.Enum):
"""World/context to run JavaScript code in."""
main = enum.auto() #: Same world as the web page's JavaScript.
application = enum.auto() #: Application world, used by qutebrowser internally.
user = enum.auto() #: User world, currently not used.
jseval = enum.auto() #: World used for the jseval-command.
class JsLogLevel(enum.Enum):
"""Log level of a JS message.
This needs to match up with the keys allowed for the
content.javascript.log setting.
"""
unknown = enum.auto()
info = enum.auto()
warning = enum.auto()
error = enum.auto()
class MessageLevel(enum.Enum):
"""The level of a message being shown."""
error = enum.auto()
warning = enum.auto()
info = enum.auto()
class IgnoreCase(enum.Enum):
"""Possible values for the 'search.ignore_case' setting."""
smart = enum.auto()
never = enum.auto()
always = enum.auto()
class CommandValue(enum.Enum):
"""Special values which are injected when running a command handler."""
count = enum.auto()
win_id = enum.auto()
cur_tab = enum.auto()
count_tab = enum.auto()
class Question(QObject):
"""A question asked to the user, e.g. via the status bar.
Note the creator is responsible for cleaning up the question after it
doesn't need it anymore, e.g. via connecting Question.completed to
Question.deleteLater.
Attributes:
mode: A PromptMode enum member.
yesno: A question which can be answered with yes/no.
text: A question which requires a free text answer.
user_pwd: A question for a username and password.
default: The default value.
For yesno, None (no default), True or False.
For text, a default text as string.
For user_pwd, a default username as string.
title: The question title to show.
text: The prompt text to display to the user.
url: Any URL referenced in prompts.
option: Boolean option to be set when answering always/never.
answer: The value the user entered (as password for user_pwd).
is_aborted: Whether the question was aborted.
interrupted: Whether the question was interrupted by another one.
Signals:
answered: Emitted when the question has been answered by the user.
arg: The answer to the question.
cancelled: Emitted when the question has been cancelled by the user.
aborted: Emitted when the question was aborted programmatically.
In this case, cancelled is not emitted.
answered_yes: Convenience signal emitted when a yesno question was
answered with yes.
answered_no: Convenience signal emitted when a yesno question was
answered with no.
completed: Emitted when the question was completed in any way.
"""
answered = pyqtSignal(object)
cancelled = pyqtSignal()
aborted = pyqtSignal()
answered_yes = pyqtSignal()
answered_no = pyqtSignal()
completed = pyqtSignal()
def __init__(self, parent: QObject = None) -> None:
super().__init__(parent)
self.mode: Optional[PromptMode] = None
self.default: Union[bool, str, None] = None
self.title: Optional[str] = None
self.text: Optional[str] = None
self.url: Optional[str] = None
self.option: Optional[bool] = None
self.answer: Union[str, bool, None] = None
self.is_aborted = False
self.interrupted = False
def __repr__(self) -> str:
return utils.get_repr(self, title=self.title, text=self.text,
mode=self.mode, default=self.default,
option=self.option)
@pyqtSlot()
def done(self) -> None:
"""Must be called when the question was answered completely."""
self.answered.emit(self.answer)
if self.mode == PromptMode.yesno:
if self.answer:
self.answered_yes.emit()
else:
self.answered_no.emit()
self.completed.emit()
@pyqtSlot()
def cancel(self) -> None:
"""Cancel the question (resulting from user-input)."""
self.cancelled.emit()
self.completed.emit()
@pyqtSlot()
def abort(self) -> None:
"""Abort the question."""
if self.is_aborted:
log.misc.debug("Question was already aborted")
return
self.is_aborted = True
self.aborted.emit()
self.completed.emit()
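# Sketch of typical Question wiring (signal names come from the class above;
# do_quit is a hypothetical handler):
#
#   q = Question()
#   q.mode = PromptMode.yesno
#   q.text = "Quit qutebrowser?"
#   q.answered_yes.connect(do_quit)
#   q.completed.connect(q.deleteLater)   # cleanup, as the class docstring notes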
class Timer(QTimer):
"""A timer which has a name to show in __repr__ and checks for overflows.
Attributes:
_name: The name of the timer.
"""
def __init__(self, parent: QObject = None, name: str = None) -> None:
super().__init__(parent)
if name is None:
self._name = "unnamed"
else:
self.setObjectName(name)
self._name = name
def __repr__(self) -> str:
return utils.get_repr(self, name=self._name)
def setInterval(self, msec: int) -> None:
"""Extend setInterval to check for overflows."""
qtutils.check_overflow(msec, 'int')
super().setInterval(msec)
def start(self, msec: int = None) -> None:
"""Extend start to check for overflows."""
if msec is not None:
qtutils.check_overflow(msec, 'int')
super().start(msec)
else:
super().start()
class AbstractCertificateErrorWrapper:
"""A wrapper over an SSL/certificate error."""
def __init__(self, error: Any) -> None:
self._error = error
def __str__(self) -> str:
raise NotImplementedError
def __repr__(self) -> str:
raise NotImplementedError
def is_overridable(self) -> bool:
raise NotImplementedError
@attr.s
class NavigationRequest:
"""A request to navigate to the given URL."""
class Type(enum.Enum):
"""The type of a request.
Based on QWebEngineUrlRequestInfo::NavigationType and QWebPage::NavigationType.
"""
#: Navigation initiated by clicking a link.
link_clicked = 1
#: Navigation explicitly initiated by typing a URL (QtWebEngine only).
typed = 2
#: Navigation submits a form.
form_submitted = 3
#: An HTML form was submitted a second time (QtWebKit only).
form_resubmitted = 4
#: Navigation initiated by a history action.
back_forward = 5
#: Navigation initiated by refreshing the page.
reloaded = 6
#: Navigation triggered automatically by page content or remote server
#: (QtWebEngine >= 5.14 only)
redirect = 7
#: None of the above.
other = 8
url: QUrl = attr.ib()
navigation_type: Type = attr.ib()
is_main_frame: bool = attr.ib()
accepted: bool = attr.ib(default=True)
|
import speech_recognition as sr
from kalliope.core import Utils
from kalliope.stt.Utils import SpeechRecognition
class Bing(SpeechRecognition):
def __init__(self, callback=None, **kwargs):
"""
Start recording the microphone and analyse audio with Bing api
:param callback: The callback function to call to send the text
:param kwargs:
"""
# give the audio file path to process directly to the mother class if exist
SpeechRecognition.__init__(self, kwargs.get('audio_file_path', None))
# callback function to call after the translation speech/text
self.main_controller_callback = callback
self.key = kwargs.get('key', None)
self.language = kwargs.get('language', "en-US")
self.show_all = kwargs.get('show_all', False)
# start listening in the background
self.set_callback(self.bing_callback)
# start processing, record a sample from the microphone if no audio file path provided, else read the file
self.start_processing()
def bing_callback(self, recognizer, audio):
"""
called from the background thread
"""
try:
captured_audio = recognizer.recognize_bing(audio,
key=self.key,
language=self.language,
show_all=self.show_all)
Utils.print_success("Bing Speech Recognition thinks you said %s" % captured_audio)
self._analyse_audio(captured_audio)
except sr.UnknownValueError:
Utils.print_warning("Bing Speech Recognition could not understand audio")
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except sr.RequestError as e:
Utils.print_danger("Could not request results from Bing Speech Recognition service; {0}".format(e))
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except AssertionError:
Utils.print_warning("No audio caught from microphone")
self._analyse_audio(audio_to_text=None)
def _analyse_audio(self, audio_to_text):
"""
Confirm the audio exists and run it in a Callback
:param audio_to_text: the captured audio
"""
if self.main_controller_callback is not None:
self.main_controller_callback(audio_to_text)
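# Illustrative kalliope settings entry for this engine (structure assumed from
# the kwargs read in __init__; the API key is a placeholder):
#
#   speech_to_text:
#     - bing:
#         key: "YOUR_BING_SPEECH_API_KEY"
#         language: "en-US"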
|
import pytest
from homeassistant.components import conversation
from homeassistant.core import DOMAIN as HASS_DOMAIN, Context
from homeassistant.helpers import intent
from homeassistant.setup import async_setup_component
from tests.common import async_mock_intent, async_mock_service
async def test_calling_intent(hass):
"""Test calling an intent from a conversation."""
intents = async_mock_intent(hass, "OrderBeer")
result = await async_setup_component(hass, "homeassistant", {})
assert result
result = await async_setup_component(
hass,
"conversation",
{"conversation": {"intents": {"OrderBeer": ["I would like the {type} beer"]}}},
)
assert result
context = Context()
await hass.services.async_call(
"conversation",
"process",
{conversation.ATTR_TEXT: "I would like the Grolsch beer"},
context=context,
)
await hass.async_block_till_done()
assert len(intents) == 1
intent = intents[0]
assert intent.platform == "conversation"
assert intent.intent_type == "OrderBeer"
assert intent.slots == {"type": {"value": "Grolsch"}}
assert intent.text_input == "I would like the Grolsch beer"
assert intent.context is context
async def test_register_before_setup(hass):
"""Test calling an intent from a conversation."""
intents = async_mock_intent(hass, "OrderBeer")
hass.components.conversation.async_register("OrderBeer", ["A {type} beer, please"])
result = await async_setup_component(
hass,
"conversation",
{"conversation": {"intents": {"OrderBeer": ["I would like the {type} beer"]}}},
)
assert result
await hass.services.async_call(
"conversation", "process", {conversation.ATTR_TEXT: "A Grolsch beer, please"}
)
await hass.async_block_till_done()
assert len(intents) == 1
intent = intents[0]
assert intent.platform == "conversation"
assert intent.intent_type == "OrderBeer"
assert intent.slots == {"type": {"value": "Grolsch"}}
assert intent.text_input == "A Grolsch beer, please"
await hass.services.async_call(
"conversation",
"process",
{conversation.ATTR_TEXT: "I would like the Grolsch beer"},
)
await hass.async_block_till_done()
assert len(intents) == 2
intent = intents[1]
assert intent.platform == "conversation"
assert intent.intent_type == "OrderBeer"
assert intent.slots == {"type": {"value": "Grolsch"}}
assert intent.text_input == "I would like the Grolsch beer"
async def test_http_processing_intent(hass, hass_client, hass_admin_user):
"""Test processing intent via HTTP API."""
class TestIntentHandler(intent.IntentHandler):
"""Test Intent Handler."""
intent_type = "OrderBeer"
async def async_handle(self, intent):
"""Handle the intent."""
assert intent.context.user_id == hass_admin_user.id
response = intent.create_response()
response.async_set_speech(
"I've ordered a {}!".format(intent.slots["type"]["value"])
)
response.async_set_card(
"Beer ordered", "You chose a {}.".format(intent.slots["type"]["value"])
)
return response
intent.async_register(hass, TestIntentHandler())
result = await async_setup_component(
hass,
"conversation",
{"conversation": {"intents": {"OrderBeer": ["I would like the {type} beer"]}}},
)
assert result
client = await hass_client()
resp = await client.post(
"/api/conversation/process", json={"text": "I would like the Grolsch beer"}
)
assert resp.status == 200
data = await resp.json()
assert data == {
"card": {
"simple": {"content": "You chose a Grolsch.", "title": "Beer ordered"}
},
"speech": {"plain": {"extra_data": None, "speech": "I've ordered a Grolsch!"}},
}
@pytest.mark.parametrize("sentence", ("turn on kitchen", "turn kitchen on"))
async def test_turn_on_intent(hass, sentence):
"""Test calling the turn on intent."""
result = await async_setup_component(hass, "homeassistant", {})
assert result
result = await async_setup_component(hass, "conversation", {})
assert result
hass.states.async_set("light.kitchen", "off")
calls = async_mock_service(hass, HASS_DOMAIN, "turn_on")
await hass.services.async_call(
"conversation", "process", {conversation.ATTR_TEXT: sentence}
)
await hass.async_block_till_done()
assert len(calls) == 1
call = calls[0]
assert call.domain == HASS_DOMAIN
assert call.service == "turn_on"
assert call.data == {"entity_id": "light.kitchen"}
@pytest.mark.parametrize("sentence", ("turn off kitchen", "turn kitchen off"))
async def test_turn_off_intent(hass, sentence):
"""Test calling the turn on intent."""
result = await async_setup_component(hass, "homeassistant", {})
assert result
result = await async_setup_component(hass, "conversation", {})
assert result
hass.states.async_set("light.kitchen", "on")
calls = async_mock_service(hass, HASS_DOMAIN, "turn_off")
await hass.services.async_call(
"conversation", "process", {conversation.ATTR_TEXT: sentence}
)
await hass.async_block_till_done()
assert len(calls) == 1
call = calls[0]
assert call.domain == HASS_DOMAIN
assert call.service == "turn_off"
assert call.data == {"entity_id": "light.kitchen"}
@pytest.mark.parametrize("sentence", ("toggle kitchen", "kitchen toggle"))
async def test_toggle_intent(hass, sentence):
"""Test calling the turn on intent."""
result = await async_setup_component(hass, "homeassistant", {})
assert result
result = await async_setup_component(hass, "conversation", {})
assert result
hass.states.async_set("light.kitchen", "on")
calls = async_mock_service(hass, HASS_DOMAIN, "toggle")
await hass.services.async_call(
"conversation", "process", {conversation.ATTR_TEXT: sentence}
)
await hass.async_block_till_done()
assert len(calls) == 1
call = calls[0]
assert call.domain == HASS_DOMAIN
assert call.service == "toggle"
assert call.data == {"entity_id": "light.kitchen"}
async def test_http_api(hass, hass_client):
"""Test the HTTP conversation API."""
assert await async_setup_component(hass, "homeassistant", {})
assert await async_setup_component(hass, "conversation", {})
assert await async_setup_component(hass, "intent", {})
client = await hass_client()
hass.states.async_set("light.kitchen", "off")
calls = async_mock_service(hass, HASS_DOMAIN, "turn_on")
resp = await client.post(
"/api/conversation/process", json={"text": "Turn the kitchen on"}
)
assert resp.status == 200
assert len(calls) == 1
call = calls[0]
assert call.domain == HASS_DOMAIN
assert call.service == "turn_on"
assert call.data == {"entity_id": "light.kitchen"}
async def test_http_api_wrong_data(hass, hass_client):
"""Test the HTTP conversation API."""
result = await async_setup_component(hass, "homeassistant", {})
assert result
result = await async_setup_component(hass, "conversation", {})
assert result
client = await hass_client()
resp = await client.post("/api/conversation/process", json={"text": 123})
assert resp.status == 400
resp = await client.post("/api/conversation/process", json={})
assert resp.status == 400
async def test_custom_agent(hass, hass_client, hass_admin_user):
"""Test a custom conversation agent."""
calls = []
class MyAgent(conversation.AbstractConversationAgent):
"""Test Agent."""
async def async_process(self, text, context, conversation_id):
"""Process some text."""
calls.append((text, context, conversation_id))
response = intent.IntentResponse()
response.async_set_speech("Test response")
return response
conversation.async_set_agent(hass, MyAgent())
assert await async_setup_component(hass, "conversation", {})
client = await hass_client()
resp = await client.post(
"/api/conversation/process",
json={"text": "Test Text", "conversation_id": "test-conv-id"},
)
assert resp.status == 200
assert await resp.json() == {
"card": {},
"speech": {"plain": {"extra_data": None, "speech": "Test response"}},
}
assert len(calls) == 1
assert calls[0][0] == "Test Text"
assert calls[0][1].user_id == hass_admin_user.id
assert calls[0][2] == "test-conv-id"
|
import logging
import argparse
import itertools
import sys
import warnings
import attr
import pytest
import _pytest.logging
from PyQt5 import QtCore
from qutebrowser import qutebrowser
from qutebrowser.utils import log
from qutebrowser.misc import utilcmds
from qutebrowser.api import cmdutils
@pytest.fixture(autouse=True)
def restore_loggers():
"""Fixture to save/restore the logging state.
Based on CPython's Lib/test/test_logging.py.
"""
logging.captureWarnings(False)
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
saved_handlers = logging._handlers.copy()
saved_handler_list = logging._handlerList[:]
saved_loggers = logger_dict.copy()
saved_name_to_level = logging._nameToLevel.copy()
saved_level_to_name = logging._levelToName.copy()
logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name], 'disabled',
None)
finally:
logging._releaseLock()
root_logger = logging.getLogger("")
root_handlers = root_logger.handlers[:]
original_logging_level = root_logger.getEffectiveLevel()
yield
while root_logger.handlers:
h = root_logger.handlers[0]
root_logger.removeHandler(h)
if not isinstance(h, _pytest.logging.LogCaptureHandler):
h.close()
root_logger.setLevel(original_logging_level)
for h in root_handlers:
if not isinstance(h, _pytest.logging.LogCaptureHandler):
# https://github.com/qutebrowser/qutebrowser/issues/856
root_logger.addHandler(h)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(saved_handlers)
logging._handlerList[:] = saved_handler_list
logger_dict = logging.getLogger().manager.loggerDict
logger_dict.clear()
logger_dict.update(saved_loggers)
for name in logger_states:
if logger_states[name] is not None:
saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
@pytest.fixture(scope='session')
def log_counter():
"""Counter for logger fixture to get unique loggers."""
return itertools.count()
@pytest.fixture
def logger(log_counter):
"""Fixture which provides a logger for tests.
Unique throwaway loggers are used to make sure the tests don't influence
each other.
"""
i = next(log_counter)
return logging.getLogger('qutebrowser-unittest-logger-{}'.format(i))
class TestLogFilter:
def _make_record(self, logger, name, level=logging.DEBUG):
"""Create a bogus logging record with the supplied logger name."""
return logger.makeRecord(name, level=level, fn=None, lno=0, msg="",
args=None, exc_info=None)
@pytest.mark.parametrize('filters, negated, category, logged', [
# Filter letting all messages through
(set(), False, 'eggs.bacon.spam', True),
(set(), False, 'eggs', True),
(set(), True, 'ham', True),
# Matching records
({'eggs', 'bacon'}, False, 'eggs', True),
({'eggs', 'bacon'}, False, 'bacon', True),
({'eggs'}, False, 'eggs.fried', True),
# Non-matching records
({'eggs', 'bacon'}, False, 'spam', False),
({'eggs'}, False, 'eggsauce', False),
({'fried'}, False, 'eggs.fried', False),
# Suppressed records
({'eggs', 'bacon'}, True, 'eggs', False),
({'eggs', 'bacon'}, True, 'bacon', False),
# Non-suppressed records
({'eggs', 'bacon'}, True, 'spam', True),
({'eggs'}, True, 'eggsauce', True),
])
def test_logfilter(self, logger, filters, negated, category, logged):
"""Ensure the multi-record filtering filterer filters multiple records.
(Blame @toofar for this comment)
"""
logfilter = log.LogFilter(filters, negated=negated)
record = self._make_record(logger, category)
assert logfilter.filter(record) == logged
def test_logfilter_benchmark(self, logger, benchmark):
record = self._make_record(logger, 'unfiltered')
filters = set(log.LOGGER_NAMES) # Extreme case
logfilter = log.LogFilter(filters, negated=False)
benchmark(lambda: logfilter.filter(record))
@pytest.mark.parametrize('only_debug', [True, False])
def test_debug(self, logger, only_debug):
"""Test if messages more important than debug are never filtered."""
logfilter = log.LogFilter({'eggs'}, only_debug=only_debug)
record = self._make_record(logger, 'bacon', level=logging.INFO)
assert logfilter.filter(record) == only_debug
@pytest.mark.parametrize(
'category, filter_str, logged_before, logged_after', [
('init', 'url,js', True, False),
('url', 'url,js', False, True),
('js', 'url,js', False, True),
('js', 'none', False, True),
]
)
def test_debug_log_filter_cmd(self, monkeypatch, logger, category,
filter_str, logged_before, logged_after):
"""Test the :debug-log-filter command handler."""
logfilter = log.LogFilter({"init"})
monkeypatch.setattr(log, 'console_filter', logfilter)
record = self._make_record(logger, category)
assert logfilter.filter(record) == logged_before
utilcmds.debug_log_filter(filter_str)
assert logfilter.filter(record) == logged_after
def test_debug_log_filter_cmd_invalid(self, monkeypatch):
logfilter = log.LogFilter(set())
monkeypatch.setattr(log, 'console_filter', logfilter)
with pytest.raises(cmdutils.CommandError,
match='Invalid log category blabla'):
utilcmds.debug_log_filter('blabla')
@pytest.mark.parametrize('filter_str, expected_names, negated', [
('!js,misc', {'js', 'misc'}, True),
('js,misc', {'js', 'misc'}, False),
('js, misc', {'js', 'misc'}, False),
('JS, Misc', {'js', 'misc'}, False),
(None, set(), False),
('none', set(), False),
])
def test_parsing(self, filter_str, expected_names, negated):
logfilter = log.LogFilter.parse(filter_str)
assert logfilter.names == expected_names
assert logfilter.negated == negated
@pytest.mark.parametrize('filter_str, invalid', [
('js,!misc', '!misc'),
('blabla,js,blablub', 'blabla, blablub'),
])
def test_parsing_invalid(self, filter_str, invalid):
with pytest.raises(
log.InvalidLogFilterError,
match='Invalid log category {} - '
'valid categories: statusbar, .*'.format(invalid)):
log.LogFilter.parse(filter_str)
@pytest.mark.parametrize('data, expected', [
# Less data
(['one'], ['one']),
# Exactly filled
(['one', 'two'], ['one', 'two']),
# More data
(['one', 'two', 'three'], ['two', 'three']),
])
def test_ram_handler(logger, data, expected):
handler = log.RAMHandler(capacity=2)
handler.setLevel(logging.NOTSET)
logger.addHandler(handler)
for line in data:
logger.debug(line)
assert [rec.msg for rec in handler._data] == expected
assert handler.dump_log() == '\n'.join(expected)
@pytest.mark.integration
class TestInitLog:
"""Tests for init_log."""
def _get_default_args(self):
return argparse.Namespace(debug=True, loglevel='debug', color=True,
loglines=10, logfilter=None,
force_color=False, json_logging=False,
debug_flags=set())
@pytest.fixture(autouse=True)
def setup(self, mocker):
mocker.patch('qutebrowser.utils.log.QtCore.qInstallMessageHandler',
autospec=True)
yield
# Make sure logging is in a sensible default state
args = self._get_default_args()
log.init_log(args)
@pytest.fixture
def args(self):
"""Fixture providing an argparse namespace for init_log."""
return self._get_default_args()
@pytest.fixture
def parser(self):
return qutebrowser.get_argparser()
@pytest.fixture
def empty_args(self, parser):
"""Logging commandline arguments without any customization."""
return parser.parse_args([])
def test_stderr_none(self, args):
"""Test init_log with sys.stderr = None."""
old_stderr = sys.stderr
sys.stderr = None
log.init_log(args)
sys.stderr = old_stderr
def test_python_warnings(self, args, caplog):
log.init_log(args)
with caplog.at_level(logging.WARNING):
warnings.warn("test warning", PendingDeprecationWarning)
expected = "PendingDeprecationWarning: test warning"
assert expected in caplog.records[0].message
def test_python_warnings_werror(self, args):
args.debug_flags = {'werror'}
log.init_log(args)
with pytest.raises(PendingDeprecationWarning):
warnings.warn("test warning", PendingDeprecationWarning)
@pytest.mark.parametrize('cli, conf, expected', [
(None, 'info', logging.INFO),
(None, 'warning', logging.WARNING),
('info', 'warning', logging.INFO),
('warning', 'info', logging.WARNING),
])
def test_init_from_config_console(self, cli, conf, expected, args,
config_stub):
args.debug = False
args.loglevel = cli
log.init_log(args)
config_stub.val.logging.level.console = conf
log.init_from_config(config_stub.val)
assert log.console_handler.level == expected
@pytest.mark.parametrize('conf, expected', [
('vdebug', logging.VDEBUG),
('debug', logging.DEBUG),
('info', logging.INFO),
('critical', logging.CRITICAL),
])
def test_init_from_config_ram(self, conf, expected, args, config_stub):
args.debug = False
log.init_log(args)
config_stub.val.logging.level.ram = conf
log.init_from_config(config_stub.val)
assert log.ram_handler.level == expected
def test_init_from_config_consistent_default(self, config_stub, empty_args):
"""Ensure config defaults are consistent with the builtin defaults."""
log.init_log(empty_args)
assert log.ram_handler.level == logging.DEBUG
assert log.console_handler.level == logging.INFO
log.init_from_config(config_stub.val)
assert log.ram_handler.level == logging.DEBUG
assert log.console_handler.level == logging.INFO
def test_init_from_config_format(self, config_stub, empty_args):
"""If we change to the debug level, make sure the format changes."""
log.init_log(empty_args)
assert log.console_handler.formatter._fmt == log.SIMPLE_FMT
config_stub.val.logging.level.console = 'debug'
log.init_from_config(config_stub.val)
assert log.console_handler.formatter._fmt == log.EXTENDED_FMT
def test_logfilter(self, parser):
args = parser.parse_args(['--logfilter', 'misc'])
log.init_log(args)
assert log.console_filter.names == {'misc'}
class TestHideQtWarning:
"""Tests for hide_qt_warning/QtWarningFilter."""
@pytest.fixture()
def qt_logger(self):
return logging.getLogger('qt-tests')
def test_unfiltered(self, qt_logger, caplog):
with log.hide_qt_warning("World", 'qt-tests'):
with caplog.at_level(logging.WARNING, 'qt-tests'):
qt_logger.warning("Hello World")
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == 'WARNING'
assert record.message == "Hello World"
@pytest.mark.parametrize('line', [
"Hello", # exact match
"Hello World", # match at start of line
" Hello World ", # match with spaces
])
def test_filtered(self, qt_logger, caplog, line):
with log.hide_qt_warning("Hello", 'qt-tests'):
with caplog.at_level(logging.WARNING, 'qt-tests'):
qt_logger.warning(line)
assert not caplog.records
@pytest.mark.parametrize('suffix, expected', [
('', 'STUB: test_stub'),
('foo', 'STUB: test_stub (foo)'),
])
def test_stub(caplog, suffix, expected):
with caplog.at_level(logging.WARNING, 'misc'):
log.stub(suffix)
assert caplog.messages == [expected]
def test_py_warning_filter(caplog):
logging.captureWarnings(True)
with log.py_warning_filter(category=UserWarning):
warnings.warn("hidden", UserWarning)
with caplog.at_level(logging.WARNING):
warnings.warn("not hidden", UserWarning)
assert len(caplog.records) == 1
msg = caplog.messages[0].splitlines()[0]
assert msg.endswith("UserWarning: not hidden")
def test_py_warning_filter_error(caplog):
warnings.simplefilter('ignore')
warnings.warn("hidden", UserWarning)
with log.py_warning_filter('error'):
with pytest.raises(UserWarning):
warnings.warn("error", UserWarning)
def test_warning_still_errors():
# Mainly a sanity check after the tests messing with warnings above.
with pytest.raises(UserWarning):
warnings.warn("error", UserWarning)
class TestQtMessageHandler:
@attr.s
class Context:
"""Fake QMessageLogContext."""
function = attr.ib(default=None)
category = attr.ib(default=None)
file = attr.ib(default=None)
line = attr.ib(default=None)
@pytest.fixture(autouse=True)
def init_args(self):
parser = qutebrowser.get_argparser()
args = parser.parse_args([])
log.init_log(args)
def test_empty_message(self, caplog):
"""Make sure there's no crash with an empty message."""
log.qt_message_handler(QtCore.QtDebugMsg, self.Context(), "")
assert caplog.messages == ["Logged empty message!"]
|
import diamond.collector
import base64
from contextlib import closing
import json
import re
import urllib
import urllib2
class JolokiaCollector(diamond.collector.Collector):
LIST_URL = "/list"
"""
These domains contain MBeans that are for management purposes,
or otherwise do not contain useful metrics
"""
IGNORE_DOMAINS = ['JMImplementation', 'jmx4perl', 'jolokia',
'com.sun.management', 'java.util.logging']
def get_default_config_help(self):
config_help = super(JolokiaCollector,
self).get_default_config_help()
config_help.update({
'domains': "Pipe delimited list of JMX domains from which to"
" collect stats. If not provided, the list of all"
" domains will be downloaded from jolokia.",
'mbeans': "Pipe delimited list of MBeans for which to collect"
" stats. If not provided, all stats will"
" be collected.",
'regex': "Contols if mbeans option matches with regex,"
" False by default.",
'username': "Username for authentication",
'password': "Password for authentication",
'host': 'Hostname',
'port': 'Port',
'rewrite': "This sub-section of the config contains pairs of"
" from-to regex rewrites.",
'path': 'Path component of the reported metrics.',
# https://github.com/rhuss/jolokia/blob/959424888a82abc2b1906c60547cd4df280f3b71/client/java/src/main/java/org/jolokia/client/request/J4pQueryParameter.java#L68
'use_canonical_names': 'Whether property keys of ObjectNames'
' should be ordered in the canonical way'
' or in the way that they are created. The'
' allowed values are either "True" in'
' which case the canonical key order (=='
' alphabetical sorted) is used or "False"'
' for getting the keys as registered.'
' Default is "True',
            'jolokia_path': 'Path to jolokia. Typically "jmx" or "jolokia".'
' Defaults to the value of "path" variable.',
})
return config_help
def get_default_config(self):
config = super(JolokiaCollector, self).get_default_config()
config.update({
'mbeans': [],
'regex': False,
'rewrite': [],
'path': 'jolokia',
'jolokia_path': None,
'username': None,
'password': None,
'host': 'localhost',
'port': 8778,
'use_canonical_names': True,
})
return config
def __init__(self, *args, **kwargs):
super(JolokiaCollector, self).__init__(*args, **kwargs)
self.mbeans = []
if isinstance(self.config['mbeans'], basestring):
for mbean in self.config['mbeans'].split('|'):
self.mbeans.append(mbean.strip())
elif isinstance(self.config['mbeans'], list):
self.mbeans = self.config['mbeans']
        if self.config['regex']:
self.mbeans = [re.compile(mbean) for mbean in self.mbeans]
self.rewrite = [
(re.compile('["\'(){}<>\[\]]'), ''),
(re.compile('[:,.]+'), '.'),
(re.compile('[^a-zA-Z0-9_.+-]+'), '_'),
]
if isinstance(self.config['rewrite'], dict):
self.rewrite.extend([(re.compile(old), new) for old, new in
self.config['rewrite'].items()])
self.domains = []
if 'domains' in self.config:
if isinstance(self.config['domains'], basestring):
for domain in self.config['domains'].split('|'):
self.domains.append(domain.strip())
elif isinstance(self.config['domains'], list):
self.domains = self.config['domains']
if self.config['jolokia_path'] is not None:
self.jolokia_path = self.config['jolokia_path']
else:
self.jolokia_path = self.config['path']
if not isinstance(self.config['use_canonical_names'], bool):
if self.config['use_canonical_names'] == 'True':
self.config['use_canonical_names'] = True
elif self.config['use_canonical_names'] == 'False':
self.config['use_canonical_names'] = False
else:
self.log.error('Unexpected value "%s" for "use_canonical_names"'
' setting. Expected "True" or "False". Using'
' default value.',
self.config['use_canonical_names'])
default = self.get_default_config()['use_canonical_names']
self.config['use_canonical_names'] = default
def _get_domains(self):
        # if not set in __init__
if not self.domains:
listing = self._list_request()
try:
if listing['status'] == 200:
self.domains = listing['value'].keys()
else:
self.log.error('Jolokia status %s while retrieving MBean '
'listing.', listing['status'])
except KeyError:
                # The response was totally empty, or not in an expected format
self.log.error('Unable to retrieve MBean listing.')
def _check_mbean(self, mbean):
if not self.mbeans:
return True
mbeanfix = self.clean_up(mbean)
        if self.config['regex']:
for chkbean in self.mbeans:
if chkbean.match(mbean) is not None or \
chkbean.match(mbeanfix) is not None:
return True
else:
if mbean in self.mbeans or mbeanfix in self.mbeans:
return True
def collect(self):
if not self.domains:
self._get_domains()
for domain in self.domains:
if domain not in self.IGNORE_DOMAINS:
obj = self._read_request(domain)
try:
mbeans = obj['value'] if obj['status'] == 200 else {}
except KeyError:
                    # The response was totally empty, or not in an expected format
self.log.error('Unable to retrieve domain %s.', domain)
continue
for k, v in mbeans.iteritems():
if self._check_mbean(k):
self.collect_bean(k, v)
def _read_json(self, request):
json_str = request.read()
return json.loads(json_str)
def _list_request(self):
"""Returns a dictionary with JMX domain names as keys"""
try:
# https://jolokia.org/reference/html/protocol.html
#
# A maxDepth of 1 restricts the return value to a map with the JMX
# domains as keys. The values of the maps don't have any meaning
# and are dummy values.
#
# maxCollectionSize=0 means "unlimited". This works around an issue
# prior to Jolokia 1.3 where results were truncated at 1000
#
url = "http://%s:%s/%s%s?maxDepth=1&maxCollectionSize=0" % (
self.config['host'],
self.config['port'],
self.jolokia_path,
self.LIST_URL)
            # Leave some time to process the downloaded metrics; that's why the
            # timeout is set lower than the collection interval.
timeout = max(2, float(self.config['interval']) * 2 / 3)
with closing(urllib2.urlopen(self._create_request(url),
timeout=timeout)) as response:
return self._read_json(response)
except (urllib2.HTTPError, ValueError) as e:
self.log.error('Unable to read JSON response: %s', str(e))
return {}
def _read_request(self, domain):
try:
url_path = '/?%s' % urllib.urlencode({
'maxCollectionSize': '0',
'ignoreErrors': 'true',
'canonicalNaming':
'true' if self.config['use_canonical_names'] else 'false',
'p': 'read/%s:*' % self._escape_domain(domain),
})
url = "http://%s:%s/%s%s" % (self.config['host'],
self.config['port'],
self.jolokia_path,
url_path)
            # Leave some time to process the downloaded metrics; that's why the
            # timeout is set lower than the collection interval.
timeout = max(2, float(self.config['interval']) * 2 / 3)
with closing(urllib2.urlopen(self._create_request(url),
timeout=timeout)) as response:
return self._read_json(response)
except (urllib2.HTTPError, ValueError):
self.log.error('Unable to read JSON response.')
return {}
# escape JMX domain per https://jolokia.org/reference/html/protocol.html
# the Jolokia documentation suggests that when using the p query parameter,
# simply urlencoding should be sufficient, but in practice, the '!' appears
# necessary (and not harmful)
def _escape_domain(self, domain):
domain = re.sub('!', '!!', domain)
domain = re.sub('/', '!/', domain)
domain = re.sub('"', '!"', domain)
domain = urllib.quote(domain)
return domain
def _create_request(self, url):
req = urllib2.Request(url)
username = self.config["username"]
password = self.config["password"]
if username is not None and password is not None:
base64string = base64.encodestring('%s:%s' % (
username, password)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
return req
def clean_up(self, text):
for (oldregex, newstr) in self.rewrite:
text = oldregex.sub(newstr, text)
return text
def collect_bean(self, prefix, obj):
for k, v in obj.iteritems():
if type(v) in [int, float, long]:
key = "%s.%s" % (prefix, k)
key = self.clean_up(key)
if key != "":
self.publish(key, v)
elif type(v) in [dict]:
self.collect_bean("%s.%s" % (prefix, k), v)
elif type(v) in [list]:
self.interpret_bean_with_list("%s.%s" % (prefix, k), v)
# There's no unambiguous way to interpret list values, so
# this hook lets subclasses handle them.
def interpret_bean_with_list(self, prefix, values):
pass
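# --- Illustrative sketch (not part of the collector above) ---
# A minimal, standalone demonstration of the metric-name cleanup performed by
# clean_up(), assuming only the three built-in rewrite rules defined in __init__.
# The name _REWRITES and the sample key below are hypothetical.
if __name__ == '__main__':
    _REWRITES = [
        (re.compile(r'["\'(){}<>\[\]]'), ''),
        (re.compile(r'[:,.]+'), '.'),
        (re.compile(r'[^a-zA-Z0-9_.+-]+'), '_'),
    ]
    sample_key = 'java.lang:type=Memory,name="G1 Old Gen".HeapMemoryUsage.used'
    for oldregex, newstr in _REWRITES:
        sample_key = oldregex.sub(newstr, sample_key)
    # Prints: java.lang.type_Memory.name_G1_Old_Gen.HeapMemoryUsage.used
    print(sample_key)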
|
from datetime import timedelta
import logging
import hpilo
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSOR_TYPE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "HP ILO"
DEFAULT_PORT = 443
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
SENSOR_TYPES = {
"server_name": ["Server Name", "get_server_name"],
"server_fqdn": ["Server FQDN", "get_server_fqdn"],
"server_host_data": ["Server Host Data", "get_host_data"],
"server_oa_info": ["Server Onboard Administrator Info", "get_oa_info"],
"server_power_status": ["Server Power state", "get_host_power_status"],
"server_power_readings": ["Server Power readings", "get_power_readings"],
"server_power_on_time": ["Server Power On time", "get_server_power_on_time"],
"server_asset_tag": ["Server Asset Tag", "get_asset_tag"],
"server_uid_status": ["Server UID light", "get_uid_status"],
"server_health": ["Server Health", "get_embedded_health"],
"network_settings": ["Network Settings", "get_network_settings"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE): vol.All(
cv.string, vol.In(SENSOR_TYPES)
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
],
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
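# Example configuration.yaml entry accepted by the schema above (illustrative only;
# the platform key, host and credentials below are placeholders, not defaults):
#
#   sensor:
#     - platform: hp_ilo
#       host: IP_ADDRESS_OF_ILO
#       username: ILO_USERNAME
#       password: ILO_PASSWORD
#       monitored_variables:
#         - name: Power status
#           sensor_type: server_power_status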
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the HP iLO sensors."""
hostname = config.get(CONF_HOST)
port = config.get(CONF_PORT)
login = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
monitored_variables = config.get(CONF_MONITORED_VARIABLES)
# Create a data fetcher to support all of the configured sensors. Then make
# the first call to init the data and confirm we can connect.
try:
hp_ilo_data = HpIloData(hostname, port, login, password)
except ValueError as error:
_LOGGER.error(error)
return
# Initialize and add all of the sensors.
devices = []
for monitored_variable in monitored_variables:
new_device = HpIloSensor(
hass=hass,
hp_ilo_data=hp_ilo_data,
sensor_name=f"{config.get(CONF_NAME)} {monitored_variable[CONF_NAME]}",
sensor_type=monitored_variable[CONF_SENSOR_TYPE],
sensor_value_template=monitored_variable.get(CONF_VALUE_TEMPLATE),
unit_of_measurement=monitored_variable.get(CONF_UNIT_OF_MEASUREMENT),
)
devices.append(new_device)
add_entities(devices, True)
class HpIloSensor(Entity):
"""Representation of a HP iLO sensor."""
def __init__(
self,
hass,
hp_ilo_data,
sensor_type,
sensor_name,
sensor_value_template,
unit_of_measurement,
):
"""Initialize the HP iLO sensor."""
self._hass = hass
self._name = sensor_name
self._unit_of_measurement = unit_of_measurement
self._ilo_function = SENSOR_TYPES[sensor_type][1]
self.hp_ilo_data = hp_ilo_data
if sensor_value_template is not None:
sensor_value_template.hass = hass
self._sensor_value_template = sensor_value_template
self._state = None
self._state_attributes = None
_LOGGER.debug("Created HP iLO sensor %r", self)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the sensor."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._state_attributes
def update(self):
"""Get the latest data from HP iLO and updates the states."""
# Call the API for new data. Each sensor will re-trigger this
# same exact call, but that's fine. Results should be cached for
# a short period of time to prevent hitting API limits.
self.hp_ilo_data.update()
ilo_data = getattr(self.hp_ilo_data.data, self._ilo_function)()
if self._sensor_value_template is not None:
ilo_data = self._sensor_value_template.render(
ilo_data=ilo_data, parse_result=False
)
self._state = ilo_data
class HpIloData:
"""Gets the latest data from HP iLO."""
def __init__(self, host, port, login, password):
"""Initialize the data object."""
self._host = host
self._port = port
self._login = login
self._password = password
self.data = None
self.update()
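    # Throttle below makes repeated update() calls within MIN_TIME_BETWEEN_UPDATES
    # (300 seconds) no-ops, so every sensor can call update() on each refresh without
    # hammering the iLO interface.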
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from HP iLO."""
try:
self.data = hpilo.Ilo(
hostname=self._host,
login=self._login,
password=self._password,
port=self._port,
)
except (
hpilo.IloError,
hpilo.IloCommunicationError,
hpilo.IloLoginFailed,
) as error:
raise ValueError(f"Unable to init HP ILO, {error}") from error
|
from pathlib import Path
from .model import Info
TEMPLATE_DIR = Path(__file__).parent / "templates"
TEMPLATE_INTEGRATION = TEMPLATE_DIR / "integration"
TEMPLATE_TESTS = TEMPLATE_DIR / "tests"
def generate(template: str, info: Info) -> None:
"""Generate a template."""
print(f"Scaffolding {template} for the {info.domain} integration...")
_ensure_tests_dir_exists(info)
_generate(TEMPLATE_DIR / template / "integration", info.integration_dir, info)
_generate(TEMPLATE_DIR / template / "tests", info.tests_dir, info)
_custom_tasks(template, info)
print()
def _generate(src_dir, target_dir, info: Info) -> None:
"""Generate an integration."""
replaces = {"NEW_DOMAIN": info.domain, "NEW_NAME": info.name}
if not target_dir.exists():
target_dir.mkdir()
for source_file in src_dir.glob("**/*"):
content = source_file.read_text()
for to_search, to_replace in replaces.items():
content = content.replace(to_search, to_replace)
target_file = target_dir / source_file.relative_to(src_dir)
# If the target file exists, create our template as EXAMPLE_<filename>.
        # Exception: if we are creating a new integration, we can end up running the
        # integration base template and a config flow template on top of one another.
        # In that case, we want to overwrite the files.
if not info.is_new and target_file.exists():
new_name = f"EXAMPLE_{target_file.name}"
print(f"File {target_file} already exists, creating {new_name} instead.")
target_file = target_file.parent / new_name
info.examples_added.add(target_file)
elif src_dir.name == "integration":
info.files_added.add(target_file)
else:
info.tests_added.add(target_file)
print(f"Writing {target_file}")
target_file.write_text(content)
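# Illustrative example (hypothetical values): with info.domain == "my_widget" and
# info.name == "My Widget", a template line such as
#     DOMAIN = "NEW_DOMAIN"
# would be written to the target file as
#     DOMAIN = "my_widget"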
def _ensure_tests_dir_exists(info: Info) -> None:
"""Ensure a test dir exists."""
if info.tests_dir.exists():
return
info.tests_dir.mkdir()
print(f"Writing {info.tests_dir / '__init__.py'}")
(info.tests_dir / "__init__.py").write_text(
f'"""Tests for the {info.name} integration."""\n'
)
def _append(path: Path, text):
"""Append some text to a path."""
path.write_text(path.read_text() + text)
def _custom_tasks(template, info) -> None:
"""Handle custom tasks for templates."""
if template == "integration":
changes = {"codeowners": [info.codeowner]}
if info.requirement:
changes["requirements"] = [info.requirement]
info.update_manifest(**changes)
elif template == "device_trigger":
info.update_strings(
device_automation={
**info.strings().get("device_automation", {}),
"trigger_type": {
"turned_on": "{entity_name} turned on",
"turned_off": "{entity_name} turned off",
},
}
)
elif template == "device_condition":
info.update_strings(
device_automation={
**info.strings().get("device_automation", {}),
"condition_type": {
"is_on": "{entity_name} is on",
"is_off": "{entity_name} is off",
},
}
)
elif template == "device_action":
info.update_strings(
device_automation={
**info.strings().get("device_automation", {}),
"action_type": {
"turn_on": "Turn on {entity_name}",
"turn_off": "Turn off {entity_name}",
},
}
)
elif template == "config_flow":
info.update_manifest(config_flow=True)
info.update_strings(
title=info.name,
config={
"step": {
"user": {
"data": {
"host": "[%key:common::config_flow::data::host%]",
"username": "[%key:common::config_flow::data::username%]",
"password": "[%key:common::config_flow::data::password%]",
},
}
},
"error": {
"cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
"invalid_auth": "[%key:common::config_flow::error::invalid_auth%]",
"unknown": "[%key:common::config_flow::error::unknown%]",
},
"abort": {
"already_configured": "[%key:common::config_flow::abort::already_configured_device%]"
},
},
)
elif template == "config_flow_discovery":
info.update_manifest(config_flow=True)
info.update_strings(
title=info.name,
config={
"step": {
"confirm": {
"description": "[%key:common::config_flow::description::confirm_setup%]",
}
},
"abort": {
"single_instance_allowed": "[%key:common::config_flow::abort::single_instance_allowed%]",
"no_devices_found": "[%key:common::config_flow::abort::no_devices_found%]",
},
},
)
elif template == "config_flow_oauth2":
info.update_manifest(config_flow=True, dependencies=["http"])
info.update_strings(
title=info.name,
config={
"step": {
"pick_implementation": {
"title": "[%key:common::config_flow::title::oauth2_pick_implementation%]"
}
},
"abort": {
"missing_configuration": "[%key:common::config_flow::abort::oauth2_missing_configuration%]",
"authorize_url_timeout": "[%key:common::config_flow::abort::oauth2_authorize_url_timeout%]",
"no_url_available": "[%key:common::config_flow::abort::oauth2_no_url_available%]",
},
"create_entry": {
"default": "[%key:common::config_flow::create_entry::authenticated%]"
},
},
)
|
import sys
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import arrow
from flask import current_app
from sqlalchemy import and_
from sqlalchemy.sql.expression import false, true
from lemur import database
from lemur.certificates import service as certificates_service
from lemur.certificates.models import Certificate
from lemur.certificates.schemas import certificate_notification_output_schema
from lemur.common.utils import windowed_query, is_selfsigned
from lemur.constants import FAILURE_METRIC_STATUS, SUCCESS_METRIC_STATUS
from lemur.extensions import metrics, sentry
from lemur.pending_certificates.schemas import pending_certificate_output_schema
from lemur.plugins import plugins
from lemur.plugins.utils import get_plugin_option
def get_certificates(exclude=None):
"""
Finds all certificates that are eligible for expiration notifications.
:param exclude:
:return:
"""
now = arrow.utcnow()
    max_not_after = now + timedelta(days=90)
q = (
database.db.session.query(Certificate)
        .filter(Certificate.not_after <= max_not_after)
.filter(Certificate.notify == true())
.filter(Certificate.expired == false())
.filter(Certificate.revoked == false())
)
exclude_conditions = []
if exclude:
for e in exclude:
exclude_conditions.append(~Certificate.name.ilike("%{}%".format(e)))
q = q.filter(and_(*exclude_conditions))
certs = []
for c in windowed_query(q, Certificate.id, 10000):
if needs_notification(c):
certs.append(c)
return certs
def get_certificates_for_security_summary_email(exclude=None):
"""
Finds all certificates that are eligible for expiration notifications for the security expiration summary.
:param exclude:
:return:
"""
now = arrow.utcnow()
threshold_days = current_app.config.get("LEMUR_EXPIRATION_SUMMARY_EMAIL_THRESHOLD_DAYS", 14)
max_not_after = now + timedelta(days=threshold_days + 1)
q = (
database.db.session.query(Certificate)
.filter(Certificate.not_after <= max_not_after)
.filter(Certificate.notify == true())
.filter(Certificate.expired == false())
.filter(Certificate.revoked == false())
)
exclude_conditions = []
if exclude:
for e in exclude:
exclude_conditions.append(~Certificate.name.ilike("%{}%".format(e)))
q = q.filter(and_(*exclude_conditions))
certs = []
for c in windowed_query(q, Certificate.id, 10000):
days_remaining = (c.not_after - now).days
if days_remaining <= threshold_days:
certs.append(c)
return certs
def get_expiring_authority_certificates():
"""
Finds all certificate authority certificates that are eligible for expiration notifications.
:return:
"""
now = arrow.utcnow()
authority_expiration_intervals = current_app.config.get("LEMUR_AUTHORITY_CERT_EXPIRATION_EMAIL_INTERVALS",
[365, 180])
max_not_after = now + timedelta(days=max(authority_expiration_intervals) + 1)
q = (
database.db.session.query(Certificate)
.filter(Certificate.not_after < max_not_after)
.filter(Certificate.notify == true())
.filter(Certificate.expired == false())
.filter(Certificate.revoked == false())
.filter(Certificate.root_authority_id.isnot(None))
.filter(Certificate.authority_id.is_(None))
)
certs = []
for c in windowed_query(q, Certificate.id, 10000):
days_remaining = (c.not_after - now).days
if days_remaining in authority_expiration_intervals:
certs.append(c)
return certs
def get_eligible_certificates(exclude=None):
"""
Finds all certificates that are eligible for certificate expiration notification.
Returns the set of all eligible certificates, grouped by owner, with a list of applicable notifications.
:param exclude:
:return:
"""
certificates = defaultdict(dict)
certs = get_certificates(exclude=exclude)
# group by owner
for owner, items in groupby(certs, lambda x: x.owner):
notification_groups = []
for certificate in items:
notifications = needs_notification(certificate)
if notifications:
for notification in notifications:
notification_groups.append((notification, certificate))
# group by notification
for notification, items in groupby(notification_groups, lambda x: x[0].label):
certificates[owner][notification] = list(items)
return certificates
def get_eligible_security_summary_certs(exclude=None):
certificates = defaultdict(list)
all_certs = get_certificates_for_security_summary_email(exclude=exclude)
now = arrow.utcnow()
# group by expiration interval
for interval, interval_certs in groupby(all_certs, lambda x: (x.not_after - now).days):
certificates[interval] = list(interval_certs)
return certificates
def get_eligible_authority_certificates():
"""
Finds all certificate authority certificates that are eligible for certificate expiration notification.
Returns the set of all eligible CA certificates, grouped by owner and interval, with a list of applicable certs.
:return:
"""
certificates = defaultdict(dict)
all_certs = get_expiring_authority_certificates()
now = arrow.utcnow()
# group by owner
for owner, owner_certs in groupby(all_certs, lambda x: x.owner):
# group by expiration interval
for interval, interval_certs in groupby(owner_certs, lambda x: (x.not_after - now).days):
certificates[owner][interval] = list(interval_certs)
return certificates
def send_plugin_notification(event_type, data, recipients, notification):
"""
Executes the plugin and handles failure.
:param event_type:
:param data:
:param recipients:
:param notification:
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {
"function": function,
"message": f"Sending {event_type} notification for to recipients {recipients}",
"notification_type": event_type,
"notification_plugin": notification.plugin.slug,
"certificate_targets": recipients,
}
status = FAILURE_METRIC_STATUS
try:
current_app.logger.debug(log_data)
notification.plugin.send(event_type, data, recipients, notification.options)
status = SUCCESS_METRIC_STATUS
except Exception as e:
log_data["message"] = f"Unable to send {event_type} notification to recipients {recipients}"
current_app.logger.error(log_data, exc_info=True)
sentry.captureException()
metrics.send(
"notification",
"counter",
1,
metric_tags={"status": status, "event_type": event_type, "plugin": notification.plugin.slug},
)
if status == SUCCESS_METRIC_STATUS:
return True
def send_expiration_notifications(exclude):
"""
This function will check for upcoming certificate expiration,
and send out notification emails at given intervals.
"""
success = failure = 0
# security team gets all
security_email = current_app.config.get("LEMUR_SECURITY_TEAM_EMAIL")
for owner, notification_group in get_eligible_certificates(exclude=exclude).items():
for notification_label, certificates in notification_group.items():
notification_data = []
notification = certificates[0][0]
for data in certificates:
n, certificate = data
cert_data = certificate_notification_output_schema.dump(
certificate
).data
notification_data.append(cert_data)
email_recipients = notification.plugin.get_recipients(notification.options, security_email + [owner])
# Plugin will ONLY use the provided recipients if it's email; any other notification plugin ignores them
if send_plugin_notification(
"expiration", notification_data, email_recipients, notification
):
success += len(email_recipients)
else:
failure += len(email_recipients)
# If we're using an email plugin, we're done,
# since "security_email + [owner]" were added as email_recipients.
# If we're not using an email plugin, we also need to send an email to the security team and owner,
# since the plugin notification didn't send anything to them.
if notification.plugin.slug != "email-notification":
if send_default_notification(
"expiration", notification_data, email_recipients, notification.options
):
success = 1 + len(email_recipients)
else:
failure = 1 + len(email_recipients)
return success, failure
def send_authority_expiration_notifications():
"""
This function will check for upcoming certificate authority certificate expiration,
and send out notification emails at configured intervals.
"""
success = failure = 0
# security team gets all
security_email = current_app.config.get("LEMUR_SECURITY_TEAM_EMAIL")
for owner, owner_cert_groups in get_eligible_authority_certificates().items():
for interval, certificates in owner_cert_groups.items():
notification_data = []
for certificate in certificates:
cert_data = certificate_notification_output_schema.dump(
certificate
).data
cert_data['self_signed'] = is_selfsigned(certificate.parsed_cert)
cert_data['issued_cert_count'] = certificates_service.get_issued_cert_count_for_authority(certificate.root_authority)
notification_data.append(cert_data)
email_recipients = security_email + [owner]
if send_default_notification(
"authority_expiration", notification_data, email_recipients,
notification_options=[{'name': 'interval', 'value': interval}]
):
success = len(email_recipients)
else:
failure = len(email_recipients)
return success, failure
def send_default_notification(notification_type, data, targets, notification_options=None):
"""
Sends a report to the specified target via the default notification plugin. Applicable for any notification_type.
At present, "default" means email, as the other notification plugins do not support dynamically configured targets.
:param notification_type:
:param data:
:param targets:
:param notification_options:
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
status = FAILURE_METRIC_STATUS
notification_plugin = plugins.get(
current_app.config.get("LEMUR_DEFAULT_NOTIFICATION_PLUGIN", "email-notification")
)
log_data = {
"function": function,
"message": f"Sending {notification_type} notification for certificate data {data} to targets {targets}",
"notification_type": notification_type,
"notification_plugin": notification_plugin.slug,
}
try:
current_app.logger.debug(log_data)
        # we need the notification options here because the email templates utilize the interval/unit info
notification_plugin.send(notification_type, data, targets, notification_options)
status = SUCCESS_METRIC_STATUS
except Exception as e:
log_data["message"] = f"Unable to send {notification_type} notification for certificate data {data} " \
f"to targets {targets}"
current_app.logger.error(log_data, exc_info=True)
sentry.captureException()
metrics.send(
"notification",
"counter",
1,
metric_tags={"status": status, "event_type": notification_type, "plugin": notification_plugin.slug},
)
if status == SUCCESS_METRIC_STATUS:
return True
def send_rotation_notification(certificate):
data = certificate_notification_output_schema.dump(certificate).data
return send_default_notification("rotation", data, [data["owner"]])
def send_pending_failure_notification(
pending_cert, notify_owner=True, notify_security=True
):
"""
Sends a report to certificate owners when their pending certificate failed to be created.
:param pending_cert:
:param notify_owner:
:param notify_security:
:return:
"""
data = pending_certificate_output_schema.dump(pending_cert).data
data["security_email"] = current_app.config.get("LEMUR_SECURITY_TEAM_EMAIL")
email_recipients = []
if notify_owner:
email_recipients = email_recipients + [data["owner"]]
if notify_security:
email_recipients = email_recipients + data["security_email"]
return send_default_notification("failed", data, email_recipients, pending_cert)
def needs_notification(certificate):
"""
Determine if notifications for a given certificate should currently be sent.
For each notification configured for the cert, verifies it is active, properly configured,
and that the configured expiration period is currently met.
:param certificate:
:return:
"""
now = arrow.utcnow()
days = (certificate.not_after - now).days
notifications = []
for notification in certificate.notifications:
if not notification.active or not notification.options:
continue
interval = get_plugin_option("interval", notification.options)
unit = get_plugin_option("unit", notification.options)
if unit == "weeks":
interval *= 7
elif unit == "months":
interval *= 30
elif unit == "days": # it's nice to be explicit about the base unit
pass
else:
raise Exception(
f"Invalid base unit for expiration interval: {unit}"
)
if days == interval:
notifications.append(notification)
return notifications
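# Worked example (illustrative values): a notification configured with interval=2 and
# unit="weeks" is returned by needs_notification() only on the day the certificate has
# exactly 14 (2 * 7) days left before its not_after date.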
def send_security_expiration_summary(exclude=None):
"""
Sends a report to the security team with a summary of all expiring certificates.
    All expiring certificates are included regardless of their individually configured notifications,
    but certificates with notifications disabled entirely are omitted.
:param exclude:
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
status = FAILURE_METRIC_STATUS
notification_plugin = plugins.get(
current_app.config.get("LEMUR_DEFAULT_NOTIFICATION_PLUGIN", "email-notification")
)
notification_type = "expiration_summary"
log_data = {
"function": function,
"message": "Sending expiration summary notification for to security team",
"notification_type": notification_type,
"notification_plugin": notification_plugin.slug,
}
intervals_and_certs = get_eligible_security_summary_certs(exclude)
security_email = current_app.config.get("LEMUR_SECURITY_TEAM_EMAIL")
try:
current_app.logger.debug(log_data)
message_data = []
for interval, certs in intervals_and_certs.items():
cert_data = []
for certificate in certs:
cert_data.append(certificate_notification_output_schema.dump(certificate).data)
interval_data = {"interval": interval, "certificates": cert_data}
message_data.append(interval_data)
notification_plugin.send(notification_type, message_data, security_email, None)
status = SUCCESS_METRIC_STATUS
except Exception:
log_data["message"] = f"Unable to send {notification_type} notification for certificates " \
f"{intervals_and_certs} to targets {security_email}"
current_app.logger.error(log_data, exc_info=True)
sentry.captureException()
metrics.send(
"notification",
"counter",
1,
metric_tags={"status": status, "event_type": notification_type, "plugin": notification_plugin.slug},
)
if status == SUCCESS_METRIC_STATUS:
return True
|
from datetime import timedelta
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.utility_meter.const import (
ATTR_TARIFF,
DOMAIN,
SERVICE_RESET,
SERVICE_SELECT_NEXT_TARIFF,
SERVICE_SELECT_TARIFF,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
async def test_services(hass):
"""Test energy sensor reset service."""
config = {
"utility_meter": {
"energy_bill": {
"source": "sensor.energy",
"cycle": "hourly",
"tariffs": ["peak", "offpeak"],
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
entity_id = config[DOMAIN]["energy_bill"]["source"]
hass.states.async_set(
entity_id, 1, {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR}
)
await hass.async_block_till_done()
now = dt_util.utcnow() + timedelta(seconds=10)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
3,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_peak")
assert state.state == "2"
state = hass.states.get("sensor.energy_bill_offpeak")
assert state.state == "0"
# Next tariff
data = {ATTR_ENTITY_ID: "utility_meter.energy_bill"}
await hass.services.async_call(DOMAIN, SERVICE_SELECT_NEXT_TARIFF, data)
await hass.async_block_till_done()
now += timedelta(seconds=10)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
4,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_peak")
assert state.state == "2"
state = hass.states.get("sensor.energy_bill_offpeak")
assert state.state == "1"
# Change tariff
data = {ATTR_ENTITY_ID: "utility_meter.energy_bill", ATTR_TARIFF: "peak"}
await hass.services.async_call(DOMAIN, SERVICE_SELECT_TARIFF, data)
await hass.async_block_till_done()
now += timedelta(seconds=10)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
5,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_peak")
assert state.state == "3"
state = hass.states.get("sensor.energy_bill_offpeak")
assert state.state == "1"
# Reset meters
data = {ATTR_ENTITY_ID: "utility_meter.energy_bill"}
await hass.services.async_call(DOMAIN, SERVICE_RESET, data)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_peak")
assert state.state == "0"
state = hass.states.get("sensor.energy_bill_offpeak")
assert state.state == "0"
|
import six
from stash.tests.stashtest import StashTestCase
class WheelsTests(StashTestCase):
"""tests fpr the wheel-support."""
def test_wheel_is_compatible(self):
"""test wheel_is_compatible() result"""
from stashutils import wheels
wheelnamecompatibility = [
("package-1.0.0-py2.py3-none-any.whl", True), # full compatibility
("package-1.0.0-2-py2.py3-none-any.whl", True), # full compatible with build tag
("package-1.0.0-py2-none-any.whl", not six.PY3), # only py2 compatible
("package-1.0.0-py3-none-any.whl", six.PY3), # only py3 compatible
("package-1.0.0-py2.py3-cp33m-any.whl", False), # incompatible abi-tag
("package-1.0.0-py2.py3-none-linux_x86_64.whl", False), # incompatible platform tag
("package-1.0.0-py2.py3-cp33m-linux_x86_64.whl", False), # incompatible abi and platform tags
("package-1.0.0-cpy2-none-any.whl", False), # cpython 2 incompatibility
("package-1.0.0-cpy3-none-any.whl", False), # cpython 3 incompatibility
]
for wheelname, is_compatible in wheelnamecompatibility:
ic = wheels.wheel_is_compatible(wheelname)
self.assertEqual(ic, is_compatible)
def test_wheel_is_compatible_raises(self):
"""test wheel_is_compatible() error handling"""
from stashutils import wheels
wrong_wheelnames = [
"nonwheel-1.0.0-py2.py3-none-any.txt",
"noabi-1.0.0-py2.py3-any.whl",
"toomany-1.0.0.-py2.py3-none-any-extra-fields.whl",
]
for wheelname in wrong_wheelnames:
try:
wheels.wheel_is_compatible(wheelname)
except wheels.WheelError:
pass
else:
raise AssertionError("wheels.wheel_is_compatible() did not raise WheelError when required.")
def test_parse_wheel_name(self):
"""test parse_wheel_name()"""
from stashutils import wheels
name1 = "distribution-version-buildtag-pythontag-abitag-platformtag.whl"
result1 = wheels.parse_wheel_name(name1)
expected1 = {
"distribution": "distribution",
"version": "version",
"build_tag": "buildtag",
"python_tag": "pythontag",
"abi_tag": "abitag",
"platform_tag": "platformtag",
}
self.assertEqual(result1, expected1)
name2 = "stashutils-0.7.0-py2.py3-none-any.whl"
result2 = wheels.parse_wheel_name(name2)
expected2 = {
"distribution": "stashutils",
"version": "0.7.0",
"build_tag": None,
"python_tag": "py2.py3",
"abi_tag": "none",
"platform_tag": "any",
}
self.assertEqual(result2, expected2)
def test_generate_filename(self):
"""test generate_filename()"""
from stashutils import wheels
data = {
"distribution": "somepackage",
"version": "1.0.0",
"python_tag": "py27",
}
expected = "somepackage-1.0.0-py27-none-any.whl"
result = wheels.generate_filename(**data)
self.assertEqual(result, expected)
|
from marshmallow import fields, post_dump
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.schemas import PluginInputSchema, PluginOutputSchema
class DestinationInputSchema(LemurInputSchema):
id = fields.Integer()
label = fields.String(required=True)
description = fields.String(required=True)
active = fields.Boolean()
plugin = fields.Nested(PluginInputSchema, required=True)
class DestinationOutputSchema(LemurOutputSchema):
id = fields.Integer()
label = fields.String()
description = fields.String()
active = fields.Boolean()
plugin = fields.Nested(PluginOutputSchema)
options = fields.List(fields.Dict())
@post_dump
def fill_object(self, data):
if data:
data["plugin"]["pluginOptions"] = data["options"]
for option in data["plugin"]["pluginOptions"]:
if "export-plugin" in option["type"]:
option["value"]["pluginOptions"] = option["value"]["plugin_options"]
return data
class DestinationNestedOutputSchema(DestinationOutputSchema):
__envelope__ = False
destination_input_schema = DestinationInputSchema()
destinations_output_schema = DestinationOutputSchema(many=True)
destination_output_schema = DestinationOutputSchema()
|
import pytest
from homeassistant.components.media_player.const import DOMAIN, SUPPORT_TURN_ON
from homeassistant.components.samsungtv.const import (
CONF_ON_ACTION,
DOMAIN as SAMSUNGTV_DOMAIN,
)
from homeassistant.components.samsungtv.media_player import SUPPORT_SAMSUNGTV
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_HOST,
CONF_NAME,
SERVICE_VOLUME_UP,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, call, patch
ENTITY_ID = f"{DOMAIN}.fake_name"
MOCK_CONFIG = {
SAMSUNGTV_DOMAIN: [
{
CONF_HOST: "fake_host",
CONF_NAME: "fake_name",
CONF_ON_ACTION: [{"delay": "00:00:01"}],
}
]
}
REMOTE_CALL = {
"name": "HomeAssistant",
"description": "HomeAssistant",
"id": "ha.component.samsung",
"method": "legacy",
"host": MOCK_CONFIG[SAMSUNGTV_DOMAIN][0][CONF_HOST],
"port": None,
"timeout": 1,
}
@pytest.fixture(name="remote")
def remote_fixture():
"""Patch the samsungctl Remote."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote"
) as remote_class, patch(
"homeassistant.components.samsungtv.config_flow.socket"
) as socket1, patch(
"homeassistant.components.samsungtv.socket"
) as socket2:
remote = Mock()
remote.__enter__ = Mock()
remote.__exit__ = Mock()
remote_class.return_value = remote
socket1.gethostbyname.return_value = "FAKE_IP_ADDRESS"
socket2.gethostbyname.return_value = "FAKE_IP_ADDRESS"
yield remote
async def test_setup(hass, remote):
"""Test Samsung TV integration is setup."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
await async_setup_component(hass, SAMSUNGTV_DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
# test name and turn_on
assert state
assert state.name == "fake_name"
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON
)
# test host and port
assert await hass.services.async_call(
DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
)
assert remote.call_args == call(REMOTE_CALL)
async def test_setup_duplicate_config(hass, remote, caplog):
"""Test duplicate setup of platform."""
DUPLICATE = {
SAMSUNGTV_DOMAIN: [
MOCK_CONFIG[SAMSUNGTV_DOMAIN][0],
MOCK_CONFIG[SAMSUNGTV_DOMAIN][0],
]
}
await async_setup_component(hass, SAMSUNGTV_DOMAIN, DUPLICATE)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_ID) is None
assert len(hass.states.async_all()) == 0
assert "duplicate host entries found" in caplog.text
async def test_setup_duplicate_entries(hass, remote, caplog):
"""Test duplicate setup of platform."""
await async_setup_component(hass, SAMSUNGTV_DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_ID)
assert len(hass.states.async_all()) == 1
await async_setup_component(hass, SAMSUNGTV_DOMAIN, MOCK_CONFIG)
assert len(hass.states.async_all()) == 1
|
import copy
import os
import warnings
import chainer
from chainercv.visualizations.vis_bbox import vis_bbox
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
class DetectionVisReport(chainer.training.extension.Extension):
"""An extension that visualizes output of a detection model.
This extension visualizes the predicted bounding boxes together with the
ground truth bounding boxes.
Internally, this extension takes examples from an iterator,
    predicts bounding boxes from the images in the examples,
and visualizes them using :meth:`chainercv.visualizations.vis_bbox`.
The process can be illustrated in the following code.
.. code:: python
batch = next(iterator)
# Convert batch -> imgs, gt_bboxes, gt_labels
pred_bboxes, pred_labels, pred_scores = target.predict(imgs)
# Visualization code
for img, gt_bbox, gt_label, pred_bbox, pred_label, pred_score \\
                in zip(imgs, gt_bboxes, gt_labels,
pred_bboxes, pred_labels, pred_scores):
# the ground truth
vis_bbox(img, gt_bbox, gt_label)
# the prediction
vis_bbox(img, pred_bbox, pred_label, pred_score)
.. note::
:obj:`gt_bbox` and :obj:`pred_bbox` are float arrays
of shape :math:`(R, 4)`, where :math:`R` is the number of
bounding boxes in the image. Each bounding box is organized
by :math:`(y_{min}, x_{min}, y_{max}, x_{max})` in the second axis.
        :obj:`gt_label` and :obj:`pred_label` are integer arrays
of shape :math:`(R,)`. Each label indicates the class of
the bounding box.
:obj:`pred_score` is a float array of shape :math:`(R,)`.
Each score indicates how confident the prediction is.
Args:
iterator: Iterator object that produces images and ground truth.
target: Link object used for detection.
label_names (iterable of strings): Name of labels ordered according
to label ids. If this is :obj:`None`, labels will be skipped.
filename (str): Basename for the saved image. It can contain two
keywords, :obj:`'{iteration}'` and :obj:`'{index}'`. They are
replaced with the iteration of the trainer and the index of
the sample when this extension save an image. The default value is
:obj:`'detection_iter={iteration}_idx={index}.jpg'`.
"""
def __init__(
self, iterator, target, label_names=None,
filename='detection_iter={iteration}_idx={index}.jpg'):
_check_available()
self.iterator = iterator
self.target = target
self.label_names = label_names
self.filename = filename
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if _available:
# Dynamically import pyplot so that the backend of matplotlib
# can be configured after importing chainercv.
import matplotlib.pyplot as plt
else:
return
if hasattr(self.iterator, 'reset'):
self.iterator.reset()
it = self.iterator
else:
it = copy.copy(self.iterator)
idx = 0
while True:
try:
batch = next(it)
except StopIteration:
break
imgs = [img for img, _, _ in batch]
pred_bboxes, pred_labels, pred_scores = self.target.predict(imgs)
for (img, gt_bbox, gt_label), pred_bbox, pred_label, pred_score \
in zip(batch, pred_bboxes, pred_labels, pred_scores):
pred_bbox = chainer.backends.cuda.to_cpu(pred_bbox)
pred_label = chainer.backends.cuda.to_cpu(pred_label)
pred_score = chainer.backends.cuda.to_cpu(pred_score)
out_file = self.filename.format(
index=idx, iteration=trainer.updater.iteration)
out_file = os.path.join(trainer.out, out_file)
fig = plt.figure()
ax_gt = fig.add_subplot(2, 1, 1)
ax_gt.set_title('ground truth')
vis_bbox(
img, gt_bbox, gt_label,
label_names=self.label_names, ax=ax_gt)
ax_pred = fig.add_subplot(2, 1, 2)
ax_pred.set_title('prediction')
vis_bbox(
img, pred_bbox, pred_label, pred_score,
label_names=self.label_names, ax=ax_pred)
plt.savefig(out_file, bbox_inches='tight')
plt.close()
idx += 1
|
import logging
from homeassistant.components.cover import ATTR_POSITION, CoverEntity
from homeassistant.components.soma import API, DEVICES, DOMAIN, SomaEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Soma cover platform."""
devices = hass.data[DOMAIN][DEVICES]
async_add_entities(
[SomaCover(cover, hass.data[DOMAIN][API]) for cover in devices], True
)
class SomaCover(SomaEntity, CoverEntity):
"""Representation of a Soma cover device."""
def close_cover(self, **kwargs):
"""Close the cover."""
response = self.api.set_shade_position(self.device["mac"], 100)
if response["result"] != "success":
_LOGGER.error(
"Unable to reach device %s (%s)", self.device["name"], response["msg"]
)
def open_cover(self, **kwargs):
"""Open the cover."""
response = self.api.set_shade_position(self.device["mac"], 0)
if response["result"] != "success":
_LOGGER.error(
"Unable to reach device %s (%s)", self.device["name"], response["msg"]
)
def stop_cover(self, **kwargs):
"""Stop the cover."""
# Set cover position to some value where up/down are both enabled
self.current_position = 50
response = self.api.stop_shade(self.device["mac"])
if response["result"] != "success":
_LOGGER.error(
"Unable to reach device %s (%s)", self.device["name"], response["msg"]
)
def set_cover_position(self, **kwargs):
"""Move the cover shutter to a specific position."""
self.current_position = kwargs[ATTR_POSITION]
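        # Home Assistant treats 100 as fully open, while the Soma API call below
        # appears to treat 100 as fully closed (compare close_cover/open_cover above),
        # hence the inverted "100 - position" value.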
response = self.api.set_shade_position(
self.device["mac"], 100 - kwargs[ATTR_POSITION]
)
if response["result"] != "success":
_LOGGER.error(
"Unable to reach device %s (%s)", self.device["name"], response["msg"]
)
@property
def current_cover_position(self):
"""Return the current position of cover shutter."""
return self.current_position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.current_position == 0
|
from pathlib import Path
from typing import Dict
from .model import Config, Integration
DONT_IGNORE = (
"config_flow.py",
"device_action.py",
"device_condition.py",
"device_trigger.py",
"group.py",
"intent.py",
"logbook.py",
"media_source.py",
"scene.py",
)
# These integrations were already violating this rule when the check was introduced.
# Need to be fixed in a future PR.
ALLOWED_IGNORE_VIOLATIONS = {
("ambient_station", "config_flow.py"),
("cast", "config_flow.py"),
("daikin", "config_flow.py"),
("doorbird", "config_flow.py"),
("doorbird", "logbook.py"),
("elkm1", "config_flow.py"),
("elkm1", "scene.py"),
("fibaro", "scene.py"),
("flume", "config_flow.py"),
("hangouts", "config_flow.py"),
("harmony", "config_flow.py"),
("hisense_aehw4a1", "config_flow.py"),
("home_connect", "config_flow.py"),
("huawei_lte", "config_flow.py"),
("ifttt", "config_flow.py"),
("ios", "config_flow.py"),
("iqvia", "config_flow.py"),
("knx", "scene.py"),
("konnected", "config_flow.py"),
("lcn", "scene.py"),
("life360", "config_flow.py"),
("lifx", "config_flow.py"),
("lutron", "scene.py"),
("mobile_app", "config_flow.py"),
("nest", "config_flow.py"),
("plaato", "config_flow.py"),
("point", "config_flow.py"),
("rachio", "config_flow.py"),
("sense", "config_flow.py"),
("sms", "config_flow.py"),
("solarlog", "config_flow.py"),
("somfy", "config_flow.py"),
("sonos", "config_flow.py"),
("speedtestdotnet", "config_flow.py"),
("spider", "config_flow.py"),
("starline", "config_flow.py"),
("tado", "config_flow.py"),
("tahoma", "scene.py"),
("totalconnect", "config_flow.py"),
("tradfri", "config_flow.py"),
("tuya", "config_flow.py"),
("tuya", "scene.py"),
("upnp", "config_flow.py"),
("velux", "scene.py"),
("wemo", "config_flow.py"),
("wiffi", "config_flow.py"),
("wink", "scene.py"),
}
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate coverage."""
coverage_path = config.root / ".coveragerc"
not_found = []
checking = False
with coverage_path.open("rt") as fp:
for line in fp:
line = line.strip()
if not line or line.startswith("#"):
continue
if not checking:
if line == "omit =":
checking = True
continue
# Finished
if line == "[report]":
break
path = Path(line)
# Discard wildcard
path_exists = path
while "*" in path_exists.name:
path_exists = path_exists.parent
if not path_exists.exists():
not_found.append(line)
continue
if (
not line.startswith("homeassistant/components/")
or not len(path.parts) == 4
or not path.parts[-1] == "*"
):
continue
integration_path = path.parent
integration = integrations[integration_path.name]
for check in DONT_IGNORE:
if (integration_path.name, check) in ALLOWED_IGNORE_VIOLATIONS:
continue
if (integration_path / check).exists():
integration.add_error(
"coverage",
f"{check} must not be ignored by the .coveragerc file",
)
if not not_found:
return
errors = []
if not_found:
errors.append(
f".coveragerc references files that don't exist: {', '.join(not_found)}."
)
raise RuntimeError(" ".join(errors))
|
import json
from pytest_flask.fixtures import client
from tests.resources import (
GET_ERROR_MESSAGE,
INVALID_ACTION_MESSAGE,
)
model_module = 'tests.user_models'
database = 'blog.sqlite3'
def test_validate_get(client):
"""Do we get back an error message when making a GET request that fails
validation?"""
response = client.get('/user/')
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_get_single_resource(client):
"""Do we get back an error message when making a GET request for a
    single resource which fails validation?"""
response = client.get('/user/1')
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_get_datetime(client):
"""Do we get back a properly formatted datetime on a model that defines one?"""
response = client.get('/post/1.0')
assert response.status_code == 200
assert response.json['posted_at'] is not None
def test_validate_post(client):
"""Do we get back an error message when making a POST request that fails
validation?"""
response = client.post(
'/user/',
data=json.dumps({
'name': 'Jeff Knupp',
'email': '[email protected]',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_post_existing_resource(client):
"""Do we get back an error message when making a POST request on a resource that already exists?"""
response = client.post(
'/user/',
data=json.dumps({
'name': 'Jeff Knupp',
'email': '[email protected]',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_put_existing(client):
"""Do we get back an error message when making a PUT request for
    an existing resource?"""
response = client.put(
'/user/1',
data=json.dumps({
'name': 'Jeff Knupp',
'email': '[email protected]',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_put_new(client):
"""Do we get back an error message when making a PUT request for a
totally new resource?"""
response = client.put(
'/user/2',
data=json.dumps({
'name': 'Elissa Knupp',
'email': '[email protected]',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_patch(client):
"""Do we get back an error message when making a PATCH request on an
existing resource?"""
response = client.patch(
'/user/1',
data=json.dumps({
'name': 'Jeff Knupp',
}),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
def test_validate_delete(client):
"""Do we get back an error message when making a DELETE request that fails
validation?"""
response = client.delete('/user/1')
assert response.status_code == 400
assert response.json['message'] == INVALID_ACTION_MESSAGE
|
import argparse
from collections import defaultdict
import os
import chainer
import numpy as np
from chainer.dataset import concat_examples
from chainer.datasets import TransformDataset
from chainer import iterators
from chainer import optimizers
from chainer import training
from chainer.training import extensions
from chainercv.datasets import camvid_label_names
from chainercv.datasets import CamVidDataset
from chainercv.extensions import SemanticSegmentationEvaluator
from chainercv.links import PixelwiseSoftmaxClassifier
from chainercv.links import SegNetBasic
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
try:
import cv2
cv2.setNumThreads(0)
except ImportError:
pass
def recalculate_bn_statistics(model, batchsize):
train = CamVidDataset(split='train')
it = chainer.iterators.SerialIterator(
train, batchsize, repeat=False, shuffle=False)
bn_avg_mean = defaultdict(np.float32)
bn_avg_var = defaultdict(np.float32)
n_iter = 0
for batch in it:
imgs, _ = concat_examples(batch)
model(model.xp.array(imgs))
for name, link in model.namedlinks():
if name.endswith('_bn'):
bn_avg_mean[name] += link.avg_mean
bn_avg_var[name] += link.avg_var
n_iter += 1
for name, link in model.namedlinks():
if name.endswith('_bn'):
link.avg_mean = bn_avg_mean[name] / n_iter
link.avg_var = bn_avg_var[name] / n_iter
return model
def transform(in_data):
img, label = in_data
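    # Random horizontal flip (data augmentation): with probability 0.5, reverse the
    # width axis of both the CHW image and the HW label.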
if np.random.rand() > 0.5:
img = img[:, :, ::-1]
label = label[:, ::-1]
return img, label
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=12)
parser.add_argument('--class-weight', type=str, default='class_weight.npy')
parser.add_argument('--out', type=str, default='result')
args = parser.parse_args()
# Triggers
log_trigger = (50, 'iteration')
validation_trigger = (2000, 'iteration')
end_trigger = (16000, 'iteration')
# Dataset
train = CamVidDataset(split='train')
train = TransformDataset(train, transform)
val = CamVidDataset(split='val')
# Iterator
train_iter = iterators.MultiprocessIterator(train, args.batchsize)
val_iter = iterators.MultiprocessIterator(
val, args.batchsize, shuffle=False, repeat=False)
# Model
class_weight = np.load(args.class_weight)
model = SegNetBasic(n_class=len(camvid_label_names))
model = PixelwiseSoftmaxClassifier(
model, class_weight=class_weight)
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Optimizer
optimizer = optimizers.MomentumSGD(lr=0.1, momentum=0.9)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))
# Updater
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
# Trainer
trainer = training.Trainer(updater, end_trigger, out=args.out)
trainer.extend(extensions.LogReport(trigger=log_trigger))
trainer.extend(extensions.observe_lr(), trigger=log_trigger)
trainer.extend(extensions.dump_graph('main/loss'))
if extensions.PlotReport.available():
trainer.extend(extensions.PlotReport(
['main/loss'], x_key='iteration',
file_name='loss.png'))
trainer.extend(extensions.PlotReport(
['validation/main/miou'], x_key='iteration',
file_name='miou.png'))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'elapsed_time', 'lr',
'main/loss', 'validation/main/miou',
'validation/main/mean_class_accuracy',
'validation/main/pixel_accuracy']),
trigger=log_trigger)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
SemanticSegmentationEvaluator(
val_iter, model.predictor,
camvid_label_names),
trigger=validation_trigger)
trainer.run()
chainer.serializers.save_npz(
os.path.join(args.out, 'snapshot_model.npz'),
recalculate_bn_statistics(model.predictor, 24))
if __name__ == '__main__':
main()
|
import unittest
from trashcli.put import TrashPutCmd
from trashcli.put import TopDirRelativePaths, AbsolutePaths
from trashcli.put import TopTrashDirWriteRules, all_is_ok_checker
from unit_tests.myStringIO import StringIO
from integration_tests.asserts import assert_equals_with_unidiff
from textwrap import dedent
from mock import Mock, call
class TestTrashPutTrashDirectory(unittest.TestCase):
def setUp(self):
parent_path = lambda _ : None
volume_of = lambda _ : '/'
self.try_trash_file_using_candidates = Mock()
self.cmd = TrashPutCmd(None,
None,
{'XDG_DATA_HOME':'~/xdh'},
volume_of,
parent_path,
None,
None,
None,
None)
self.cmd.getuid = lambda : '123'
self.cmd.try_trash_file_using_candidates = self.try_trash_file_using_candidates
def test_normally(self):
self.cmd.run(['trash-put', 'file'])
assert [call('file', '/', [
('~/xdh/Trash', '/', AbsolutePaths, all_is_ok_checker),
('/.Trash/123', '/', TopDirRelativePaths, TopTrashDirWriteRules),
('/.Trash-123', '/', TopDirRelativePaths, all_is_ok_checker),
])] == self.try_trash_file_using_candidates.mock_calls
def test_with_a_specified_trashdir(self):
self.cmd.run(['trash-put', '--trash-dir=/Trash2', 'file'])
assert [call('file', '/', [
('/Trash2', '/', TopDirRelativePaths, all_is_ok_checker),
])] == self.try_trash_file_using_candidates.mock_calls
class TrashPutTest(unittest.TestCase):
def run_trash_put(self, *arg):
self.stderr = StringIO()
self.stdout = StringIO()
args = ['trash-put'] + list(arg)
cmd = TrashPutCmd(self.stdout,
self.stderr,
None,
None,
None,
None,
None,
None,
None)
self._collect_exit_code(lambda:cmd.run(args))
def _collect_exit_code(self, main_function):
self.exit_code = 0
result=main_function()
if result is not None:
self.exit_code=result
def stderr_should_be(self, expected_err):
assert_equals_with_unidiff(expected_err, self._actual_stderr())
def stdout_should_be(self, expected_out):
assert_equals_with_unidiff(expected_out, self._actual_stdout())
def _actual_stderr(self):
return self.stderr.getvalue()
def _actual_stdout(self):
return self.stdout.getvalue()
class TestWhenNoArgs(TrashPutTest):
def setUp(self):
self.run_trash_put()
def test_should_report_usage(self):
assert_line_in_text('Usage: trash-put [OPTION]... FILE...',
self.stderr.getvalue())
def test_exit_code_should_be_not_zero(self):
assert 2 == self.exit_code
class TestTrashPutWithWrongOption(TrashPutTest):
def test_something(self):
self.run_trash_put('--wrong-option')
self.stderr_should_be(dedent('''\
Usage: trash-put [OPTION]... FILE...
trash-put: error: no such option: --wrong-option
'''))
self.stdout_should_be('')
assert 2 == self.exit_code
def assert_line_in_text(expected_line, text):
assert expected_line in text.splitlines(), (
'Line not found in text\n'
'line: %s\n' % expected_line +
'text:\n%s\n' % format(text.splitlines()))
class TestTrashPutCmd(TrashPutTest):
def test_on_help_option_print_help(self):
self.run_trash_put('--help')
self.stdout_should_be(dedent('''\
Usage: trash-put [OPTION]... FILE...
Put files in trash
Options:
--version show program's version number and exit
-h, --help show this help message and exit
-d, --directory ignored (for GNU rm compatibility)
-f, --force silently ignore nonexistent files
-i, --interactive ignored (for GNU rm compatibility)
-r, -R, --recursive ignored (for GNU rm compatibility)
--trash-dir=TRASHDIR use TRASHDIR as trash folder
-v, --verbose explain what is being done
To remove a file whose name starts with a '-', for example '-foo',
use one of these commands:
trash -- -foo
trash ./-foo
Report bugs to https://github.com/andreafrancia/trash-cli/issues
'''))
def test_it_should_skip_dot_entry(self):
self.run_trash_put('.')
self.stderr_should_be("trash-put: cannot trash directory '.'\n")
def test_it_should_skip_dotdot_entry(self):
self.run_trash_put('..')
self.stderr_should_be("trash-put: cannot trash directory '..'\n")
def test_it_should_print_usage_on_no_argument(self):
self.run_trash_put()
self.stderr_should_be(
'Usage: trash-put [OPTION]... FILE...\n'
'\n'
'trash-put: error: Please specify the files to trash.\n')
self.stdout_should_be('')
def test_it_should_skip_missing_files(self):
self.run_trash_put('-f', 'this_file_does_not_exist', 'nor_does_this_file')
self.stderr_should_be('')
self.stdout_should_be('')
|
import os
import unittest
import mock
import time
from kalliope.core.NeuronModule import MissingParameterException
from kalliope.neurons.shell.shell import Shell
from kalliope.core.NeuronModule import NeuronModule
class TestShell(unittest.TestCase):
def setUp(self):
self.cmd = "cmd"
self.random = "random"
self.test_file = "/tmp/kalliope_text_shell.txt"
def testParameters(self):
def run_test(parameters_to_test):
with self.assertRaises(MissingParameterException):
Shell(**parameters_to_test)
# empty
parameters = dict()
run_test(parameters)
# missing cmd
parameters = {
"random": self.random
}
run_test(parameters)
def test_shell_returned_code(self):
"""
To test that the shell neuron works, we ask it to create a file
"""
parameters = {
"cmd": "touch %s" % self.test_file
}
with mock.patch.object(NeuronModule, 'say', return_value=None) as mock_method:
shell = Shell(**parameters)
self.assertTrue(os.path.isfile(self.test_file))
self.assertEqual(shell.returncode, 0)
# remove the test file
os.remove(self.test_file)
def test_shell_content(self):
"""
        Test that we can read the content produced by the launched command
"""
text_to_write = 'kalliope'
# we write a content into a file
with open(self.test_file, 'w') as myFile:
myFile.write(text_to_write)
# get the output with the neuron
parameters = {
"cmd": "cat %s" % self.test_file
}
with mock.patch.object(NeuronModule, 'say', return_value=None) as mock_method:
shell = Shell(**parameters)
self.assertEqual(shell.output, text_to_write)
self.assertEqual(shell.returncode, 0)
# remove the test file
os.remove(self.test_file)
def test_async_shell(self):
"""
Test that the neuron can run a shell command asynchronously
"""
parameters = {
"cmd": "touch %s" % self.test_file,
"async": True
}
Shell(**parameters)
        # give the thread time to perform the action
time.sleep(0.5)
self.assertTrue(os.path.isfile(self.test_file))
# remove the test file
os.remove(self.test_file)
if __name__ == '__main__':
unittest.main()
|
import mock
from slackclient import SlackClient
from paasta_tools.slack import PaastaSlackClient
@mock.patch("slackclient.SlackClient", autospec=True)
def test_slack_client_doesnt_post_with_no_token(mock_SlackClient):
psc = PaastaSlackClient(token=None)
assert psc.post(channels=["foo"], message="bar") == []
assert mock_SlackClient.api_call.call_count == 0
def test_slack_client_posts_to_multiple_channels():
fake_sc = mock.create_autospec(SlackClient)
fake_sc.api_call.side_effect = ({"ok": True}, {"ok": False, "error": "blah"})
with mock.patch(
"paasta_tools.slack.SlackClient", autospec=True, return_value=fake_sc
):
psc = PaastaSlackClient(token="fake_token")
assert psc.post(channels=["1", "2"], message="bar") == [
{"ok": True},
{"ok": False, "error": "blah"},
]
assert fake_sc.api_call.call_count == 2, fake_sc.call_args
|
from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext import chinese_nlp, CorpusDF
from scattertext import whitespace_nlp
from scattertext.CorpusFromPandas import CorpusFromPandas
def get_docs_categories():
documents = [u"What art thou that usurp'st this time of night,",
u'Together with that fair and warlike form',
u'In which the majesty of buried Denmark',
u'Did sometimes march? by heaven I charge thee, speak!',
u'Halt! Who goes there?',
u'[Intro]',
u'It is I sire Tone from Brooklyn.',
u'Well, speak up man what is it?',
u'News from the East sire! THE BEST OF BOTH WORLDS HAS RETURNED!',
u'Speak up, speak up, this is a repeat bigram.'
]
categories = ['hamlet'] * 4 + ['jay-z/r. kelly'] * 5 + ['???']
return categories, documents
class TestCorpusFromPandas(TestCase):
def test_term_doc(self):
self.assertIsInstance(self.corpus, CorpusDF)
self.assertEqual(set(self.corpus.get_categories()),
set(['hamlet', 'jay-z/r. kelly', '???']))
self.assertEqual(self.corpus.get_num_docs(), 10)
term_doc_df = self.corpus.get_term_freq_df()
self.assertEqual(term_doc_df.loc['of'].sum(), 3)
self.corpus.get_df()
def test_chinese_error(self):
with self.assertRaises(Exception):
CorpusFromPandas(self.df,
'category',
'text',
nlp=chinese_nlp).build()
def test_get_texts(self):
self.assertTrue(all(self.df['text'] == self.corpus.get_texts()))
def test_search(self):
expected = pd.DataFrame({'text': ["What art thou that usurp'st this time of night,",
"Together with that fair and warlike form"],
'category': ['hamlet', 'hamlet'],
'index': [0, 1]})
self.assertIsInstance(self.corpus, CorpusDF)
returned = self.corpus.search('that')
pd.testing.assert_frame_equal(expected, returned[expected.columns])
def test_search_bigram(self):
expected = pd.DataFrame({'text': [u'Well, speak up man what is it?',
u'Speak up, speak up, this is a repeat bigram.'],
'category': ['jay-z/r. kelly', '???'],
'index': [7, 9]}).reset_index(drop=True)
self.assertIsInstance(self.corpus, CorpusDF)
returned = self.corpus.search('speak up').reset_index(drop=True)
pd.testing.assert_frame_equal(expected,
returned[expected.columns])
def test_search_index(self):
expected = np.array([7, 9])
self.assertIsInstance(self.corpus, CorpusDF)
returned = self.corpus.search_index('speak up')
np.testing.assert_array_equal(expected, returned)
@classmethod
def setUp(cls):
categories, documents = get_docs_categories()
cls.df = pd.DataFrame({'category': categories,
'text': documents})
cls.corpus = CorpusFromPandas(cls.df,
'category',
'text',
nlp=whitespace_nlp).build()
|
import argparse
import copy
import numpy as np
import chainer
from chainer.datasets import ConcatenatedDataset
from chainer.datasets import TransformDataset
from chainer.optimizer_hooks import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from chainercv.links.model.ssd import multibox_loss
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv import transforms
from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.links.model.ssd import random_distort
from chainercv.links.model.ssd import resize_with_random_interpolation
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
import cv2
cv2.setNumThreads(0)
class MultiboxTrainChain(chainer.Chain):
    """Chain that wraps an SSD model and computes the multibox training loss."""
def __init__(self, model, alpha=1, k=3):
super(MultiboxTrainChain, self).__init__()
with self.init_scope():
self.model = model
self.alpha = alpha
self.k = k
def forward(self, imgs, gt_mb_locs, gt_mb_labels):
mb_locs, mb_confs = self.model(imgs)
loc_loss, conf_loss = multibox_loss(
mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
loss = loc_loss * self.alpha + conf_loss
chainer.reporter.report(
{'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
self)
return loss
class Transform(object):
    """Data augmentation and target-encoding pipeline for SSD training."""
def __init__(self, coder, size, mean):
        # Copy the coder so it can be moved to the CPU without touching the model's copy.
self.coder = copy.copy(coder)
self.coder.to_cpu()
self.size = size
self.mean = mean
def __call__(self, in_data):
# There are five data augmentation steps
# 1. Color augmentation
# 2. Random expansion
# 3. Random cropping
# 4. Resizing with random interpolation
# 5. Random horizontal flipping
img, bbox, label = in_data
# 1. Color augmentation
img = random_distort(img)
# 2. Random expansion
if np.random.randint(2):
img, param = transforms.random_expand(
img, fill=self.mean, return_param=True)
bbox = transforms.translate_bbox(
bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])
# 3. Random cropping
img, param = random_crop_with_bbox_constraints(
img, bbox, return_param=True)
bbox, param = transforms.crop_bbox(
bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],
allow_outside_center=False, return_param=True)
label = label[param['index']]
        # 4. Resizing with random interpolation
_, H, W = img.shape
img = resize_with_random_interpolation(img, (self.size, self.size))
bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))
# 5. Random horizontal flipping
img, params = transforms.random_flip(
img, x_random=True, return_param=True)
bbox = transforms.flip_bbox(
bbox, (self.size, self.size), x_flip=params['x_flip'])
# Preparation for SSD network
img -= self.mean
mb_loc, mb_label = self.coder.encode(bbox, label)
return img, mb_loc, mb_label
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--iteration', type=int, default=120000)
parser.add_argument('--step', type=int, nargs='*', default=[80000, 100000])
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--out', default='result')
parser.add_argument('--resume')
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(
n_fg_class=len(voc_bbox_label_names),
pretrained_model='imagenet')
elif args.model == 'ssd512':
model = SSD512(
n_fg_class=len(voc_bbox_label_names),
pretrained_model='imagenet')
model.use_preset('evaluate')
train_chain = MultiboxTrainChain(model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
train = TransformDataset(
ConcatenatedDataset(
VOCBboxDataset(year='2007', split='trainval'),
VOCBboxDataset(year='2012', split='trainval')
),
Transform(model.coder, model.insize, model.mean))
train_iter = chainer.iterators.MultiprocessIterator(train, args.batchsize)
test = VOCBboxDataset(
year='2007', split='test',
use_difficult=True, return_difficult=True)
test_iter = chainer.iterators.SerialIterator(
test, args.batchsize, repeat=False, shuffle=False)
# initial lr is set to 1e-3 by ExponentialShift
optimizer = chainer.optimizers.MomentumSGD()
optimizer.setup(train_chain)
for param in train_chain.params():
if param.name == 'b':
param.update_rule.add_hook(GradientScaling(2))
else:
param.update_rule.add_hook(WeightDecay(0.0005))
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(
updater, (args.iteration, 'iteration'), args.out)
trainer.extend(
extensions.ExponentialShift('lr', 0.1, init=1e-3),
trigger=triggers.ManualScheduleTrigger(args.step, 'iteration'))
trainer.extend(
DetectionVOCEvaluator(
test_iter, model, use_07_metric=True,
label_names=voc_bbox_label_names),
trigger=triggers.ManualScheduleTrigger(
args.step + [args.iteration], 'iteration'))
log_interval = 10, 'iteration'
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'lr',
'main/loss', 'main/loss/loc', 'main/loss/conf',
'validation/main/map']),
trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
extensions.snapshot(),
trigger=triggers.ManualScheduleTrigger(
args.step + [args.iteration], 'iteration'))
trainer.extend(
extensions.snapshot_object(model, 'model_iter_{.updater.iteration}'),
trigger=(args.iteration, 'iteration'))
if args.resume:
serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
|
import asyncio
from hlk_sw16 import create_hlk_sw16_connection
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant
from .const import (
CONNECTION_TIMEOUT,
DEFAULT_KEEP_ALIVE_INTERVAL,
DEFAULT_PORT,
DEFAULT_RECONNECT_INTERVAL,
DOMAIN,
)
from .errors import AlreadyConfigured, CannotConnect
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): vol.Coerce(int),
}
)
async def connect_client(hass, user_input):
"""Connect the HLK-SW16 client."""
client_aw = create_hlk_sw16_connection(
host=user_input[CONF_HOST],
port=user_input[CONF_PORT],
loop=hass.loop,
timeout=CONNECTION_TIMEOUT,
reconnect_interval=DEFAULT_RECONNECT_INTERVAL,
keep_alive_interval=DEFAULT_KEEP_ALIVE_INTERVAL,
)
return await asyncio.wait_for(client_aw, timeout=CONNECTION_TIMEOUT)
async def validate_input(hass: HomeAssistant, user_input):
"""Validate the user input allows us to connect."""
for entry in hass.config_entries.async_entries(DOMAIN):
if (
entry.data[CONF_HOST] == user_input[CONF_HOST]
and entry.data[CONF_PORT] == user_input[CONF_PORT]
):
raise AlreadyConfigured
try:
client = await connect_client(hass, user_input)
except asyncio.TimeoutError as err:
raise CannotConnect from err
try:
def disconnect_callback():
if client.in_transaction:
client.active_transaction.set_exception(CannotConnect)
client.disconnect_callback = disconnect_callback
await client.status()
except CannotConnect:
client.disconnect_callback = None
client.stop()
raise
else:
client.disconnect_callback = None
client.stop()
class SW16FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a HLK-SW16 config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_import(self, user_input):
"""Handle import."""
return await self.async_step_user(user_input)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
await validate_input(self.hass, user_input)
address = f"{user_input[CONF_HOST]}:{user_input[CONF_PORT]}"
return self.async_create_entry(title=address, data=user_input)
except AlreadyConfigured:
errors["base"] = "already_configured"
except CannotConnect:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
|
from keras.models import model_from_json
def model_to_dict(model):
"""Turns a Keras model into a Python dictionary
:param model: Keras model instance
:return: dictionary with model information
"""
return dict(model=model.to_json(), weights=model.get_weights())
def dict_to_model(dict):
"""Turns a Python dictionary with model architecture and weights
back into a Keras model
:param dict: dictionary with `model` and `weights` keys.
:return: Keras model instantiated from dictionary
"""
model = model_from_json(dict['model'])
model.set_weights(dict['weights'])
return model
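# Round-trip sketch (not part of the original module): assuming a toy
# Sequential model, dict_to_model(model_to_dict(m)) should reproduce both the
# architecture and the weights. The helper name and layer sizes are
# illustrative assumptions.
def _round_trip_demo():
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense
    original = Sequential([Dense(4, input_shape=(3,), activation='relu')])
    restored = dict_to_model(model_to_dict(original))
    assert restored.to_json() == original.to_json()
    for w_old, w_new in zip(original.get_weights(), restored.get_weights()):
        np.testing.assert_array_equal(w_old, w_new)
    return restored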
|
import logging
from luftdaten import Luftdaten
from luftdaten.exceptions import LuftdatenError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SHOW_ON_MAP,
PERCENTAGE,
PRESSURE_PA,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .config_flow import configured_sensors, duplicate_stations
from .const import CONF_SENSOR_ID, DEFAULT_SCAN_INTERVAL, DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_LUFTDATEN = "luftdaten"
DATA_LUFTDATEN_CLIENT = "data_luftdaten_client"
DATA_LUFTDATEN_LISTENER = "data_luftdaten_listener"
DEFAULT_ATTRIBUTION = "Data provided by luftdaten.info"
SENSOR_HUMIDITY = "humidity"
SENSOR_PM10 = "P1"
SENSOR_PM2_5 = "P2"
SENSOR_PRESSURE = "pressure"
SENSOR_PRESSURE_AT_SEALEVEL = "pressure_at_sealevel"
SENSOR_TEMPERATURE = "temperature"
TOPIC_UPDATE = f"{DOMAIN}_data_update"
SENSORS = {
SENSOR_TEMPERATURE: ["Temperature", "mdi:thermometer", TEMP_CELSIUS],
SENSOR_HUMIDITY: ["Humidity", "mdi:water-percent", PERCENTAGE],
SENSOR_PRESSURE: ["Pressure", "mdi:arrow-down-bold", PRESSURE_PA],
SENSOR_PRESSURE_AT_SEALEVEL: ["Pressure at sealevel", "mdi:download", PRESSURE_PA],
SENSOR_PM10: [
"PM10",
"mdi:thought-bubble",
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
],
SENSOR_PM2_5: [
"PM2.5",
"mdi:thought-bubble-outline",
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
],
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)): vol.All(
cv.ensure_list, [vol.In(SENSORS)]
)
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_SENSOR_ID): cv.positive_int,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
@callback
def _async_fixup_sensor_id(hass, config_entry, sensor_id):
hass.config_entries.async_update_entry(
config_entry, data={**config_entry.data, CONF_SENSOR_ID: int(sensor_id)}
)
async def async_setup(hass, config):
"""Set up the Luftdaten component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT] = {}
hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
station_id = conf[CONF_SENSOR_ID]
if station_id not in configured_sensors(hass):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_SENSORS: conf[CONF_SENSORS],
CONF_SENSOR_ID: conf[CONF_SENSOR_ID],
CONF_SHOW_ON_MAP: conf[CONF_SHOW_ON_MAP],
},
)
)
hass.data[DOMAIN][CONF_SCAN_INTERVAL] = conf[CONF_SCAN_INTERVAL]
return True
async def async_setup_entry(hass, config_entry):
"""Set up Luftdaten as config entry."""
if not isinstance(config_entry.data[CONF_SENSOR_ID], int):
_async_fixup_sensor_id(hass, config_entry, config_entry.data[CONF_SENSOR_ID])
if (
config_entry.data[CONF_SENSOR_ID] in duplicate_stations(hass)
and config_entry.source == SOURCE_IMPORT
):
_LOGGER.warning(
"Removing duplicate sensors for station %s",
config_entry.data[CONF_SENSOR_ID],
)
hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id))
return False
session = async_get_clientsession(hass)
try:
luftdaten = LuftDatenData(
Luftdaten(config_entry.data[CONF_SENSOR_ID], hass.loop, session),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)
),
)
await luftdaten.async_update()
hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT][config_entry.entry_id] = luftdaten
except LuftdatenError as err:
raise ConfigEntryNotReady from err
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
async def refresh_sensors(event_time):
"""Refresh Luftdaten data."""
await luftdaten.async_update()
async_dispatcher_send(hass, TOPIC_UPDATE)
hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER][
config_entry.entry_id
] = async_track_time_interval(
hass,
refresh_sensors,
hass.data[DOMAIN].get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL),
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Luftdaten config entry."""
remove_listener = hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER].pop(
config_entry.entry_id
)
remove_listener()
hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT].pop(config_entry.entry_id)
return await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
class LuftDatenData:
"""Define a generic Luftdaten object."""
def __init__(self, client, sensor_conditions):
"""Initialize the Luftdata object."""
self.client = client
self.data = {}
self.sensor_conditions = sensor_conditions
async def async_update(self):
"""Update sensor/binary sensor data."""
try:
await self.client.get_data()
self.data[DATA_LUFTDATEN] = self.client.values
self.data[DATA_LUFTDATEN].update(self.client.meta)
except LuftdatenError:
_LOGGER.error("Unable to retrieve data from luftdaten.info")
|
from collections import defaultdict
from datetime import timedelta
import spotcrime
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_EXCLUDE,
CONF_INCLUDE,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
CONF_DAYS = "days"
DEFAULT_DAYS = 1
NAME = "spotcrime"
EVENT_INCIDENT = f"{NAME}_incident"
SCAN_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_RADIUS): vol.Coerce(float),
vol.Required(CONF_API_KEY): cv.string,
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.positive_int,
vol.Optional(CONF_INCLUDE): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EXCLUDE): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Crime Reports platform."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
name = config[CONF_NAME]
radius = config[CONF_RADIUS]
api_key = config[CONF_API_KEY]
days = config.get(CONF_DAYS)
include = config.get(CONF_INCLUDE)
exclude = config.get(CONF_EXCLUDE)
add_entities(
[
SpotCrimeSensor(
name, latitude, longitude, radius, include, exclude, api_key, days
)
],
True,
)
class SpotCrimeSensor(Entity):
"""Representation of a Spot Crime Sensor."""
def __init__(
self, name, latitude, longitude, radius, include, exclude, api_key, days
):
"""Initialize the Spot Crime sensor."""
self._name = name
self._include = include
self._exclude = exclude
self.api_key = api_key
self.days = days
self._spotcrime = spotcrime.SpotCrime(
(latitude, longitude),
radius,
self._include,
self._exclude,
self.api_key,
self.days,
)
self._attributes = None
self._state = None
self._previous_incidents = set()
self._attributes = {ATTR_ATTRIBUTION: spotcrime.ATTRIBUTION}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def _incident_event(self, incident):
data = {
"type": incident.get("type"),
"timestamp": incident.get("timestamp"),
"address": incident.get("location"),
}
if incident.get("coordinates"):
data.update(
{
ATTR_LATITUDE: incident.get("lat"),
ATTR_LONGITUDE: incident.get("lon"),
}
)
self.hass.bus.fire(EVENT_INCIDENT, data)
def update(self):
"""Update device state."""
incident_counts = defaultdict(int)
incidents = self._spotcrime.get_incidents()
if len(incidents) < len(self._previous_incidents):
self._previous_incidents = set()
for incident in incidents:
incident_type = slugify(incident.get("type"))
incident_counts[incident_type] += 1
if (
self._previous_incidents
and incident.get("id") not in self._previous_incidents
):
self._incident_event(incident)
self._previous_incidents.add(incident.get("id"))
self._attributes.update(incident_counts)
self._state = len(incidents)
|
import sys
import logging
import argparse
from gensim import utils
from gensim.utils import deprecated
from gensim.models.keyedvectors import KeyedVectors
logger = logging.getLogger(__name__)
def get_glove_info(glove_file_name):
"""Get number of vectors in provided `glove_file_name` and dimension of vectors.
Parameters
----------
glove_file_name : str
Path to file in GloVe format.
Returns
-------
(int, int)
Number of vectors (lines) of input file and its dimension.
"""
with utils.open(glove_file_name, 'rb') as f:
num_lines = sum(1 for _ in f)
with utils.open(glove_file_name, 'rb') as f:
num_dims = len(f.readline().split()) - 1
return num_lines, num_dims
@deprecated("KeyedVectors.load_word2vec_format(.., binary=False, no_header=True) loads GLoVE text vectors.")
def glove2word2vec(glove_input_file, word2vec_output_file):
"""Convert `glove_input_file` in GloVe format to word2vec format and write it to `word2vec_output_file`.
Parameters
----------
glove_input_file : str
Path to file in GloVe format.
word2vec_output_file: str
Path to output file.
Returns
-------
(int, int)
Number of vectors (lines) of input file and its dimension.
"""
glovekv = KeyedVectors.load_word2vec_format(glove_input_file, binary=False, no_header=True)
num_lines, num_dims = len(glovekv), glovekv.vector_size
logger.info("converting %i vectors from %s to %s", num_lines, glove_input_file, word2vec_output_file)
glovekv.save_word2vec_format(word2vec_output_file, binary=False)
return num_lines, num_dims
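# Programmatic usage sketch (not part of the original script); the function
# name and file paths below are placeholders for any GloVe-format text file.
def _example_convert(glove_path='glove.txt', word2vec_path='glove.word2vec.txt'):
    """Convert a GloVe file and reload the result with KeyedVectors."""
    num_vectors, dims = glove2word2vec(glove_path, word2vec_path)
    model = KeyedVectors.load_word2vec_format(word2vec_path, binary=False)
    # The reloaded model should report the same vocabulary size and dimension.
    assert len(model) == num_vectors and model.vector_size == dims
    return model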
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input", required=True, help="Path to input file in GloVe format")
parser.add_argument("-o", "--output", required=True, help="Path to output file")
args = parser.parse_args()
logger.info("running %s", ' '.join(sys.argv))
num_lines, num_dims = glove2word2vec(args.input, args.output)
logger.info('Converted model with %i vectors and %i dimensions', num_lines, num_dims)
|
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "binary_sensor"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# On means low, Off means normal
DEVICE_CLASS_BATTERY = "battery"
# On means charging, Off means not charging
DEVICE_CLASS_BATTERY_CHARGING = "battery_charging"
# On means cold, Off means normal
DEVICE_CLASS_COLD = "cold"
# On means connected, Off means disconnected
DEVICE_CLASS_CONNECTIVITY = "connectivity"
# On means open, Off means closed
DEVICE_CLASS_DOOR = "door"
# On means open, Off means closed
DEVICE_CLASS_GARAGE_DOOR = "garage_door"
# On means gas detected, Off means no gas (clear)
DEVICE_CLASS_GAS = "gas"
# On means hot, Off means normal
DEVICE_CLASS_HEAT = "heat"
# On means light detected, Off means no light
DEVICE_CLASS_LIGHT = "light"
# On means open (unlocked), Off means closed (locked)
DEVICE_CLASS_LOCK = "lock"
# On means wet, Off means dry
DEVICE_CLASS_MOISTURE = "moisture"
# On means motion detected, Off means no motion (clear)
DEVICE_CLASS_MOTION = "motion"
# On means moving, Off means not moving (stopped)
DEVICE_CLASS_MOVING = "moving"
# On means occupied, Off means not occupied (clear)
DEVICE_CLASS_OCCUPANCY = "occupancy"
# On means open, Off means closed
DEVICE_CLASS_OPENING = "opening"
# On means plugged in, Off means unplugged
DEVICE_CLASS_PLUG = "plug"
# On means power detected, Off means no power
DEVICE_CLASS_POWER = "power"
# On means home, Off means away
DEVICE_CLASS_PRESENCE = "presence"
# On means problem detected, Off means no problem (OK)
DEVICE_CLASS_PROBLEM = "problem"
# On means unsafe, Off means safe
DEVICE_CLASS_SAFETY = "safety"
# On means smoke detected, Off means no smoke (clear)
DEVICE_CLASS_SMOKE = "smoke"
# On means sound detected, Off means no sound (clear)
DEVICE_CLASS_SOUND = "sound"
# On means vibration detected, Off means no vibration
DEVICE_CLASS_VIBRATION = "vibration"
# On means open, Off means closed
DEVICE_CLASS_WINDOW = "window"
DEVICE_CLASSES = [
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_COLD,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASS_WINDOW,
]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
async def async_setup(hass, config):
"""Track states and offer events for binary sensors."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class BinarySensorEntity(Entity):
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
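# Illustrative sketch (not Home Assistant code): a minimal subclass only needs
# to provide is_on; the inherited state property then maps it to STATE_ON or
# STATE_OFF. The class below is hypothetical and exists purely as an example.
class _ExampleDoorSensor(BinarySensorEntity):
    """Hypothetical door sensor used for demonstration only."""
    def __init__(self, is_open=False):
        self._is_open = is_open
    @property
    def is_on(self):
        """Return true if the door is open."""
        return self._is_open
    @property
    def device_class(self):
        """Return the door device class."""
        return DEVICE_CLASS_DOOR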
class BinarySensorDevice(BinarySensorEntity):
"""Represent a binary sensor (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"BinarySensorDevice is deprecated, modify %s to extend BinarySensorEntity",
cls.__name__,
)
|
import os
import pytest
from molecule import config
from molecule.driver import lxd
@pytest.fixture
def _instance(config_instance):
return lxd.LXD(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_testinfra_options_property(_instance):
assert {
'connection': 'ansible',
'ansible-inventory': _instance._config.provisioner.inventory_file
} == _instance.testinfra_options
def test_name_property(_instance):
assert 'lxd' == _instance.name
def test_options_property(_instance):
x = {'managed': True}
assert x == _instance.options
def test_login_cmd_template_property(_instance):
assert 'lxc exec {instance} bash' == _instance.login_cmd_template
def test_safe_files_property(_instance):
assert [] == _instance.safe_files
def test_default_safe_files_property(_instance):
assert [] == _instance.default_safe_files
def test_delegated_property(_instance):
assert not _instance.delegated
def test_managed_property(_instance):
assert _instance.managed
def test_default_ssh_connection_options_property(_instance):
assert [] == _instance.default_ssh_connection_options
def test_login_options(_instance):
assert {'instance': 'foo'} == _instance.login_options('foo')
def test_ansible_connection_options(_instance):
x = {'ansible_connection': 'lxd'}
assert x == _instance.ansible_connection_options('foo')
def test_instance_config_property(_instance):
x = os.path.join(_instance._config.scenario.ephemeral_directory,
'instance_config.yml')
assert x == _instance.instance_config
def test_ssh_connection_options_property(_instance):
assert [] == _instance.ssh_connection_options
def test_status(mocker, _instance):
result = _instance.status()
assert 2 == len(result)
assert result[0].instance_name == 'instance-1'
assert result[0].driver_name == 'lxd'
assert result[0].provisioner_name == 'ansible'
assert result[0].scenario_name == 'default'
assert result[0].created == 'false'
assert result[0].converged == 'false'
assert result[1].instance_name == 'instance-2'
assert result[1].driver_name == 'lxd'
assert result[1].provisioner_name == 'ansible'
assert result[1].scenario_name == 'default'
assert result[1].created == 'false'
assert result[1].converged == 'false'
def test_created(_instance):
assert 'false' == _instance._created()
def test_converged(_instance):
assert 'false' == _instance._converged()
|
import os
from PyQt5.QtCore import QUrl, QUrlQuery
from qutebrowser.utils import utils, javascript, jinja, standarddir, log
from qutebrowser.config import config
_SYSTEM_PATHS = [
# Debian pdf.js-common
# Arch Linux pdfjs (AUR)
'/usr/share/pdf.js/',
# Flatpak (Flathub)
'/app/share/pdf.js/',
# Arch Linux pdf.js (AUR)
'/usr/share/javascript/pdf.js/',
# Debian libjs-pdf
'/usr/share/javascript/pdf/',
]
class PDFJSNotFound(Exception):
"""Raised when no pdf.js installation is found.
Attributes:
path: path of the file that was requested but not found.
"""
def __init__(self, path):
self.path = path
message = "Path '{}' not found".format(path)
super().__init__(message)
def generate_pdfjs_page(filename, url):
"""Return the html content of a page that displays a file with pdfjs.
Returns a string.
Args:
filename: The filename of the PDF to open.
url: The URL being opened.
"""
if not is_available():
pdfjs_dir = os.path.join(standarddir.data(), 'pdfjs')
return jinja.render('no_pdfjs.html',
url=url.toDisplayString(),
title="PDF.js not found",
pdfjs_dir=pdfjs_dir)
html = get_pdfjs_res('web/viewer.html').decode('utf-8')
script = _generate_pdfjs_script(filename)
html = html.replace('</body>',
'</body><script>{}</script>'.format(script))
# WORKAROUND for the fact that PDF.js tries to use the Fetch API even with
# qute:// URLs.
pdfjs_script = '<script src="../build/pdf.js"></script>'
html = html.replace(pdfjs_script,
'<script>window.Response = undefined;</script>\n' +
pdfjs_script)
return html
def _generate_pdfjs_script(filename):
"""Generate the script that shows the pdf with pdf.js.
Args:
filename: The name of the file to open.
"""
url = QUrl('qute://pdfjs/file')
url_query = QUrlQuery()
url_query.addQueryItem('filename', filename)
url.setQuery(url_query)
js_url = javascript.to_js(
url.toString(QUrl.FullyEncoded)) # type: ignore[arg-type]
return jinja.js_environment.from_string("""
document.addEventListener("DOMContentLoaded", function() {
if (typeof window.PDFJS !== 'undefined') {
// v1.x
window.PDFJS.verbosity = window.PDFJS.VERBOSITY_LEVELS.info;
} else {
// v2.x
const options = window.PDFViewerApplicationOptions;
options.set('verbosity', pdfjsLib.VerbosityLevel.INFOS);
}
const viewer = window.PDFView || window.PDFViewerApplication;
viewer.open({{ url }});
});
""").render(url=js_url)
def get_pdfjs_res_and_path(path):
"""Get a pdf.js resource in binary format.
Returns a (content, path) tuple, where content is the file content and path
is the path where the file was found. If path is None, the bundled version
was used.
Args:
path: The path inside the pdfjs directory.
"""
path = path.lstrip('/')
content = None
file_path = None
system_paths = _SYSTEM_PATHS + [
# fallback
os.path.join(standarddir.data(), 'pdfjs'),
# hardcoded fallback for --temp-basedir
os.path.expanduser('~/.local/share/qutebrowser/pdfjs/'),
]
# First try a system wide installation
# System installations might strip off the 'build/' or 'web/' prefixes.
# qute expects them, so we need to adjust for it.
names_to_try = [path, _remove_prefix(path)]
for system_path in system_paths:
content, file_path = _read_from_system(system_path, names_to_try)
if content is not None:
break
# Fallback to bundled pdf.js
if content is None:
res_path = '3rdparty/pdfjs/{}'.format(path)
try:
content = utils.read_file(res_path, binary=True)
except FileNotFoundError:
raise PDFJSNotFound(path) from None
except OSError as e:
log.misc.warning("OSError while reading PDF.js file: {}".format(e))
raise PDFJSNotFound(path) from None
return content, file_path
def get_pdfjs_res(path):
"""Get a pdf.js resource in binary format.
Args:
path: The path inside the pdfjs directory.
"""
content, _path = get_pdfjs_res_and_path(path)
return content
def _remove_prefix(path):
"""Remove the web/ or build/ prefix of a pdfjs-file-path.
Args:
path: Path as string where the prefix should be stripped off.
"""
prefixes = {'web/', 'build/'}
if any(path.startswith(prefix) for prefix in prefixes):
return path.split('/', maxsplit=1)[1]
# Return the unchanged path if no prefix is found
return path
def _read_from_system(system_path, names):
"""Try to read a file with one of the given names in system_path.
Returns a (content, path) tuple, where the path is the filepath that was
used.
    Each file in names is considered equivalent; the first file that is found
    is read and its binary content returned.
    Returns (None, None) if no file could be found.
Args:
system_path: The folder where the file should be searched.
names: List of possible file names.
"""
for name in names:
try:
full_path = os.path.join(system_path, name)
with open(full_path, 'rb') as f:
return (f.read(), full_path)
except FileNotFoundError:
continue
except OSError as e:
log.misc.warning("OSError while reading PDF.js file: {}".format(e))
continue
return (None, None)
def is_available():
"""Return true if a pdfjs installation is available."""
try:
get_pdfjs_res('build/pdf.js')
get_pdfjs_res('web/viewer.html')
except PDFJSNotFound:
return False
else:
return True
def should_use_pdfjs(mimetype, url):
"""Check whether PDF.js should be used."""
# e.g. 'blob:qute%3A///b45250b3-787e-44d1-a8d8-c2c90f81f981'
is_download_url = (url.scheme() == 'blob' and
QUrl(url.path()).scheme() == 'qute')
is_pdf = mimetype in ['application/pdf', 'application/x-pdf']
config_enabled = config.instance.get('content.pdfjs', url=url)
return is_pdf and not is_download_url and config_enabled
def get_main_url(filename: str, original_url: QUrl) -> QUrl:
"""Get the URL to be opened to view a local PDF."""
url = QUrl('qute://pdfjs/web/viewer.html')
query = QUrlQuery()
query.addQueryItem('filename', filename) # read from our JS
query.addQueryItem('file', '') # to avoid pdfjs opening the default PDF
urlstr = original_url.toString(QUrl.FullyEncoded) # type: ignore[arg-type]
query.addQueryItem('source', urlstr)
url.setQuery(query)
return url
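# Usage sketch (not part of qutebrowser): what get_main_url produces for a
# hypothetical local file. The helper name and paths are assumptions made for
# illustration only.
def _example_main_url():
    original = QUrl('file:///tmp/example.pdf')
    url = get_main_url('example.pdf', original)
    # The result is a qute://pdfjs/web/viewer.html URL carrying the filename,
    # an empty 'file' item and the encoded source URL as query parameters.
    return url.toString(QUrl.FullyEncoded)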
|
import os
import anyconfig
from ansible.module_utils.parsing.convert_bool import boolean
import six
from molecule import interpolation
from molecule import logger
from molecule import platforms
from molecule import scenario
from molecule import state
from molecule import util
from molecule.dependency import ansible_galaxy
from molecule.dependency import gilt
from molecule.dependency import shell
from molecule.driver import azure
from molecule.driver import delegated
from molecule.driver import digitalocean
from molecule.driver import docker
from molecule.driver import ec2
from molecule.driver import gce
from molecule.driver import linode
from molecule.driver import lxc
from molecule.driver import lxd
from molecule.driver import openstack
from molecule.driver import vagrant
from molecule.lint import yamllint
from molecule.model import schema_v2
from molecule.provisioner import ansible
from molecule.verifier import goss
from molecule.verifier import inspec
from molecule.verifier import testinfra
LOG = logger.get_logger(__name__)
MOLECULE_DEBUG = boolean(os.environ.get('MOLECULE_DEBUG', 'False'))
MOLECULE_DIRECTORY = 'molecule'
MOLECULE_FILE = 'molecule.yml'
MERGE_STRATEGY = anyconfig.MS_DICTS
MOLECULE_KEEP_STRING = 'MOLECULE_'
# https://stackoverflow.com/questions/16017397/injecting-function-call-after-init-with-decorator # noqa
class NewInitCaller(type):
    """Metaclass that calls ``after_init()`` right after ``__init__`` returns."""
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.after_init()
return obj
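# Minimal illustration (an assumption, not part of molecule) of the metaclass
# behaviour: after_init runs automatically once __init__ has returned. The
# demo function and Example class are hypothetical.
def _new_init_caller_demo():
    @six.add_metaclass(NewInitCaller)
    class Example(object):
        def __init__(self):
            self.steps = ['init']
        def after_init(self):
            self.steps.append('after_init')
    return Example().steps  # ['init', 'after_init']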
@six.add_metaclass(NewInitCaller)
class Config(object):
"""
Molecule searches the current directory for ``molecule.yml`` files by
globbing `molecule/*/molecule.yml`. The files are instantiated into
a list of Molecule :class:`.Config` objects, and each Molecule subcommand
operates on this list.
The directory in which the ``molecule.yml`` resides is the Scenario's
directory. Molecule performs most functions within this directory.
The :class:`.Config` object instantiates Dependency_, Driver_,
:ref:`root_lint`, Platforms_, Provisioner_, Verifier_,
:ref:`root_scenario`, and State_ references.
"""
def __init__(self,
molecule_file,
args={},
command_args={},
ansible_args=()):
"""
        Initialize a new config class and return None.
:param molecule_file: A string containing the path to the Molecule file
to be parsed.
:param args: An optional dict of options, arguments and commands from
the CLI.
:param command_args: An optional dict of options passed to the
subcommand from the CLI.
:param ansible_args: An optional tuple of arguments provided to the
``ansible-playbook`` command.
:returns: None
"""
self.molecule_file = molecule_file
self.args = args
self.command_args = command_args
self.ansible_args = ansible_args
self.config = self._get_config()
self._action = None
def after_init(self):
self.config = self._reget_config()
self._validate()
@property
def debug(self):
return self.args.get('debug', MOLECULE_DEBUG)
@property
def env_file(self):
return util.abs_path(self.args.get('env_file'))
@property
def subcommand(self):
return self.command_args['subcommand']
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def project_directory(self):
return os.getcwd()
@property
def molecule_directory(self):
return molecule_directory(self.project_directory)
@property
@util.memoize
def dependency(self):
dependency_name = self.config['dependency']['name']
if dependency_name == 'galaxy':
return ansible_galaxy.AnsibleGalaxy(self)
elif dependency_name == 'gilt':
return gilt.Gilt(self)
elif dependency_name == 'shell':
return shell.Shell(self)
@property
@util.memoize
def driver(self):
driver_name = self._get_driver_name()
driver = None
if driver_name == 'azure':
driver = azure.Azure(self)
elif driver_name == 'delegated':
driver = delegated.Delegated(self)
elif driver_name == 'digitalocean':
driver = digitalocean.DigitalOcean(self)
elif driver_name == 'docker':
driver = docker.Docker(self)
elif driver_name == 'ec2':
driver = ec2.EC2(self)
elif driver_name == 'gce':
driver = gce.GCE(self)
elif driver_name == 'linode':
driver = linode.Linode(self)
elif driver_name == 'lxc':
driver = lxc.LXC(self)
elif driver_name == 'lxd':
driver = lxd.LXD(self)
elif driver_name == 'openstack':
driver = openstack.Openstack(self)
elif driver_name == 'vagrant':
driver = vagrant.Vagrant(self)
driver.name = driver_name
return driver
@property
def drivers(self):
return molecule_drivers()
@property
def env(self):
return {
'MOLECULE_DEBUG': str(self.debug),
'MOLECULE_FILE': self.molecule_file,
'MOLECULE_ENV_FILE': self.env_file,
'MOLECULE_INVENTORY_FILE': self.provisioner.inventory_file,
'MOLECULE_EPHEMERAL_DIRECTORY': self.scenario.ephemeral_directory,
'MOLECULE_SCENARIO_DIRECTORY': self.scenario.directory,
'MOLECULE_PROJECT_DIRECTORY': self.project_directory,
'MOLECULE_INSTANCE_CONFIG': self.driver.instance_config,
'MOLECULE_DEPENDENCY_NAME': self.dependency.name,
'MOLECULE_DRIVER_NAME': self.driver.name,
'MOLECULE_LINT_NAME': self.lint.name,
'MOLECULE_PROVISIONER_NAME': self.provisioner.name,
'MOLECULE_PROVISIONER_LINT_NAME': self.provisioner.lint.name,
'MOLECULE_SCENARIO_NAME': self.scenario.name,
'MOLECULE_VERIFIER_NAME': self.verifier.name,
'MOLECULE_VERIFIER_LINT_NAME': self.verifier.lint.name,
'MOLECULE_VERIFIER_TEST_DIRECTORY': self.verifier.directory,
}
@property
@util.memoize
def lint(self):
lint_name = self.config['lint']['name']
if lint_name == 'yamllint':
return yamllint.Yamllint(self)
@property
@util.memoize
def platforms(self):
return platforms.Platforms(self)
@property
@util.memoize
def provisioner(self):
provisioner_name = self.config['provisioner']['name']
if provisioner_name == 'ansible':
return ansible.Ansible(self)
@property
@util.memoize
def scenario(self):
return scenario.Scenario(self)
@property
@util.memoize
def state(self):
return state.State(self)
@property
@util.memoize
def verifier(self):
verifier_name = self.config['verifier']['name']
if verifier_name == 'testinfra':
return testinfra.Testinfra(self)
elif verifier_name == 'inspec':
return inspec.Inspec(self)
elif verifier_name == 'goss':
return goss.Goss(self)
@property
@util.memoize
def verifiers(self):
return molecule_verifiers()
def _get_driver_name(self):
driver_from_state_file = self.state.driver
driver_from_cli = self.command_args.get('driver_name')
if driver_from_state_file:
driver_name = driver_from_state_file
elif driver_from_cli:
driver_name = driver_from_cli
else:
driver_name = self.config['driver']['name']
if driver_from_cli and (driver_from_cli != driver_name):
msg = ("Instance(s) were created with the '{}' driver, but the "
"subcommand is using '{}' driver.").format(
driver_name, driver_from_cli)
util.sysexit_with_message(msg)
return driver_name
def _get_config(self):
"""
        Perform a prioritized recursive merge of config files and return
        a new dict. Prior to merging, the config files are interpolated with
        environment variables.
:return: dict
"""
return self._combine(keep_string=MOLECULE_KEEP_STRING)
def _reget_config(self):
"""
        Perform the same prioritized recursive merge as ``_get_config``, this
        time interpolating the ``keep_string`` left behind in the original
        ``_get_config`` call. This is probably __very__ bad.
:return: dict
"""
env = util.merge_dicts(os.environ.copy(), self.env)
env = set_env_from_file(env, self.env_file)
return self._combine(env=env)
def _combine(self, env=os.environ, keep_string=None):
"""
        Perform a prioritized recursive merge of config files and return
        a new dict. Prior to merging, the config files are interpolated with
        environment variables.
        1. Loads Molecule defaults.
        2. Loads a base config (if provided) and merges it on top of the defaults.
        3. Loads the scenario's ``molecule file`` and merges it on top of the
           previous merge.
:return: dict
"""
defaults = self._get_defaults()
base_config = self.args.get('base_config')
if base_config and os.path.exists(base_config):
with util.open_file(base_config) as stream:
s = stream.read()
self._preflight(s)
interpolated_config = self._interpolate(s, env, keep_string)
defaults = util.merge_dicts(
defaults, util.safe_load(interpolated_config))
with util.open_file(self.molecule_file) as stream:
s = stream.read()
self._preflight(s)
interpolated_config = self._interpolate(s, env, keep_string)
defaults = util.merge_dicts(defaults,
util.safe_load(interpolated_config))
return defaults
def _interpolate(self, stream, env, keep_string):
env = set_env_from_file(env, self.env_file)
i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)
try:
return i.interpolate(stream, keep_string)
except interpolation.InvalidInterpolation as e:
msg = ("parsing config file '{}'.\n\n"
'{}\n{}'.format(self.molecule_file, e.place, e.string))
util.sysexit_with_message(msg)
def _get_defaults(self):
scenario_name = (os.path.basename(os.path.dirname(self.molecule_file))
or 'default')
return {
'dependency': {
'name': 'galaxy',
'command': None,
'enabled': True,
'options': {},
'env': {},
},
'driver': {
'name': 'docker',
'provider': {
'name': None,
},
'options': {
'managed': True,
},
'ssh_connection_options': [],
'safe_files': [],
},
'lint': {
'name': 'yamllint',
'enabled': True,
'options': {},
'env': {},
},
'platforms': [],
'provisioner': {
'name': 'ansible',
'config_options': {},
'ansible_args': [],
'connection_options': {},
'options': {},
'env': {},
'inventory': {
'hosts': {},
'host_vars': {},
'group_vars': {},
'links': {},
},
'children': {},
'playbooks': {
'cleanup': 'cleanup.yml',
'create': 'create.yml',
'converge': 'playbook.yml',
'destroy': 'destroy.yml',
'prepare': 'prepare.yml',
'side_effect': 'side_effect.yml',
'verify': 'verify.yml',
},
'lint': {
'name': 'ansible-lint',
'enabled': True,
'options': {},
'env': {},
},
},
'scenario': {
'name':
scenario_name,
'check_sequence': [
'dependency',
'cleanup',
'destroy',
'create',
'prepare',
'converge',
'check',
'cleanup',
'destroy',
],
'cleanup_sequence': ['cleanup'],
'converge_sequence': [
'dependency',
'create',
'prepare',
'converge',
],
'create_sequence': [
'dependency',
'create',
'prepare',
],
'destroy_sequence': [
'dependency',
'cleanup',
'destroy',
],
'test_sequence': [
'lint',
'dependency',
'cleanup',
'destroy',
'syntax',
'create',
'prepare',
'converge',
'idempotence',
'side_effect',
'verify',
'cleanup',
'destroy',
],
},
'verifier': {
'name': 'testinfra',
'enabled': True,
'directory': 'tests',
'options': {},
'env': {},
'additional_files_or_dirs': [],
'lint': {
'name': 'flake8',
'enabled': True,
'options': {},
'env': {},
},
},
}
def _preflight(self, data):
env = os.environ.copy()
env = set_env_from_file(env, self.env_file)
errors = schema_v2.pre_validate(data, env, MOLECULE_KEEP_STRING)
if errors:
msg = "Failed to validate.\n\n{}".format(errors)
util.sysexit_with_message(msg)
def _validate(self):
msg = 'Validating schema {}.'.format(self.molecule_file)
LOG.info(msg)
errors = schema_v2.validate(self.config)
if errors:
msg = "Failed to validate.\n\n{}".format(errors)
util.sysexit_with_message(msg)
msg = 'Validation completed successfully.'
LOG.success(msg)
def molecule_directory(path):
return os.path.join(path, MOLECULE_DIRECTORY)
def molecule_file(path):
return os.path.join(path, MOLECULE_FILE)
def molecule_drivers():
return [
azure.Azure(None).name,
delegated.Delegated(None).name,
digitalocean.DigitalOcean(None).name,
docker.Docker(None).name,
ec2.EC2(None).name,
gce.GCE(None).name,
linode.Linode(None).name,
lxc.LXC(None).name,
lxd.LXD(None).name,
openstack.Openstack(None).name,
vagrant.Vagrant(None).name,
]
def molecule_verifiers():
return [
goss.Goss(None).name,
inspec.Inspec(None).name,
testinfra.Testinfra(None).name,
]
def set_env_from_file(env, env_file):
if env_file and os.path.exists(env_file):
env = env.copy()
d = util.safe_load_file(env_file)
for k, v in d.items():
env[k] = v
return env
return env
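# Usage sketch (an assumption, not molecule code): given a YAML env file such
# as one containing "FOO: bar", the returned mapping is a copy of the input
# extended with that key; if the file does not exist the original mapping is
# returned unchanged. The demo name and path are placeholders.
def _set_env_from_file_demo(env_file='/tmp/molecule-env.yml'):
    base = {'PATH': '/usr/bin'}
    merged = set_env_from_file(base, env_file)
    assert 'PATH' in merged
    return merged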
|
import pytest
from molecule import status
@pytest.fixture
def _instance():
s = status.get_status()
s.instance_name = None
s.driver_name = None
s.provisioner_name = None
s.scenario_name = None
s.created = None
s.converged = None
return s
def test__instance_name_attribute(_instance):
assert _instance.instance_name is None
def test_status_driver_name_attribute(_instance):
assert _instance.driver_name is None
def test_status_provisioner_name_attribute(_instance):
assert _instance.provisioner_name is None
def test_status_scenario_name_attribute(_instance):
assert _instance.scenario_name is None
def test_status_created_attribute(_instance):
assert _instance.created is None
def test_status_converged_attribute(_instance):
assert _instance.converged is None
|
import gevent
import json
import unittest2
import base64
import os
import tempfile
import urllib2
from psdash.run import PsDashRunner
try:
import httplib
except ImportError:
# support for python 3
import http.client as httplib
class TestBasicAuth(unittest2.TestCase):
default_username = 'tester'
default_password = 'secret'
def setUp(self):
self.app = PsDashRunner().app
self.client = self.app.test_client()
def _enable_basic_auth(self, username, password):
self.app.config['PSDASH_AUTH_USERNAME'] = username
self.app.config['PSDASH_AUTH_PASSWORD'] = password
def _create_auth_headers(self, username, password):
data = base64.b64encode(':'.join([username, password]))
headers = [('Authorization', 'Basic %s' % data)]
return headers
def test_missing_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
resp = self.client.get('/')
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_correct_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
headers = self._create_auth_headers(self.default_username, self.default_password)
resp = self.client.get('/', headers=headers)
self.assertEqual(resp.status_code, httplib.OK)
def test_incorrect_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
headers = self._create_auth_headers(self.default_username, 'wrongpass')
resp = self.client.get('/', headers=headers)
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
class TestAllowedRemoteAddresses(unittest2.TestCase):
def test_correct_remote_address(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
def test_incorrect_remote_address(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_multiple_remote_addresses(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1, 10.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.124.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_multiple_remote_addresses_using_list(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': ['127.0.0.1', '10.0.0.1']})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.124.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
class TestEnvironmentWhitelist(unittest2.TestCase):
def test_show_only_whitelisted(self):
r = PsDashRunner({'PSDASH_ENVIRON_WHITELIST': ['USER']})
resp = r.app.test_client().get('/process/%d/environment' % os.getpid())
self.assertTrue(os.environ['USER'] in resp.data)
self.assertTrue('*hidden by whitelist*' in resp.data)
class TestUrlPrefix(unittest2.TestCase):
def setUp(self):
self.default_prefix = '/subfolder/'
def test_page_not_found_on_root(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': self.default_prefix})
resp = r.app.test_client().get('/')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_works_on_prefix(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': self.default_prefix})
resp = r.app.test_client().get(self.default_prefix)
self.assertEqual(resp.status_code, httplib.OK)
def test_multiple_level_prefix(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': '/use/this/folder/'})
resp = r.app.test_client().get('/use/this/folder/')
self.assertEqual(resp.status_code, httplib.OK)
def test_missing_starting_slash_works(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': 'subfolder/'})
resp = r.app.test_client().get('/subfolder/')
self.assertEqual(resp.status_code, httplib.OK)
def test_missing_trailing_slash_works(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': '/subfolder'})
resp = r.app.test_client().get('/subfolder/')
self.assertEqual(resp.status_code, httplib.OK)
class TestHttps(unittest2.TestCase):
def _run(self, https=False):
options = {'PSDASH_PORT': 5051}
if https:
options.update({
'PSDASH_HTTPS_KEYFILE': os.path.join(os.path.dirname(__file__), 'keyfile'),
'PSDASH_HTTPS_CERTFILE': os.path.join(os.path.dirname(__file__), 'cacert.pem')
})
self.r = PsDashRunner(options)
self.runner = gevent.spawn(self.r.run)
gevent.sleep(0.3)
def tearDown(self):
self.r.server.close()
self.runner.kill()
gevent.sleep(0.3)
def test_https_dont_work_without_certs(self):
self._run()
self.assertRaises(urllib2.URLError, urllib2.urlopen, 'https://127.0.0.1:5051')
def test_https_works_with_certs(self):
self._run(https=True)
resp = urllib2.urlopen('https://127.0.0.1:5051')
self.assertEqual(resp.getcode(), httplib.OK)
class TestEndpoints(unittest2.TestCase):
def setUp(self):
self.r = PsDashRunner()
self.app = self.r.app
self.client = self.app.test_client()
self.pid = os.getpid()
self.r.get_local_node().net_io_counters.update()
def test_index(self):
resp = self.client.get('/')
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf('TRAVIS' in os.environ, 'Functionality not supported on Travis CI')
def test_disks(self):
resp = self.client.get('/disks')
self.assertEqual(resp.status_code, httplib.OK)
def test_network(self):
resp = self.client.get('/network')
self.assertEqual(resp.status_code, httplib.OK)
def test_processes(self):
resp = self.client.get('/processes')
self.assertEqual(resp.status_code, httplib.OK)
def test_process_overview(self):
resp = self.client.get('/process/%d' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf(os.environ.get('USER') == 'root', 'It would fail as root as we would have access to pid 1')
def test_process_no_access(self):
resp = self.client.get('/process/1') # pid 1 == init
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_process_non_existing_pid(self):
resp = self.client.get('/process/0')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_process_children(self):
resp = self.client.get('/process/%d/children' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_connections(self):
resp = self.client.get('/process/%d/connections' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_environment(self):
resp = self.client.get('/process/%d/environment' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_files(self):
resp = self.client.get('/process/%d/files' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_threads(self):
resp = self.client.get('/process/%d/threads' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_memory(self):
resp = self.client.get('/process/%d/memory' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf('TRAVIS' in os.environ, 'Functionality not supported on Travis CI')
def test_process_limits(self):
resp = self.client.get('/process/%d/limits' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_invalid_section(self):
resp = self.client.get('/process/%d/whatnot' % self.pid)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_non_existing(self):
resp = self.client.get('/prettywronghuh')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_connection_filters(self):
resp = self.client.get('/network?laddr=127.0.0.1')
self.assertEqual(resp.status_code, httplib.OK)
def test_register_node(self):
resp = self.client.get('/register?name=examplehost&port=500')
self.assertEqual(resp.status_code, httplib.OK)
def test_register_node_all_params_required(self):
resp = self.client.get('/register?name=examplehost')
self.assertEqual(resp.status_code, httplib.BAD_REQUEST)
resp = self.client.get('/register?port=500')
self.assertEqual(resp.status_code, httplib.BAD_REQUEST)
class TestLogs(unittest2.TestCase):
def _create_log_file(self):
fd, filename = tempfile.mkstemp()
fp = os.fdopen(fd, 'w')
fp.write('woha\n' * 100)
fp.write('something\n')
fp.write('woha\n' * 100)
fp.flush()
return filename
def setUp(self):
self.r = PsDashRunner()
self.app = self.r.app
self.client = self.app.test_client()
self.filename = self._create_log_file()
self.r.get_local_node().logs.add_available(self.filename)
def test_logs(self):
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_logs_removed_file(self):
filename = self._create_log_file()
self.r.get_local_node().logs.add_available(filename)
# first visit to make sure the logs are properly initialized
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
os.unlink(filename)
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_logs_removed_file_uninitialized(self):
filename = self._create_log_file()
self.r.get_local_node().logs.add_available(filename)
os.unlink(filename)
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_view(self):
resp = self.client.get('/log?filename=%s' % self.filename)
self.assertEqual(resp.status_code, httplib.OK)
def test_search(self):
resp = self.client.get('/log/search?filename=%s&text=%s' % (self.filename, 'something'),
environ_overrides={'HTTP_X_REQUESTED_WITH': 'xmlhttprequest'})
self.assertEqual(resp.status_code, httplib.OK)
try:
data = json.loads(resp.data)
self.assertIn('something', data['content'])
except ValueError:
self.fail('Log search did not return valid json data')
def test_read(self):
resp = self.client.get('/log?filename=%s' % self.filename,
environ_overrides={'HTTP_X_REQUESTED_WITH': 'xmlhttprequest'})
self.assertEqual(resp.status_code, httplib.OK)
def test_read_tail(self):
resp = self.client.get('/log?filename=%s&seek_tail=1' % self.filename)
self.assertEqual(resp.status_code, httplib.OK)
def test_non_existing_file(self):
filename = "/var/log/surelynotaroundright.log"
resp = self.client.get('/log?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/search?filename=%s&text=%s' % (filename, 'something'))
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/read?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/read_tail?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
if __name__ == '__main__':
unittest2.main()
|
import logging
import time
from contextlib import contextmanager
from queue import Empty
from queue import PriorityQueue
from queue import Queue
from threading import Condition
from threading import Event
from threading import Thread
from typing import Any
from typing import Collection
from typing import Generator
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing_extensions import Protocol
from paasta_tools.marathon_tools import DEFAULT_SOA_DIR
from paasta_tools.marathon_tools import get_all_marathon_apps
from paasta_tools.marathon_tools import get_marathon_clients
from paasta_tools.marathon_tools import get_marathon_servers
from paasta_tools.marathon_tools import load_marathon_service_config_no_cache
from paasta_tools.marathon_tools import MarathonClients
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.metrics.metrics_lib import TimerProtocol
from paasta_tools.utils import load_system_paasta_config
class BounceTimers(NamedTuple):
processed_by_worker: TimerProtocol
setup_marathon: TimerProtocol
bounce_length: TimerProtocol
class ServiceInstance(NamedTuple):
service: str
instance: str
watcher: str
bounce_by: float
wait_until: float
enqueue_time: float
bounce_start_time: float
failures: int = 0
processed_count: int = 0
# Hack to make the default values for ServiceInstance work on python 3.6.0. (typing.NamedTuple gained default values in
# python 3.6.1.)
ServiceInstance.__new__.__defaults__ = (0, 0) # type: ignore
class PaastaThread(Thread):
@property
def log(self) -> logging.Logger:
name = ".".join([type(self).__module__, type(self).__name__])
return logging.getLogger(name)
class PaastaQueue(Queue):
def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
self.name = name
super().__init__(*args, **kwargs)
@property
def log(self) -> logging.Logger:
name = ".".join([type(self).__module__, type(self).__name__])
return logging.getLogger(name)
def put(self, item: Any, *args: Any, **kwargs: Any) -> None:
self.log.debug(f"Adding {item} to {self.name} queue")
super().put(item, *args, **kwargs)
def exponential_back_off(
failures: int, factor: float, base: float, max_time: float
) -> float:
seconds = factor * base ** failures
return seconds if seconds < max_time else max_time
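# Worked example (hedged; parameter values chosen purely for illustration):
#   exponential_back_off(failures=1, factor=2, base=2, max_time=60) -> 4    (2 * 2**1)
#   exponential_back_off(failures=3, factor=2, base=2, max_time=60) -> 16   (2 * 2**3)
#   exponential_back_off(failures=6, factor=2, base=2, max_time=60) -> 60   (2 * 2**6 = 128, capped at max_time)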
def get_service_instances_needing_update(
marathon_clients: MarathonClients,
instances: Collection[Tuple[str, str]],
cluster: str,
) -> List[Tuple[str, str, MarathonServiceConfig, str]]:
marathon_apps = {}
for marathon_client in marathon_clients.get_all_clients():
marathon_apps.update(
{app.id: app for app in get_all_marathon_apps(marathon_client)}
)
marathon_app_ids = marathon_apps.keys()
service_instances = []
for service, instance in instances:
try:
config = load_marathon_service_config_no_cache(
service=service,
instance=instance,
cluster=cluster,
soa_dir=DEFAULT_SOA_DIR,
)
config_app = config.format_marathon_app_dict()
app_id = "/{}".format(config_app["id"])
# Not ideal but we rely on a lot of user input to create the app dict
# and we really can't afford to bail if just one app definition is malformed
except Exception as e:
print(
"ERROR: Skipping {}.{} because: '{}'".format(service, instance, str(e))
)
continue
if (
app_id not in marathon_app_ids
or marathon_apps[app_id].instances != config_app["instances"]
):
service_instances.append((service, instance, config, app_id))
return service_instances
def get_marathon_clients_from_config() -> MarathonClients:
system_paasta_config = load_system_paasta_config()
marathon_servers = get_marathon_servers(system_paasta_config)
marathon_clients = get_marathon_clients(marathon_servers)
return marathon_clients
class DelayDeadlineQueueProtocol(Protocol):
def __init__(self) -> None:
...
def put(self, si: ServiceInstance) -> None:
...
@contextmanager
def get(
self, block: bool = True, timeout: float = None
) -> Generator[ServiceInstance, None, None]:
...
def get_available_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, Optional[ServiceInstance]]]:
...
def get_unavailable_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, float, Optional[ServiceInstance]]]:
...
class DelayDeadlineQueue(DelayDeadlineQueueProtocol):
"""Entries into this queue have both a wait_until and a bounce_by. Before wait_until, get() will not return an entry.
get() returns the entry whose wait_until has passed and which has the lowest bounce_by."""
def __init__(self) -> None:
self.available_service_instances: PriorityQueue[
Tuple[float, ServiceInstance]
] = PriorityQueue()
self.unavailable_service_instances: PriorityQueue[
Tuple[float, float, ServiceInstance]
] = PriorityQueue()
self.unavailable_service_instances_modify = Condition()
self.background_thread_started = Event()
Thread(target=self.move_from_unavailable_to_available, daemon=True).start()
self.background_thread_started.wait()
@property
def log(self) -> logging.Logger:
name = ".".join([type(self).__module__, type(self).__name__])
return logging.getLogger(name)
def put(self, si: ServiceInstance) -> None:
self.log.debug(
f"adding {si.service}.{si.instance} to queue with wait_until {si.wait_until} and bounce_by {si.bounce_by}"
)
with self.unavailable_service_instances_modify:
self.unavailable_service_instances.put((si.wait_until, si.bounce_by, si))
self.unavailable_service_instances_modify.notify()
def move_from_unavailable_to_available(self) -> None:
self.background_thread_started.set()
with self.unavailable_service_instances_modify:
while True:
try:
while True:
(
wait_until,
bounce_by,
si,
) = self.unavailable_service_instances.get_nowait()
if wait_until < time.time():
self.available_service_instances.put_nowait((bounce_by, si))
else:
self.unavailable_service_instances.put_nowait(
(wait_until, bounce_by, si)
)
timeout = wait_until - time.time()
break
except Empty:
timeout = None
self.unavailable_service_instances_modify.wait(timeout=timeout)
@contextmanager
def get(
self, block: bool = True, timeout: float = None
) -> Generator[ServiceInstance, None, None]:
bounce_by, si = self.available_service_instances.get(
block=block, timeout=timeout
)
try:
yield si
except Exception:
self.available_service_instances.put((bounce_by, si))
def get_available_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, Optional[ServiceInstance]]]:
return [
(bounce_by, (si if fetch_service_instances else None))
for bounce_by, si in self.available_service_instances.queue
]
def get_unavailable_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, float, Optional[ServiceInstance]]]:
return [
(wait_until, bounce_by, (si if fetch_service_instances else None))
for wait_until, bounce_by, si in self.unavailable_service_instances.queue
]
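# Minimal usage sketch (assumption: illustrative values only, not part of the original module).
# A ServiceInstance whose wait_until has already passed is put on the queue; the background
# thread promotes it to the available queue, so get() yields it almost immediately.
if __name__ == "__main__":
    now = time.time()
    demo_queue = DelayDeadlineQueue()
    demo_queue.put(
        ServiceInstance(
            service="example_service",
            instance="main",
            watcher="demo",
            bounce_by=now + 60,
            wait_until=now,
            enqueue_time=now,
            bounce_start_time=now,
        )
    )
    with demo_queue.get(timeout=5) as si:
        print(f"dequeued {si.service}.{si.instance} (bounce_by={si.bounce_by})")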
|
import unittest
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import Inference
from collections import defaultdict
class TestInferenceBase(unittest.TestCase):
def setUp(self):
self.bayesian = BayesianModel([("a", "b"), ("b", "c"), ("c", "d"), ("d", "e")])
a_cpd = TabularCPD("a", 2, [[0.4], [0.6]])
b_cpd = TabularCPD(
"b", 2, [[0.2, 0.4], [0.8, 0.6]], evidence=["a"], evidence_card=[2]
)
c_cpd = TabularCPD(
"c", 2, [[0.1, 0.2], [0.9, 0.8]], evidence=["b"], evidence_card=[2]
)
d_cpd = TabularCPD(
"d", 2, [[0.4, 0.3], [0.6, 0.7]], evidence=["c"], evidence_card=[2]
)
e_cpd = TabularCPD(
"e", 2, [[0.3, 0.2], [0.7, 0.8]], evidence=["d"], evidence_card=[2]
)
self.bayesian.add_cpds(a_cpd, b_cpd, c_cpd, d_cpd, e_cpd)
self.markov = MarkovModel([("a", "b"), ("b", "d"), ("a", "c"), ("c", "d")])
factor_1 = DiscreteFactor(["a", "b"], [2, 2], np.array([100, 1, 1, 100]))
factor_2 = DiscreteFactor(["a", "c"], [2, 2], np.array([40, 30, 100, 20]))
factor_3 = DiscreteFactor(["b", "d"], [2, 2], np.array([1, 100, 100, 1]))
factor_4 = DiscreteFactor(["c", "d"], [2, 2], np.array([60, 60, 40, 40]))
self.markov.add_factors(factor_1, factor_2, factor_3, factor_4)
def test_bayesian_inference_init(self):
infer_bayesian = Inference(self.bayesian)
self.assertEqual(set(infer_bayesian.variables), {"a", "b", "c", "d", "e"})
self.assertEqual(
infer_bayesian.cardinality, {"a": 2, "b": 2, "c": 2, "d": 2, "e": 2}
)
self.assertIsInstance(infer_bayesian.factors, defaultdict)
self.assertEqual(
set(infer_bayesian.factors["a"]),
set(
[
self.bayesian.get_cpds("a").to_factor(),
self.bayesian.get_cpds("b").to_factor(),
]
),
)
self.assertEqual(
set(infer_bayesian.factors["b"]),
set(
[
self.bayesian.get_cpds("b").to_factor(),
self.bayesian.get_cpds("c").to_factor(),
]
),
)
self.assertEqual(
set(infer_bayesian.factors["c"]),
set(
[
self.bayesian.get_cpds("c").to_factor(),
self.bayesian.get_cpds("d").to_factor(),
]
),
)
self.assertEqual(
set(infer_bayesian.factors["d"]),
set(
[
self.bayesian.get_cpds("d").to_factor(),
self.bayesian.get_cpds("e").to_factor(),
]
),
)
self.assertEqual(
set(infer_bayesian.factors["e"]),
set([self.bayesian.get_cpds("e").to_factor()]),
)
def test_markov_inference_init(self):
infer_markov = Inference(self.markov)
self.assertEqual(set(infer_markov.variables), {"a", "b", "c", "d"})
self.assertEqual(infer_markov.cardinality, {"a": 2, "b": 2, "c": 2, "d": 2})
self.assertEqual(
infer_markov.factors,
{
"a": [
DiscreteFactor(["a", "b"], [2, 2], np.array([100, 1, 1, 100])),
DiscreteFactor(["a", "c"], [2, 2], np.array([40, 30, 100, 20])),
],
"b": [
DiscreteFactor(["a", "b"], [2, 2], np.array([100, 1, 1, 100])),
DiscreteFactor(["b", "d"], [2, 2], np.array([1, 100, 100, 1])),
],
"c": [
DiscreteFactor(["a", "c"], [2, 2], np.array([40, 30, 100, 20])),
DiscreteFactor(["c", "d"], [2, 2], np.array([60, 60, 40, 40])),
],
"d": [
DiscreteFactor(["b", "d"], [2, 2], np.array([1, 100, 100, 1])),
DiscreteFactor(["c", "d"], [2, 2], np.array([60, 60, 40, 40])),
],
},
)
|
import os, re, sys, subprocess
import tarfile
from distutils import log, version
from contextlib import closing
from ftplib import FTP
try:
from urlparse import urljoin, unquote, urlparse
from urllib import urlretrieve, urlopen, urlcleanup
except ImportError:
from urllib.parse import urljoin, unquote, urlparse
from urllib.request import urlretrieve, urlopen, urlcleanup
multi_make_options = []
try:
import multiprocessing
cpus = multiprocessing.cpu_count()
if cpus > 1:
if cpus > 5:
cpus = 5
multi_make_options = ['-j%d' % (cpus+1)]
except:
pass
# use pre-built libraries on Windows
def download_and_extract_windows_binaries(destdir):
url = "https://github.com/mhils/libxml2-win-binaries/releases"
filenames = list(_list_dir_urllib(url))
release_path = "/download/%s/" % find_max_version(
"library release", filenames, re.compile(r"/releases/tag/([0-9.]+[0-9])$"))
url += release_path
filenames = [
filename.rsplit('/', 1)[1]
for filename in filenames
if release_path in filename
]
arch = "win64" if sys.maxsize > 2**32 else "win32"
if sys.version_info < (3, 5):
arch = 'vs2008.' + arch
libs = {}
for libname in ['libxml2', 'libxslt', 'zlib', 'iconv']:
libs[libname] = "%s-%s.%s.zip" % (
libname,
find_max_version(libname, filenames),
arch,
)
if not os.path.exists(destdir):
os.makedirs(destdir)
for libname, libfn in libs.items():
srcfile = urljoin(url, libfn)
destfile = os.path.join(destdir, libfn)
if os.path.exists(destfile + ".keep"):
print('Using local copy of "{}"'.format(srcfile))
else:
print('Retrieving "%s" to "%s"' % (srcfile, destfile))
urlcleanup() # work around FTP bug 27973 in Py2.7.12+
urlretrieve(srcfile, destfile)
d = unpack_zipfile(destfile, destdir)
libs[libname] = d
return libs
def find_top_dir_of_zipfile(zipfile):
topdir = None
files = [f.filename for f in zipfile.filelist]
dirs = [d for d in files if d.endswith('/')]
if dirs:
dirs.sort(key=len)
topdir = dirs[0]
topdir = topdir[:topdir.index("/")+1]
for path in files:
if not path.startswith(topdir):
topdir = None
break
assert topdir, (
"cannot determine single top-level directory in zip file %s" %
zipfile.filename)
return topdir.rstrip('/')
def unpack_zipfile(zipfn, destdir):
assert zipfn.endswith('.zip')
import zipfile
print('Unpacking %s into %s' % (os.path.basename(zipfn), destdir))
f = zipfile.ZipFile(zipfn)
try:
extracted_dir = os.path.join(destdir, find_top_dir_of_zipfile(f))
f.extractall(path=destdir)
finally:
f.close()
assert os.path.exists(extracted_dir), 'missing: %s' % extracted_dir
return extracted_dir
def get_prebuilt_libxml2xslt(download_dir, static_include_dirs, static_library_dirs):
assert sys.platform.startswith('win')
libs = download_and_extract_windows_binaries(download_dir)
for libname, path in libs.items():
i = os.path.join(path, 'include')
l = os.path.join(path, 'lib')
assert os.path.exists(i), 'does not exist: %s' % i
assert os.path.exists(l), 'does not exist: %s' % l
static_include_dirs.append(i)
static_library_dirs.append(l)
## Routines to download and build libxml2/xslt from sources:
LIBXML2_LOCATION = 'http://xmlsoft.org/sources/'
LIBICONV_LOCATION = 'https://ftp.gnu.org/pub/gnu/libiconv/'
ZLIB_LOCATION = 'https://zlib.net/'
match_libfile_version = re.compile('^[^-]*-([.0-9-]+)[.].*').match
def _find_content_encoding(response, default='iso8859-1'):
from email.message import Message
content_type = response.headers.get('Content-Type')
if content_type:
msg = Message()
msg.add_header('Content-Type', content_type)
charset = msg.get_content_charset(default)
else:
charset = default
return charset
def remote_listdir(url):
try:
return _list_dir_urllib(url)
except IOError:
assert url.lower().startswith('ftp://')
print("Requesting with urllib failed. Falling back to ftplib. "
"Proxy argument will be ignored for %s" % url)
return _list_dir_ftplib(url)
def _list_dir_ftplib(url):
parts = urlparse(url)
ftp = FTP(parts.netloc)
try:
ftp.login()
ftp.cwd(parts.path)
data = []
ftp.dir(data.append)
finally:
ftp.quit()
return parse_text_ftplist("\n".join(data))
def _list_dir_urllib(url):
with closing(urlopen(url)) as res:
charset = _find_content_encoding(res)
content_type = res.headers.get('Content-Type')
data = res.read()
data = data.decode(charset)
if content_type and content_type.startswith('text/html'):
files = parse_html_filelist(data)
else:
files = parse_text_ftplist(data)
return files
def http_listfiles(url, re_pattern):
with closing(urlopen(url)) as res:
charset = _find_content_encoding(res)
data = res.read()
files = re.findall(re_pattern, data.decode(charset))
return files
def parse_text_ftplist(s):
for line in s.splitlines():
if not line.startswith('d'):
# -rw-r--r-- 1 ftp ftp 476 Sep 1 2011 md5sum.txt
# Last (9th) element is 'md5sum.txt' in the above example, but there
# may be variations, so we discard only the first 8 entries.
yield line.split(None, 8)[-1]
def parse_html_filelist(s):
re_href = re.compile(
r'<a\s+(?:[^>]*\s+)?href=["\']([^;?"\']+?)[;?"\']',
re.I|re.M)
links = set(re_href.findall(s))
for link in links:
if not link.endswith('/'):
yield unquote(link)
def tryint(s):
try:
return int(s)
except ValueError:
return s
def download_libxml2(dest_dir, version=None):
"""Downloads libxml2, returning the filename where the library was downloaded"""
#version_re = re.compile(r'LATEST_LIBXML2_IS_([0-9.]+[0-9](?:-[abrc0-9]+)?)')
version_re = re.compile(r'libxml2-([0-9.]+[0-9]).tar.gz')
filename = 'libxml2-%s.tar.gz'
return download_library(dest_dir, LIBXML2_LOCATION, 'libxml2',
version_re, filename, version=version)
def download_libxslt(dest_dir, version=None):
"""Downloads libxslt, returning the filename where the library was downloaded"""
#version_re = re.compile(r'LATEST_LIBXSLT_IS_([0-9.]+[0-9](?:-[abrc0-9]+)?)')
version_re = re.compile(r'libxslt-([0-9.]+[0-9]).tar.gz')
filename = 'libxslt-%s.tar.gz'
return download_library(dest_dir, LIBXML2_LOCATION, 'libxslt',
version_re, filename, version=version)
def download_libiconv(dest_dir, version=None):
"""Downloads libiconv, returning the filename where the library was downloaded"""
version_re = re.compile(r'libiconv-([0-9.]+[0-9]).tar.gz')
filename = 'libiconv-%s.tar.gz'
return download_library(dest_dir, LIBICONV_LOCATION, 'libiconv',
version_re, filename, version=version)
def download_zlib(dest_dir, version):
"""Downloads zlib, returning the filename where the library was downloaded"""
version_re = re.compile(r'zlib-([0-9.]+[0-9]).tar.gz')
filename = 'zlib-%s.tar.gz'
return download_library(dest_dir, ZLIB_LOCATION, 'zlib',
version_re, filename, version=version)
def find_max_version(libname, filenames, version_re=None):
if version_re is None:
version_re = re.compile(r'%s-([0-9.]+[0-9](?:-[abrc0-9]+)?)' % libname)
versions = []
for fn in filenames:
match = version_re.search(fn)
if match:
version_string = match.group(1)
versions.append((tuple(map(tryint, version_string.split('.'))),
version_string))
if not versions:
raise Exception(
"Could not find the most current version of %s from the files: %s" % (
libname, filenames))
versions.sort()
version_string = versions[-1][-1]
print('Latest version of %s is %s' % (libname, version_string))
return version_string
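# Example (hedged; illustrative filenames): with the default pattern for libname='libxml2',
#   find_max_version('libxml2', ['libxml2-2.9.2.tar.gz', 'libxml2-2.9.10.tar.gz'])
# compares the parsed version tuples (2, 9, 2) and (2, 9, 10), prints the chosen version
# and returns '2.9.10'.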
def download_library(dest_dir, location, name, version_re, filename, version=None):
if version is None:
try:
if location.startswith('ftp://'):
fns = remote_listdir(location)
else:
fns = http_listfiles(location, '(%s)' % filename.replace('%s', '(?:[0-9.]+[0-9])'))
version = find_max_version(name, fns, version_re)
except IOError:
# network failure - maybe we have the files already?
latest = (0,0,0)
fns = os.listdir(dest_dir)
for fn in fns:
if fn.startswith(name+'-'):
match = match_libfile_version(fn)
if match:
version_tuple = tuple(map(tryint, match.group(1).split('.')))
if version_tuple > latest:
latest = version_tuple
filename = fn
version = None
if latest == (0,0,0):
raise
if version:
filename = filename % version
full_url = urljoin(location, filename)
dest_filename = os.path.join(dest_dir, filename)
if os.path.exists(dest_filename):
print(('Using existing %s downloaded into %s '
'(delete this file if you want to re-download the package)') % (
name, dest_filename))
else:
print('Downloading %s into %s from %s' % (name, dest_filename, full_url))
urlcleanup() # work around FTP bug 27973 in Py2.7.12
urlretrieve(full_url, dest_filename)
return dest_filename
def unpack_tarball(tar_filename, dest):
print('Unpacking %s into %s' % (os.path.basename(tar_filename), dest))
tar = tarfile.open(tar_filename)
base_dir = None
for member in tar:
base_name = member.name.split('/')[0]
if base_dir is None:
base_dir = base_name
elif base_dir != base_name:
print('Unexpected path in %s: %s' % (tar_filename, base_name))
tar.extractall(dest)
tar.close()
return os.path.join(dest, base_dir)
def call_subprocess(cmd, **kw):
import subprocess
cwd = kw.get('cwd', '.')
cmd_desc = ' '.join(cmd)
log.info('Running "%s" in %s' % (cmd_desc, cwd))
returncode = subprocess.call(cmd, **kw)
if returncode:
raise Exception('Command "%s" returned code %s' % (cmd_desc, returncode))
def safe_mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def cmmi(configure_cmd, build_dir, multicore=None, **call_setup):
print('Starting build in %s' % build_dir)
call_subprocess(configure_cmd, cwd=build_dir, **call_setup)
if not multicore:
make_jobs = multi_make_options
elif int(multicore) > 1:
make_jobs = ['-j%s' % multicore]
else:
make_jobs = []
call_subprocess(
['make'] + make_jobs,
cwd=build_dir, **call_setup)
call_subprocess(
['make'] + make_jobs + ['install'],
cwd=build_dir, **call_setup)
def configure_darwin_env(env_setup):
import platform
# configure target architectures on MacOS-X (x86_64 only, by default)
major_version, minor_version = tuple(map(int, platform.mac_ver()[0].split('.')[:2]))
if major_version > 7:
env_default = {
'CFLAGS': "-arch x86_64 -O2",
'LDFLAGS': "-arch x86_64",
'MACOSX_DEPLOYMENT_TARGET': "10.6"
}
env_default.update(os.environ)
env_setup['env'] = env_default
def build_libxml2xslt(download_dir, build_dir,
static_include_dirs, static_library_dirs,
static_cflags, static_binaries,
libxml2_version=None,
libxslt_version=None,
libiconv_version=None,
zlib_version=None,
multicore=None):
safe_mkdir(download_dir)
safe_mkdir(build_dir)
zlib_dir = unpack_tarball(download_zlib(download_dir, zlib_version), build_dir)
libiconv_dir = unpack_tarball(download_libiconv(download_dir, libiconv_version), build_dir)
libxml2_dir = unpack_tarball(download_libxml2(download_dir, libxml2_version), build_dir)
libxslt_dir = unpack_tarball(download_libxslt(download_dir, libxslt_version), build_dir)
prefix = os.path.join(os.path.abspath(build_dir), 'libxml2')
lib_dir = os.path.join(prefix, 'lib')
safe_mkdir(prefix)
lib_names = ['libxml2', 'libexslt', 'libxslt', 'iconv', 'libz']
existing_libs = {
lib: os.path.join(lib_dir, filename)
for lib in lib_names
for filename in os.listdir(lib_dir)
if lib in filename and filename.endswith('.a')
} if os.path.isdir(lib_dir) else {}
def has_current_lib(name, build_dir, _build_all_following=[False]):
if _build_all_following[0]:
            return False  # a dependency was rebuilt => rebuild this lib as well
lib_file = existing_libs.get(name)
found = lib_file and os.path.getmtime(lib_file) > os.path.getmtime(build_dir)
if found:
print("Found pre-built '%s'" % name)
else:
# also rebuild all following libs (which may depend on this one)
_build_all_following[0] = True
return found
call_setup = {}
if sys.platform == 'darwin':
configure_darwin_env(call_setup)
configure_cmd = ['./configure',
'--disable-dependency-tracking',
'--disable-shared',
'--prefix=%s' % prefix,
]
# build zlib
zlib_configure_cmd = [
'./configure',
'--prefix=%s' % prefix,
]
if not has_current_lib("libz", zlib_dir):
cmmi(zlib_configure_cmd, zlib_dir, multicore, **call_setup)
# build libiconv
if not has_current_lib("iconv", libiconv_dir):
cmmi(configure_cmd, libiconv_dir, multicore, **call_setup)
# build libxml2
libxml2_configure_cmd = configure_cmd + [
'--without-python',
'--with-iconv=%s' % prefix,
'--with-zlib=%s' % prefix,
]
if not libxml2_version:
libxml2_version = os.path.basename(libxml2_dir).split('-', 1)[-1]
if tuple(map(tryint, libxml2_version.split('-', 1)[0].split('.'))) >= (2, 9, 5):
libxml2_configure_cmd.append('--without-lzma') # can't currently build that
try:
if tuple(map(tryint, libxml2_version.split('-', 1)[0].split('.'))) >= (2, 7, 3):
libxml2_configure_cmd.append('--enable-rebuild-docs=no')
except Exception:
pass # this isn't required, so ignore any errors
if not has_current_lib("libxml2", libxml2_dir):
cmmi(libxml2_configure_cmd, libxml2_dir, multicore, **call_setup)
# build libxslt
libxslt_configure_cmd = configure_cmd + [
'--without-python',
'--with-libxml-prefix=%s' % prefix,
'--without-crypto',
]
if not (has_current_lib("libxslt", libxslt_dir) and has_current_lib("libexslt", libxslt_dir)):
cmmi(libxslt_configure_cmd, libxslt_dir, multicore, **call_setup)
# collect build setup for lxml
xslt_config = os.path.join(prefix, 'bin', 'xslt-config')
xml2_config = os.path.join(prefix, 'bin', 'xml2-config')
static_include_dirs.extend([
os.path.join(prefix, 'include'),
os.path.join(prefix, 'include', 'libxml2'),
os.path.join(prefix, 'include', 'libxslt'),
os.path.join(prefix, 'include', 'libexslt')])
static_library_dirs.append(lib_dir)
listdir = os.listdir(lib_dir)
static_binaries += [os.path.join(lib_dir, filename)
for lib in lib_names
for filename in listdir
if lib in filename and filename.endswith('.a')]
return xml2_config, xslt_config
|
import unittest
from allennlp.data.tokenizers import SpacyTokenizer
class TestAllenNlp(unittest.TestCase):
# reference
# https://github.com/allenai/allennlp/blob/master/allennlp/tests/data/tokenizers/word_tokenizer_test.py
def test_passes_through_correctly(self):
tokenizer = SpacyTokenizer()
sentence = "this (sentence) has 'crazy' \"punctuation\"."
tokens = [t.text for t in tokenizer.tokenize(sentence)]
expected_tokens = ["this", "(", "sentence", ")", "has", "'", "crazy", "'", "\"",
"punctuation", "\"", "."]
self.assertSequenceEqual(tokens, expected_tokens)
|
import asyncio
from datetime import datetime, timedelta
import logging
from typing import Any, Dict, Optional
from aioflo.api import API
from aioflo.errors import RequestError
from async_timeout import timeout
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
import homeassistant.util.dt as dt_util
from .const import DOMAIN as FLO_DOMAIN
_LOGGER = logging.getLogger(__name__)
class FloDeviceDataUpdateCoordinator(DataUpdateCoordinator):
"""Flo device object."""
def __init__(
self, hass: HomeAssistantType, api_client: API, location_id: str, device_id: str
):
"""Initialize the device."""
self.hass: HomeAssistantType = hass
self.api_client: API = api_client
self._flo_location_id: str = location_id
self._flo_device_id: str = device_id
self._manufacturer: str = "Flo by Moen"
self._device_information: Optional[Dict[str, Any]] = None
self._water_usage: Optional[Dict[str, Any]] = None
super().__init__(
hass,
_LOGGER,
name=f"{FLO_DOMAIN}-{device_id}",
update_interval=timedelta(seconds=60),
)
async def _async_update_data(self):
"""Update data via library."""
try:
async with timeout(10):
await asyncio.gather(
*[self._update_device(), self._update_consumption_data()]
)
        except RequestError as error:
raise UpdateFailed(error) from error
@property
def location_id(self) -> str:
"""Return Flo location id."""
return self._flo_location_id
@property
def id(self) -> str:
"""Return Flo device id."""
return self._flo_device_id
@property
def device_name(self) -> str:
"""Return device name."""
return f"{self.manufacturer} {self.model}"
@property
def manufacturer(self) -> str:
"""Return manufacturer for device."""
return self._manufacturer
@property
def mac_address(self) -> str:
"""Return ieee address for device."""
return self._device_information["macAddress"]
@property
def model(self) -> str:
"""Return model for device."""
return self._device_information["deviceModel"]
@property
def rssi(self) -> float:
"""Return rssi for device."""
return self._device_information["connectivity"]["rssi"]
@property
def last_heard_from_time(self) -> str:
"""Return lastHeardFromTime for device."""
return self._device_information["lastHeardFromTime"]
@property
def device_type(self) -> str:
"""Return the device type for the device."""
return self._device_information["deviceType"]
@property
def available(self) -> bool:
"""Return True if device is available."""
return self.last_update_success and self._device_information["isConnected"]
@property
def current_system_mode(self) -> str:
"""Return the current system mode."""
return self._device_information["systemMode"]["lastKnown"]
@property
def target_system_mode(self) -> str:
"""Return the target system mode."""
return self._device_information["systemMode"]["target"]
@property
def current_flow_rate(self) -> float:
"""Return current flow rate in gpm."""
return self._device_information["telemetry"]["current"]["gpm"]
@property
def current_psi(self) -> float:
"""Return the current pressure in psi."""
return self._device_information["telemetry"]["current"]["psi"]
@property
def temperature(self) -> float:
"""Return the current temperature in degrees F."""
return self._device_information["telemetry"]["current"]["tempF"]
@property
def consumption_today(self) -> float:
"""Return the current consumption for today in gallons."""
return self._water_usage["aggregations"]["sumTotalGallonsConsumed"]
@property
def firmware_version(self) -> str:
"""Return the firmware version for the device."""
return self._device_information["fwVersion"]
@property
def serial_number(self) -> str:
"""Return the serial number for the device."""
return self._device_information["serialNumber"]
@property
def pending_info_alerts_count(self) -> int:
"""Return the number of pending info alerts for the device."""
return self._device_information["notifications"]["pending"]["infoCount"]
@property
def pending_warning_alerts_count(self) -> int:
"""Return the number of pending warning alerts for the device."""
return self._device_information["notifications"]["pending"]["warningCount"]
@property
def pending_critical_alerts_count(self) -> int:
"""Return the number of pending critical alerts for the device."""
return self._device_information["notifications"]["pending"]["criticalCount"]
@property
def has_alerts(self) -> bool:
"""Return True if any alert counts are greater than zero."""
        return bool(
            self.pending_info_alerts_count
            or self.pending_warning_alerts_count
            or self.pending_critical_alerts_count
        )
@property
def last_known_valve_state(self) -> str:
"""Return the last known valve state for the device."""
return self._device_information["valve"]["lastKnown"]
@property
def target_valve_state(self) -> str:
"""Return the target valve state for the device."""
return self._device_information["valve"]["target"]
async def async_set_mode_home(self):
"""Set the Flo location to home mode."""
await self.api_client.location.set_mode_home(self._flo_location_id)
async def async_set_mode_away(self):
"""Set the Flo location to away mode."""
await self.api_client.location.set_mode_away(self._flo_location_id)
async def async_set_mode_sleep(self, sleep_minutes, revert_to_mode):
"""Set the Flo location to sleep mode."""
await self.api_client.location.set_mode_sleep(
self._flo_location_id, sleep_minutes, revert_to_mode
)
async def async_run_health_test(self):
"""Run a Flo device health test."""
await self.api_client.device.run_health_test(self._flo_device_id)
async def _update_device(self, *_) -> None:
"""Update the device information from the API."""
self._device_information = await self.api_client.device.get_info(
self._flo_device_id
)
_LOGGER.debug("Flo device data: %s", self._device_information)
async def _update_consumption_data(self, *_) -> None:
"""Update water consumption data from the API."""
today = dt_util.now().date()
start_date = datetime(today.year, today.month, today.day, 0, 0)
end_date = datetime(today.year, today.month, today.day, 23, 59, 59, 999000)
self._water_usage = await self.api_client.water.get_consumption_info(
self._flo_location_id, start_date, end_date
)
_LOGGER.debug("Updated Flo consumption data: %s", self._water_usage)
|
import unittest
import cv2
import torch
import kornia
class TestKornia(unittest.TestCase):
def test_imread_opencv(self):
img = cv2.imread('/input/tests/data/dot.png')
img_t = kornia.image_to_tensor(img)
self.assertEqual(img.shape, (1, 1, 3))
self.assertEqual(img_t.shape, (3, 1, 1))
def test_grayscale_torch(self):
img_rgb = torch.rand(2, 3, 4, 5)
img_gray = kornia.rgb_to_grayscale(img_rgb)
self.assertEqual(img_gray.shape, (2, 1, 4, 5))
|
import struct
from graphite import GraphiteHandler
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
class GraphitePickleHandler(GraphiteHandler):
"""
Overrides the GraphiteHandler class
Sending data to graphite using batched pickle format
"""
def __init__(self, config=None):
"""
Create a new instance of the GraphitePickleHandler
"""
# Initialize GraphiteHandler
GraphiteHandler.__init__(self, config)
# Initialize Data
self.batch = []
# Initialize Options
self.batch_size = int(self.config['batch'])
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GraphitePickleHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphitePickleHandler, self).get_default_config()
config.update({
'port': 2004,
})
return config
def process(self, metric):
# Convert metric to pickle format
m = (metric.path, (metric.timestamp, metric.value))
        # Add the metric to the batch
self.batch.append(m)
# If there are sufficient metrics, then pickle and send
if len(self.batch) >= self.batch_size:
# Log
self.log.debug("GraphitePickleHandler: Sending batch size: %d",
self.batch_size)
# Pickle the batch of metrics
self.metrics = [self._pickle_batch()]
# Send pickled batch
self._send()
# Flush the metric pack down the wire
self.flush()
# Clear Batch
self.batch = []
def _pickle_batch(self):
"""
Pickle the metrics into a form that can be understood
by the graphite pickle connector.
"""
# Pickle
payload = pickle.dumps(self.batch)
# Pack Message
header = struct.pack("!L", len(payload))
message = header + payload
# Return Message
return message
|
import re
import operator
# release type identifier -> release type priority (higher == better)
RELEASE_TYPE_PRIORITIES = {
None: 4, # no release type
"a": 1, # alpha post release
"b": 2, # beta post release
"rc": 3, # release candidate
"post": 5, # post release
"dev": 0, # dev release
}
def _parse_version(vs):
"""
Parse a version string, e.g. '2!3.0.1.rc2.dev3'
:param vs: version to parse
:type vs: str
:return: a dict containing the fragments about the version
:rtype: dict
"""
    # NOTE: the code below may be a bit messy, because it was rewritten multiple times and then repurposed from a sort function into a parsing function
# version scheme (PEP440): [N!]N(.N)*[{a|b|rc}N][.postN][.devN]
    # convert to lowercase, since all versions must be case insensitive (PEP440)
e = vs.lower()
    # extract the epoch, if present
if "!" in e:
# read epoch
es = e[:e.find("!")]
e = e[e.find("!") + 1:]
epoch = int(es)
else:
# default epoch is 0
epoch = 0
# parse version tuple
veis = re.search("[^0-9\\.]", e)
if veis is None:
# no non-digits
vei = len(e)
else:
vei = veis.start()
vstr = e[:vei]
while vstr.endswith("."):
# remove trailing '.'
vstr = vstr[:-1]
splitted = vstr.split(".")
verparts = []
for v in splitted:
verparts.append(int(v))
# parse post release
rtstr = e[vei:]
if "post" in rtstr:
postrnstr = rtstr[rtstr.find("post") + 4:]
postrnres = re.search("[0-9]*", postrnstr)
if postrnres is None or postrnres.end() == 0:
# PEP440: implicit post version 0
subpriority = 0
else:
subpriority = int(postrnstr[:postrnres.end()])
rtstr = rtstr[:rtstr.find("post") - 1]
is_post = True
else:
subpriority = 0
is_post = False
# parse release type
rtype = None
if len(rtstr) == 0:
rtype = None
elif rtstr[0] in ("a", "b"):
rtype = rtstr[0]
elif rtstr.startswith("rc"):
rtype = "rc"
# parse number of release
if rtype is None:
# not needed
rsubpriority = 0
else:
# 1. strip rtype
rtps = rtstr[len(rtype):]
# 2. extract until non-digit
rtpsr = re.search("[0-9]*", rtps)
if rtpsr is None or rtpsr.end() == 0:
# no number
rsubpriority = 0
else:
rsubpriority = int(rtps[:rtpsr.end()])
# extract dev release information
devr = re.search("\\.?dev[0-9]*", e)
if devr is None:
isdev = False
devnum = 0
else:
isdev = True
devns = e[devr.start() + 4:] # 4: 1 for '.'; 3 for 'dev'
if len(devns) == 0:
devnum = 0
else:
devnum = int(devns)
return {
"epoch": epoch,
"versiontuple": tuple(verparts),
"rtype": rtype,
"subversion": rsubpriority,
"postrelease": (subpriority if is_post else None),
"devrelease": (devnum if isdev else None),
}
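# Worked example for the docstring above (hedged; traced by hand against this parser):
#   _parse_version('2!3.0.1.rc2.dev3') == {
#       'epoch': 2,
#       'versiontuple': (3, 0, 1),
#       'rtype': 'rc',
#       'subversion': 2,
#       'postrelease': None,
#       'devrelease': 3,
#   }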
def sort_versions(versionlist):
"""
Return a list containing the versions in versionlist, starting with the highest version.
:param versionlist: list of versions to sort
:type versionlist: list of str
:return: the sorted list
:rtype: list of str
"""
return sorted(versionlist, key=lambda s: Version.parse(s) if isinstance(s, str) else s, reverse=True)
class Version(object):
"""
    This class represents a version. It is mainly used for version comparison.
"""
TYPE_NORMAL = None
TYPE_ALPHA = "a"
TYPE_BETA = "b"
TYPE_RELEASE_CANDIDATE = "rc"
RELEASE_TYPE_PRIORITIES = {
        # priority of a release type. greater value => higher priority
TYPE_NORMAL: 3, # no release type
        TYPE_ALPHA: 0,  # alpha pre-release
        TYPE_BETA: 1,  # beta pre-release
TYPE_RELEASE_CANDIDATE: 2, # release candidate
}
def __init__(self, epoch=0, versiontuple=(), rtype=None, subversion=0, postrelease=None, devrelease=None):
assert isinstance(epoch, int)
assert isinstance(versiontuple, tuple)
assert isinstance(rtype, str) or rtype is None
assert isinstance(subversion, int)
assert isinstance(postrelease, int) or postrelease is None
assert isinstance(devrelease, int) or devrelease is None
self.epoch = epoch
self.versiontuple = versiontuple
self.rtype = rtype
self.subversion = subversion
self.postrelease = postrelease
self.devrelease = devrelease
@classmethod
def parse(cls, s):
"""
Parse a versionstring and return a Version() of it.
:param s: string to parse
:type s: str
:return: a Version() instance describing the parsed string
:rtype: Version
"""
if isinstance(s, cls):
# s is already a Version
return s
parsed = _parse_version(s)
return Version(**parsed)
@property
def is_postrelease(self):
"""whether this version is a postrelease or not"""
return self.postrelease is not None
@property
def is_devrelease(self):
"""whether this version is a devrelease or not"""
return self.devrelease is not None
def _get_sortkey(self):
"""
Return a value which can be used to compare two versions.
Sort order:
1. epoch
2. release version
3. postrelease > release > prerelease
4. postrelease#
5. non-dev > dev
6. dev#
:return: a value which can be used for comparing this version to another version
:rtype: tuple
"""
rpriority = self.RELEASE_TYPE_PRIORITIES.get(self.rtype, 0)
return (
self.epoch,
self.versiontuple,
rpriority,
self.subversion,
self.is_postrelease,
self.postrelease,
not self.is_devrelease,
self.devrelease
)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._get_sortkey() == other._get_sortkey()
else:
return False
def __gt__(self, other):
if isinstance(other, self.__class__):
return self._get_sortkey() > other._get_sortkey()
else:
return True
def __lt__(self, other):
if isinstance(other, self.__class__):
return self._get_sortkey() < other._get_sortkey()
else:
return False
def __ge__(self, other):
if isinstance(other, self.__class__):
return self._get_sortkey() >= other._get_sortkey()
else:
return False
def __le__(self, other):
if isinstance(other, self.__class__):
return self._get_sortkey() <= other._get_sortkey()
else:
return False
def __str__(self):
"""return a string representation of this version"""
version = ".".join([str(e) for e in self.versiontuple]) # base version
# epoch
if self.epoch > 0:
version = str(self.epoch) + "!" + version
# release type
if self.rtype is not None:
version += "." + self.rtype
if self.subversion > 0:
version += str(self.subversion)
# postrelease
if self.is_postrelease:
version += ".post"
if self.postrelease > 0:
version += str(self.postrelease)
# devrelease
if self.is_devrelease:
version += ".dev"
if self.devrelease > 0:
version += str(self.devrelease)
# done
return version
class VersionSpecifier(object):
"""
    This class represents the version constraints of a requirement, e.g. pyte==0.4.10.
"""
OPS = {
'<=': operator.le,
'<': operator.lt,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'==': operator.eq,
        '~=': operator.ge,  # approximation: PEP 440 compatible release ('~=') is treated as '>='
}
def __init__(self, version_specs):
self.specs = [(VersionSpecifier.OPS[op], version) for (op, version) in version_specs]
self.str = str(version_specs)
def __str__(self):
return self.str
@staticmethod
def parse_requirement(requirement):
"""
Factory method to create a VersionSpecifier object from a requirement
:param requirement: requirement to parse
:type requirement: str
:return: tuple of (requirement_name, version_specifier, list of extras)
:rtype: tuple of (str, VersionSpecifier, list of str)
"""
if isinstance(requirement, (list, tuple)):
if len(requirement) == 1:
requirement = requirement[0]
else:
raise ValueError("Unknown requirement format: " + repr(requirement))
# remove all whitespaces and '()'
requirement = requirement.replace(' ', '')
requirement = requirement.replace("(", "").replace(")", "")
if requirement.startswith("#"):
# ignore
return None, None, []
PAREN = lambda x: '(' + x + ')'
version_cmp = PAREN('?:' + '|'.join(('<=', '<', '!=', '>=', '>', '~=', '==')))
name_end_res = re.search(version_cmp, requirement)
if name_end_res is None:
if "[" in requirement:
si = requirement.find("[")
extra_s = requirement[si + 1:-1]
requirement = requirement[:si]
if len(extra_s) == 0:
extras = []
else:
extras = extra_s.split(",")
else:
extras = []
return requirement, None, extras
name_end = name_end_res.start()
name, specs_s = requirement[:name_end], requirement[name_end:]
if "[" in specs_s:
si = specs_s.find("[")
extra_s = specs_s[si + 1:-1]
specs_s = specs_s[:si]
if len(extra_s) == 0:
extras = []
else:
extras = extra_s.split(",")
else:
extras = []
splitted = specs_s.split(",")
specs = []
for vs in splitted:
if vs == "":
                # for some weird reason, a trailing ',' is sometimes
                # included in the requirement list.
continue
cmp_end = re.search(version_cmp, vs).end()
c, v = vs[:cmp_end], vs[cmp_end:]
specs.append((c, v))
version = VersionSpecifier(specs)
return name, version, extras
def match(self, version):
"""
Check if version is allowed by the version specifiers.
:param version: version to check
:type version: str
:return: whether the version is allowed or not
:rtype: boolean
"""
# return all([op(Version.parse(version), Version.parse(ver)) for op, ver in self.specs])
matches = True
for op, ver in self.specs:
try:
vi = Version.parse(version)
evi = Version.parse(ver)
except:
# warning: wildcard except!
                # fall back to the old, string-based comparison
if not op(version, ver):
matches = False
else:
if not op(vi, evi):
matches = False
return matches
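# Small self-check sketch (assumption: illustrative versions only; runs only when this module
# is executed directly).
if __name__ == "__main__":
    # Highest version first: a final release sorts above its release candidate.
    assert sort_versions(["1.0", "1.0rc1", "1.1"]) == ["1.1", "1.0", "1.0rc1"]
    # Parse a requirement string and check concrete versions against it.
    name, spec, extras = VersionSpecifier.parse_requirement("pyte==0.4.10")
    assert name == "pyte" and extras == []
    assert spec.match("0.4.10")
    assert not spec.match("0.5.0")
    print("version helpers behave as expected")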
|
import os.path as op
import numpy as np
from scipy import linalg
from numpy.testing import assert_allclose
import mne
from mne.beamformer import rap_music
from mne.cov import regularize
from mne.datasets import testing
from mne.minimum_norm.tests.test_inverse import assert_var_exp_log
from mne.utils import catch_logging
data_path = testing.data_path(download=False)
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
def _get_data(ch_decim=1):
"""Read in data used in tests."""
# Read evoked
evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0))
evoked.info['bads'] = ['MEG 2443']
evoked.info['lowpass'] = 16 # fake for decim
evoked.decimate(12)
evoked.crop(0.0, 0.3)
picks = mne.pick_types(evoked.info, meg=True, eeg=False)
picks = picks[::ch_decim]
evoked.pick_channels([evoked.ch_names[pick] for pick in picks])
evoked.info.normalize_proj()
noise_cov = mne.read_cov(fname_cov)
noise_cov['projs'] = []
noise_cov = regularize(noise_cov, evoked.info, rank='full', proj=False)
return evoked, noise_cov
def simu_data(evoked, forward, noise_cov, n_dipoles, times, nave=1):
"""Simulate an evoked dataset with 2 sources.
One source is put in each hemisphere.
"""
# Generate the two dipoles data
mu, sigma = 0.1, 0.005
s1 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
(2 * sigma ** 2))
mu, sigma = 0.075, 0.008
s2 = -1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
(2 * sigma ** 2))
data = np.array([s1, s2]) * 1e-9
src = forward['src']
rng = np.random.RandomState(42)
rndi = rng.randint(len(src[0]['vertno']))
lh_vertno = src[0]['vertno'][[rndi]]
rndi = rng.randint(len(src[1]['vertno']))
rh_vertno = src[1]['vertno'][[rndi]]
vertices = [lh_vertno, rh_vertno]
tmin, tstep = times.min(), 1 / evoked.info['sfreq']
stc = mne.SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep)
sim_evoked = mne.simulation.simulate_evoked(forward, stc, evoked.info,
noise_cov, nave=nave,
random_state=rng)
return sim_evoked, stc
def _check_dipoles(dipoles, fwd, stc, evoked, residual=None):
src = fwd['src']
pos1 = fwd['source_rr'][np.where(src[0]['vertno'] ==
stc.vertices[0])]
pos2 = fwd['source_rr'][np.where(src[1]['vertno'] ==
stc.vertices[1])[0] +
len(src[0]['vertno'])]
# Check the position of the two dipoles
assert (dipoles[0].pos[0] in np.array([pos1, pos2]))
assert (dipoles[1].pos[0] in np.array([pos1, pos2]))
ori1 = fwd['source_nn'][np.where(src[0]['vertno'] ==
stc.vertices[0])[0]][0]
ori2 = fwd['source_nn'][np.where(src[1]['vertno'] ==
stc.vertices[1])[0] +
len(src[0]['vertno'])][0]
# Check the orientation of the dipoles
assert (np.max(np.abs(np.dot(dipoles[0].ori[0],
np.array([ori1, ori2]).T))) > 0.99)
assert (np.max(np.abs(np.dot(dipoles[1].ori[0],
np.array([ori1, ori2]).T))) > 0.99)
if residual is not None:
picks_grad = mne.pick_types(residual.info, meg='grad')
picks_mag = mne.pick_types(residual.info, meg='mag')
rel_tol = 0.02
for picks in [picks_grad, picks_mag]:
assert (linalg.norm(residual.data[picks], ord='fro') <
rel_tol * linalg.norm(evoked.data[picks], ord='fro'))
@testing.requires_testing_data
def test_rap_music_simulated():
"""Test RAP-MUSIC with simulated evoked."""
evoked, noise_cov = _get_data(ch_decim=16)
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_channels_forward(forward, evoked.ch_names)
forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True)
forward_fixed = mne.convert_forward_solution(forward, force_fixed=True,
surf_ori=True, use_cps=True)
n_dipoles = 2
sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
n_dipoles, evoked.times, nave=evoked.nave)
# Check dipoles for fixed ori
with catch_logging() as log:
dipoles = rap_music(sim_evoked, forward_fixed, noise_cov,
n_dipoles=n_dipoles, verbose=True)
assert_var_exp_log(log.getvalue(), 89, 91)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked)
assert 97 < dipoles[0].gof.max() < 100
assert 91 < dipoles[1].gof.max() < 93
assert dipoles[0].gof.min() >= 0.
nave = 100000 # add a tiny amount of noise to the simulated evokeds
sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
n_dipoles, evoked.times, nave=nave)
dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov,
n_dipoles=n_dipoles, return_residual=True)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
# Check dipoles for free ori
dipoles, residual = rap_music(sim_evoked, forward, noise_cov,
n_dipoles=n_dipoles, return_residual=True)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
# Check dipoles for free surface ori
dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov,
n_dipoles=n_dipoles, return_residual=True)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
@testing.requires_testing_data
def test_rap_music_sphere():
"""Test RAP-MUSIC with real data, sphere model, MEG only."""
evoked, noise_cov = _get_data(ch_decim=8)
sphere = mne.make_sphere_model(r0=(0., 0., 0.04))
src = mne.setup_volume_source_space(subject=None, pos=10.,
sphere=(0.0, 0.0, 40, 65.0),
mindist=5.0, exclude=0.0,
sphere_units='mm')
forward = mne.make_forward_solution(evoked.info, trans=None, src=src,
bem=sphere)
with catch_logging() as log:
dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2,
verbose=True)
assert_var_exp_log(log.getvalue(), 47, 49)
# Test that there is one dipole on each hemisphere
pos = np.array([dip.pos[0] for dip in dipoles])
assert pos.shape == (2, 3)
assert (pos[:, 0] < 0).sum() == 1
assert (pos[:, 0] > 0).sum() == 1
# Check the amplitude scale
assert (1e-10 < dipoles[0].amplitude[0] < 1e-7)
# Check the orientation
dip_fit = mne.fit_dipole(evoked, noise_cov, sphere)[0]
assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[0].ori[0]))) > 0.99)
assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[1].ori[0]))) > 0.99)
idx = dip_fit.gof.argmax()
dist = np.linalg.norm(dipoles[0].pos[idx] - dip_fit.pos[idx])
assert 0.004 <= dist < 0.007
assert_allclose(dipoles[0].gof[idx], dip_fit.gof[idx], atol=3)
@testing.requires_testing_data
def test_rap_music_picks():
"""Test RAP-MUSIC with picking."""
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.crop(tmin=0.05, tmax=0.15) # select N100
evoked.pick_types(meg=True, eeg=False)
forward = mne.read_forward_solution(fname_fwd)
noise_cov = mne.read_cov(fname_cov)
dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2)
assert len(dipoles) == 2
|
import numpy as np
import pandas as pd
from scipy.stats import norm, rankdata
from scattertext.Common import DEFAULT_SCALER_ALGO, DEFAULT_BETA
class InvalidScalerException(Exception):
pass
class ScoreBalancer(object):
@staticmethod
def balance_scores(cat_scores, not_cat_scores):
scores = ScoreBalancer.balance_scores_and_dont_scale(cat_scores, not_cat_scores)
return ScoreBalancer._zero_centered_scale(scores)
@staticmethod
def balance_scores_and_dont_scale(cat_scores, not_cat_scores):
        # Keep the category score where the category scores higher, keep the
        # negated not-category score where the not-category scores higher, and
        # leave ties at zero.
        scores = np.zeros(len(cat_scores)).astype(np.float64)
        scores[cat_scores > not_cat_scores] = cat_scores[cat_scores > not_cat_scores]
        scores[cat_scores < not_cat_scores] = -not_cat_scores[cat_scores < not_cat_scores]
        return scores
@staticmethod
def _zero_centered_scale(ar):
ar[ar > 0] = ScoreBalancer._scale(ar[ar > 0])
ar[ar < 0] = -ScoreBalancer._scale(-ar[ar < 0])
return (ar + 1) / 2.
@staticmethod
def _scale(ar):
if len(ar) == 0:
return ar
if ar.min() == ar.max():
return np.full(len(ar), 0.5)
return (ar - ar.min()) / (ar.max() - ar.min())
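# Illustrative sketch (not part of the original API; the arrays below are made up):
# ScoreBalancer.balance_scores maps a pair of per-term score arrays onto [0, 1].
# Terms favouring the category land at or above 0.5, terms favouring the
# not-category land at or below 0.5, and ties sit at exactly 0.5.
#
#     cat_scores = np.array([0.9, 0.2, 0.6])
#     not_cat_scores = np.array([0.1, 0.8, 0.3])
#     balanced = ScoreBalancer.balance_scores(cat_scores, not_cat_scores)
#     # balanced[1] < 0.5 <= balanced[0] and 0.5 <= balanced[2]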
class ScaledFScorePresets(object):
def __init__(self,
scaler_algo=DEFAULT_SCALER_ALGO,
beta=DEFAULT_BETA,
one_to_neg_one=False,
priors=None,
use_score_difference=False,
):
self.scaler_algo_ = scaler_algo
self.beta_ = beta
self.one_to_neg_one_ = one_to_neg_one
self.priors_ = priors
self.use_score_difference_ = use_score_difference
assert self.beta_ > 0
def get_name(self):
return 'Scaled F-Score'
def get_default_score(self):
if self.one_to_neg_one_:
return 0
return 0.5
def get_scores(self, cat_word_counts, not_cat_word_counts):
'''
Parameters
----------
cat_word_counts : np.array
category counts
not_cat_word_counts : np.array
not category counts
Returns
-------
np.array
scores
'''
cat_scores = self.get_scores_for_category(cat_word_counts,
not_cat_word_counts)
not_cat_scores = self.get_scores_for_category(not_cat_word_counts,
cat_word_counts)
if self.use_score_difference_:
scores = ((cat_scores - not_cat_scores) + 1.) / 2.
else:
scores = ScoreBalancer.balance_scores(cat_scores, not_cat_scores)
if self.one_to_neg_one_:
return 2 * scores - 1
else:
return scores
def get_scores_for_category(self, cat_word_counts, not_cat_word_counts):
'''
Parameters
----------
cat_word_counts : np.array
category counts
not_cat_word_counts : np.array
not category counts
Returns
-------
np.array
scores
'''
beta = self.beta_
assert len(cat_word_counts) == len(not_cat_word_counts)
old_cat_word_counts = None
if type(cat_word_counts) == pd.Series:
assert all(cat_word_counts.index == not_cat_word_counts.index)
old_cat_word_counts = cat_word_counts
cat_word_counts = cat_word_counts.values
if type(not_cat_word_counts) == pd.Series:
not_cat_word_counts = not_cat_word_counts.values
if self.priors_ is not None:
p = self.priors_
assert len(p) == len(cat_word_counts)
precision = ((cat_word_counts + p * 1.) /
(cat_word_counts + not_cat_word_counts + 2 * p))
recall = (cat_word_counts + p) * 1. / (cat_word_counts.sum() + p.sum())
else:
precision = (cat_word_counts * 1. / (cat_word_counts + not_cat_word_counts))
recall = cat_word_counts * 1. / cat_word_counts.sum()
precision_normcdf = ScaledFScore._safe_scaler(self.scaler_algo_, precision)
recall_normcdf = ScaledFScore._safe_scaler(self.scaler_algo_, recall)
scores = self._weighted_h_mean(precision_normcdf, recall_normcdf)
scores[np.isnan(scores)] = 0.
if old_cat_word_counts is not None:
return pd.Series(scores, index=old_cat_word_counts.index)
return scores
def _weighted_h_mean(self, precision_normcdf, recall_normcdf):
scores = (1 + self.beta_ ** 2) * (precision_normcdf * recall_normcdf) \
/ ((self.beta_ ** 2) * precision_normcdf + recall_normcdf)
return scores
class ScaledFScorePresetsNeg1To1(ScaledFScorePresets):
@staticmethod
def get_default_score():
return 0
def get_scores(self, cat_word_counts, not_cat_word_counts):
scores = ScaledFScorePresets.get_scores(self, cat_word_counts, not_cat_word_counts)
return scores * 2 - 1
class ScaledFZScore(ScaledFScorePresets):
@staticmethod
def get_default_score():
return 0
def get_scores(self, cat_word_counts, not_cat_word_counts):
sfs = ScaledFScorePresets.get_scores(self, cat_word_counts, not_cat_word_counts)
# sfs = self.get_score_deltas(cat_word_counts, not_cat_word_counts)
# return (sfs - 0.5) / np.std(sfs - 0.5)
return (sfs - sfs.mean()) / np.std(sfs)
def get_name(self):
return "Scaled F-Score Z-Score"
def get_score_deltas(self, cat_word_counts, not_cat_word_counts):
cat_scores = ScaledFScorePresets.get_scores_for_category(
self, cat_word_counts, not_cat_word_counts)
not_cat_scores = ScaledFScorePresets.get_scores_for_category(
self, not_cat_word_counts, cat_word_counts)
return np.log(cat_scores) - np.log(not_cat_scores)
def get_p_vals(self, X):
'''
Parameters
----------
X : np.array
            Array of word counts, shape (N, 2) where N is the vocab size. X[:, 0] holds the
            positive-class counts and X[:, 1] the negative-class counts.
Returns
-------
np.array of p-values
'''
z_scores = self.get_scores(X[:, 0], X[:, 1])
return norm.cdf(z_scores)
class ScaledFZScorePrior(ScaledFZScore):
def __init__(self, prior, alpha=1, scaler_algo=DEFAULT_SCALER_ALGO, beta=DEFAULT_BETA):
self.prior = prior
self.alpha = alpha
ScaledFZScore.__init__(self, scaler_algo, beta)
def get_name(self):
return 'SFS w/ Informative Prior Z-Score'
def apply_prior(self, c):
        # Scale the prior so its total mass equals alpha times the observed count mass.
        prior_scale = np.sum(c) * self.alpha * 1. / np.sum(self.prior)
return c + (self.prior * prior_scale)
def get_scores(self, cat_word_counts, not_cat_word_counts):
sfs = ScaledFScorePresets.get_scores(self, self.apply_prior(cat_word_counts),
self.apply_prior(not_cat_word_counts))
# sfs = self.get_score_deltas(cat_word_counts, not_cat_word_counts)
# return (sfs - 0.5) / np.std(sfs - 0.5)
return (sfs - sfs.mean()) / np.std(sfs)
def get_score_deltas(self, cat_word_counts, not_cat_word_counts):
cat_scores = ScaledFScorePresets.get_scores_for_category(
self,
self.apply_prior(cat_word_counts),
self.apply_prior(not_cat_word_counts))
not_cat_scores = ScaledFScorePresets.get_scores_for_category(
self,
self.apply_prior(not_cat_word_counts),
self.apply_prior(cat_word_counts))
return np.log(cat_scores) - np.log(not_cat_scores)
class ScaledFScore(object):
@staticmethod
def get_default_score():
return 0.5
@staticmethod
def get_scores(cat_word_counts, not_cat_word_counts,
scaler_algo=DEFAULT_SCALER_ALGO, beta=DEFAULT_BETA):
''' Computes balanced scaled f-scores
Parameters
----------
cat_word_counts : np.array
category counts
not_cat_word_counts : np.array
not category counts
        scaler_algo : str
            Function that scales an array to the range [0, 1]. Use 'percentile' or 'normcdf'. Defaults to 'normcdf'.
        beta : float
            Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Defaults to 1.
Returns
-------
np.array
Harmonic means of scaled P(word|category)
and scaled P(category|word) for >median half of scores. Low scores are harmonic means
of scaled P(word|~category) and scaled P(~category|word). Array is squashed to between
0 and 1, with 0.5 indicating a median score.
'''
cat_scores = ScaledFScore.get_scores_for_category(cat_word_counts,
not_cat_word_counts,
scaler_algo,
beta)
not_cat_scores = ScaledFScore.get_scores_for_category(not_cat_word_counts,
cat_word_counts,
scaler_algo, beta)
return ScoreBalancer.balance_scores(cat_scores, not_cat_scores)
@staticmethod
def get_scores_for_category(cat_word_counts,
not_cat_word_counts,
scaler_algo=DEFAULT_SCALER_ALGO,
beta=DEFAULT_BETA):
''' Computes unbalanced scaled-fscores
Parameters
----------
        cat_word_counts : np.array
            category counts
        not_cat_word_counts : np.array
            not category counts
        scaler_algo : str
            Function that scales an array to the range [0, 1]. Use 'percentile' or 'normcdf'. Defaults to 'normcdf'.
beta : float
Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Defaults to 1.
Returns
-------
np.array of harmonic means of scaled P(word|category) and scaled P(category|word).
'''
assert beta > 0
old_cat_word_counts = None
if type(cat_word_counts) == pd.Series:
old_cat_word_counts = cat_word_counts
cat_word_counts = cat_word_counts.values
if type(not_cat_word_counts) == pd.Series:
not_cat_word_counts = not_cat_word_counts.values
precision = (cat_word_counts * 1. / (cat_word_counts + not_cat_word_counts))
recall = cat_word_counts * 1. / cat_word_counts.sum()
precision_normcdf = ScaledFScore._safe_scaler(scaler_algo, precision)
recall_normcdf = ScaledFScore._safe_scaler(scaler_algo, recall)
scores_numerator = (1 + beta ** 2) * (precision_normcdf * recall_normcdf)
scores_denominator = ((beta ** 2) * precision_normcdf + recall_normcdf)
scores_denominator[scores_denominator == 0] = 1
scores = scores_numerator/scores_denominator
scores[np.isnan(scores)] = 0.
if old_cat_word_counts is None:
return scores
else:
return pd.Series(scores, index=old_cat_word_counts.index)
@staticmethod
def _get_scaled_f_score_from_counts(cat_word_counts, not_cat_word_counts, scaler_algo, beta=DEFAULT_BETA):
        p_word_given_category = cat_word_counts.astype(np.float64) / cat_word_counts.sum()
p_category_given_word = cat_word_counts * 1. / (cat_word_counts + not_cat_word_counts)
scores \
= ScaledFScore._get_harmonic_mean_of_probabilities_over_non_zero_in_category_count_terms \
(cat_word_counts, p_category_given_word, p_word_given_category, scaler_algo, beta)
return scores
@staticmethod
def _safe_scaler(algo, ar):
if algo == 'none':
return ar
scaled_ar = ScaledFScore._get_scaler_function(algo)(ar)
if np.isnan(scaled_ar).any():
return ScaledFScore._get_scaler_function('percentile')(scaled_ar)
return scaled_ar
@staticmethod
def _get_scaler_function(scaler_algo):
scaler = None
if scaler_algo == 'normcdf':
scaler = lambda x: norm.cdf(x, x.mean(), x.std())
elif scaler_algo == 'lognormcdf':
scaler = lambda x: norm.cdf(np.log(x), np.log(x).mean(), np.log(x).std())
elif scaler_algo == 'percentile':
scaler = lambda x: rankdata(x).astype(np.float64) / len(x)
elif scaler_algo == 'percentiledense':
scaler = lambda x: rankdata(x, method='dense').astype(np.float64) / len(x)
elif scaler_algo == 'ecdf':
from statsmodels.distributions import ECDF
            scaler = lambda x: ECDF(x)(x)
elif scaler_algo == 'none':
scaler = lambda x: x
else:
            raise InvalidScalerException("Invalid scaler algorithm. Must be one of 'percentile', "
                                         "'percentiledense', 'normcdf', 'lognormcdf', 'ecdf', or 'none'.")
return scaler
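# Minimal usage sketch (toy counts, illustrative only; not taken from the library's docs).
if __name__ == '__main__':
    _cat_counts = np.array([20, 2, 30, 1, 5])
    _not_cat_counts = np.array([1, 25, 6, 2, 5])
    print(ScaledFScore.get_scores(_cat_counts, _not_cat_counts))
    print(ScaledFScorePresets(beta=2).get_scores(_cat_counts, _not_cat_counts))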
|
import boto3
from moto import mock_sts, mock_s3
def test_get_certificates(app):
from lemur.plugins.base import plugins
p = plugins.get("aws-s3")
assert p
@mock_sts()
@mock_s3()
def test_upload_acme_token(app):
from lemur.plugins.base import plugins
from lemur.plugins.lemur_aws.s3 import get
bucket = "public-bucket"
account = "123456789012"
prefix = "some-path/more-path/"
token_content = "Challenge"
token_name = "TOKEN"
token_path = ".well-known/acme-challenge/" + token_name
additional_options = [
{
"name": "bucket",
"value": bucket,
"type": "str",
"required": True,
"validation": r"[0-9a-z.-]{3,63}",
"helpMessage": "Must be a valid S3 bucket name!",
},
{
"name": "accountNumber",
"type": "str",
"value": account,
"required": True,
"validation": r"[0-9]{12}",
"helpMessage": "A valid AWS account number with permission to access S3",
},
{
"name": "region",
"type": "str",
"default": "us-east-1",
"required": False,
"helpMessage": "Region bucket exists",
"available": ["us-east-1", "us-west-2", "eu-west-1"],
},
{
"name": "encrypt",
"type": "bool",
"value": False,
"required": False,
"helpMessage": "Enable server side encryption",
"default": True,
},
{
"name": "prefix",
"type": "str",
"value": prefix,
"required": False,
"helpMessage": "Must be a valid S3 object prefix!",
},
]
s3_client = boto3.client('s3')
s3_client.create_bucket(Bucket=bucket)
p = plugins.get("aws-s3")
response = p.upload_acme_token(token_path=token_path,
token_content=token_content,
token=token_content,
options=additional_options)
assert response
response = get(bucket_name=bucket,
prefixed_object_name=prefix + token_name,
encrypt=False,
account_number=account)
    # the object read back from S3 should match the token content we uploaded
assert (response == token_content)
response = p.delete_acme_token(token_path=token_path,
options=additional_options,
account_number=account)
assert response
|
from __future__ import unicode_literals
import contextlib
import io
import os
import random
import subprocess
import string
import boto3
import smart_open
_BUCKET = os.environ.get('SO_BUCKET')
assert _BUCKET is not None, 'please set the SO_BUCKET environment variable'
_KEY = os.environ.get('SO_KEY')
assert _KEY is not None, 'please set the SO_KEY environment variable'
#
# https://stackoverflow.com/questions/13484726/safe-enough-8-character-short-unique-random-string
#
def _random_string(length=8):
alphabet = string.ascii_lowercase + string.digits
return ''.join(random.choices(alphabet, k=length))
@contextlib.contextmanager
def temporary():
"""Yields a URL than can be used for temporary writing.
Removes all content under the URL when exiting.
"""
key = '%s/%s' % (_KEY, _random_string())
yield 's3://%s/%s' % (_BUCKET, key)
boto3.resource('s3').Bucket(_BUCKET).objects.filter(Prefix=key).delete()
def _test_case(function):
def inner(benchmark):
with temporary() as uri:
return function(benchmark, uri)
return inner
def write_read(uri, content, write_mode, read_mode, encoding=None, s3_upload=None, **kwargs):
write_params = dict(kwargs)
write_params.update(s3_upload=s3_upload)
with smart_open.open(uri, write_mode, encoding=encoding, transport_params=write_params) as fout:
fout.write(content)
with smart_open.open(uri, read_mode, encoding=encoding, transport_params=kwargs) as fin:
actual = fin.read()
return actual
def read_length_prefixed_messages(uri, read_mode, encoding=None, **kwargs):
with smart_open.open(uri, read_mode, encoding=encoding, transport_params=kwargs) as fin:
actual = b''
length_byte = fin.read(1)
while len(length_byte):
actual += length_byte
msg = fin.read(ord(length_byte))
actual += msg
length_byte = fin.read(1)
return actual
@_test_case
def test_s3_readwrite_text(benchmark, uri):
text = 'с гранатою в кармане, с чекою в руке'
actual = benchmark(write_read, uri, text, 'w', 'r', 'utf-8')
assert actual == text
@_test_case
def test_s3_readwrite_text_gzip(benchmark, uri):
text = 'не чайки здесь запели на знакомом языке'
    actual = benchmark(write_read, uri + '.gz', text, 'w', 'r', 'utf-8')
assert actual == text
@_test_case
def test_s3_readwrite_binary(benchmark, uri):
binary = b'this is a test'
actual = benchmark(write_read, uri, binary, 'wb', 'rb')
assert actual == binary
@_test_case
def test_s3_readwrite_binary_gzip(benchmark, uri):
binary = b'this is a test'
    actual = benchmark(write_read, uri + '.gz', binary, 'wb', 'rb')
assert actual == binary
@_test_case
def test_s3_performance(benchmark, uri):
one_megabyte = io.BytesIO()
for _ in range(1024*128):
one_megabyte.write(b'01234567')
one_megabyte = one_megabyte.getvalue()
actual = benchmark(write_read, uri, one_megabyte, 'wb', 'rb')
assert actual == one_megabyte
@_test_case
def test_s3_performance_gz(benchmark, uri):
one_megabyte = io.BytesIO()
for _ in range(1024*128):
one_megabyte.write(b'01234567')
one_megabyte = one_megabyte.getvalue()
    actual = benchmark(write_read, uri + '.gz', one_megabyte, 'wb', 'rb')
assert actual == one_megabyte
@_test_case
def test_s3_performance_small_reads(benchmark, uri):
one_mib = 1024**2
one_megabyte_of_msgs = io.BytesIO()
msg = b'\x0f' + b'0123456789abcde' # a length-prefixed "message"
for _ in range(0, one_mib, len(msg)):
one_megabyte_of_msgs.write(msg)
one_megabyte_of_msgs = one_megabyte_of_msgs.getvalue()
with smart_open.open(uri, 'wb') as fout:
fout.write(one_megabyte_of_msgs)
actual = benchmark(read_length_prefixed_messages, uri, 'rb', buffer_size=one_mib)
assert actual == one_megabyte_of_msgs
@_test_case
def test_s3_encrypted_file(benchmark, uri):
text = 'с гранатою в кармане, с чекою в руке'
s3_upload = {'ServerSideEncryption': 'AES256'}
actual = benchmark(write_read, uri, text, 'w', 'r', 'utf-8', s3_upload=s3_upload)
assert actual == text
|
from numato_gpio import NumatoGpioError
NUMATO_CFG = {
"numato": {
"discover": ["/ttyACM0", "/ttyACM1"],
"devices": [
{
"id": 0,
"binary_sensors": {
"invert_logic": False,
"ports": {
"2": "numato_binary_sensor_mock_port2",
"3": "numato_binary_sensor_mock_port3",
"4": "numato_binary_sensor_mock_port4",
},
},
"sensors": {
"ports": {
"1": {
"name": "numato_adc_mock_port1",
"source_range": [100, 1023],
"destination_range": [0, 10],
"unit": "mocks",
}
},
},
"switches": {
"invert_logic": False,
"ports": {
"5": "numato_switch_mock_port5",
"6": "numato_switch_mock_port6",
},
},
}
],
}
}
def mockup_raise(*args, **kwargs):
"""Mockup to replace regular functions for error injection."""
raise NumatoGpioError("Error mockup")
def mockup_return(*args, **kwargs):
"""Mockup to replace regular functions for error injection."""
return False
|
import numpy as np
import pandas as pd
from arctic.serialization.numpy_records import DataFrameSerializer
from tests.integration.chunkstore.test_utils import create_test_data
from tests.util import get_large_ts
NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS = 50
_TEST_DATA = None
df_serializer = DataFrameSerializer()
def _mixed_test_data():
global _TEST_DATA
if _TEST_DATA is None:
onerow_ts = get_large_ts(1)
small_ts = get_large_ts(10)
medium_ts = get_large_ts(600)
large_ts = get_large_ts(1800)
empty_ts = pd.DataFrame()
empty_index = create_test_data(size=0, cols=10, index=True, multiindex=False, random_data=True, random_ids=True)
with_some_objects_ts = medium_ts.copy(deep=True)
with_some_objects_ts.iloc[0:NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS, 0] = None
with_some_objects_ts.iloc[0:NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS, 1] = 'A string'
large_with_some_objects = create_test_data(size=10000, cols=64, index=True, multiindex=False, random_data=True,
random_ids=True, use_hours=True)
large_with_some_objects.iloc[0:NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS, 0] = None
large_with_some_objects.iloc[0:NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS, 1] = 'A string'
with_string_ts = medium_ts.copy(deep=True)
with_string_ts['str_col'] = 'abc'
with_unicode_ts = medium_ts.copy(deep=True)
with_unicode_ts['ustr_col'] = u'abc'
with_some_none_ts = medium_ts.copy(deep=True)
with_some_none_ts.iloc[10:10] = None
with_some_none_ts.iloc[-10:-10] = np.nan
with_some_none_ts = with_some_none_ts.replace({np.nan: None})
# Multi-index data frames
multiindex_ts = create_test_data(size=500, cols=10, index=True, multiindex=True, random_data=True,
random_ids=True)
empty_multiindex_ts = create_test_data(size=0, cols=10, index=True, multiindex=True, random_data=True,
random_ids=True)
large_multi_index = create_test_data(
size=50000, cols=10, index=True, multiindex=True, random_data=True, random_ids=True, use_hours=True)
# Multi-column data frames
columns = pd.MultiIndex.from_product([["bar", "baz", "foo", "qux"], ["one", "two"]], names=["first", "second"])
empty_multi_column_ts = pd.DataFrame([], columns=columns)
columns = pd.MultiIndex.from_product([["bar", "baz", "foo", "qux"], ["one", "two"]], names=["first", "second"])
multi_column_no_multiindex = pd.DataFrame(np.random.randn(2, 8), index=[0, 1], columns=columns)
large_multi_column = pd.DataFrame(np.random.randn(100000, 8), index=range(100000), columns=columns)
columns = pd.MultiIndex.from_product([[1, 2, 'a'], ['c', 5]])
multi_column_int_levels = pd.DataFrame([[9, 2, 8, 1, 2, 3], [3, 4, 2, 9, 10, 11]],
index=['x', 'y'], columns=columns)
# Multi-index and multi-column data frames
columns = pd.MultiIndex.from_product([["bar", "baz", "foo", "qux"], ["one", "two"]])
index = pd.MultiIndex.from_product([["x", "y", "z"], ["a", "b"]])
multi_column_and_multi_index = pd.DataFrame(np.random.randn(6, 8), index=index, columns=columns)
# Nested n-dimensional
def _new_np_nd_array(val):
return np.rec.array([(val, ['A', 'BC'])],
dtype=[('index', '<M8[ns]'), ('values', 'S2', (2,))])
n_dimensional_df = pd.DataFrame(
{'a': [_new_np_nd_array(1356998400000000000), _new_np_nd_array(1356998400000000001)],
'b': [_new_np_nd_array(1356998400000000002), _new_np_nd_array(1356998400000000003)]
},
index=(0, 1))
# With mixed types (i.e. string / numbers) in multi-index
input_dict = {'POSITION': {
(pd.Timestamp('2013-10-07 15:45:43'), 'MYSTRT', 'SYMA', 'XX', 0): 0.0,
(pd.Timestamp('2013-10-07 15:45:43'), 'MYSTRT', 'SYMA', 'FFL', '201312'): -558.0,
(pd.Timestamp('2013-10-07 15:45:43'), 'MYSTRT', 'AG', 'FFL', '201312'): -74.0,
(pd.Timestamp('2013-10-07 15:45:43'), 'MYSTRT', 'AG', 'XX', 0): 0.0}
}
multi_index_with_object = pd.DataFrame.from_dict(input_dict)
# Exhaust all dtypes
mixed_dtypes_df = pd.DataFrame({
'string': list('abc'),
'int64': list(range(1, 4)),
'uint8': np.arange(3, 6).astype('u1'),
'uint64': np.arange(3, 6).astype('u8'),
'float64': np.arange(4.0, 7.0),
'bool1': [True, False, True],
'dates': pd.date_range('now', periods=3).values,
'other_dates': pd.date_range('20130101', periods=3).values,
# 'category': pd.Series(list("ABC")).astype('category'),
'tz_aware_dates': pd.date_range('20130101', periods=3, tz='US/Eastern'),
'complex': np.array([1. + 4.j, 2. + 5.j, 3. + 6.j])
})
mixed_dtypes_df['timedeltas'] = mixed_dtypes_df.dates.diff()
# Multi-column with some objects
multi_column_with_some_objects = multi_column_no_multiindex.copy()
        multi_column_with_some_objects.iloc[1:, 1:2] = 'Convert this column dtype to object'
# Index with timezone-aware datetime
index_tz_aware = pd.DataFrame(data={'colA': range(10),
'colB': pd.date_range('20130101', periods=10, tz='US/Eastern')},
index=pd.date_range('20130101', periods=10, tz='US/Eastern'))
index_tz_aware.index.name = 'index'
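        # Each entry maps a scenario name to a tuple of
        # (original DataFrame, its serialized form (or the expected exception class),
        #  the result of can_convert_to_records_without_objects for it).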
_TEST_DATA = {
'onerow': (onerow_ts, df_serializer.serialize(onerow_ts),
                       df_serializer.can_convert_to_records_without_objects(onerow_ts, 'symA')),
'small': (small_ts, df_serializer.serialize(small_ts),
df_serializer.can_convert_to_records_without_objects(small_ts, 'symA')),
'medium': (medium_ts, df_serializer.serialize(medium_ts),
df_serializer.can_convert_to_records_without_objects(medium_ts, 'symA')),
'large': (large_ts, df_serializer.serialize(large_ts),
df_serializer.can_convert_to_records_without_objects(large_ts, 'symA')),
'empty': (empty_ts, df_serializer.serialize(empty_ts),
df_serializer.can_convert_to_records_without_objects(empty_ts, 'symA')),
'empty_index': (empty_index, df_serializer.serialize(empty_index),
df_serializer.can_convert_to_records_without_objects(empty_index, 'symA')),
'with_some_objects': (with_some_objects_ts, df_serializer.serialize(with_some_objects_ts),
df_serializer.can_convert_to_records_without_objects(with_some_objects_ts, 'symA')),
'large_with_some_objects': (
large_with_some_objects, df_serializer.serialize(large_with_some_objects),
df_serializer.can_convert_to_records_without_objects(large_with_some_objects, 'symA')),
'with_string': (with_string_ts, df_serializer.serialize(with_string_ts),
df_serializer.can_convert_to_records_without_objects(with_string_ts, 'symA')),
'with_unicode': (with_unicode_ts, df_serializer.serialize(with_unicode_ts),
df_serializer.can_convert_to_records_without_objects(with_unicode_ts, 'symA')),
'with_some_none': (with_some_none_ts, df_serializer.serialize(with_some_none_ts),
df_serializer.can_convert_to_records_without_objects(with_some_none_ts, 'symA')),
'multiindex': (multiindex_ts, df_serializer.serialize(multiindex_ts),
df_serializer.can_convert_to_records_without_objects(multiindex_ts, 'symA')),
'multiindex_with_object': (
multi_index_with_object, df_serializer.serialize(multi_index_with_object),
df_serializer.can_convert_to_records_without_objects(multi_index_with_object, 'symA')),
'empty_multiindex': (empty_multiindex_ts, df_serializer.serialize(empty_multiindex_ts),
df_serializer.can_convert_to_records_without_objects(empty_multiindex_ts, 'symA')),
'large_multi_index': (large_multi_index, df_serializer.serialize(large_multi_index),
df_serializer.can_convert_to_records_without_objects(large_multi_index, 'symA')),
'empty_multicolumn': (empty_multi_column_ts, df_serializer.serialize(empty_multi_column_ts),
df_serializer.can_convert_to_records_without_objects(empty_multi_column_ts, 'symA')),
'multi_column_no_multiindex': (
multi_column_no_multiindex, df_serializer.serialize(multi_column_no_multiindex),
df_serializer.can_convert_to_records_without_objects(multi_column_no_multiindex, 'symA')),
'large_multi_column': (large_multi_column, df_serializer.serialize(large_multi_column),
df_serializer.can_convert_to_records_without_objects(large_multi_column, 'symA')),
'multi_column_int_levels': (
multi_column_int_levels, df_serializer.serialize(multi_column_int_levels),
df_serializer.can_convert_to_records_without_objects(multi_column_int_levels, 'symA')),
'multi_column_and_multi_index': (
multi_column_and_multi_index, df_serializer.serialize(multi_column_and_multi_index),
df_serializer.can_convert_to_records_without_objects(multi_column_and_multi_index, 'symA')),
'multi_column_with_some_objects': (
multi_column_with_some_objects, df_serializer.serialize(multi_column_with_some_objects),
df_serializer.can_convert_to_records_without_objects(multi_column_with_some_objects, 'symA')),
'n_dimensional_df': (n_dimensional_df, Exception, None),
'mixed_dtypes_df': (mixed_dtypes_df, df_serializer.serialize(mixed_dtypes_df),
df_serializer.can_convert_to_records_without_objects(mixed_dtypes_df, 'symA')),
'index_tz_aware': (index_tz_aware, df_serializer.serialize(index_tz_aware),
df_serializer.can_convert_to_records_without_objects(index_tz_aware, 'symA'))
}
return _TEST_DATA
def is_test_data_serializable(input_df_descr):
return _mixed_test_data()[input_df_descr][2]
|
from homeassistant.components.met import DOMAIN
from homeassistant.components.weather import DOMAIN as WEATHER_DOMAIN
async def test_tracking_home(hass, mock_weather):
"""Test we track home."""
await hass.config_entries.flow.async_init("met", context={"source": "onboarding"})
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("weather")) == 1
assert len(mock_weather.mock_calls) == 4
# Test the hourly sensor is disabled by default
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("weather.test_home_hourly")
assert state is None
entry = registry.async_get("weather.test_home_hourly")
assert entry
assert entry.disabled
assert entry.disabled_by == "integration"
# Test we track config
await hass.config.async_update(latitude=10, longitude=20)
await hass.async_block_till_done()
assert len(mock_weather.mock_calls) == 8
entry = hass.config_entries.async_entries()[0]
await hass.config_entries.async_remove(entry.entry_id)
assert len(hass.states.async_entity_ids("weather")) == 0
async def test_not_tracking_home(hass, mock_weather):
"""Test when we not track home."""
# Pre-create registry entry for disabled by default hourly weather
registry = await hass.helpers.entity_registry.async_get_registry()
registry.async_get_or_create(
WEATHER_DOMAIN,
DOMAIN,
"10-20-hourly",
suggested_object_id="somewhere_hourly",
disabled_by=None,
)
await hass.config_entries.flow.async_init(
"met",
context={"source": "user"},
data={"name": "Somewhere", "latitude": 10, "longitude": 20, "elevation": 0},
)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("weather")) == 2
assert len(mock_weather.mock_calls) == 4
# Test we do not track config
await hass.config.async_update(latitude=10, longitude=20)
await hass.async_block_till_done()
assert len(mock_weather.mock_calls) == 4
entry = hass.config_entries.async_entries()[0]
await hass.config_entries.async_remove(entry.entry_id)
assert len(hass.states.async_entity_ids("weather")) == 0
|
from queue import Queue
from . import base
from . import virtual
from collections import defaultdict
class Channel(virtual.Channel):
"""In-memory Channel."""
events = defaultdict(set)
queues = {}
do_restore = False
supports_fanout = True
def _has_queue(self, queue, **kwargs):
return queue in self.queues
def _new_queue(self, queue, **kwargs):
if queue not in self.queues:
self.queues[queue] = Queue()
def _get(self, queue, timeout=None):
return self._queue_for(queue).get(block=False)
def _queue_for(self, queue):
if queue not in self.queues:
self.queues[queue] = Queue()
return self.queues[queue]
def _queue_bind(self, *args):
pass
def _put_fanout(self, exchange, message, routing_key=None, **kwargs):
for queue in self._lookup(exchange, routing_key):
self._queue_for(queue).put(message)
def _put(self, queue, message, **kwargs):
self._queue_for(queue).put(message)
def _size(self, queue):
return self._queue_for(queue).qsize()
def _delete(self, queue, *args, **kwargs):
self.queues.pop(queue, None)
def _purge(self, queue):
q = self._queue_for(queue)
size = q.qsize()
q.queue.clear()
return size
def close(self):
super().close()
        for queue in self.queues.values():
            # Queue.empty() only reports emptiness; clear the backing deque instead.
            queue.queue.clear()
self.queues = {}
def after_reply_message_received(self, queue):
pass
class Transport(virtual.Transport):
"""In-memory Transport."""
Channel = Channel
#: memory backend state is global.
state = virtual.BrokerState()
implements = base.Transport.implements
driver_type = 'memory'
driver_name = 'memory'
def driver_version(self):
return 'N/A'
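# Illustrative usage sketch (assumes kombu's URL-based transport selection;
# not taken from this module's docs):
#
#     from kombu import Connection
#
#     with Connection('memory://') as conn:
#         queue = conn.SimpleQueue('demo')
#         queue.put({'hello': 'world'})
#         message = queue.get(block=False)
#         assert message.payload == {'hello': 'world'}
#         message.ack()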
|
import contextlib
import warnings
import numpy as np
import pandas as pd
import pytest
from xarray import (
Dataset,
SerializationWarning,
Variable,
coding,
conventions,
open_dataset,
)
from xarray.backends.common import WritableCFDataStore
from xarray.backends.memory import InMemoryDataStore
from xarray.conventions import decode_cf
from xarray.testing import assert_identical
from . import (
assert_array_equal,
raises_regex,
requires_cftime,
requires_dask,
requires_netCDF4,
)
from .test_backends import CFEncodedBase
class TestBoolTypeArray:
def test_booltype_array(self):
x = np.array([1, 0, 1, 1, 0], dtype="i1")
bx = conventions.BoolTypeArray(x)
assert bx.dtype == bool
assert_array_equal(bx, np.array([True, False, True, True, False], dtype=bool))
class TestNativeEndiannessArray:
def test(self):
x = np.arange(5, dtype=">i8")
expected = np.arange(5, dtype="int64")
a = conventions.NativeEndiannessArray(x)
assert a.dtype == expected.dtype
assert a.dtype == expected[:].dtype
assert_array_equal(a, expected)
def test_decode_cf_with_conflicting_fill_missing_value():
expected = Variable(["t"], [np.nan, np.nan, 2], {"units": "foobar"})
var = Variable(
["t"], np.arange(3), {"units": "foobar", "missing_value": 0, "_FillValue": 1}
)
with warnings.catch_warnings(record=True) as w:
actual = conventions.decode_cf_variable("t", var)
assert_identical(actual, expected)
assert "has multiple fill" in str(w[0].message)
expected = Variable(["t"], np.arange(10), {"units": "foobar"})
var = Variable(
["t"],
np.arange(10),
{"units": "foobar", "missing_value": np.nan, "_FillValue": np.nan},
)
actual = conventions.decode_cf_variable("t", var)
assert_identical(actual, expected)
var = Variable(
["t"],
np.arange(10),
{
"units": "foobar",
"missing_value": np.float32(np.nan),
"_FillValue": np.float32(np.nan),
},
)
actual = conventions.decode_cf_variable("t", var)
assert_identical(actual, expected)
@requires_cftime
class TestEncodeCFVariable:
def test_incompatible_attributes(self):
invalid_vars = [
Variable(
["t"], pd.date_range("2000-01-01", periods=3), {"units": "foobar"}
),
Variable(["t"], pd.to_timedelta(["1 day"]), {"units": "foobar"}),
Variable(["t"], [0, 1, 2], {"add_offset": 0}, {"add_offset": 2}),
Variable(["t"], [0, 1, 2], {"_FillValue": 0}, {"_FillValue": 2}),
]
for var in invalid_vars:
with pytest.raises(ValueError):
conventions.encode_cf_variable(var)
def test_missing_fillvalue(self):
v = Variable(["x"], np.array([np.nan, 1, 2, 3]))
v.encoding = {"dtype": "int16"}
with pytest.warns(Warning, match="floating point data as an integer"):
conventions.encode_cf_variable(v)
def test_multidimensional_coordinates(self):
# regression test for GH1763
# Set up test case with coordinates that have overlapping (but not
# identical) dimensions.
zeros1 = np.zeros((1, 5, 3))
zeros2 = np.zeros((1, 6, 3))
zeros3 = np.zeros((1, 5, 4))
orig = Dataset(
{
"lon1": (["x1", "y1"], zeros1.squeeze(0), {}),
"lon2": (["x2", "y1"], zeros2.squeeze(0), {}),
"lon3": (["x1", "y2"], zeros3.squeeze(0), {}),
"lat1": (["x1", "y1"], zeros1.squeeze(0), {}),
"lat2": (["x2", "y1"], zeros2.squeeze(0), {}),
"lat3": (["x1", "y2"], zeros3.squeeze(0), {}),
"foo1": (["time", "x1", "y1"], zeros1, {"coordinates": "lon1 lat1"}),
"foo2": (["time", "x2", "y1"], zeros2, {"coordinates": "lon2 lat2"}),
"foo3": (["time", "x1", "y2"], zeros3, {"coordinates": "lon3 lat3"}),
"time": ("time", [0.0], {"units": "hours since 2017-01-01"}),
}
)
orig = conventions.decode_cf(orig)
# Encode the coordinates, as they would be in a netCDF output file.
enc, attrs = conventions.encode_dataset_coordinates(orig)
# Make sure we have the right coordinates for each variable.
foo1_coords = enc["foo1"].attrs.get("coordinates", "")
foo2_coords = enc["foo2"].attrs.get("coordinates", "")
foo3_coords = enc["foo3"].attrs.get("coordinates", "")
assert set(foo1_coords.split()) == {"lat1", "lon1"}
assert set(foo2_coords.split()) == {"lat2", "lon2"}
assert set(foo3_coords.split()) == {"lat3", "lon3"}
# Should not have any global coordinates.
assert "coordinates" not in attrs
def test_do_not_overwrite_user_coordinates(self):
orig = Dataset(
coords={"x": [0, 1, 2], "y": ("x", [5, 6, 7]), "z": ("x", [8, 9, 10])},
data_vars={"a": ("x", [1, 2, 3]), "b": ("x", [3, 5, 6])},
)
orig["a"].encoding["coordinates"] = "y"
orig["b"].encoding["coordinates"] = "z"
enc, _ = conventions.encode_dataset_coordinates(orig)
assert enc["a"].attrs["coordinates"] == "y"
assert enc["b"].attrs["coordinates"] == "z"
orig["a"].attrs["coordinates"] = "foo"
with raises_regex(ValueError, "'coordinates' found in both attrs"):
conventions.encode_dataset_coordinates(orig)
@requires_dask
def test_string_object_warning(self):
original = Variable(("x",), np.array(["foo", "bar"], dtype=object)).chunk()
with pytest.warns(SerializationWarning, match="dask array with dtype=object"):
encoded = conventions.encode_cf_variable(original)
assert_identical(original, encoded)
@requires_cftime
class TestDecodeCF:
def test_dataset(self):
original = Dataset(
{
"t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
"foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}),
"y": ("t", [5, 10, -999], {"_FillValue": -999}),
}
)
expected = Dataset(
{"foo": ("t", [0, 0, 0], {"units": "bar"})},
{
"t": pd.date_range("2000-01-01", periods=3),
"y": ("t", [5.0, 10.0, np.nan]),
},
)
actual = conventions.decode_cf(original)
assert_identical(expected, actual)
def test_invalid_coordinates(self):
# regression test for GH308
original = Dataset({"foo": ("t", [1, 2], {"coordinates": "invalid"})})
actual = conventions.decode_cf(original)
assert_identical(original, actual)
def test_decode_coordinates(self):
# regression test for GH610
original = Dataset(
{"foo": ("t", [1, 2], {"coordinates": "x"}), "x": ("t", [4, 5])}
)
actual = conventions.decode_cf(original)
assert actual.foo.encoding["coordinates"] == "x"
def test_0d_int32_encoding(self):
original = Variable((), np.int32(0), encoding={"dtype": "int64"})
expected = Variable((), np.int64(0))
actual = conventions.maybe_encode_nonstring_dtype(original)
assert_identical(expected, actual)
def test_decode_cf_with_multiple_missing_values(self):
original = Variable(["t"], [0, 1, 2], {"missing_value": np.array([0, 1])})
expected = Variable(["t"], [np.nan, np.nan, 2], {})
with warnings.catch_warnings(record=True) as w:
actual = conventions.decode_cf_variable("t", original)
assert_identical(expected, actual)
assert "has multiple fill" in str(w[0].message)
def test_decode_cf_with_drop_variables(self):
original = Dataset(
{
"t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
"x": ("x", [9, 8, 7], {"units": "km"}),
"foo": (
("t", "x"),
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
{"units": "bar"},
),
"y": ("t", [5, 10, -999], {"_FillValue": -999}),
}
)
expected = Dataset(
{
"t": pd.date_range("2000-01-01", periods=3),
"foo": (
("t", "x"),
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
{"units": "bar"},
),
"y": ("t", [5, 10, np.nan]),
}
)
actual = conventions.decode_cf(original, drop_variables=("x",))
actual2 = conventions.decode_cf(original, drop_variables="x")
assert_identical(expected, actual)
assert_identical(expected, actual2)
@pytest.mark.filterwarnings("ignore:Ambiguous reference date string")
def test_invalid_time_units_raises_eagerly(self):
ds = Dataset({"time": ("time", [0, 1], {"units": "foobar since 123"})})
with raises_regex(ValueError, "unable to decode time"):
decode_cf(ds)
@requires_cftime
def test_dataset_repr_with_netcdf4_datetimes(self):
# regression test for #347
attrs = {"units": "days since 0001-01-01", "calendar": "noleap"}
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "unable to decode time")
ds = decode_cf(Dataset({"time": ("time", [0, 1], attrs)}))
assert "(time) object" in repr(ds)
attrs = {"units": "days since 1900-01-01"}
ds = decode_cf(Dataset({"time": ("time", [0, 1], attrs)}))
assert "(time) datetime64[ns]" in repr(ds)
@requires_cftime
def test_decode_cf_datetime_transition_to_invalid(self):
# manually create dataset with not-decoded date
from datetime import datetime
ds = Dataset(coords={"time": [0, 266 * 365]})
units = "days since 2000-01-01 00:00:00"
ds.time.attrs = dict(units=units)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "unable to decode time")
ds_decoded = conventions.decode_cf(ds)
expected = [datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)]
assert_array_equal(ds_decoded.time.values, expected)
@requires_dask
def test_decode_cf_with_dask(self):
import dask.array as da
original = Dataset(
{
"t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
"foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}),
"bar": ("string2", [b"a", b"b"]),
"baz": (("x"), [b"abc"], {"_Encoding": "utf-8"}),
"y": ("t", [5, 10, -999], {"_FillValue": -999}),
}
).chunk()
decoded = conventions.decode_cf(original)
print(decoded)
assert all(
isinstance(var.data, da.Array)
for name, var in decoded.variables.items()
if name not in decoded.indexes
)
assert_identical(decoded, conventions.decode_cf(original).compute())
@requires_dask
def test_decode_dask_times(self):
original = Dataset.from_dict(
{
"coords": {},
"dims": {"time": 5},
"data_vars": {
"average_T1": {
"dims": ("time",),
"attrs": {"units": "days since 1958-01-01 00:00:00"},
"data": [87659.0, 88024.0, 88389.0, 88754.0, 89119.0],
}
},
}
)
assert_identical(
conventions.decode_cf(original.chunk()),
conventions.decode_cf(original).chunk(),
)
def test_decode_cf_time_kwargs(self):
ds = Dataset.from_dict(
{
"coords": {
"timedelta": {
"data": np.array([1, 2, 3], dtype="int64"),
"dims": "timedelta",
"attrs": {"units": "days"},
},
"time": {
"data": np.array([1, 2, 3], dtype="int64"),
"dims": "time",
"attrs": {"units": "days since 2000-01-01"},
},
},
"dims": {"time": 3, "timedelta": 3},
"data_vars": {
"a": {"dims": ("time", "timedelta"), "data": np.ones((3, 3))},
},
}
)
dsc = conventions.decode_cf(ds)
assert dsc.timedelta.dtype == np.dtype("m8[ns]")
assert dsc.time.dtype == np.dtype("M8[ns]")
dsc = conventions.decode_cf(ds, decode_times=False)
assert dsc.timedelta.dtype == np.dtype("int64")
assert dsc.time.dtype == np.dtype("int64")
dsc = conventions.decode_cf(ds, decode_times=True, decode_timedelta=False)
assert dsc.timedelta.dtype == np.dtype("int64")
assert dsc.time.dtype == np.dtype("M8[ns]")
dsc = conventions.decode_cf(ds, decode_times=False, decode_timedelta=True)
assert dsc.timedelta.dtype == np.dtype("m8[ns]")
assert dsc.time.dtype == np.dtype("int64")
class CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore):
def encode_variable(self, var):
"""encode one variable"""
coder = coding.strings.EncodedStringCoder(allows_unicode=True)
var = coder.encode(var)
return var
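# CFEncodedInMemoryStore combines CF-style encoding on write (WritableCFDataStore)
# with the purely in-memory backend (InMemoryDataStore), so the round-trip tests
# below run without touching the filesystem.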
@requires_netCDF4
class TestCFEncodedDataStore(CFEncodedBase):
@contextlib.contextmanager
def create_store(self):
yield CFEncodedInMemoryStore()
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
store = CFEncodedInMemoryStore()
data.dump_to_store(store, **save_kwargs)
yield open_dataset(store, **open_kwargs)
@pytest.mark.skip("cannot roundtrip coordinates yet for CFEncodedInMemoryStore")
def test_roundtrip_coordinates(self):
pass
def test_invalid_dataarray_names_raise(self):
# only relevant for on-disk file formats
pass
def test_encoding_kwarg(self):
# we haven't bothered to raise errors yet for unexpected encodings in
# this test dummy
pass
def test_encoding_kwarg_fixed_width_string(self):
# CFEncodedInMemoryStore doesn't support explicit string encodings.
pass
|
import numpy as np
class DeltaJSDivergence(object):
def __init__(self, pi1=0.5, pi2=0.5):
assert pi1 + pi2 == 1
self.pi1 = pi1
self.pi2 = pi2
def get_scores(self, a, b):
# via https://arxiv.org/pdf/2008.02250.pdf eqn 1
p1 = 0.001 + a / np.sum(a)
p2 = 0.001 + b / np.sum(b)
pi1, pi2 = self.pi1, self.pi2
        m = pi1 * p1 + pi2 * p2
        def lg(x): return np.log(x) / np.log(2)
        # Element-wise contribution to the Jensen-Shannon divergence:
        # H(m) - (pi1 * H(p1) + pi2 * H(p2)).
        return m * lg(1 / m) - (pi1 * p1 * lg(1 / p1) + pi2 * p2 * lg(1 / p2))
def get_name(self):
return 'JS Divergence Shift'
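# Minimal usage sketch (toy counts, illustrative only):
if __name__ == '__main__':
    a = np.array([10., 1., 5.])
    b = np.array([1., 10., 5.])
    # Larger values mark terms that contribute more to the divergence between the corpora.
    print(DeltaJSDivergence().get_scores(a, b))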
|
import os
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
SUPPORT_LANGUAGES = ["en", "de"]
DEFAULT_LANG = "en"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES)}
)
def get_engine(hass, config, discovery_info=None):
"""Set up Demo speech component."""
return DemoProvider(config.get(CONF_LANG, DEFAULT_LANG))
class DemoProvider(Provider):
"""Demo speech API provider."""
def __init__(self, lang):
"""Initialize demo provider."""
self._lang = lang
self.name = "Demo"
@property
def default_language(self):
"""Return the default language."""
return self._lang
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_options(self):
"""Return list of supported options like voice, emotionen."""
return ["voice", "age"]
def get_tts_audio(self, message, language, options=None):
"""Load TTS from demo."""
filename = os.path.join(os.path.dirname(__file__), "tts.mp3")
try:
with open(filename, "rb") as voice:
data = voice.read()
except OSError:
return (None, None)
return ("mp3", data)
|
import diamond.collector
import re
class DRBDCollector(diamond.collector.Collector):
"""
DRBD Simple metric collector
"""
def get_default_config_help(self):
config_help = super(DRBDCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DRBDCollector, self).get_default_config()
config.update({
'path': 'drbd'
})
return config
def collect(self):
"""
Overrides the Collector.collect method
"""
performance_indicators = {
'ns': 'network_send',
'nr': 'network_receive',
'dw': 'disk_write',
'dr': 'disk_read',
'al': 'activity_log',
'bm': 'bit_map',
'lo': 'local_count',
'pe': 'pending',
'ua': 'unacknowledged',
'ap': 'application_pending',
'ep': 'epochs',
'wo': 'write_order',
'oos': 'out_of_sync',
'cs': 'connection_state',
'ro': 'roles',
'ds': 'disk_states'
}
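        # Typical /proc/drbd resource lines look roughly like this (exact format
        # varies with the DRBD version):
        #  0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
        #     ns:1048576 nr:0 dw:1048576 dr:1024 al:257 bm:64 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0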
results = dict()
try:
statusfile = open('/proc/drbd', 'r')
current_resource = ''
for line in statusfile:
if re.search('version', line) is None:
if re.search(r' \d: cs', line):
                        matches = re.match(r' (\d): (cs:\w+) (ro:\w+/\w+) '
                                           r'(ds:\w+/\w+) (\w) .*', line)
current_resource = matches.group(1)
results[current_resource] = dict()
elif re.search(r'\sns:', line):
metrics = line.strip().split(" ")
for metric in metrics:
item, value = metric.split(":")
results[current_resource][
performance_indicators[item]] = value
else:
continue
statusfile.close()
except IOError as errormsg:
self.log.error("Can't read DRBD status file: {}".format(errormsg))
return
for resource in results.keys():
for metric_name, metric_value in results[resource].items():
if metric_value.isdigit():
self.publish(resource + "." + metric_name, metric_value)
else:
continue
|
import asyncio
import logging
from roonapi import RoonApi
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_API_KEY, CONF_HOST
from .const import ( # pylint: disable=unused-import
AUTHENTICATE_TIMEOUT,
DEFAULT_NAME,
DOMAIN,
ROON_APPINFO,
)
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({"host": str})
TIMEOUT = 120
class RoonHub:
"""Interact with roon during config flow."""
def __init__(self, host):
"""Initialize."""
self._host = host
    async def authenticate(self, hass):
        """Authenticate with the host and return the API token, or None on timeout."""
token = None
secs = 0
roonapi = RoonApi(ROON_APPINFO, None, self._host, blocking_init=False)
while secs < TIMEOUT:
token = roonapi.token
secs += AUTHENTICATE_TIMEOUT
if token:
break
await asyncio.sleep(AUTHENTICATE_TIMEOUT)
token = roonapi.token
roonapi.stop()
return token
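# Note: roonapi only exposes a token after the user approves this extension in
# the Roon client UI, which is why authenticate() polls until TIMEOUT expires
# (assumption based on the roonapi pairing flow).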
async def authenticate(hass: core.HomeAssistant, host):
"""Connect and authenticate home assistant."""
hub = RoonHub(host)
token = await hub.authenticate(hass)
if token is None:
raise InvalidAuth
return {CONF_HOST: host, CONF_API_KEY: token}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for roon."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the Roon flow."""
self._host = None
async def async_step_user(self, user_input=None):
"""Handle getting host details from the user."""
errors = {}
if user_input is not None:
self._host = user_input["host"]
existing = {
entry.data[CONF_HOST] for entry in self._async_current_entries()
}
if self._host in existing:
errors["base"] = "duplicate_entry"
return self.async_show_form(step_id="user", errors=errors)
return await self.async_step_link()
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_link(self, user_input=None):
"""Handle linking and authenticting with the roon server."""
errors = {}
if user_input is not None:
try:
info = await authenticate(self.hass, self._host)
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(title=DEFAULT_NAME, data=info)
return self.async_show_form(step_id="link", errors=errors)
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
from homeassistant.components import vacuum
def test_deprecated_base_class(caplog):
"""Test deprecated base class."""
class CustomVacuum(vacuum.VacuumDevice):
pass
class CustomStateVacuum(vacuum.StateVacuumDevice):
pass
CustomVacuum()
assert "VacuumDevice is deprecated, modify CustomVacuum" in caplog.text
CustomStateVacuum()
assert "StateVacuumDevice is deprecated, modify CustomStateVacuum" in caplog.text
|
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import getpass
import logging
import typing
import six
import verboselogs
from requests import Session
from .looters import HashtagLooter, ProfileLooter
from .pbar import TqdmProgressBar
if typing.TYPE_CHECKING:
from typing import Any, Dict, Mapping, Optional, Text, Type, Union
from .looter import InstaLooter
#: The module logger
logger = verboselogs.VerboseLogger(__name__)
class BatchRunner(object):
"""Run ``InstaLooter`` in batch mode, using a configuration file.
"""
_CLS_MAP = {
'users': ProfileLooter,
'hashtag': HashtagLooter,
} # type: Mapping[Text, Type[InstaLooter]]
def __init__(self, handle, args=None):
# type: (Any, Optional[Mapping[Text, Any]]) -> None
close_handle = False
if isinstance(handle, six.binary_type):
handle = handle.decode('utf-8')
if isinstance(handle, six.text_type):
_handle = open(handle) # type: typing.IO
close_handle = True
else:
_handle = handle
try:
self.args = args or {}
self.parser = six.moves.configparser.ConfigParser()
getattr(self.parser, "readfp" if six.PY2 else "read_file")(_handle)
finally:
if close_handle:
_handle.close()
@typing.overload
def _getboolean(self, section_id, key, default):
# type: (Text, Text, bool) -> bool
pass
@typing.overload
def _getboolean(self, section_id, key):
# type: (Text, Text) -> Optional[bool]
pass
@typing.overload
def _getboolean(self, section_id, key, default):
# type: (Text, Text, None) -> Optional[bool]
pass
def _getboolean(self, section_id, key, default=None):
# type: (Text, Text, Optional[bool]) -> Optional[bool]
if self.parser.has_option(section_id, key):
return self.parser.getboolean(section_id, key)
return default
@typing.overload
def _getint(self, section_id, key, default):
# type: (Text, Text, None) -> Optional[int]
pass
@typing.overload
def _getint(self, section_id, key):
# type: (Text, Text) -> Optional[int]
pass
@typing.overload
def _getint(self, section_id, key, default):
# type: (Text, Text, int) -> int
pass
def _getint(self, section_id, key, default=None):
# type: (Text, Text, Optional[int]) -> Optional[int]
if self.parser.has_option(section_id, key):
return self.parser.getint(section_id, key)
return default
@typing.overload
def _get(self, section_id, key, default):
# type: (Text, Text, None) -> Optional[Text]
pass
@typing.overload
def _get(self, section_id, key):
# type: (Text, Text) -> Optional[Text]
pass
@typing.overload
def _get(self, section_id, key, default):
# type: (Text, Text, Text) -> Text
pass
def _get(self, section_id, key, default=None):
# type: (Text, Text, Optional[Text]) -> Optional[Text]
if self.parser.has_option(section_id, key):
return self.parser.get(section_id, key)
return default
def run_all(self):
# type: () -> None
"""Run all the jobs specified in the configuration file.
"""
logger.debug("Creating batch session")
session = Session()
for section_id in self.parser.sections():
self.run_job(section_id, session=session)
def run_job(self, section_id, session=None):
# type: (Text, Optional[Session]) -> None
"""Run a job as described in the section named ``section_id``.
Raises:
KeyError: when the section could not be found.
"""
if not self.parser.has_section(section_id):
raise KeyError('section not found: {}'.format(section_id))
session = session or Session()
for name, looter_cls in six.iteritems(self._CLS_MAP):
targets = self.get_targets(self._get(section_id, name))
quiet = self._getboolean(
section_id, "quiet", self.args.get("--quiet", False))
if targets:
logger.info("Launching {} job for section {}".format(name, section_id))
for target, directory in six.iteritems(targets):
try:
logger.info("Downloading {} to {}".format(target, directory))
looter = looter_cls(
target,
add_metadata=self._getboolean(section_id, 'add-metadata', False),
get_videos=self._getboolean(section_id, 'get-videos', False),
videos_only=self._getboolean(section_id, 'videos-only', False),
jobs=self._getint(section_id, 'jobs', 16),
template=self._get(section_id, 'template', '{id}'),
dump_json=self._getboolean(section_id, 'dump-json', False),
dump_only=self._getboolean(section_id, 'dump-only', False),
extended_dump=self._getboolean(section_id, 'extended-dump', False),
session=session)
if self.parser.has_option(section_id, 'username'):
looter.logout()
username = self._get(section_id, 'username')
password = self._get(section_id, 'password') or \
getpass.getpass('Password for "{}": '.format(username))
looter.login(username, password)
n = looter.download(
directory,
media_count=self._getint(section_id, 'num-to-dl'),
# FIXME: timeframe=self._get(section_id, 'timeframe'),
new_only=self._getboolean(section_id, 'new', False),
pgpbar_cls=None if quiet else TqdmProgressBar,
dlpbar_cls=None if quiet else TqdmProgressBar)
                        logger.success("Downloaded %i media items!", n)
except Exception as exception:
logger.error(six.text_type(exception))
def get_targets(self, raw_string):
# type: (Optional[Text]) -> Dict[Text, Text]
"""Extract targets from a string in 'key: value' format.
"""
targets = {}
if raw_string is not None:
for line in raw_string.splitlines():
if line:
target, directory = line.split(':', 1)
targets[target.strip()] = directory.strip()
return targets
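# Illustrative configuration sketch (hypothetical file, assembled from the option
# names read above; see instalooter's own documentation for the authoritative format):
#
#     [my job]
#     users =
#         instagram: ~/Pictures/instagram
#     hashtag =
#         cats: ~/Pictures/cats
#     num-to-dl = 10
#     get-videos = true
#     add-metadata = false
#     quiet = true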
|
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.gree.const import DOMAIN as GREE_DOMAIN
async def test_creating_entry_sets_up_climate(hass, discovery, device, setup):
"""Test setting up Gree creates the climate components."""
result = await hass.config_entries.flow.async_init(
GREE_DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
assert len(setup.mock_calls) == 1
|
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
from elephas.spark_model import SparkMLlibModel
from elephas.utils.rdd_utils import to_labeled_point
from pyspark import SparkContext, SparkConf
# Define basic parameters
batch_size = 64
nb_classes = 10
epochs = 3
# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(128, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
# Compile model
rms = RMSprop()
model.compile(rms, "categorical_crossentropy", ['acc'])
# Create Spark context
conf = SparkConf().setAppName('Mnist_Spark_MLP').setMaster('local[8]')
sc = SparkContext(conf=conf)
# Build RDD from numpy features and labels
lp_rdd = to_labeled_point(sc, x_train, y_train, categorical=True)
# Initialize SparkModel from Keras model and Spark context
spark_model = SparkMLlibModel(model=model, frequency='epoch', mode='synchronous')
# Train Spark model
spark_model.fit(lp_rdd, epochs=epochs, batch_size=batch_size, verbose=0,
                validation_split=0.1, categorical=True, nb_classes=nb_classes)
# Evaluate Spark model by evaluating the underlying model
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
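# Shut down the Spark context now that training and evaluation are done.
sc.stop()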
|
import logging
from homeassistant.components.python_script import DOMAIN, FOLDER, execute
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.setup import async_setup_component
from tests.async_mock import mock_open, patch
from tests.common import patch_yaml_files
async def test_setup(hass):
"""Test we can discover scripts."""
scripts = [
"/some/config/dir/python_scripts/hello.py",
"/some/config/dir/python_scripts/world_beer.py",
]
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch("homeassistant.components.python_script.glob.iglob", return_value=scripts):
res = await async_setup_component(hass, "python_script", {})
assert res
assert hass.services.has_service("python_script", "hello")
assert hass.services.has_service("python_script", "world_beer")
with patch(
"homeassistant.components.python_script.open",
mock_open(read_data="fake source"),
create=True,
), patch("homeassistant.components.python_script.execute") as mock_ex:
await hass.services.async_call(
"python_script", "hello", {"some": "data"}, blocking=True
)
assert len(mock_ex.mock_calls) == 1
        # mock_calls[0][1] is the tuple of positional args passed to execute().
        called_hass, script, source, data = mock_ex.mock_calls[0][1]
        assert called_hass is hass
assert script == "hello.py"
assert source == "fake source"
assert data == {"some": "data"}
async def test_setup_fails_on_no_dir(hass, caplog):
"""Test we fail setup when no dir found."""
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=False
):
res = await async_setup_component(hass, "python_script", {})
assert not res
assert "Folder python_scripts not found in configuration folder" in caplog.text
async def test_execute_with_data(hass, caplog):
"""Test executing a script."""
caplog.set_level(logging.WARNING)
source = """
hass.states.set('test.entity', data.get('name', 'not set'))
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {"name": "paulus"})
await hass.async_block_till_done()
assert hass.states.is_state("test.entity", "paulus")
# No errors logged = good
assert caplog.text == ""
async def test_execute_warns_print(hass, caplog):
"""Test print triggers warning."""
caplog.set_level(logging.WARNING)
source = """
print("This triggers warning.")
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Don't use print() inside scripts." in caplog.text
async def test_execute_logging(hass, caplog):
"""Test logging works."""
caplog.set_level(logging.INFO)
source = """
logger.info('Logging from inside script')
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Logging from inside script" in caplog.text
async def test_execute_compile_error(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
this is not valid Python
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Error loading script test.py" in caplog.text
async def test_execute_runtime_error(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
raise Exception('boom')
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Error executing script: boom" in caplog.text
async def test_accessing_async_methods(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
hass.async_stop()
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Not allowed to access async methods" in caplog.text
async def test_using_complex_structures(hass, caplog):
"""Test that dicts and lists work."""
caplog.set_level(logging.INFO)
source = """
mydict = {"a": 1, "b": 2}
mylist = [1, 2, 3, 4]
logger.info('Logging from inside script: %s %s' % (mydict["a"], mylist[2]))
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Logging from inside script: 1 3" in caplog.text
async def test_accessing_forbidden_methods(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
for source, name in {
"hass.stop()": "HomeAssistant.stop",
"dt_util.set_default_time_zone()": "module.set_default_time_zone",
"datetime.non_existing": "module.non_existing",
"time.tzset()": "TimeWrapper.tzset",
}.items():
caplog.records.clear()
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert f"Not allowed to access {name}" in caplog.text
async def test_iterating(hass):
"""Test compile error logs error."""
source = """
for i in [1, 2]:
hass.states.set('hello.{}'.format(i), 'world')
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.1", "world")
assert hass.states.is_state("hello.2", "world")
async def test_unpacking_sequence(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
a,b = (1,2)
ab_list = [(a,b) for a,b in [(1, 2), (3, 4)]]
hass.states.set('hello.a', a)
hass.states.set('hello.b', b)
hass.states.set('hello.ab_list', '{}'.format(ab_list))
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.a", "1")
assert hass.states.is_state("hello.b", "2")
assert hass.states.is_state("hello.ab_list", "[(1, 2), (3, 4)]")
# No errors logged = good
assert caplog.text == ""
async def test_execute_sorted(hass, caplog):
"""Test sorted() function."""
caplog.set_level(logging.ERROR)
source = """
a = sorted([3,1,2])
assert(a == [1,2,3])
hass.states.set('hello.a', a[0])
hass.states.set('hello.b', a[1])
hass.states.set('hello.c', a[2])
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.a", "1")
assert hass.states.is_state("hello.b", "2")
assert hass.states.is_state("hello.c", "3")
# No errors logged = good
assert caplog.text == ""
async def test_exposed_modules(hass, caplog):
"""Test datetime and time modules exposed."""
caplog.set_level(logging.ERROR)
source = """
hass.states.set('module.time', time.strftime('%Y', time.gmtime(521276400)))
hass.states.set('module.time_strptime',
time.strftime('%H:%M', time.strptime('12:34', '%H:%M')))
hass.states.set('module.datetime',
datetime.timedelta(minutes=1).total_seconds())
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("module.time", "1986")
assert hass.states.is_state("module.time_strptime", "12:34")
assert hass.states.is_state("module.datetime", "60.0")
# No errors logged = good
assert caplog.text == ""
async def test_execute_functions(hass, caplog):
"""Test functions defined in script can call one another."""
caplog.set_level(logging.ERROR)
source = """
def a():
hass.states.set('hello.a', 'one')
def b():
a()
hass.states.set('hello.b', 'two')
b()
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.a", "one")
assert hass.states.is_state("hello.b", "two")
# No errors logged = good
assert caplog.text == ""
async def test_reload(hass):
"""Test we can re-discover scripts."""
scripts = [
"/some/config/dir/python_scripts/hello.py",
"/some/config/dir/python_scripts/world_beer.py",
]
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch("homeassistant.components.python_script.glob.iglob", return_value=scripts):
res = await async_setup_component(hass, "python_script", {})
assert res
assert hass.services.has_service("python_script", "hello")
assert hass.services.has_service("python_script", "world_beer")
assert hass.services.has_service("python_script", "reload")
scripts = [
"/some/config/dir/python_scripts/hello2.py",
"/some/config/dir/python_scripts/world_beer.py",
]
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch("homeassistant.components.python_script.glob.iglob", return_value=scripts):
await hass.services.async_call("python_script", "reload", {}, blocking=True)
assert not hass.services.has_service("python_script", "hello")
assert hass.services.has_service("python_script", "hello2")
assert hass.services.has_service("python_script", "world_beer")
assert hass.services.has_service("python_script", "reload")
async def test_service_descriptions(hass):
"""Test that service descriptions are loaded and reloaded correctly."""
# Test 1: no user-provided services.yaml file
scripts1 = [
"/some/config/dir/python_scripts/hello.py",
"/some/config/dir/python_scripts/world_beer.py",
]
service_descriptions1 = (
"hello:\n"
" description: Description of hello.py.\n"
" fields:\n"
" fake_param:\n"
" description: Parameter used by hello.py.\n"
" example: 'This is a test of python_script.hello'"
)
services_yaml1 = {
"{}/{}/services.yaml".format(
hass.config.config_dir, FOLDER
): service_descriptions1
}
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch(
"homeassistant.components.python_script.glob.iglob", return_value=scripts1
), patch(
"homeassistant.components.python_script.os.path.exists", return_value=True
), patch_yaml_files(
services_yaml1
):
await async_setup_component(hass, DOMAIN, {})
descriptions = await async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert descriptions[DOMAIN]["hello"]["description"] == "Description of hello.py."
assert (
descriptions[DOMAIN]["hello"]["fields"]["fake_param"]["description"]
== "Parameter used by hello.py."
)
assert (
descriptions[DOMAIN]["hello"]["fields"]["fake_param"]["example"]
== "This is a test of python_script.hello"
)
assert descriptions[DOMAIN]["world_beer"]["description"] == ""
assert bool(descriptions[DOMAIN]["world_beer"]["fields"]) is False
# Test 2: user-provided services.yaml file
scripts2 = [
"/some/config/dir/python_scripts/hello2.py",
"/some/config/dir/python_scripts/world_beer.py",
]
service_descriptions2 = (
"hello2:\n"
" description: Description of hello2.py.\n"
" fields:\n"
" fake_param:\n"
" description: Parameter used by hello2.py.\n"
" example: 'This is a test of python_script.hello2'"
)
services_yaml2 = {
"{}/{}/services.yaml".format(
hass.config.config_dir, FOLDER
): service_descriptions2
}
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch(
"homeassistant.components.python_script.glob.iglob", return_value=scripts2
), patch(
"homeassistant.components.python_script.os.path.exists", return_value=True
), patch_yaml_files(
services_yaml2
):
await hass.services.async_call(DOMAIN, "reload", {}, blocking=True)
descriptions = await async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert descriptions[DOMAIN]["hello2"]["description"] == "Description of hello2.py."
assert (
descriptions[DOMAIN]["hello2"]["fields"]["fake_param"]["description"]
== "Parameter used by hello2.py."
)
assert (
descriptions[DOMAIN]["hello2"]["fields"]["fake_param"]["example"]
== "This is a test of python_script.hello2"
)
async def test_sleep_warns_one(hass, caplog):
"""Test time.sleep warns once."""
caplog.set_level(logging.WARNING)
source = """
time.sleep(2)
time.sleep(5)
"""
with patch("homeassistant.components.python_script.time.sleep"):
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert caplog.text.count("time.sleep") == 1
|
__docformat__ = "restructuredtext en"
import os
import stat
from resource import getrlimit, setrlimit, RLIMIT_CPU, RLIMIT_AS
from signal import signal, SIGXCPU, SIGKILL, SIGUSR2, SIGUSR1
from threading import Timer, currentThread, Thread, Event
from time import time
from logilab.common.tree import Node
class NoSuchProcess(Exception): pass
def proc_exists(pid):
"""check the a pid is registered in /proc
raise NoSuchProcess exception if not
"""
if not os.path.exists('/proc/%s' % pid):
raise NoSuchProcess()
PPID = 3
UTIME = 13
STIME = 14
CUTIME = 15
CSTIME = 16
VSIZE = 22
class ProcInfo(Node):
"""provide access to process information found in /proc"""
def __init__(self, pid):
self.pid = int(pid)
Node.__init__(self, self.pid)
proc_exists(self.pid)
self.file = '/proc/%s/stat' % self.pid
self.ppid = int(self.status()[PPID])
def memory_usage(self):
"""return the memory usage of the process in Ko"""
try :
return int(self.status()[VSIZE])
except IOError:
return 0
def lineage_memory_usage(self):
return self.memory_usage() + sum([child.lineage_memory_usage()
for child in self.children])
def time(self, children=0):
"""return the number of jiffies that this process has been scheduled
in user and kernel mode"""
status = self.status()
time = int(status[UTIME]) + int(status[STIME])
if children:
time += int(status[CUTIME]) + int(status[CSTIME])
return time
def status(self):
"""return the list of fields found in /proc/<pid>/stat"""
return open(self.file).read().split()
def name(self):
"""return the process name found in /proc/<pid>/stat
"""
return self.status()[1].strip('()')
def age(self):
"""return the age of the process
"""
return os.stat(self.file)[stat.ST_MTIME]
class ProcInfoLoader:
"""manage process information"""
def __init__(self):
self._loaded = {}
def list_pids(self):
"""return a list of existent process ids"""
for subdir in os.listdir('/proc'):
if subdir.isdigit():
yield int(subdir)
def load(self, pid):
"""get a ProcInfo object for a given pid"""
pid = int(pid)
try:
return self._loaded[pid]
except KeyError:
procinfo = ProcInfo(pid)
procinfo.manager = self
self._loaded[pid] = procinfo
return procinfo
def load_all(self):
"""load all processes information"""
for pid in self.list_pids():
try:
procinfo = self.load(pid)
if procinfo.parent is None and procinfo.ppid:
pprocinfo = self.load(procinfo.ppid)
pprocinfo.append(procinfo)
except NoSuchProcess:
pass
try:
class ResourceError(BaseException):
"""Error raise when resource limit is reached"""
limit = "Unknown Resource Limit"
except NameError:
class ResourceError(Exception):
"""Error raise when resource limit is reached"""
limit = "Unknown Resource Limit"
class XCPUError(ResourceError):
"""Error raised when CPU Time limit is reached"""
limit = "CPU Time"
class LineageMemoryError(ResourceError):
"""Error raised when the total amount of memory used by a process and
it's child is reached"""
limit = "Lineage total Memory"
class TimeoutError(ResourceError):
"""Error raised when the process is running for to much time"""
limit = "Real Time"
# MemoryError is raised directly by the interpreter and can't be made a
# subclass of ResourceError, so both are grouped in a tuple instead.
RESOURCE_LIMIT_EXCEPTION = (ResourceError, MemoryError)
class MemorySentinel(Thread):
"""A class checking a process don't use too much memory in a separated
daemonic thread
"""
def __init__(self, interval, memory_limit, gpid=os.getpid()):
Thread.__init__(self, target=self._run, name="Test.Sentinel")
self.memory_limit = memory_limit
self._stop = Event()
self.interval = interval
self.setDaemon(True)
self.gpid = gpid
def stop(self):
"""stop ap"""
self._stop.set()
def _run(self):
pil = ProcInfoLoader()
while not self._stop.isSet():
if self.memory_limit <= pil.load(self.gpid).lineage_memory_usage():
os.killpg(self.gpid, SIGUSR1)
self._stop.wait(self.interval)
class ResourceController:
def __init__(self, max_cpu_time=None, max_time=None, max_memory=None,
max_reprieve=60):
if SIGXCPU == -1:
raise RuntimeError("Unsupported platform")
self.max_time = max_time
self.max_memory = max_memory
self.max_cpu_time = max_cpu_time
self._reprieve = max_reprieve
self._timer = None
self._msentinel = None
self._old_max_memory = None
self._old_usr1_hdlr = None
self._old_max_cpu_time = None
self._old_usr2_hdlr = None
self._old_sigxcpu_hdlr = None
self._limit_set = 0
self._abort_try = 0
self._start_time = None
self._elapse_time = 0
def _hangle_sig_timeout(self, sig, frame):
raise TimeoutError()
def _hangle_sig_memory(self, sig, frame):
if self._abort_try < self._reprieve:
self._abort_try += 1
raise LineageMemoryError("Memory limit reached")
else:
os.killpg(os.getpid(), SIGKILL)
def _handle_sigxcpu(self, sig, frame):
if self._abort_try < self._reprieve:
self._abort_try += 1
raise XCPUError("Soft CPU time limit reached")
else:
os.killpg(os.getpid(), SIGKILL)
def _time_out(self):
if self._abort_try < self._reprieve:
self._abort_try += 1
os.killpg(os.getpid(), SIGUSR2)
if self._limit_set > 0:
self._timer = Timer(1, self._time_out)
self._timer.start()
else:
os.killpg(os.getpid(), SIGKILL)
def setup_limit(self):
"""set up the process limit"""
assert currentThread().getName() == 'MainThread'
os.setpgrp()
if self._limit_set <= 0:
if self.max_time is not None:
self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
self._time_out)
self._start_time = int(time())
self._timer.start()
if self.max_cpu_time is not None:
self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
setrlimit(RLIMIT_CPU, cpu_limit)
if self.max_memory is not None:
self._msentinel = MemorySentinel(1, int(self.max_memory) )
self._old_max_memory = getrlimit(RLIMIT_AS)
self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
as_limit = (int(self.max_memory), self._old_max_memory[1])
setrlimit(RLIMIT_AS, as_limit)
self._msentinel.start()
self._limit_set += 1
def clean_limit(self):
"""reinstall the old process limit"""
if self._limit_set > 0:
if self.max_time is not None:
self._timer.cancel()
self._elapse_time += int(time())-self._start_time
self._timer = None
signal(SIGUSR2, self._old_usr2_hdlr)
if self.max_cpu_time is not None:
setrlimit(RLIMIT_CPU, self._old_max_cpu_time)
signal(SIGXCPU, self._old_sigxcpu_hdlr)
if self.max_memory is not None:
self._msentinel.stop()
self._msentinel = None
setrlimit(RLIMIT_AS, self._old_max_memory)
signal(SIGUSR1, self._old_usr1_hdlr)
self._limit_set -= 1
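# Illustrative usage sketch (not part of the original module): wrap a
# workload with setup_limit()/clean_limit() to enforce CPU, real-time and
# memory ceilings. run_workload() and the limit values are hypothetical.
if __name__ == '__main__':
    controller = ResourceController(max_cpu_time=60,              # CPU seconds
                                    max_time=120,                  # wall-clock seconds
                                    max_memory=512 * 1024 * 1024)  # bytes
    controller.setup_limit()
    try:
        run_workload()  # hypothetical function doing the real work
    except RESOURCE_LIMIT_EXCEPTION as exc:
        print('aborted, %s limit reached' % getattr(exc, 'limit', 'memory'))
    finally:
        controller.clean_limit()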
|
from aiohttp import ClientError as HTTPClientError
from homeassistant.components.directv.const import CONF_RECEIVER_ID, DOMAIN
from homeassistant.components.ssdp import ATTR_UPNP_SERIAL
from homeassistant.config_entries import SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_SOURCE
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.components.directv import (
HOST,
MOCK_SSDP_DISCOVERY_INFO,
MOCK_USER_INPUT,
RECEIVER_ID,
UPNP_SERIAL,
mock_connection,
setup_integration,
)
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_show_user_form(hass: HomeAssistantType) -> None:
"""Test that the user set up form is served."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["step_id"] == "user"
assert result["type"] == RESULT_TYPE_FORM
async def test_show_ssdp_form(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test that the ssdp confirmation form is served."""
mock_connection(aioclient_mock)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
assert result["description_placeholders"] == {CONF_NAME: HOST}
async def test_cannot_connect(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on connection error."""
aioclient_mock.get("http://127.0.0.1:8080/info/getVersion", exc=HTTPClientError)
user_input = MOCK_USER_INPUT.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_ssdp_cannot_connect(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on connection error."""
aioclient_mock.get("http://127.0.0.1:8080/info/getVersion", exc=HTTPClientError)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_ssdp_confirm_cannot_connect(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on connection error."""
aioclient_mock.get("http://127.0.0.1:8080/info/getVersion", exc=HTTPClientError)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP, CONF_HOST: HOST, CONF_NAME: HOST},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_user_device_exists_abort(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort user flow if DirecTV receiver already configured."""
await setup_integration(hass, aioclient_mock, skip_entry_setup=True)
user_input = MOCK_USER_INPUT.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_device_exists_abort(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow if DirecTV receiver already configured."""
await setup_integration(hass, aioclient_mock, skip_entry_setup=True)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_with_receiver_id_device_exists_abort(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow if DirecTV receiver already configured."""
await setup_integration(hass, aioclient_mock, skip_entry_setup=True)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
discovery_info[ATTR_UPNP_SERIAL] = UPNP_SERIAL
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_unknown_error(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on unknown error."""
user_input = MOCK_USER_INPUT.copy()
with patch(
"homeassistant.components.directv.config_flow.DIRECTV.update",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_ssdp_unknown_error(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on unknown error."""
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
with patch(
"homeassistant.components.directv.config_flow.DIRECTV.update",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_ssdp_confirm_unknown_error(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on unknown error."""
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
with patch(
"homeassistant.components.directv.config_flow.DIRECTV.update",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP, CONF_HOST: HOST, CONF_NAME: HOST},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_full_user_flow_implementation(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the full manual user flow from start to finish."""
mock_connection(aioclient_mock)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
user_input = MOCK_USER_INPUT.copy()
with patch(
"homeassistant.components.directv.async_setup_entry", return_value=True
), patch("homeassistant.components.directv.async_setup", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=user_input,
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"]
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_RECEIVER_ID] == RECEIVER_ID
async def test_full_ssdp_flow_implementation(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the full SSDP flow from start to finish."""
mock_connection(aioclient_mock)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
assert result["description_placeholders"] == {CONF_NAME: HOST}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"]
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_RECEIVER_ID] == RECEIVER_ID
|
import pytest
from homeassistant.components.demo import DOMAIN
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_KELVIN,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_RGB_COLOR,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
DOMAIN as LIGHT_DOMAIN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
ENTITY_LIGHT = "light.bed_light"
@pytest.fixture(autouse=True)
async def setup_comp(hass):
"""Set up demo component."""
assert await async_setup_component(
hass, LIGHT_DOMAIN, {LIGHT_DOMAIN: {"platform": DOMAIN}}
)
await hass.async_block_till_done()
async def test_state_attributes(hass):
"""Test light state attributes."""
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_XY_COLOR: (0.4, 0.4), ATTR_BRIGHTNESS: 25},
blocking=True,
)
state = hass.states.get(ENTITY_LIGHT)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_XY_COLOR) == (0.4, 0.4)
assert state.attributes.get(ATTR_BRIGHTNESS) == 25
assert state.attributes.get(ATTR_RGB_COLOR) == (255, 234, 164)
assert state.attributes.get(ATTR_EFFECT) == "rainbow"
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: ENTITY_LIGHT,
ATTR_RGB_COLOR: (251, 253, 255),
ATTR_WHITE_VALUE: 254,
},
blocking=True,
)
state = hass.states.get(ENTITY_LIGHT)
assert state.attributes.get(ATTR_WHITE_VALUE) == 254
assert state.attributes.get(ATTR_RGB_COLOR) == (250, 252, 255)
assert state.attributes.get(ATTR_XY_COLOR) == (0.319, 0.326)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_EFFECT: "none", ATTR_COLOR_TEMP: 400},
blocking=True,
)
state = hass.states.get(ENTITY_LIGHT)
assert state.attributes.get(ATTR_COLOR_TEMP) == 400
assert state.attributes.get(ATTR_MIN_MIREDS) == 153
assert state.attributes.get(ATTR_MAX_MIREDS) == 500
assert state.attributes.get(ATTR_EFFECT) == "none"
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_LIGHT, ATTR_BRIGHTNESS_PCT: 50, ATTR_KELVIN: 3000},
blocking=True,
)
state = hass.states.get(ENTITY_LIGHT)
assert state.attributes.get(ATTR_COLOR_TEMP) == 333
assert state.attributes.get(ATTR_BRIGHTNESS) == 128
async def test_turn_off(hass):
"""Test light turn off method."""
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_LIGHT}, blocking=True
)
state = hass.states.get(ENTITY_LIGHT)
assert state.state == STATE_ON
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_LIGHT}, blocking=True
)
state = hass.states.get(ENTITY_LIGHT)
assert state.state == STATE_OFF
async def test_turn_off_without_entity_id(hass):
"""Test light turn off all lights."""
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: "all"}, blocking=True
)
state = hass.states.get(ENTITY_LIGHT)
assert state.state == STATE_ON
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "all"}, blocking=True
)
state = hass.states.get(ENTITY_LIGHT)
assert state.state == STATE_OFF
|
import logging
from typing import Any, Callable, List, Optional, Union
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import EVENT_HOMEASSISTANT_START, CoreState, callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
Event,
TrackTemplate,
TrackTemplateResult,
async_track_template_result,
)
from homeassistant.helpers.template import Template, result_as_boolean
_LOGGER = logging.getLogger(__name__)
class _TemplateAttribute:
"""Attribute value linked to template result."""
def __init__(
self,
entity: Entity,
attribute: str,
template: Template,
validator: Callable[[Any], Any] = None,
on_update: Optional[Callable[[Any], None]] = None,
none_on_template_error: Optional[bool] = False,
):
"""Template attribute."""
self._entity = entity
self._attribute = attribute
self.template = template
self.validator = validator
self.on_update = on_update
self.async_update = None
self.none_on_template_error = none_on_template_error
@callback
def async_setup(self):
"""Config update path for the attribute."""
if self.on_update:
return
if not hasattr(self._entity, self._attribute):
raise AttributeError(f"Attribute '{self._attribute}' does not exist.")
self.on_update = self._default_update
@callback
def _default_update(self, result):
attr_result = None if isinstance(result, TemplateError) else result
setattr(self._entity, self._attribute, attr_result)
@callback
def handle_result(
self,
event: Optional[Event],
template: Template,
last_result: Union[str, None, TemplateError],
result: Union[str, TemplateError],
) -> None:
"""Handle a template result event callback."""
if isinstance(result, TemplateError):
_LOGGER.error(
"TemplateError('%s') "
"while processing template '%s' "
"for attribute '%s' in entity '%s'",
result,
self.template,
self._attribute,
self._entity.entity_id,
)
if self.none_on_template_error:
self._default_update(result)
else:
self.on_update(result)
return
if not self.validator:
self.on_update(result)
return
try:
validated = self.validator(result)
except vol.Invalid as ex:
_LOGGER.error(
"Error validating template result '%s' "
"from template '%s' "
"for attribute '%s' in entity %s "
"validation message '%s'",
result,
self.template,
self._attribute,
self._entity.entity_id,
ex.msg,
)
self.on_update(None)
return
self.on_update(validated)
return
class TemplateEntity(Entity):
"""Entity that uses templates to calculate attributes."""
def __init__(
self,
*,
availability_template=None,
icon_template=None,
entity_picture_template=None,
attribute_templates=None,
):
"""Template Entity."""
self._template_attrs = {}
self._async_update = None
self._attribute_templates = attribute_templates
self._attributes = {}
self._availability_template = availability_template
self._available = True
self._icon_template = icon_template
self._entity_picture_template = entity_picture_template
self._icon = None
self._entity_picture = None
self._self_ref_update_count = 0
@property
def should_poll(self):
"""No polling needed."""
return False
@callback
def _update_available(self, result):
if isinstance(result, TemplateError):
self._available = True
return
self._available = result_as_boolean(result)
@callback
def _update_state(self, result):
if self._availability_template:
return
self._available = not isinstance(result, TemplateError)
@property
def available(self) -> bool:
"""Return if the device is available."""
return self._available
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def entity_picture(self):
"""Return the entity_picture to use in the frontend, if any."""
return self._entity_picture
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@callback
def _add_attribute_template(self, attribute_key, attribute_template):
"""Create a template tracker for the attribute."""
def _update_attribute(result):
attr_result = None if isinstance(result, TemplateError) else result
self._attributes[attribute_key] = attr_result
self.add_template_attribute(
attribute_key, attribute_template, None, _update_attribute
)
def add_template_attribute(
self,
attribute: str,
template: Template,
validator: Callable[[Any], Any] = None,
on_update: Optional[Callable[[Any], None]] = None,
none_on_template_error: bool = False,
) -> None:
"""
Call in the constructor to add a template linked to an attribute.
Parameters
----------
attribute
The name of the attribute to link to. This attribute must exist
unless a custom on_update method is supplied.
template
The template to calculate.
validator
Validator function to parse the result and ensure it's valid.
on_update
Called to store the template result rather than storing it in
the supplied attribute. Passed the result of the validator, or None
if the template or validator resulted in an error.
"""
attribute = _TemplateAttribute(
self, attribute, template, validator, on_update, none_on_template_error
)
self._template_attrs.setdefault(template, [])
self._template_attrs[template].append(attribute)
@callback
def _handle_results(
self,
event: Optional[Event],
updates: List[TrackTemplateResult],
) -> None:
"""Call back the results to the attributes."""
if event:
self.async_set_context(event.context)
entity_id = event and event.data.get(ATTR_ENTITY_ID)
if entity_id and entity_id == self.entity_id:
self._self_ref_update_count += 1
else:
self._self_ref_update_count = 0
if self._self_ref_update_count > len(self._template_attrs):
for update in updates:
_LOGGER.warning(
"Template loop detected while processing event: %s, skipping template render for Template[%s]",
event,
update.template.template,
)
return
for update in updates:
for attr in self._template_attrs[update.template]:
attr.handle_result(
event, update.template, update.last_result, update.result
)
self.async_write_ha_state()
async def _async_template_startup(self, *_) -> None:
template_var_tups = []
for template, attributes in self._template_attrs.items():
template_var_tups.append(TrackTemplate(template, None))
for attribute in attributes:
attribute.async_setup()
result_info = async_track_template_result(
self.hass, template_var_tups, self._handle_results
)
self.async_on_remove(result_info.async_remove)
self._async_update = result_info.async_refresh
result_info.async_refresh()
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
if self._availability_template is not None:
self.add_template_attribute(
"_available", self._availability_template, None, self._update_available
)
if self._attribute_templates is not None:
for key, value in self._attribute_templates.items():
self._add_attribute_template(key, value)
if self._icon_template is not None:
self.add_template_attribute(
"_icon", self._icon_template, vol.Or(cv.whitespace, cv.icon)
)
if self._entity_picture_template is not None:
self.add_template_attribute(
"_entity_picture", self._entity_picture_template
)
if self.hass.state == CoreState.running:
await self._async_template_startup()
return
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, self._async_template_startup
)
async def async_update(self) -> None:
"""Call for forced update."""
self._async_update()
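# Illustrative sketch, not part of this helper module: a minimal platform
# entity subclassing TemplateEntity and linking its state to a template via
# add_template_attribute(). The class and template names are hypothetical.
class ExampleTemplateSensor(TemplateEntity):
    """Sensor whose state follows a user supplied template."""

    def __init__(self, state_template: Template):
        """Initialize the example sensor."""
        super().__init__()
        self._state = None
        self.add_template_attribute("_state", state_template, None, self._set_state)

    @callback
    def _set_state(self, result):
        """Store the rendered template result as the entity state."""
        self._state = None if isinstance(result, TemplateError) else result

    @property
    def state(self):
        """Return the current state."""
        return self._state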
|
from dataclasses import dataclass
from datetime import timedelta
import logging
from python_awair.devices import AwairDevice
from homeassistant.const import (
ATTR_DEVICE_CLASS,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
)
API_CO2 = "carbon_dioxide"
API_DUST = "dust"
API_HUMID = "humidity"
API_LUX = "illuminance"
API_PM10 = "particulate_matter_10"
API_PM25 = "particulate_matter_2_5"
API_SCORE = "score"
API_SPL_A = "sound_pressure_level"
API_TEMP = "temperature"
API_TIMEOUT = 20
API_VOC = "volatile_organic_compounds"
ATTRIBUTION = "Awair air quality sensor"
ATTR_ICON = "icon"
ATTR_LABEL = "label"
ATTR_UNIT = "unit"
ATTR_UNIQUE_ID = "unique_id"
DOMAIN = "awair"
DUST_ALIASES = [API_PM25, API_PM10]
LOGGER = logging.getLogger(__package__)
UPDATE_INTERVAL = timedelta(minutes=5)
SENSOR_TYPES = {
API_SCORE: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_UNIT: PERCENTAGE,
ATTR_LABEL: "Awair score",
ATTR_UNIQUE_ID: "score", # matches legacy format
},
API_HUMID: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ICON: None,
ATTR_UNIT: PERCENTAGE,
ATTR_LABEL: "Humidity",
ATTR_UNIQUE_ID: "HUMID", # matches legacy format
},
API_LUX: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_ILLUMINANCE,
ATTR_ICON: None,
ATTR_UNIT: LIGHT_LUX,
ATTR_LABEL: "Illuminance",
ATTR_UNIQUE_ID: "illuminance",
},
API_SPL_A: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:ear-hearing",
ATTR_UNIT: "dBa",
ATTR_LABEL: "Sound level",
ATTR_UNIQUE_ID: "sound_level",
},
API_VOC: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:cloud",
ATTR_UNIT: CONCENTRATION_PARTS_PER_BILLION,
ATTR_LABEL: "Volatile organic compounds",
ATTR_UNIQUE_ID: "VOC", # matches legacy format
},
API_TEMP: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_UNIT: TEMP_CELSIUS,
ATTR_LABEL: "Temperature",
ATTR_UNIQUE_ID: "TEMP", # matches legacy format
},
API_PM25: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_UNIT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
ATTR_LABEL: "PM2.5",
ATTR_UNIQUE_ID: "PM25", # matches legacy format
},
API_PM10: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_UNIT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
ATTR_LABEL: "PM10",
ATTR_UNIQUE_ID: "PM10", # matches legacy format
},
API_CO2: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:cloud",
ATTR_UNIT: CONCENTRATION_PARTS_PER_MILLION,
ATTR_LABEL: "Carbon dioxide",
ATTR_UNIQUE_ID: "CO2", # matches legacy format
},
}
@dataclass
class AwairResult:
"""Wrapper class to hold an awair device and set of air data."""
device: AwairDevice
air_data: dict
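# Illustrative helper, not part of the original constants module: how a
# sensor platform might match SENSOR_TYPES against an AwairResult to decide
# which sensors to create. The function name is hypothetical.
def sensor_kinds_for_result(result: AwairResult) -> list:
    """Return (api key, label, unit) tuples for sensors present in the air data."""
    return [
        (api_key, description[ATTR_LABEL], description[ATTR_UNIT])
        for api_key, description in SENSOR_TYPES.items()
        if api_key in result.air_data
    ]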
|
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
STATE_MIN_VALUE = "minimal_value"
STATE_MAX_VALUE = "maximum_value"
STATE_VALUE = "value"
STATE_OBJECT = "object"
CONF_INTERVAL = "interval"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=15)
SCAN_INTERVAL = timedelta(seconds=30)
RETRY_INTERVAL = timedelta(seconds=30)
OHM_VALUE = "Value"
OHM_MIN = "Min"
OHM_MAX = "Max"
OHM_CHILDREN = "Children"
OHM_NAME = "Text"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=8085): cv.port}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Open Hardware Monitor platform."""
data = OpenHardwareMonitorData(config, hass)
if data.data is None:
raise PlatformNotReady
add_entities(data.devices, True)
class OpenHardwareMonitorDevice(Entity):
"""Device used to display information from OpenHardwareMonitor."""
def __init__(self, data, name, path, unit_of_measurement):
"""Initialize an OpenHardwareMonitor sensor."""
self._name = name
self._data = data
self.path = path
self.attributes = {}
self._unit_of_measurement = unit_of_measurement
self.value = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self.value
@property
def state_attributes(self):
"""Return the state attributes of the sun."""
return self.attributes
@classmethod
def parse_number(cls, string):
"""In some locales a decimal numbers uses ',' instead of '.'."""
return string.replace(",", ".")
def update(self):
"""Update the device from a new JSON object."""
self._data.update()
array = self._data.data[OHM_CHILDREN]
_attributes = {}
for path_index in range(0, len(self.path)):
path_number = self.path[path_index]
values = array[path_number]
if path_index == len(self.path) - 1:
self.value = self.parse_number(values[OHM_VALUE].split(" ")[0])
_attributes.update(
{
"name": values[OHM_NAME],
STATE_MIN_VALUE: self.parse_number(
values[OHM_MIN].split(" ")[0]
),
STATE_MAX_VALUE: self.parse_number(
values[OHM_MAX].split(" ")[0]
),
}
)
self.attributes = _attributes
return
array = array[path_number][OHM_CHILDREN]
_attributes.update({"level_%s" % path_index: values[OHM_NAME]})
class OpenHardwareMonitorData:
"""Class used to pull data from OHM and create sensors."""
def __init__(self, config, hass):
"""Initialize the Open Hardware Monitor data-handler."""
self.data = None
self._config = config
self._hass = hass
self.devices = []
self.initialize(utcnow())
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Hit by the timer with the configured interval."""
if self.data is None:
self.initialize(utcnow())
else:
self.refresh()
def refresh(self):
"""Download and parse JSON from OHM."""
data_url = (
f"http://{self._config.get(CONF_HOST)}:"
f"{self._config.get(CONF_PORT)}/data.json"
)
try:
response = requests.get(data_url, timeout=30)
self.data = response.json()
except requests.exceptions.ConnectionError:
_LOGGER.debug("ConnectionError: Is OpenHardwareMonitor running?")
def initialize(self, now):
"""Parse of the sensors and adding of devices."""
self.refresh()
if self.data is None:
return
self.devices = self.parse_children(self.data, [], [], [])
def parse_children(self, json, devices, path, names):
"""Recursively loop through child objects, finding the values."""
result = devices.copy()
if json[OHM_CHILDREN]:
for child_index in range(0, len(json[OHM_CHILDREN])):
child_path = path.copy()
child_path.append(child_index)
child_names = names.copy()
if path:
child_names.append(json[OHM_NAME])
obj = json[OHM_CHILDREN][child_index]
added_devices = self.parse_children(
obj, devices, child_path, child_names
)
result = result + added_devices
return result
if json[OHM_VALUE].find(" ") == -1:
return result
unit_of_measurement = json[OHM_VALUE].split(" ")[1]
child_names = names.copy()
child_names.append(json[OHM_NAME])
fullname = " ".join(child_names)
dev = OpenHardwareMonitorDevice(self, fullname, path, unit_of_measurement)
result.append(dev)
return result
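# Illustrative data, not part of the original platform: the rough shape of
# the data.json tree that parse_children() walks. Only leaf nodes whose
# "Value" contains a unit (e.g. "45.0 °C") become sensors, and the joined
# parent names give the entity name ("MYPC CPU Temperature" in this
# hypothetical tree).
EXAMPLE_OHM_TREE = {
    "Text": "Sensor",
    "Children": [
        {
            "Text": "MYPC",
            "Children": [
                {
                    "Text": "CPU",
                    "Children": [
                        {
                            "Text": "Temperature",
                            "Value": "45.0 °C",
                            "Min": "40.0 °C",
                            "Max": "80.0 °C",
                            "Children": [],
                        }
                    ],
                }
            ],
        }
    ],
}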
|
import unittest
import mock
from kalliope.core.Models.settings.Settings import Settings
from kalliope.core.TriggerLauncher import TriggerLauncher
from kalliope.core.Models.settings.Trigger import Trigger
class TestTriggerLauncher(unittest.TestCase):
"""
Class to test the launcher classes (TriggerLauncher) and their methods
"""
def setUp(self):
pass
####
# Trigger Launcher
def test_get_trigger(self):
"""
Test the Trigger Launcher trying to run the trigger
"""
trigger1 = Trigger("Trigger", {})
trigger2 = Trigger("Trigger2", {'pmdl_file': "trigger/snowboy/resources/kalliope-FR-6samples.pmdl"})
settings = Settings()
settings.triggers = [trigger1, trigger2]
trigger_folder = None
if settings.resources:
trigger_folder = settings.resources.trigger_folder
with mock.patch("kalliope.core.Utils.get_dynamic_class_instantiation") as mock_get_class_instantiation:
# Get the trigger 1
settings.default_trigger_name = "Trigger"
TriggerLauncher.get_trigger(settings=settings,
callback=None)
mock_get_class_instantiation.assert_called_once_with(package_name="trigger",
module_name=trigger1.name,
parameters=trigger1.parameters,
resources_dir=trigger_folder)
mock_get_class_instantiation.reset_mock()
# Get the trigger 2
settings.default_trigger_name = "Trigger2"
TriggerLauncher.get_trigger(settings=settings,
callback=None)
mock_get_class_instantiation.assert_called_once_with(package_name="trigger",
module_name=trigger2.name,
parameters=trigger2.parameters,
resources_dir=trigger_folder)
mock_get_class_instantiation.reset_mock()
|
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_rainmachine_pro_8_setup(hass):
"""Test that a RainMachine can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "rainmachine-pro-8.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Assert that the entity is correctly added to the entity registry
entry = entity_registry.async_get("switch.rainmachine_00ce4a")
assert entry.unique_id == "homekit-00aa0000aa0a-512"
helper = Helper(
hass, "switch.rainmachine_00ce4a", pairing, accessories[0], config_entry
)
state = await helper.poll_and_get_state()
# Assert that the friendly name is detected correctly
assert state.attributes["friendly_name"] == "RainMachine-00ce4a"
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.manufacturer == "Green Electronics LLC"
assert device.name == "RainMachine-00ce4a"
assert device.model == "SPK5 Pro"
assert device.sw_version == "1.0.4"
assert device.via_device_id is None
# The device is made up of multiple valves - make sure we have enumerated them all
entry = entity_registry.async_get("switch.rainmachine_00ce4a_2")
assert entry.unique_id == "homekit-00aa0000aa0a-768"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_3")
assert entry.unique_id == "homekit-00aa0000aa0a-1024"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_4")
assert entry.unique_id == "homekit-00aa0000aa0a-1280"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_5")
assert entry.unique_id == "homekit-00aa0000aa0a-1536"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_6")
assert entry.unique_id == "homekit-00aa0000aa0a-1792"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_7")
assert entry.unique_id == "homekit-00aa0000aa0a-2048"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_8")
assert entry.unique_id == "homekit-00aa0000aa0a-2304"
entry = entity_registry.async_get("switch.rainmachine_00ce4a_9")
assert entry is None
|
from __future__ import print_function
import os
import shutil
import sys
import requests
import zipfile
import time
DEFAULT_REPO = "ywangd"
DEFAULT_BRANCH = "master"
TMPDIR = os.environ.get('TMPDIR', os.environ.get('TMP'))
URL_TEMPLATE = 'https://github.com/{}/stash/archive/{}.zip'
TEMP_ZIPFILE = os.path.join(TMPDIR, 'StaSh.zip')
TEMP_PTI = os.path.join(TMPDIR, 'ptinstaller.py')
URL_PTI = 'https://raw.githubusercontent.com/ywangd/pythonista-tools-installer/master/ptinstaller.py'
BASE_DIR = os.path.expanduser('~')
DEFAULT_INSTALL_DIR = os.path.join(BASE_DIR, 'Documents/site-packages/stash')
DEFAULT_PTI_PATH = os.path.join(DEFAULT_INSTALL_DIR, "bin", "ptinstaller.py")
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0
UNWANTED_FILES = [
'getstash.py',
'run_tests.py',
'testing.py',
'dummyui.py',
'dummyconsole.py',
'bin/pcsm.py',
'bin/bh.py',
'bin/pythonista.py',
'bin/cls.py',
'stash.py',
'lib/librunner.py',
'system/shui.py',
'system/shterminal.py',
'system/dummyui.py',
]
class DownloadError(Exception):
"""
Exception indicating a problem with a download.
"""
pass
def download_stash(repo=DEFAULT_REPO, branch=DEFAULT_BRANCH, outpath=TEMP_ZIPFILE, verbose=False):
"""
Download the StaSh zipfile from github.
:param repo: user owning the repo to download from
:type repo: str
:param branch: branch to download
:type branch: str
:param outpath: path to save the downloaded zipfile to
:type outpath: str
:param verbose: if True, print additional information
:type verbose: bool
"""
url = URL_TEMPLATE.format(repo, branch)
if verbose:
print('Downloading {} ...'.format(url))
r = requests.get(url, stream=True)
file_size = r.headers.get('Content-Length')
if file_size is not None:
file_size = int(file_size)
with open(outpath, 'wb') as outs:
block_sz = 8192
for chunk in r.iter_content(block_sz):
outs.write(chunk)
def install_pti(url=URL_PTI, outpath=DEFAULT_PTI_PATH, verbose=False):
"""
Download and install the pythonista tools installer.
:param url: url to download from
:type url: str
:param outpath: path to save to
:type outpath: str
:param verbose: if True, print additional information
:type verbose: bool
"""
if verbose:
print("Downloading {} to {}".format(url, outpath))
r = requests.get(url)
with open(outpath, 'w') as outs:
outs.write(r.text)
def install_from_zip(path=TEMP_ZIPFILE, outpath=DEFAULT_INSTALL_DIR, launcher_path=None, verbose=False):
"""
Install StaSh from its zipfile.
:param path: path to zipfile
:type path: str
:param outpath: path to extract to
:type outpath: str
:param launcher_path: path to install launch_stash.py to
:type launcher_path: str
:param verbose: print additional information
:type verbose: bool
"""
unzip_into(path, outpath, verbose=verbose)
if launcher_path is not None:
# Move launch script to Documents for easy access
shutil.move(os.path.join(outpath, 'launch_stash.py'), launcher_path)
def unzip_into(path, outpath, verbose=False):
"""
Unzip zipfile at path into outpath.
:param path: path to zipfile
:type path: str
:param outpath: path to extract to
:type outpath: str
:param verbose: if True, print additional information
:type verbose: bool
"""
if not os.path.exists(outpath):
os.makedirs(outpath)
if verbose:
print('Unzipping into %s ...' % outpath)
with zipfile.ZipFile(path) as zipfp:
toplevel_directory = None
namelist = zipfp.namelist()
# find toplevel directory name
for name in namelist:
if os.path.dirname(os.path.normpath(name)) == "":
# is toplevel
toplevel_directory = name
break
for name in namelist:
data = zipfp.read(name)
name = name.split(toplevel_directory, 1)[-1] # strip the top-level directory
if name == '': # skip top-level directory
continue
fname = os.path.join(outpath, name)
if fname.endswith('/'): # A directory
if not os.path.exists(fname):
os.makedirs(fname)
else:
fp = open(fname, 'wb')
try:
fp.write(data)
finally:
fp.close()
def remove_unwanted_files(basepath, reraise=False):
"""
Remove unwanted files.
:param basepath: path of the StaSh installation
:type basepath: str
:param reraise: If True, reraise any exception occurring
:type reraise: bool
"""
for fname in UNWANTED_FILES:
try:
os.remove(os.path.join(basepath, fname))
except Exception:
    if reraise:
        raise
def pythonista_install(install_path, repo=DEFAULT_REPO, branch=DEFAULT_BRANCH, launcher_path=None, zippath=None, verbose=False):
"""
Download and install StaSh and other dependencies for pythonista.
:param install_path: directory to install into
:type install_path: str
:param repo: name of user owning the github repo to download/install from
:type repo: str
:param branch: branch to download/install
:type branch: str
:param launcher_path: path to install launcher to
:type launcher_path: str
:param zippath: if not None, it specifies a path to a StaSh zipfile, otherwise download it from repo:branch
:type zippath: str
:param verbose: if True, print additional information
:type verbose: bool
"""
if zippath is None:
zp = TEMP_ZIPFILE
# download StaSh
try:
download_stash(repo=repo, branch=branch, outpath=zp, verbose=verbose)
except:
raise DownloadError("Unable to download StaSh from {}:{}".format(repo, branch))
else:
if verbose:
print("Using '{}' as source.".format(zippath))
zp = zippath
try:
# install StaSh
install_from_zip(zp, install_path, launcher_path, verbose=verbose)
# install pythonista tools installer
# TODO: should this script really install it?
pti_path = os.path.join(install_path, "bin", "ptinstaller.py")
install_pti(outpath=pti_path)
finally:
# cleanup
if verbose:
print("Cleaning up...")
if os.path.exists(zp):
os.remove(zp)
remove_unwanted_files(install_path, reraise=False)
def setup_install(repo=DEFAULT_REPO, branch=DEFAULT_BRANCH, install_path=None, as_user=False, zippath=None, dryrun=False, verbose=False):
"""
Download and install StaSh using setup.py
:param repo: name of user owning the github repo to download/install from
:type repo: str
:param branch: branch to download/install
:type branch: str
:param install_path: path to install to (as --prefix)
:type install_path: str
:param as_user: install into user packages
:type as_user: bool
:param zippath: alternative path to zip to install from (default: download from repo:branch)
:type zippath: str
:param dryrun: if True, pass --dry-run to setup.py
:type dryrun: bool
:param verbose: if True, print additional information
:type verbose: bool
"""
if zippath is None:
zp = TEMP_ZIPFILE
# download StaSh
try:
download_stash(repo=repo, branch=branch, outpath=zp, verbose=verbose)
except:
raise DownloadError("Unable to download StaSh from {}:{}".format(repo, branch))
else:
zp = zippath
tp = os.path.join(TMPDIR, "getstash-{}".format(time.time()))
unzip_into(zp, tp, verbose=verbose)
# run setup.py
os.chdir(tp)
argv = ["setup.py", "install"]
if as_user:
argv.append("--user")
if install_path is not None:
argv += ["--prefix", install_path]
if dryrun:
argv.append("--dry-run")
sys.argv = argv
fp = os.path.abspath("setup.py")
ns = {
"__name__": "__main__",
"__file__": fp,
}
with open(fp, "rU") as fin:
content = fin.read()
code = compile(content, fp, "exec", dont_inherit=True)
exec(code, ns, ns)
def main(defs={}):
"""
The main function.
:param defs: namespace which may contain additional parameters
:type defs: dict
"""
# read additional arguments
# These arguments will not be defined when StaSh is normally installed,
# but both selfupdate and tests may specify different values
# I would like to use argparse here, but this must stay compatible with older StaSh versions
repo = defs.get("_owner", DEFAULT_REPO) # owner of repo
branch = defs.get("_br", DEFAULT_BRANCH) # target branch
is_update = '_IS_UPDATE' in defs # True if update
install_path = defs.get("_target", None) # target path
launcher_path = defs.get("_launcher_path", None) # target path for launch_stash.py
force_dist = defs.get("_force_dist", None) # force install method
zippath = defs.get("_zippath", None) # alternate path of zipfile to use
dryrun = defs.get("_dryrun", None) # do not do anything if True
asuser = defs.get("_asuser", None) # install as user if True
# find out which install to use
if force_dist is None:
if IN_PYTHONISTA:
dist = "pythonista"
else:
dist = "setup"
else:
dist = force_dist
if dist.lower() == "pythonista":
if install_path is None:
install_path = DEFAULT_INSTALL_DIR
if launcher_path is None:
launcher_path = os.path.join(BASE_DIR, "Documents", "launch_stash.py")
pythonista_install(install_path, repo=repo, branch=branch, launcher_path=launcher_path, zippath=zippath, verbose=True)
elif dist.lower() == "setup":
setup_install(repo, branch, install_path=install_path, zippath=zippath, dryrun=dryrun, as_user=asuser, verbose=True)
else:
raise ValueError("Invalid install type: {}".format(dist))
if not is_update:
# print additional instructions
print('Installation completed.')
print('Please restart Pythonista and run launch_stash.py under the home directory to start StaSh.')
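# Illustrative invocation, not part of the original script: StaSh's
# selfupdate command and the test suite inject values like these before
# executing getstash.py, which is why main() reads parameters from a dict
# instead of argparse. The values below are hypothetical.
#     main({"_owner": "ywangd", "_br": "dev", "_IS_UPDATE": True,
#           "_target": DEFAULT_INSTALL_DIR, "_dryrun": False})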
# if __name__ == "__main__":
# print("executing main()")
# main(locals())
main(locals()) # older StaSh versions do not pass __name__="__main__" to getstash.py, so we must run this on toplevel
|
from datetime import datetime
from typing import Dict
from homeassistant.components.device_tracker import SOURCE_TYPE_ROUTER
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
from .const import DEFAULT_DEVICE_NAME, DEVICE_ICONS, DOMAIN
from .router import FreeboxRouter
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up device tracker for Freebox component."""
router = hass.data[DOMAIN][entry.unique_id]
tracked = set()
@callback
def update_router():
"""Update the values of the router."""
add_entities(router, async_add_entities, tracked)
router.listeners.append(
async_dispatcher_connect(hass, router.signal_device_new, update_router)
)
update_router()
@callback
def add_entities(router, async_add_entities, tracked):
"""Add new tracker entities from the router."""
new_tracked = []
for mac, device in router.devices.items():
if mac in tracked:
continue
new_tracked.append(FreeboxDevice(router, device))
tracked.add(mac)
if new_tracked:
async_add_entities(new_tracked, True)
class FreeboxDevice(ScannerEntity):
"""Representation of a Freebox device."""
def __init__(self, router: FreeboxRouter, device: Dict[str, any]) -> None:
"""Initialize a Freebox device."""
self._router = router
self._name = device["primary_name"].strip() or DEFAULT_DEVICE_NAME
self._mac = device["l2ident"]["id"]
self._manufacturer = device["vendor_name"]
self._icon = icon_for_freebox_device(device)
self._active = False
self._attrs = {}
@callback
def async_update_state(self) -> None:
"""Update the Freebox device."""
device = self._router.devices[self._mac]
self._active = device["active"]
if device.get("attrs") is None:
# device
self._attrs = {
"last_time_reachable": datetime.fromtimestamp(
device["last_time_reachable"]
),
"last_time_activity": datetime.fromtimestamp(device["last_activity"]),
}
else:
# router
self._attrs = device["attrs"]
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._mac
@property
def name(self) -> str:
"""Return the name."""
return self._name
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
return self._active
@property
def source_type(self) -> str:
"""Return the source type."""
return SOURCE_TYPE_ROUTER
@property
def icon(self) -> str:
"""Return the icon."""
return self._icon
@property
def device_state_attributes(self) -> Dict[str, any]:
"""Return the attributes."""
return self._attrs
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
return {
"connections": {(CONNECTION_NETWORK_MAC, self._mac)},
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": self._manufacturer,
}
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@callback
def async_on_demand_update(self):
"""Update state."""
self.async_update_state()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register state update callback."""
self.async_update_state()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
self._router.signal_device_update,
self.async_on_demand_update,
)
)
def icon_for_freebox_device(device) -> str:
"""Return a device icon from its type."""
return DEVICE_ICONS.get(device["host_type"], "mdi:help-network")
|
import os.path as op
from mne.io import show_fiff
base_dir = op.join(op.dirname(__file__), 'data')
fname_evoked = op.join(base_dir, 'test-ave.fif')
fname_raw = op.join(base_dir, 'test_raw.fif')
fname_c_annot = op.join(base_dir, 'test_raw-annot.fif')
def test_show_fiff():
"""Test show_fiff."""
# this is not exhaustive, but hopefully bugs will be found in use
info = show_fiff(fname_evoked)
assert 'BAD' not in info
keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM',
'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE',
'FIFF_EPOCH', 'COORD_TRANS']
assert all(key in info for key in keys)
info = show_fiff(fname_raw, read_limit=1024)
assert 'BAD' not in info
info = show_fiff(fname_c_annot)
assert 'BAD' not in info
assert '>B' in info, info
|
from homeassistant.components.agent_dvr.const import DOMAIN, SERVER_URL
from homeassistant.const import CONF_HOST, CONF_PORT, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
async def init_integration(
hass: HomeAssistant,
aioclient_mock: AiohttpClientMocker,
skip_setup: bool = False,
) -> MockConfigEntry:
"""Set up the Agent DVR integration in Home Assistant."""
aioclient_mock.get(
"http://example.local:8090/command.cgi?cmd=getStatus",
text=load_fixture("agent_dvr/status.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://example.local:8090/command.cgi?cmd=getObjects",
text=load_fixture("agent_dvr/objects.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="c0715bba-c2d0-48ef-9e3e-bc81c9ea4447",
data={
CONF_HOST: "example.local",
CONF_PORT: 8090,
SERVER_URL: "http://example.local:8090/",
},
)
entry.add_to_hass(hass)
if not skip_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
|
import asyncio
import logging
from async_timeout import timeout
from homeassistant import config_entries
from homeassistant.helpers import config_entry_flow
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DISPATCH_CONTROLLER_DISCOVERED, IZONE, TIMEOUT_DISCOVERY
from .discovery import async_start_discovery_service, async_stop_discovery_service
_LOGGER = logging.getLogger(__name__)
async def _async_has_devices(hass):
controller_ready = asyncio.Event()
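    # Set the event as soon as discovery reports a controller, then wait for
    # it below, bounded by TIMEOUT_DISCOVERY.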
async_dispatcher_connect(
hass, DISPATCH_CONTROLLER_DISCOVERED, lambda x: controller_ready.set()
)
disco = await async_start_discovery_service(hass)
try:
async with timeout(TIMEOUT_DISCOVERY):
await controller_ready.wait()
except asyncio.TimeoutError:
pass
if not disco.pi_disco.controllers:
await async_stop_discovery_service(hass)
_LOGGER.debug("No controllers found")
return False
_LOGGER.debug("Controllers %s", disco.pi_disco.controllers)
return True
config_entry_flow.register_discovery_flow(
IZONE, "iZone Aircon", _async_has_devices, config_entries.CONN_CLASS_LOCAL_POLL
)
|
import argparse
import logging
import sys
from paasta_tools.kubernetes_tools import KubeClient
log = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
options = parser.parse_args()
return options
def main() -> None:
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
kube_client = KubeClient()
try:
kube_client.core.list_namespace()
log.info("API is ok")
sys.exit(0)
except Exception as exc:
log.error(f"Error connecting to API: {exc}")
sys.exit(1)
if __name__ == "__main__":
main()
|
import queue
import numpy as np
import pandas as pd
import pytest
import pytz
from qstrader.broker.portfolio.portfolio import Portfolio
from qstrader.broker.simulated_broker import SimulatedBroker
from qstrader.broker.fee_model.zero_fee_model import ZeroFeeModel
from qstrader import settings
class ExchangeMock(object):
def get_latest_asset_bid_ask(self, asset):
return (np.NaN, np.NaN)
def is_open_at_datetime(self, dt):
return True
class ExchangeMockException(object):
def get_latest_asset_bid_ask(self, asset):
raise ValueError("No price available!")
def is_open_at_datetime(self, dt):
return True
class ExchangeMockPrice(object):
def is_open_at_datetime(self, dt):
return True
class DataHandlerMock(object):
def get_asset_latest_bid_ask_price(self, dt, asset):
return (np.NaN, np.NaN)
def get_asset_latest_mid_price(self, dt, asset):
return np.NaN
class DataHandlerMockPrice(object):
def get_asset_latest_bid_ask_price(self, dt, asset):
return (53.45, 53.47)
def get_asset_latest_mid_price(self, dt, asset):
        return (53.45 + 53.47) / 2.0
class OrderMock(object):
def __init__(self, asset, quantity, order_id=None):
self.asset = asset
self.quantity = quantity
self.order_id = 1 if order_id is None else order_id
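        # copysign yields +1.0 for a buy (positive quantity) and -1.0 for a sell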
self.direction = np.copysign(1, self.quantity)
class AssetMock(object):
def __init__(self, name, symbol):
self.name = name
self.symbol = symbol
def test_initial_settings_for_default_simulated_broker():
"""
Tests that the SimulatedBroker settings are set
correctly for default settings.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
# Test a default SimulatedBroker
sb1 = SimulatedBroker(start_dt, exchange, data_handler)
assert sb1.start_dt == start_dt
assert sb1.current_dt == start_dt
assert sb1.exchange == exchange
assert sb1.account_id is None
assert sb1.base_currency == "USD"
assert sb1.initial_funds == 0.0
assert type(sb1.fee_model) == ZeroFeeModel
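    # Expected cash balances: every supported currency starts at zero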
tcb1 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
assert sb1.cash_balances == tcb1
assert sb1.portfolios == {}
assert sb1.open_orders == {}
# Test a SimulatedBroker with some parameters set
sb2 = SimulatedBroker(
start_dt, exchange, data_handler, account_id="ACCT1234",
base_currency="GBP", initial_funds=1e6,
fee_model=ZeroFeeModel()
)
assert sb2.start_dt == start_dt
assert sb2.current_dt == start_dt
assert sb2.exchange == exchange
assert sb2.account_id == "ACCT1234"
assert sb2.base_currency == "GBP"
assert sb2.initial_funds == 1e6
assert type(sb2.fee_model) == ZeroFeeModel
tcb2 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
tcb2["GBP"] = 1e6
assert sb2.cash_balances == tcb2
assert sb2.portfolios == {}
assert sb2.open_orders == {}
def test_bad_set_base_currency():
"""
Checks _set_base_currency raises ValueError
    if an attempt is made to set an unsupported
    currency as the base currency.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
with pytest.raises(ValueError):
SimulatedBroker(
start_dt, exchange, data_handler, base_currency="XYZ"
)
def test_good_set_base_currency():
"""
Checks _set_base_currency sets the currency
correctly if it is supported by QSTrader.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(
start_dt, exchange, data_handler, base_currency="EUR"
)
assert sb.base_currency == "EUR"
def test_bad_set_initial_funds():
"""
Checks _set_initial_funds raises ValueError
if initial funds amount is negative.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
with pytest.raises(ValueError):
SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=-56.34
)
def test_good_set_initial_funds():
"""
Checks _set_initial_funds sets the initial funds
correctly if it is a positive floating point value.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler, initial_funds=1e4)
assert sb._set_initial_funds(1e4) == 1e4
def test_all_cases_of_set_broker_commission():
"""
Tests that _set_broker_commission correctly sets the
appropriate broker commission model depending upon
user choice.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
# Broker commission is None
sb1 = SimulatedBroker(start_dt, exchange, data_handler)
assert sb1.fee_model.__class__.__name__ == "ZeroFeeModel"
# Broker commission is specified as a subclass
# of FeeModel abstract base class
bc2 = ZeroFeeModel()
sb2 = SimulatedBroker(
start_dt, exchange, data_handler, fee_model=bc2
)
assert sb2.fee_model.__class__.__name__ == "ZeroFeeModel"
# FeeModel is mis-specified and thus
# raises a TypeError
with pytest.raises(TypeError):
SimulatedBroker(
start_dt, exchange, data_handler, fee_model="bad_fee_model"
)
def test_set_cash_balances():
"""
Checks _set_cash_balances for zero and non-zero
initial_funds.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
# Zero initial funds
sb1 = SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=0.0
)
tcb1 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
assert sb1._set_cash_balances() == tcb1
# Non-zero initial funds
sb2 = SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=12345.0
)
tcb2 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
tcb2["USD"] = 12345.0
assert sb2._set_cash_balances() == tcb2
def test_set_initial_portfolios():
"""
Check _set_initial_portfolios method for return
of an empty dictionary.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
assert sb._set_initial_portfolios() == {}
def test_set_initial_open_orders():
"""
Check _set_initial_open_orders method for return
of an empty dictionary.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
assert sb._set_initial_open_orders() == {}
def test_subscribe_funds_to_account():
"""
Tests subscribe_funds_to_account method for:
* Raising ValueError with negative amount
* Correctly setting cash_balances for a positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.subscribe_funds_to_account(-4306.23)
# Correctly setting cash_balances for a positive amount
sb.subscribe_funds_to_account(165303.23)
assert sb.cash_balances[sb.base_currency] == 165303.23
def test_withdraw_funds_from_account():
"""
Tests withdraw_funds_from_account method for:
* Raising ValueError with negative amount
* Raising ValueError for lack of cash
* Correctly setting cash_balances for positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler, initial_funds=1e6)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.withdraw_funds_from_account(-4306.23)
# Raising ValueError for lack of cash
with pytest.raises(ValueError):
sb.withdraw_funds_from_account(2e6)
# Correctly setting cash_balances for a positive amount
sb.withdraw_funds_from_account(3e5)
assert sb.cash_balances[sb.base_currency] == 7e5
def test_get_account_cash_balance():
"""
Tests get_account_cash_balance method for:
* If currency is None, return the cash_balances
* If the currency code isn't in the cash_balances
dictionary, then raise ValueError
* Otherwise, return the appropriate cash balance
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(
start_dt, exchange, data_handler, initial_funds=1000.0
)
# If currency is None, return the cash balances
sbcb1 = sb.get_account_cash_balance()
tcb1 = dict(
zip(
settings.SUPPORTED['CURRENCIES'],
[0.0] * len(settings.SUPPORTED['CURRENCIES'])
)
)
tcb1["USD"] = 1000.0
assert sbcb1 == tcb1
# If the currency code isn't in the cash_balances
# dictionary, then raise ValueError
with pytest.raises(ValueError):
sb.get_account_cash_balance(currency="XYZ")
# Otherwise, return appropriate cash balance
assert sb.get_account_cash_balance(currency="USD") == 1000.0
assert sb.get_account_cash_balance(currency="EUR") == 0.0
def test_get_account_total_market_value():
"""
Tests get_account_total_market_value method for:
* The correct market values after cash is subscribed.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Subscribe all necessary funds and create portfolios
sb.subscribe_funds_to_account(300000.0)
sb.create_portfolio(portfolio_id="1", name="My Portfolio #1")
sb.create_portfolio(portfolio_id="2", name="My Portfolio #1")
sb.create_portfolio(portfolio_id="3", name="My Portfolio #1")
sb.subscribe_funds_to_portfolio("1", 100000.0)
sb.subscribe_funds_to_portfolio("2", 100000.0)
sb.subscribe_funds_to_portfolio("3", 100000.0)
# Check that the market value is correct
res_equity = sb.get_account_total_equity()
test_equity = {
"1": 100000.0,
"2": 100000.0,
"3": 100000.0,
"master": 300000.0
}
assert res_equity == test_equity
def test_create_portfolio():
"""
Tests create_portfolio method for:
* If portfolio_id already in the dictionary keys,
raise ValueError
    * If it isn't, check that the portfolio and open
      orders dictionaries were created correctly.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# If portfolio_id isn't in the dictionary, then check it
# was created correctly, along with the orders dictionary
sb.create_portfolio(portfolio_id=1234, name="My Portfolio")
assert "1234" in sb.portfolios
assert isinstance(sb.portfolios["1234"], Portfolio)
assert "1234" in sb.open_orders
assert isinstance(sb.open_orders["1234"], queue.Queue)
# If portfolio is already in the dictionary
# then raise ValueError
with pytest.raises(ValueError):
sb.create_portfolio(
portfolio_id=1234, name="My Portfolio"
)
def test_list_all_portfolio():
"""
Tests list_all_portfolios method for:
* If empty portfolio dictionary, return empty list
* If non-empty, return sorted list via the portfolio IDs
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# If empty portfolio dictionary, return empty list
assert sb.list_all_portfolios() == []
# If non-empty, return sorted list via the portfolio IDs
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.create_portfolio(portfolio_id="z154", name="My Portfolio #2")
sb.create_portfolio(portfolio_id="abcd", name="My Portfolio #3")
res_ports = sorted([
p.portfolio_id
for p in sb.list_all_portfolios()
])
test_ports = ["1234", "abcd", "z154"]
assert res_ports == test_ports
def test_subscribe_funds_to_portfolio():
"""
Tests subscribe_funds_to_portfolio method for:
* Raising ValueError with negative amount
    * Raising KeyError if portfolio does not exist
* Correctly setting cash_balances for a positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.subscribe_funds_to_portfolio("1234", -4306.23)
# Raising KeyError if portfolio doesn't exist
with pytest.raises(KeyError):
sb.subscribe_funds_to_portfolio("1234", 5432.12)
# Add in cash balance to the account
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(165303.23)
# Raising ValueError if not enough cash
with pytest.raises(ValueError):
sb.subscribe_funds_to_portfolio("1234", 200000.00)
# If everything else worked, check balances are correct
sb.subscribe_funds_to_portfolio("1234", 100000.00)
assert sb.cash_balances[sb.base_currency] == 65303.23000000001
assert sb.portfolios["1234"].cash == 100000.00
def test_withdraw_funds_from_portfolio():
"""
Tests withdraw_funds_from_portfolio method for:
* Raising ValueError with negative amount
    * Raising KeyError if portfolio does not exist
* Raising ValueError for a lack of cash
* Correctly setting cash_balances for a positive amount
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError with negative amount
with pytest.raises(ValueError):
sb.withdraw_funds_from_portfolio("1234", -4306.23)
# Raising KeyError if portfolio doesn't exist
with pytest.raises(KeyError):
sb.withdraw_funds_from_portfolio("1234", 5432.12)
# Add in cash balance to the account
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(165303.23)
sb.subscribe_funds_to_portfolio("1234", 100000.00)
# Raising ValueError if not enough cash
with pytest.raises(ValueError):
sb.withdraw_funds_from_portfolio("1234", 200000.00)
# If everything else worked, check balances are correct
sb.withdraw_funds_from_portfolio("1234", 50000.00)
assert sb.cash_balances[sb.base_currency] == 115303.23000000001
assert sb.portfolios["1234"].cash == 50000.00
def test_get_portfolio_cash_balance():
"""
Tests get_portfolio_cash_balance method for:
* Raising ValueError if portfolio_id not in keys
* Correctly obtaining the value after cash transfers
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising ValueError if portfolio_id not in keys
with pytest.raises(ValueError):
sb.get_portfolio_cash_balance("5678")
# Create fund transfers and portfolio
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(175000.0)
sb.subscribe_funds_to_portfolio("1234", 100000.00)
# Check correct values obtained after cash transfers
assert sb.get_portfolio_cash_balance("1234") == 100000.0
def test_get_portfolio_total_market_value():
"""
Tests get_portfolio_total_market_value method for:
    * Raising KeyError if portfolio_id not in keys
* Correctly obtaining the market value after cash transfers
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
# Raising KeyError if portfolio_id not in keys
with pytest.raises(KeyError):
sb.get_portfolio_total_market_value("5678")
# Create fund transfers and portfolio
sb.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sb.subscribe_funds_to_account(175000.0)
sb.subscribe_funds_to_portfolio("1234", 100000.00)
# Check correct values obtained after cash transfers
assert sb.get_portfolio_total_equity("1234") == 100000.0
def test_submit_order():
"""
    Tests the submit_order and _execute_order methods for:
    * Raises KeyError if the portfolio_id does not exist
* Raises ValueError if bid/ask is (np.NaN, np.NaN)
* Checks that bid/ask are correctly set dependent
upon order direction
* Checks that portfolio values are correct after
carrying out a transaction
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
# Raising KeyError if portfolio_id not in keys
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
asset = 'EQ:RDSB'
quantity = 100
order = OrderMock(asset, quantity)
with pytest.raises(KeyError):
sb.submit_order("1234", order)
# Raises ValueError if bid/ask is (np.NaN, np.NaN)
exchange_exception = ExchangeMockException()
sbnp = SimulatedBroker(start_dt, exchange_exception, data_handler)
sbnp.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
quantity = 100
order = OrderMock(asset, quantity)
with pytest.raises(ValueError):
sbnp._execute_order(start_dt, "1234", order)
# Checks that bid/ask are correctly set dependent on
# order direction
# Positive direction
exchange_price = ExchangeMockPrice()
data_handler_price = DataHandlerMockPrice()
sbwp = SimulatedBroker(start_dt, exchange_price, data_handler_price)
sbwp.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sbwp.subscribe_funds_to_account(175000.0)
sbwp.subscribe_funds_to_portfolio("1234", 100000.00)
quantity = 1000
order = OrderMock(asset, quantity)
sbwp.submit_order("1234", order)
sbwp.update(start_dt)
port = sbwp.portfolios["1234"]
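    # Buying 1000 shares fills at the ask (53.47), so 53470.0 of cash is spent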
assert port.cash == 46530.0
assert port.total_market_value == 53470.0
assert port.total_equity == 100000.0
assert port.pos_handler.positions[asset].unrealised_pnl == 0.0
assert port.pos_handler.positions[asset].market_value == 53470.0
assert port.pos_handler.positions[asset].net_quantity == 1000
# Negative direction
exchange_price = ExchangeMockPrice()
sbwp = SimulatedBroker(start_dt, exchange_price, data_handler_price)
sbwp.create_portfolio(portfolio_id=1234, name="My Portfolio #1")
sbwp.subscribe_funds_to_account(175000.0)
sbwp.subscribe_funds_to_portfolio("1234", 100000.00)
quantity = -1000
order = OrderMock(asset, quantity)
sbwp.submit_order("1234", order)
sbwp.update(start_dt)
port = sbwp.portfolios["1234"]
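    # Selling 1000 shares fills at the bid (53.45), so 53450.0 of cash is received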
assert port.cash == 153450.0
assert port.total_market_value == -53450.0
assert port.total_equity == 100000.0
assert port.pos_handler.positions[asset].unrealised_pnl == 0.0
assert port.pos_handler.positions[asset].market_value == -53450.0
assert port.pos_handler.positions[asset].net_quantity == -1000
def test_update_sets_correct_time():
"""
Tests that the update method sets the current
time correctly.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
new_dt = pd.Timestamp('2017-10-07 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
sb.update(new_dt)
assert sb.current_dt == new_dt
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from resqueweb import ResqueWebCollector
##########################################################################
class TestResqueWebCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ResqueWebCollector', {
'interval': 10
})
self.collector = ResqueWebCollector(config, None)
def test_import(self):
self.assertTrue(ResqueWebCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats.txt')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'pending.current': 2,
'processed.total': 11686516,
'failed.total': 38667,
'workers.current': 9,
'working.current': 2,
'queue.low.current': 4,
'queue.mail.current': 3,
'queue.realtime.current': 9,
'queue.normal.current': 1,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats_blank.txt')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.helpers import config_validation as cv
from .const import (
ATTR_MANUFACTURER,
CONF_ARP_PING,
CONF_DETECTION_TIME,
CONF_FORCE_DHCP,
DEFAULT_API_PORT,
DEFAULT_DETECTION_TIME,
DEFAULT_NAME,
DOMAIN,
)
from .hub import MikrotikHub
MIKROTIK_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_API_PORT): cv.port,
vol.Optional(CONF_VERIFY_SSL, default=False): cv.boolean,
vol.Optional(CONF_ARP_PING, default=False): cv.boolean,
vol.Optional(CONF_FORCE_DHCP, default=False): cv.boolean,
vol.Optional(
CONF_DETECTION_TIME, default=DEFAULT_DETECTION_TIME
): cv.time_period,
}
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [MIKROTIK_SCHEMA])}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass, config):
"""Import the Mikrotik component from config."""
if DOMAIN in config:
for entry in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=entry
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the Mikrotik component."""
hub = MikrotikHub(hass, config_entry)
if not await hub.async_setup():
return False
hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = hub
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(DOMAIN, hub.serial_num)},
manufacturer=ATTR_MANUFACTURER,
model=hub.model,
name=hub.hostname,
sw_version=hub.firmware,
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await hass.config_entries.async_forward_entry_unload(config_entry, "device_tracker")
hass.data[DOMAIN].pop(config_entry.entry_id)
return True
|
from pygal._compat import is_str
from pygal.graph.graph import Graph
from pygal.util import alter, compute_scale, cut, decorate
from pygal.view import PolarThetaLogView, PolarThetaView
class Gauge(Graph):
"""Gauge graph class"""
needle_width = 1 / 20
def _set_view(self):
"""Assign a view to current graph"""
if self.logarithmic:
view_class = PolarThetaLogView
else:
view_class = PolarThetaView
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
)
def needle(self, serie):
"""Draw a needle for each value"""
serie_node = self.svg.serie(serie)
for i, theta in enumerate(serie.values):
if theta is None:
continue
def point(x, y):
return '%f %f' % self.view((x, y))
val = self._format(serie, i)
metadata = serie.metadata.get(i)
gauges = decorate(
self.svg, self.svg.node(serie_node['plot'], class_="dots"),
metadata
)
tolerance = 1.15
if theta < self._min:
theta = self._min * tolerance
if theta > self._max:
theta = self._max * tolerance
w = (self._box._tmax - self._box._tmin + self.view.aperture) / 4
if self.logarithmic:
w = min(w, self._min - self._min * 10**-10)
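            # Draw the needle as a closed path: tip at radius .85, two base
            # points near the centre offset by +/- w, joined by a small arc.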
alter(
self.svg.node(
gauges,
'path',
d='M %s L %s A %s 1 0 1 %s Z' % (
point(.85, theta),
point(self.needle_width, theta - w),
'%f %f' % (self.needle_width, self.needle_width),
point(self.needle_width, theta + w),
),
class_='line reactive tooltip-trigger'
), metadata
)
x, y = self.view((.75, theta))
self._tooltip_data(gauges, val, x, y, xlabel=self._get_x_label(i))
self._static_value(serie_node, val, x, y, metadata)
def _y_axis(self, draw_axes=True):
"""Override y axis to plot a polar axis"""
axis = self.svg.node(self.nodes['plot'], class_="axis x gauge")
for i, (label, theta) in enumerate(self._y_labels):
guides = self.svg.node(axis, class_='guides')
self.svg.line(
guides, [self.view((.95, theta)),
self.view((1, theta))],
close=True,
class_='line'
)
self.svg.line(
guides, [self.view((0, theta)),
self.view((.95, theta))],
close=True,
class_='guide line %s' %
('major' if i in (0, len(self._y_labels) - 1) else '')
)
x, y = self.view((.9, theta))
self.svg.node(guides, 'text', x=x, y=y).text = label
self.svg.node(
guides,
'title',
).text = self._y_format(theta)
def _x_axis(self, draw_axes=True):
"""Override x axis to put a center circle in center"""
axis = self.svg.node(self.nodes['plot'], class_="axis y gauge")
x, y = self.view((0, 0))
self.svg.node(axis, 'circle', cx=x, cy=y, r=4)
def _compute(self):
"""Compute y min and max and y scale and set labels"""
self.min_ = self._min or 0
self.max_ = self._max or 0
if self.max_ - self.min_ == 0:
self.min_ -= 1
self.max_ += 1
self._box.set_polar_box(0, 1, self.min_, self.max_)
def _compute_x_labels(self):
pass
def _compute_y_labels(self):
y_pos = compute_scale(
self.min_, self.max_, self.logarithmic, self.order_min,
self.min_scale, self.max_scale
)
if self.y_labels:
self._y_labels = []
for i, y_label in enumerate(self.y_labels):
if isinstance(y_label, dict):
pos = self._adapt(y_label.get('value'))
title = y_label.get('label', self._y_format(pos))
elif is_str(y_label):
pos = self._adapt(y_pos[i])
title = y_label
else:
pos = self._adapt(y_label)
title = self._y_format(pos)
self._y_labels.append((title, pos))
self.min_ = min(self.min_, min(cut(self._y_labels, 1)))
self.max_ = max(self.max_, max(cut(self._y_labels, 1)))
self._box.set_polar_box(0, 1, self.min_, self.max_)
else:
self._y_labels = list(zip(map(self._y_format, y_pos), y_pos))
def _plot(self):
"""Plot all needles"""
for serie in self.series:
self.needle(serie)
|
from scrapy.utils.reqser import request_to_dict, request_from_dict
from . import picklecompat
class Base(object):
"""Per-spider base queue class"""
def __init__(self, server, spider, key, serializer=None):
"""Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
        key : str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""
if serializer is None:
# Backward compatibility.
# TODO: deprecate pickle.
serializer = picklecompat
if not hasattr(serializer, 'loads'):
raise TypeError("serializer does not implement 'loads' function: %r"
% serializer)
if not hasattr(serializer, 'dumps'):
raise TypeError("serializer '%s' does not implement 'dumps' function: %r"
% serializer)
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = serializer
def _encode_request(self, request):
"""Encode a request object"""
obj = request_to_dict(request, self.spider)
return self.serializer.dumps(obj)
def _decode_request(self, encoded_request):
"""Decode an request previously encoded"""
obj = self.serializer.loads(encoded_request)
return request_from_dict(obj, self.spider)
def __len__(self):
"""Return the length of the queue"""
raise NotImplementedError
def push(self, request):
"""Push a request"""
raise NotImplementedError
def pop(self, timeout=0):
"""Pop a request"""
raise NotImplementedError
def clear(self):
"""Clear queue/stack"""
self.server.delete(self.key)
class FifoQueue(Base):
"""Per-spider FIFO queue"""
def __len__(self):
"""Return the length of the queue"""
return self.server.llen(self.key)
def push(self, request):
"""Push a request"""
self.server.lpush(self.key, self._encode_request(request))
def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = self.server.brpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = self.server.rpop(self.key)
if data:
return self._decode_request(data)
class PriorityQueue(Base):
"""Per-spider priority queue abstraction using redis' sorted set"""
def __len__(self):
"""Return the length of the queue"""
return self.server.zcard(self.key)
def push(self, request):
"""Push a request"""
data = self._encode_request(request)
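        # Redis sorted sets return the lowest score first, so the priority is
        # negated to make higher-priority requests pop first.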
score = -request.priority
        # We don't use the zadd method as the order of arguments changes
        # depending on whether the class is Redis or StrictRedis, and the
        # option of using kwargs only accepts strings, not bytes.
self.server.execute_command('ZADD', self.key, score, data)
def pop(self, timeout=0):
"""
        Pop a request.

        The timeout argument is not supported by this queue class.
"""
# use atomic range/remove using multi/exec
pipe = self.server.pipeline()
pipe.multi()
pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
results, count = pipe.execute()
if results:
return self._decode_request(results[0])
class LifoQueue(Base):
"""Per-spider LIFO queue."""
def __len__(self):
"""Return the length of the stack"""
return self.server.llen(self.key)
def push(self, request):
"""Push a request"""
self.server.lpush(self.key, self._encode_request(request))
def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = self.server.blpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = self.server.lpop(self.key)
if data:
return self._decode_request(data)
# TODO: Deprecate the use of these names.
SpiderQueue = FifoQueue
SpiderStack = LifoQueue
SpiderPriorityQueue = PriorityQueue
|
import asyncio
import voluptuous as vol
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.config import async_log_exception, config_without_domain
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform
from homeassistant.helpers.condition import async_validate_condition_config
from homeassistant.helpers.script import async_validate_actions_config
from homeassistant.helpers.trigger import async_validate_trigger_config
from homeassistant.loader import IntegrationNotFound
from . import CONF_ACTION, CONF_CONDITION, CONF_TRIGGER, DOMAIN, PLATFORM_SCHEMA
# mypy: allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs, no-warn-return-any
async def async_validate_config_item(hass, config, full_config=None):
"""Validate config item."""
config = PLATFORM_SCHEMA(config)
config[CONF_TRIGGER] = await async_validate_trigger_config(
hass, config[CONF_TRIGGER]
)
if CONF_CONDITION in config:
config[CONF_CONDITION] = await asyncio.gather(
*[
async_validate_condition_config(hass, cond)
for cond in config[CONF_CONDITION]
]
)
config[CONF_ACTION] = await async_validate_actions_config(hass, config[CONF_ACTION])
return config
async def _try_async_validate_config_item(hass, config, full_config=None):
"""Validate config item."""
try:
config = await async_validate_config_item(hass, config, full_config)
except (
vol.Invalid,
HomeAssistantError,
IntegrationNotFound,
InvalidDeviceAutomationConfig,
) as ex:
async_log_exception(ex, DOMAIN, full_config or config, hass)
return None
return config
async def async_validate_config(hass, config):
"""Validate config."""
automations = list(
filter(
lambda x: x is not None,
await asyncio.gather(
*(
_try_async_validate_config_item(hass, p_config, config)
for _, p_config in config_per_platform(config, DOMAIN)
)
),
)
)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
config = config_without_domain(config, DOMAIN)
config[DOMAIN] = automations
return config
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from haproxy import HAProxyCollector
##########################################################################
class TestHAProxyCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('HAProxyCollector', {
'interval': 10,
})
self.collector = HAProxyCollector(config, None)
def test_import(self):
self.assertTrue(HAProxyCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
self.collector.config['ignore_servers'] = False
patch_urlopen = patch('urllib2.urlopen',
Mock(return_value=self.getFixture('stats.csv')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = self.getPickledResults('real_data.pkl')
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_unix_socket_code_path(self, publish_mock):
self.collector.config['method'] = 'unix'
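        # MockSocket's methods deliberately omit a bound `self` parameter: the
        # test method's `self` is captured via closure, so fixture helpers such
        # as self.getFixture remain available inside the mock.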
class MockSocket():
def __init__(*args, **kwargs):
self.connected = False
self.output_data = ''
def connect(*args, **kwargs):
self.connected = True
def send(obj, string, *args, **kwargs):
if not self.connected:
raise Exception('MockSocket: Endpoint not connected.')
if string == 'show stat\n':
self.output_data = self.getFixture('stats.csv').getvalue()
def recv(obj, bufsize, *args, **kwargs):
output_buffer = self.output_data[:bufsize]
self.output_data = self.output_data[bufsize:]
return output_buffer
patch_socket = patch('socket.socket', Mock(return_value=MockSocket()))
patch_socket.start()
self.collector.collect()
patch_socket.stop()
metrics = self.getPickledResults('real_data.pkl')
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_and_ignore_servers(self, publish_mock):
self.collector.config['ignore_servers'] = True
patch_urlopen = patch('urllib2.urlopen',
Mock(return_value=self.getFixture('stats.csv')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = self.getPickledResults('real_data_ignore_servers.pkl')
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import numpy as np
import pytest
from mock import create_autospec, sentinel, call
from pymongo.collection import Collection
from pymongo.results import UpdateResult
from pytest import raises
from arctic.exceptions import DataIntegrityException
from arctic.store._ndarray_store import NdarrayStore, _promote_struct_dtypes
def test_dtype_parsing():
store = NdarrayStore()
dtypes = []
dtypes.append(np.dtype(np.object_))
dtypes.append(np.dtype(np.float128))
dtypes.append(np.dtype('int64'))
dtypes.append(np.dtype([('A', 'int64')]))
dtypes.append(np.dtype([('A', 'int64'), ('B', '<f8')]))
dtypes.append(np.dtype([('A', 'int64'), ('B', '<f8', (2,))]))
for d in dtypes:
assert d == store._dtype(str(d), None)
def test_promote_dtype_handles_string_increase():
dtype1 = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
dtype2 = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a20')])
expected = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a20')])
actual = _promote_struct_dtypes(dtype1, dtype2)
assert expected == actual
def test_promote_dtype_handles_string_decrease():
dtype1 = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a20')])
dtype2 = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
expected = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a20')])
actual = _promote_struct_dtypes(dtype1, dtype2)
assert expected == actual
def test_promote_dtype_handles_new_column():
dtype1 = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
dtype2 = np.dtype([('A', 'i4'), ('B', 'f4')])
expected = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
actual = _promote_struct_dtypes(dtype1, dtype2)
assert expected == actual
def test_promote_dtype_handles_rearrangement_of_columns_favouring_dtype1():
dtype1 = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
dtype2 = np.dtype([('A', 'i4'), ('C', 'a10'), ('B', 'f4')])
expected = np.dtype([('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
actual = _promote_struct_dtypes(dtype1, dtype2)
assert expected == actual
def test_promote_dtype_throws_if_column_is_removed():
dtype1 = np.dtype([('A', 'i4'), ('B', 'f4')])
dtype2 = np.dtype([('A', 'i4'), ('C', 'a10'), ('B', 'f4')])
with raises(Exception):
_promote_struct_dtypes(dtype1, dtype2)
def test_concat_and_rewrite_checks_chunk_count():
self = create_autospec(NdarrayStore)
collection = create_autospec(Collection)
version = {}
previous_version = {'_id': sentinel.id,
'base_version_id': sentinel.base_version_id,
'version': sentinel.version,
'segment_count' : 3,
'append_count' : 1,
'up_to': sentinel.up_to}
symbol = sentinel.symbol
item = sentinel.item
collection.find.return_value = [{'compressed': True, 'segment': 1},
{'compressed': False, 'segment': 2}]
with pytest.raises(DataIntegrityException) as e:
NdarrayStore._concat_and_rewrite(self, collection, version, symbol, item, previous_version)
assert str(e.value) == 'Symbol: sentinel.symbol:sentinel.version expected 1 segments but found 0'
def test_concat_and_rewrite_checks_written():
self = create_autospec(NdarrayStore)
collection = create_autospec(Collection)
version = {'_id': sentinel.version_id,
'segment_count': 1}
previous_version = {'_id': sentinel.id,
'up_to': sentinel.up_to,
'base_version_id': sentinel.base_version_id,
'version': sentinel.version,
'segment_count' : 5,
'append_count' : 3}
symbol = sentinel.symbol
item = []
collection.find.return_value = [{'_id': sentinel.id, 'segment': 47, 'compressed': True, 'sha': 'abc0'},
{'_id': sentinel.id_2, 'segment': 48, 'compressed': True, 'sha': 'abc1'},
# 3 appended items
{'_id': sentinel.id_3, 'segment': 49, 'compressed': False, 'sha': 'abc2'},
{'_id': sentinel.id_4, 'segment': 50, 'compressed': False, 'sha': 'abc3'},
{'_id': sentinel.id_5, 'segment': 51, 'compressed': False, 'sha': 'abc4'}]
collection.update_many.return_value = create_autospec(UpdateResult, matched_count=1)
NdarrayStore._concat_and_rewrite(self, collection, version, symbol, item, previous_version)
assert self.check_written.call_count == 1
def test_concat_and_rewrite_checks_different_id():
self = create_autospec(NdarrayStore)
collection = create_autospec(Collection)
version = {'_id': sentinel.version_id,
'segment_count': 1}
previous_version = {'_id': sentinel.id,
'up_to': sentinel.up_to,
'base_version_id': sentinel.base_version_id,
'version': sentinel.version,
'segment_count' : 5,
'append_count' : 3}
symbol = sentinel.symbol
item = []
collection.find.side_effect = [
[{'_id': sentinel.id, 'segment' : 47, 'compressed': True},
{'_id': sentinel.id_3, 'segment': 48, 'compressed': True},
{'_id': sentinel.id_4, 'segment': 49, 'compressed': False},
{'_id': sentinel.id_5, 'segment': 50, 'compressed': False},
{'_id': sentinel.id_6, 'segment': 51, 'compressed': False}], # 3 appended items
[{'_id': sentinel.id_2}] # the returned id is different after the update_many
]
expected_verify_find_spec = {'symbol': sentinel.symbol, 'segment': {'$lte': 47}, 'parent': sentinel.version_id}
collection.update_many.return_value = create_autospec(UpdateResult, matched_count=0)
with pytest.raises(DataIntegrityException) as e:
NdarrayStore._concat_and_rewrite(self, collection, version, symbol, item, previous_version)
assert collection.find.call_args_list[1] == call(expected_verify_find_spec)
assert str(e.value) == 'Symbol: sentinel.symbol:sentinel.version update_many updated 0 segments instead of 1'
def test_concat_and_rewrite_checks_fewer_updated():
self = create_autospec(NdarrayStore)
collection = create_autospec(Collection)
version = {'_id': sentinel.version_id,
'segment_count': 1}
previous_version = {'_id': sentinel.id,
'up_to': sentinel.up_to,
'base_version_id': sentinel.base_version_id,
'version': sentinel.version,
'segment_count': 5,
'append_count': 3}
symbol = sentinel.symbol
item = []
collection.find.side_effect = [
[{'_id': sentinel.id_1, 'segment': 47, 'compressed': True},
{'_id': sentinel.id_2, 'segment': 48, 'compressed': True},
{'_id': sentinel.id_3, 'segment': 49, 'compressed': True},
{'_id': sentinel.id_4, 'segment': 50, 'compressed': False},
{'_id': sentinel.id_5, 'segment': 51, 'compressed': False},
{'_id': sentinel.id_6, 'segment': 52, 'compressed': False}], # 3 appended items
[{'_id': sentinel.id_1}] # the returned id is different after the update_many
]
expected_verify_find_spec = {'symbol': sentinel.symbol, 'segment': {'$lte': 48}, 'parent': sentinel.version_id}
collection.update_many.return_value = create_autospec(UpdateResult, matched_count=1)
with pytest.raises(DataIntegrityException) as e:
NdarrayStore._concat_and_rewrite(self, collection, version, symbol, item, previous_version)
assert collection.find.call_args_list[1] == call(expected_verify_find_spec)
assert str(e.value) == 'Symbol: sentinel.symbol:sentinel.version update_many updated 1 segments instead of 2'
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from perfkitbenchmarker import events
from perfkitbenchmarker.traces import base_collector
flags.DEFINE_boolean(
'tcpdump', False, 'Run tcpdump on each VM to collect network packets in '
'each benchmark run.')
flags.DEFINE_list('tcpdump_ignore_ports', [22],
'Ports to ignore when running tcpdump')
flags.DEFINE_list(
'tcpdump_include_ports', [], 'Ports to include when running tcpdump. By '
'default collects all ports except those in --tcpdump_ignore_ports')
flags.DEFINE_integer('tcpdump_snaplen', 96,
'Tcpdump snaplen, see man tcpdump "-s"')
flags.DEFINE_integer(
'tcpdump_packet_count', None, 'Number of packets to collect. Default '
'is to collect all packets in the run phase')
FLAGS = flags.FLAGS
def _PortFilter(ports):
"""Returns the port filter suitable for tcpdump.
Example: _PortFilter([22, 53]) => ['port', '(22 or 53)']
Args:
ports: List of ports to filter on.
Returns:
Two element list to append to tcpdump command.
"""
return ['port', r'\({}\)'.format(' or '.join([str(port) for port in ports]))]
class _TcpdumpCollector(base_collector.BaseCollector):
"""tcpdump collector.
Installs tcpdump and runs it on the VMs.
"""
def __init__(self,
ignore_ports=None,
include_ports=None,
snaplen=None,
packet_count=None):
super(_TcpdumpCollector, self).__init__(None, None)
self.snaplen = snaplen
self.packet_count = packet_count
if include_ports:
self.filter = _PortFilter(include_ports)
elif ignore_ports:
self.filter = ['not'] + _PortFilter(ignore_ports)
else:
self.filter = []
def _CollectorName(self):
"""See base class."""
return 'tcpdump'
def _KillCommand(self, pid):
"""See base class.
Args:
pid: The pid of the process to kill
Different from base class:
    1. Needs to run as sudo as tcpdump is launched as root
2. Sends a SIGINT signal so that tcpdump can flush its cache
3. Sleep for 3 seconds to allow the flush to happen
Returns:
      String command to run to kill off tcpdump.
"""
return 'sudo kill -s INT {}; sleep 3'.format(pid)
def _InstallCollector(self, vm):
"""See base class."""
vm.InstallPackages('tcpdump')
def _CollectorRunCommand(self, vm, collector_file):
"""See base class."""
cmd = ['sudo', 'tcpdump', '-n', '-w', collector_file]
if self.snaplen:
cmd.extend(['-s', str(self.snaplen)])
if self.packet_count:
cmd.extend(['-c', str(self.packet_count)])
cmd.extend(self.filter)
# ignore stdout, stderr, put in background and echo the pid
cmd.extend(['>', '/dev/null', '2>&1', '&', 'echo $!'])
return ' '.join(cmd)
def _CreateCollector(parsed_flags):
"""Creates a _TcpdumpCollector from flags."""
return _TcpdumpCollector(
ignore_ports=parsed_flags.tcpdump_ignore_ports,
include_ports=parsed_flags.tcpdump_include_ports,
snaplen=parsed_flags.tcpdump_snaplen,
packet_count=parsed_flags.tcpdump_packet_count)
def Register(parsed_flags):
"""Registers the tcpdump collector if FLAGS.tcpdump is set."""
if not parsed_flags.tcpdump:
return
collector = _CreateCollector(parsed_flags)
events.before_phase.connect(collector.Start, events.RUN_PHASE, weak=False)
events.after_phase.connect(collector.Stop, events.RUN_PHASE, weak=False)
|
from collections import OrderedDict
import uuid
from homeassistant.components.scene import DOMAIN, PLATFORM_SCHEMA
from homeassistant.config import SCENE_CONFIG_PATH
from homeassistant.const import CONF_ID, SERVICE_RELOAD
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers import config_validation as cv, entity_registry
from . import ACTION_DELETE, EditIdBasedConfigView
async def async_setup(hass):
"""Set up the Scene config API."""
async def hook(action, config_key):
"""post_write_hook for Config View that reloads scenes."""
await hass.services.async_call(DOMAIN, SERVICE_RELOAD)
if action != ACTION_DELETE:
return
ent_reg = await entity_registry.async_get_registry(hass)
entity_id = ent_reg.async_get_entity_id(DOMAIN, HA_DOMAIN, config_key)
if entity_id is None:
return
ent_reg.async_remove(entity_id)
hass.http.register_view(
EditSceneConfigView(
DOMAIN,
"config",
SCENE_CONFIG_PATH,
cv.string,
PLATFORM_SCHEMA,
post_write_hook=hook,
)
)
return True
class EditSceneConfigView(EditIdBasedConfigView):
"""Edit scene config."""
def _write_value(self, hass, data, config_key, new_value):
"""Set value."""
index = None
for index, cur_value in enumerate(data):
# When people copy paste their scenes to the config file,
# they sometimes forget to add IDs. Fix it here.
if CONF_ID not in cur_value:
cur_value[CONF_ID] = uuid.uuid4().hex
elif cur_value[CONF_ID] == config_key:
break
else:
cur_value = {}
cur_value[CONF_ID] = config_key
index = len(data)
data.append(cur_value)
# Iterate through some keys that we want to have ordered in the output
updated_value = OrderedDict()
for key in ("id", "name", "entities"):
if key in cur_value:
updated_value[key] = cur_value[key]
if key in new_value:
updated_value[key] = new_value[key]
# We cover all current fields above, but just in case we start
# supporting more fields in the future.
updated_value.update(cur_value)
updated_value.update(new_value)
data[index] = updated_value
|
import logging
from adext import AdExt
from alarmdecoder.devices import SerialDevice, SocketDevice
from alarmdecoder.util import NoDeviceError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PROTOCOL
from homeassistant.core import callback
from .const import ( # pylint: disable=unused-import
CONF_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED,
CONF_DEVICE_BAUD,
CONF_DEVICE_PATH,
CONF_RELAY_ADDR,
CONF_RELAY_CHAN,
CONF_ZONE_LOOP,
CONF_ZONE_NAME,
CONF_ZONE_NUMBER,
CONF_ZONE_RFID,
CONF_ZONE_TYPE,
DEFAULT_ARM_OPTIONS,
DEFAULT_DEVICE_BAUD,
DEFAULT_DEVICE_HOST,
DEFAULT_DEVICE_PATH,
DEFAULT_DEVICE_PORT,
DEFAULT_ZONE_OPTIONS,
DEFAULT_ZONE_TYPE,
DOMAIN,
OPTIONS_ARM,
OPTIONS_ZONES,
PROTOCOL_SERIAL,
PROTOCOL_SOCKET,
)
EDIT_KEY = "edit_selection"
EDIT_ZONES = "Zones"
EDIT_SETTINGS = "Arming Settings"
_LOGGER = logging.getLogger(__name__)
class AlarmDecoderFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a AlarmDecoder config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize AlarmDecoder ConfigFlow."""
self.protocol = None
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for AlarmDecoder."""
return AlarmDecoderOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is not None:
self.protocol = user_input[CONF_PROTOCOL]
return await self.async_step_protocol()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_PROTOCOL): vol.In(
[PROTOCOL_SOCKET, PROTOCOL_SERIAL]
),
}
),
)
async def async_step_protocol(self, user_input=None):
"""Handle AlarmDecoder protocol setup."""
errors = {}
if user_input is not None:
if _device_already_added(
self._async_current_entries(), user_input, self.protocol
):
return self.async_abort(reason="already_configured")
connection = {}
baud = None
if self.protocol == PROTOCOL_SOCKET:
host = connection[CONF_HOST] = user_input[CONF_HOST]
port = connection[CONF_PORT] = user_input[CONF_PORT]
title = f"{host}:{port}"
device = SocketDevice(interface=(host, port))
if self.protocol == PROTOCOL_SERIAL:
path = connection[CONF_DEVICE_PATH] = user_input[CONF_DEVICE_PATH]
baud = connection[CONF_DEVICE_BAUD] = user_input[CONF_DEVICE_BAUD]
title = path
device = SerialDevice(interface=path)
controller = AdExt(device)
def test_connection():
controller.open(baud)
controller.close()
try:
await self.hass.async_add_executor_job(test_connection)
return self.async_create_entry(
title=title, data={CONF_PROTOCOL: self.protocol, **connection}
)
except NoDeviceError:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception during AlarmDecoder setup")
errors["base"] = "unknown"
if self.protocol == PROTOCOL_SOCKET:
schema = vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_DEVICE_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_DEVICE_PORT): int,
}
)
if self.protocol == PROTOCOL_SERIAL:
schema = vol.Schema(
{
vol.Required(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): str,
vol.Required(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): int,
}
)
return self.async_show_form(
step_id="protocol",
data_schema=schema,
errors=errors,
)
class AlarmDecoderOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle AlarmDecoder options."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize AlarmDecoder options flow."""
self.arm_options = config_entry.options.get(OPTIONS_ARM, DEFAULT_ARM_OPTIONS)
self.zone_options = config_entry.options.get(
OPTIONS_ZONES, DEFAULT_ZONE_OPTIONS
)
self.selected_zone = None
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
if user_input[EDIT_KEY] == EDIT_SETTINGS:
return await self.async_step_arm_settings()
if user_input[EDIT_KEY] == EDIT_ZONES:
return await self.async_step_zone_select()
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(EDIT_KEY, default=EDIT_SETTINGS): vol.In(
[EDIT_SETTINGS, EDIT_ZONES]
)
},
),
)
async def async_step_arm_settings(self, user_input=None):
"""Arming options form."""
if user_input is not None:
return self.async_create_entry(
title="",
data={OPTIONS_ARM: user_input, OPTIONS_ZONES: self.zone_options},
)
return self.async_show_form(
step_id="arm_settings",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALT_NIGHT_MODE,
default=self.arm_options[CONF_ALT_NIGHT_MODE],
): bool,
vol.Optional(
CONF_AUTO_BYPASS, default=self.arm_options[CONF_AUTO_BYPASS]
): bool,
vol.Optional(
CONF_CODE_ARM_REQUIRED,
default=self.arm_options[CONF_CODE_ARM_REQUIRED],
): bool,
},
),
)
async def async_step_zone_select(self, user_input=None):
"""Zone selection form."""
errors = _validate_zone_input(user_input)
if user_input is not None and not errors:
self.selected_zone = str(
int(user_input[CONF_ZONE_NUMBER])
) # remove leading zeros
return await self.async_step_zone_details()
return self.async_show_form(
step_id="zone_select",
data_schema=vol.Schema({vol.Required(CONF_ZONE_NUMBER): str}),
errors=errors,
)
async def async_step_zone_details(self, user_input=None):
"""Zone details form."""
errors = _validate_zone_input(user_input)
if user_input is not None and not errors:
zone_options = self.zone_options.copy()
zone_id = self.selected_zone
zone_options[zone_id] = _fix_input_types(user_input)
# Delete zone entry if zone_name is omitted
if CONF_ZONE_NAME not in zone_options[zone_id]:
zone_options.pop(zone_id)
return self.async_create_entry(
title="",
data={OPTIONS_ARM: self.arm_options, OPTIONS_ZONES: zone_options},
)
existing_zone_settings = self.zone_options.get(self.selected_zone, {})
return self.async_show_form(
step_id="zone_details",
description_placeholders={CONF_ZONE_NUMBER: self.selected_zone},
data_schema=vol.Schema(
{
vol.Optional(
CONF_ZONE_NAME,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_NAME
)
},
): str,
vol.Optional(
CONF_ZONE_TYPE,
default=existing_zone_settings.get(
CONF_ZONE_TYPE, DEFAULT_ZONE_TYPE
),
): vol.In(DEVICE_CLASSES),
vol.Optional(
CONF_ZONE_RFID,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_RFID
)
},
): str,
vol.Optional(
CONF_ZONE_LOOP,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_LOOP
)
},
): str,
vol.Optional(
CONF_RELAY_ADDR,
description={
"suggested_value": existing_zone_settings.get(
CONF_RELAY_ADDR
)
},
): str,
vol.Optional(
CONF_RELAY_CHAN,
description={
"suggested_value": existing_zone_settings.get(
CONF_RELAY_CHAN
)
},
): str,
}
),
errors=errors,
)
def _validate_zone_input(zone_input):
if not zone_input:
return {}
errors = {}
# CONF_RELAY_ADDR & CONF_RELAY_CHAN are inclusive
if (CONF_RELAY_ADDR in zone_input and CONF_RELAY_CHAN not in zone_input) or (
CONF_RELAY_ADDR not in zone_input and CONF_RELAY_CHAN in zone_input
):
errors["base"] = "relay_inclusive"
# The following keys must be int
for key in [CONF_ZONE_NUMBER, CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]:
if key in zone_input:
try:
int(zone_input[key])
except ValueError:
errors[key] = "int"
# CONF_ZONE_LOOP depends on CONF_ZONE_RFID
if CONF_ZONE_LOOP in zone_input and CONF_ZONE_RFID not in zone_input:
errors[CONF_ZONE_LOOP] = "loop_rfid"
# CONF_ZONE_LOOP must be 1-4
if (
CONF_ZONE_LOOP in zone_input
and zone_input[CONF_ZONE_LOOP].isdigit()
and int(zone_input[CONF_ZONE_LOOP]) not in list(range(1, 5))
):
errors[CONF_ZONE_LOOP] = "loop_range"
return errors
def _fix_input_types(zone_input):
"""Convert necessary keys to int.
Since ConfigFlow inputs of type int cannot default to an empty string, we collect the values below as
strings and then convert them to ints.
"""
for key in [CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]:
if key in zone_input:
zone_input[key] = int(zone_input[key])
return zone_input
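# Hedged illustration (not part of the original flow) of how the two helpers above
# behave. The CONF_* constants are the module's own keys; the literal values are
# made up for this sketch.
def _demo_zone_helpers():
    """Hypothetical walkthrough of _validate_zone_input and _fix_input_types."""
    # A relay address without the matching channel trips the inclusive check.
    assert _validate_zone_input({CONF_ZONE_NUMBER: "1", CONF_RELAY_ADDR: "12"}) == {
        "base": "relay_inclusive"
    }
    # A loop outside 1-4 (with an RF serial supplied) is rejected as out of range.
    assert _validate_zone_input(
        {CONF_ZONE_NUMBER: "1", CONF_ZONE_RFID: "0123456", CONF_ZONE_LOOP: "9"}
    ) == {CONF_ZONE_LOOP: "loop_range"}
    # Valid form input arrives as strings and is converted to ints before saving.
    assert _fix_input_types({CONF_ZONE_NAME: "Front Door", CONF_ZONE_LOOP: "2"}) == {
        CONF_ZONE_NAME: "Front Door",
        CONF_ZONE_LOOP: 2,
    }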
def _device_already_added(current_entries, user_input, protocol):
"""Determine if entry has already been added to HA."""
user_host = user_input.get(CONF_HOST)
user_port = user_input.get(CONF_PORT)
user_path = user_input.get(CONF_DEVICE_PATH)
user_baud = user_input.get(CONF_DEVICE_BAUD)
for entry in current_entries:
entry_host = entry.data.get(CONF_HOST)
entry_port = entry.data.get(CONF_PORT)
entry_path = entry.data.get(CONF_DEVICE_PATH)
entry_baud = entry.data.get(CONF_DEVICE_BAUD)
if protocol == PROTOCOL_SOCKET:
if user_host == entry_host and user_port == entry_port:
return True
if protocol == PROTOCOL_SERIAL:
if user_baud == entry_baud and user_path == entry_path:
return True
return False
|
import mock
from paasta_tools.cli.cmds import info
from paasta_tools.cli.utils import PaastaColors
from paasta_tools.long_running_service_tools import ServiceNamespaceConfig
from paasta_tools.utils import NoDeploymentsAvailable
def test_get_service_info():
with mock.patch(
"paasta_tools.cli.cmds.info.get_team", autospec=True
) as mock_get_team, mock.patch(
"paasta_tools.cli.cmds.info.get_runbook", autospec=True
) as mock_get_runbook, mock.patch(
"paasta_tools.cli.cmds.info.read_service_configuration", autospec=True
) as mock_read_service_configuration, mock.patch(
"service_configuration_lib.read_service_configuration", autospec=True
) as mock_scl_read_service_configuration, mock.patch(
"service_configuration_lib.read_extra_service_information", autospec=True
) as mock_read_extra_service_information, mock.patch(
"paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
) as mock_get_actual_deployments, mock.patch(
"paasta_tools.cli.cmds.info.get_smartstack_endpoints", autospec=True
) as mock_get_smartstack_endpoints:
mock_get_team.return_value = "fake_team"
mock_get_runbook.return_value = "fake_runbook"
mock_read_service_configuration.return_value = {
"description": "a fake service that does stuff",
"external_link": "http://bla",
"smartstack": {"main": {"proxy_port": 9001}},
}
mock_scl_read_service_configuration.return_value = (
mock_read_service_configuration.return_value
)
mock_read_extra_service_information.return_value = mock_read_service_configuration.return_value[
"smartstack"
]
mock_get_actual_deployments.return_value = ["clusterA.main", "clusterB.main"]
mock_get_smartstack_endpoints.return_value = [
"http://foo:1234",
"tcp://bar:1234",
]
actual = info.get_service_info("fake_service", "/fake/soa/dir")
assert "Service Name: fake_service" in actual
assert "Monitored By: team fake_team" in actual
assert "Runbook: " in actual
assert "fake_runbook" in actual
assert "Description: a fake service" in actual
assert "http://bla" in actual
assert "Git Repo: [email protected]:services/fake_service" in actual
assert "Deployed to the following" in actual
assert (
"clusterA (%s)"
% PaastaColors.cyan("http://fake_service.paasta-clusterA.yelp/")
in actual
)
assert (
"clusterB (%s)"
% PaastaColors.cyan("http://fake_service.paasta-clusterB.yelp/")
in actual
)
assert "Smartstack endpoint" in actual
assert "http://foo:1234" in actual
assert "tcp://bar:1234" in actual
assert "Dashboard" in actual
assert (
"%s (Sensu Alerts)"
% PaastaColors.cyan("https://uchiwa.yelpcorp.com/#/events?q=fake_service")
in actual
)
mock_get_team.assert_called_with(
service="fake_service", overrides={}, soa_dir="/fake/soa/dir"
)
mock_get_runbook.assert_called_with(
service="fake_service", overrides={}, soa_dir="/fake/soa/dir"
)
def test_deployments_to_clusters():
deployments = ["A.main", "A.canary", "B.main", "C.othermain.dev"]
expected = {"A", "B", "C"}
actual = info.deployments_to_clusters(deployments)
assert actual == expected
def test_get_smartstack_endpoints_http():
with mock.patch(
"service_configuration_lib.read_service_configuration", autospec=True
) as mock_read_service_configuration:
mock_read_service_configuration.return_value = {
"smartstack": {"main": {"proxy_port": 1234}}
}
expected = ["http://169.254.255.254:1234 (main)"]
actual = info.get_smartstack_endpoints("unused", "/fake/soa/dir")
assert actual == expected
def test_get_smartstack_endpoints_tcp():
with mock.patch(
"service_configuration_lib.read_service_configuration", autospec=True
) as mock_read_service_configuration:
mock_read_service_configuration.return_value = {
"smartstack": {"tcpone": {"proxy_port": 1234, "mode": "tcp"}}
}
expected = ["tcp://169.254.255.254:1234 (tcpone)"]
actual = info.get_smartstack_endpoints("unused", "/fake/soa/dir")
assert actual == expected
def test_get_deployments_strings_default_case_with_smartstack():
with mock.patch(
"paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
) as mock_get_actual_deployments, mock.patch(
"service_configuration_lib.read_extra_service_information", autospec=True
) as mock_read_extra_service_information:
mock_get_actual_deployments.return_value = ["clusterA.main", "clusterB.main"]
mock_read_extra_service_information.return_value = {
"main": {"proxy_port": 9001}
}
actual = info.get_deployments_strings("fake_service", "/fake/soa/dir")
assert (
" - clusterA (%s)"
% PaastaColors.cyan("http://fake_service.paasta-clusterA.yelp/")
in actual
)
assert (
" - clusterB (%s)"
% PaastaColors.cyan("http://fake_service.paasta-clusterB.yelp/")
in actual
)
def test_get_deployments_strings_protocol_tcp_case():
with mock.patch(
"paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
) as mock_get_actual_deployments, mock.patch(
"paasta_tools.cli.cmds.info.load_service_namespace_config", autospec=True
) as mock_load_service_namespace_config:
mock_get_actual_deployments.return_value = ["clusterA.main", "clusterB.main"]
mock_load_service_namespace_config.return_value = ServiceNamespaceConfig(
{"mode": "tcp", "proxy_port": 8080}
)
actual = info.get_deployments_strings("unused", "/fake/soa/dir")
assert (
" - clusterA (%s)" % PaastaColors.cyan("tcp://paasta-clusterA.yelp:8080/")
in actual
)
assert (
" - clusterB (%s)" % PaastaColors.cyan("tcp://paasta-clusterB.yelp:8080/")
in actual
)
def test_get_deployments_strings_non_listening_service():
with mock.patch(
"paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
) as mock_get_actual_deployments, mock.patch(
"paasta_tools.cli.cmds.info.load_service_namespace_config", autospec=True
) as mock_load_service_namespace_config:
mock_get_actual_deployments.return_value = ["clusterA.main", "clusterB.main"]
mock_load_service_namespace_config.return_value = ServiceNamespaceConfig()
actual = info.get_deployments_strings("unused", "/fake/soa/dir")
assert " - clusterA (N/A)" in actual
assert " - clusterB (N/A)" in actual
def test_get_deployments_strings_no_deployments():
with mock.patch(
"paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
) as mock_get_actual_deployments:
mock_get_actual_deployments.side_effect = NoDeploymentsAvailable
actual = info.get_deployments_strings("unused", "/fake/soa/dir")
assert "N/A: Not deployed" in actual[0]
|
from datetime import timedelta
import logging
from py17track import Client as SeventeenTrackClient
from py17track.errors import SeventeenTrackError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LOCATION,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from homeassistant.util import Throttle, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_DESTINATION_COUNTRY = "destination_country"
ATTR_FRIENDLY_NAME = "friendly_name"
ATTR_INFO_TEXT = "info_text"
ATTR_ORIGIN_COUNTRY = "origin_country"
ATTR_PACKAGES = "packages"
ATTR_PACKAGE_TYPE = "package_type"
ATTR_STATUS = "status"
ATTR_TRACKING_INFO_LANGUAGE = "tracking_info_language"
ATTR_TRACKING_NUMBER = "tracking_number"
CONF_SHOW_ARCHIVED = "show_archived"
CONF_SHOW_DELIVERED = "show_delivered"
DATA_PACKAGES = "package_data"
DATA_SUMMARY = "summary_data"
DEFAULT_ATTRIBUTION = "Data provided by 17track.net"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)
UNIQUE_ID_TEMPLATE = "package_{0}_{1}"
ENTITY_ID_TEMPLATE = "sensor.seventeentrack_package_{0}"
NOTIFICATION_DELIVERED_ID = "package_delivered_{0}"
NOTIFICATION_DELIVERED_TITLE = "Package {0} delivered"
NOTIFICATION_DELIVERED_MESSAGE = (
"Package Delivered: {0}<br />Visit 17.track for more information: "
"https://t.17track.net/track#nums={1}"
)
VALUE_DELIVERED = "Delivered"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SHOW_ARCHIVED, default=False): cv.boolean,
vol.Optional(CONF_SHOW_DELIVERED, default=False): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Configure the platform and add the sensors."""
websession = aiohttp_client.async_get_clientsession(hass)
client = SeventeenTrackClient(websession)
try:
login_result = await client.profile.login(
config[CONF_USERNAME], config[CONF_PASSWORD]
)
if not login_result:
_LOGGER.error("Invalid username and password provided")
return
except SeventeenTrackError as err:
_LOGGER.error("There was an error while logging in: %s", err)
return
scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
data = SeventeenTrackData(
client,
async_add_entities,
scan_interval,
config[CONF_SHOW_ARCHIVED],
config[CONF_SHOW_DELIVERED],
)
await data.async_update()
class SeventeenTrackSummarySensor(Entity):
"""Define a summary sensor."""
def __init__(self, data, status, initial_state):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._data = data
self._state = initial_state
self._status = status
@property
def available(self):
"""Return whether the entity is available."""
return self._state is not None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return "mdi:package"
@property
def name(self):
"""Return the name."""
return f"Seventeentrack Packages {self._status}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return "summary_{}_{}".format(self._data.account_id, slugify(self._status))
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return "packages"
async def async_update(self):
"""Update the sensor."""
await self._data.async_update()
package_data = []
for package in self._data.packages.values():
if package.status != self._status:
continue
package_data.append(
{
ATTR_FRIENDLY_NAME: package.friendly_name,
ATTR_INFO_TEXT: package.info_text,
ATTR_STATUS: package.status,
ATTR_TRACKING_NUMBER: package.tracking_number,
}
)
if package_data:
self._attrs[ATTR_PACKAGES] = package_data
self._state = self._data.summary.get(self._status)
class SeventeenTrackPackageSensor(Entity):
"""Define an individual package sensor."""
def __init__(self, data, package):
"""Initialize."""
self._attrs = {
ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
ATTR_DESTINATION_COUNTRY: package.destination_country,
ATTR_INFO_TEXT: package.info_text,
ATTR_LOCATION: package.location,
ATTR_ORIGIN_COUNTRY: package.origin_country,
ATTR_PACKAGE_TYPE: package.package_type,
ATTR_TRACKING_INFO_LANGUAGE: package.tracking_info_language,
ATTR_TRACKING_NUMBER: package.tracking_number,
}
self._data = data
self._friendly_name = package.friendly_name
self._state = package.status
self._tracking_number = package.tracking_number
self.entity_id = ENTITY_ID_TEMPLATE.format(self._tracking_number)
@property
def available(self):
"""Return whether the entity is available."""
return self._data.packages.get(self._tracking_number) is not None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return "mdi:package"
@property
def name(self):
"""Return the name."""
name = self._friendly_name
if not name:
name = self._tracking_number
return f"Seventeentrack Package: {name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number)
async def async_update(self):
"""Update the sensor."""
await self._data.async_update()
if not self.available:
            # Entity cannot be removed while it's being added
async_call_later(self.hass, 1, self._remove)
return
package = self._data.packages.get(self._tracking_number, None)
# If the user has elected to not see delivered packages and one gets
# delivered, post a notification:
if package.status == VALUE_DELIVERED and not self._data.show_delivered:
self._notify_delivered()
            # Entity cannot be removed while it's being added
async_call_later(self.hass, 1, self._remove)
return
self._attrs.update(
{ATTR_INFO_TEXT: package.info_text, ATTR_LOCATION: package.location}
)
self._state = package.status
self._friendly_name = package.friendly_name
async def _remove(self, *_):
"""Remove entity itself."""
await self.async_remove()
reg = await self.hass.helpers.entity_registry.async_get_registry()
entity_id = reg.async_get_entity_id(
"sensor",
"seventeentrack",
UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number),
)
if entity_id:
reg.async_remove(entity_id)
def _notify_delivered(self):
"""Notify when package is delivered."""
_LOGGER.info("Package delivered: %s", self._tracking_number)
identification = (
self._friendly_name if self._friendly_name else self._tracking_number
)
message = NOTIFICATION_DELIVERED_MESSAGE.format(
identification, self._tracking_number
)
title = NOTIFICATION_DELIVERED_TITLE.format(identification)
        notification_id = NOTIFICATION_DELIVERED_ID.format(self._tracking_number)
self.hass.components.persistent_notification.create(
message, title=title, notification_id=notification_id
)
class SeventeenTrackData:
"""Define a data handler for 17track.net."""
def __init__(
self, client, async_add_entities, scan_interval, show_archived, show_delivered
):
"""Initialize."""
self._async_add_entities = async_add_entities
self._client = client
self._scan_interval = scan_interval
self._show_archived = show_archived
self.account_id = client.profile.account_id
self.packages = {}
self.show_delivered = show_delivered
self.summary = {}
self.async_update = Throttle(self._scan_interval)(self._async_update)
self.first_update = True
async def _async_update(self):
"""Get updated data from 17track.net."""
try:
packages = await self._client.profile.packages(
show_archived=self._show_archived
)
_LOGGER.debug("New package data received: %s", packages)
new_packages = {p.tracking_number: p for p in packages}
to_add = set(new_packages) - set(self.packages)
_LOGGER.debug("Will add new tracking numbers: %s", to_add)
if to_add:
self._async_add_entities(
[
SeventeenTrackPackageSensor(self, new_packages[tracking_number])
for tracking_number in to_add
],
True,
)
self.packages = new_packages
except SeventeenTrackError as err:
_LOGGER.error("There was an error retrieving packages: %s", err)
try:
self.summary = await self._client.profile.summary(
show_archived=self._show_archived
)
_LOGGER.debug("New summary data received: %s", self.summary)
# creating summary sensors on first update
if self.first_update:
self.first_update = False
self._async_add_entities(
[
SeventeenTrackSummarySensor(self, status, quantity)
for status, quantity in self.summary.items()
],
True,
)
except SeventeenTrackError as err:
_LOGGER.error("There was an error retrieving the summary: %s", err)
self.summary = {}
|
from homeassistant.const import CONF_DEVICE_ID, CONF_NAME, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DISPATCHER_KAITERRA, DOMAIN
SENSORS = [
{"name": "Temperature", "prop": "rtemp", "device_class": "temperature"},
{"name": "Humidity", "prop": "rhumid", "device_class": "humidity"},
]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the kaiterra temperature and humidity sensor."""
if discovery_info is None:
return
api = hass.data[DOMAIN]
name = discovery_info[CONF_NAME]
device_id = discovery_info[CONF_DEVICE_ID]
async_add_entities(
[KaiterraSensor(api, name, device_id, sensor) for sensor in SENSORS]
)
class KaiterraSensor(Entity):
    """Implementation of a Kaiterra sensor."""
def __init__(self, api, name, device_id, sensor):
"""Initialize the sensor."""
self._api = api
self._name = f'{name} {sensor["name"]}'
self._device_id = device_id
self._kind = sensor["name"].lower()
self._property = sensor["prop"]
self._device_class = sensor["device_class"]
@property
def _sensor(self):
"""Return the sensor data."""
return self._api.data.get(self._device_id, {}).get(self._property, {})
@property
def should_poll(self):
"""Return that the sensor should not be polled."""
return False
@property
def available(self):
"""Return the availability of the sensor."""
return self._api.data.get(self._device_id) is not None
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
return self._sensor.get("value")
@property
def unique_id(self):
"""Return the sensor's unique id."""
return f"{self._device_id}_{self._kind}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if not self._sensor.get("units"):
return None
value = self._sensor["units"].value
if value == "F":
return TEMP_FAHRENHEIT
if value == "C":
return TEMP_CELSIUS
return value
async def async_added_to_hass(self):
"""Register callback."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, DISPATCHER_KAITERRA, self.async_write_ha_state
)
)
|
import unittest
import urwid.escape
class InputEscapeSequenceParserTest(unittest.TestCase):
""" Tests for parser of input escape sequences """
def test_bare_escape(self):
codes = [27]
expected = ['esc']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_meta(self):
codes = [27, ord('4'), ord('2')]
expected = ['meta 4']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([ord('2')], rest)
def test_shift_arrows(self):
codes = [27, ord('['), ord('a')]
expected = ['shift up']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_ctrl_pgup(self):
codes = [27, 91, 53, 59, 53, 126]
expected = ['ctrl page up']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_esc_meta_1(self):
codes = [27, 27, 49]
expected = ['esc', 'meta 1']
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_midsequence(self):
# '[11~' is F1, '[12~' is F2, etc
codes = [27, ord('['), ord('1')]
with self.assertRaises(urwid.escape.MoreInputRequired):
urwid.escape.process_keyqueue(codes, more_available=True)
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(['meta ['], actual)
self.assertListEqual([ord('1')], rest)
def test_mouse_press(self):
codes = [27, 91, 77, 32, 41, 48]
expected = [('mouse press', 1.0, 8, 15)]
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
def test_bug_104(self):
""" GH #104: click-Esc & Esc-click crashes urwid apps """
codes = [27, 27, 91, 77, 32, 127, 59]
expected = ['esc', ('mouse press', 1.0, 94, 26)]
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
codes = [27, 27, 91, 77, 35, 120, 59]
expected = ['esc', ('mouse release', 0, 87, 26)]
actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
self.assertListEqual(expected, actual)
self.assertListEqual([], rest)
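    def test_f1_key(self):
        """Hedged addition: per the comment in test_midsequence, '[11~' is F1."""
        # ESC [ 1 1 ~ -- assuming urwid reports the completed sequence as 'f1'.
        codes = [27, ord('['), ord('1'), ord('1'), ord('~')]
        actual, rest = urwid.escape.process_keyqueue(codes, more_available=False)
        self.assertListEqual(['f1'], actual)
        self.assertListEqual([], rest)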
|
import json as _json
import dbus as _dbus
import openrazer_daemon.misc.macro as _daemon_macro
from openrazer_daemon import keyboard
class RazerMacro(object):
def __init__(self, serial: str, devname: str, daemon_dbus=None, capabilities=None):
if daemon_dbus is None:
session_bus = _dbus.SessionBus()
daemon_dbus = session_bus.get_object("org.razer", "/org/razer/device/{0}".format(serial))
if capabilities is None:
self._capabilities = {}
else:
self._capabilities = capabilities
self._macro_dbus = _dbus.Interface(daemon_dbus, "razer.device.macro")
self._macro_enabled = False
self.name = devname
def get_macros(self) -> dict:
json_payload = self._macro_dbus.getMacros()
macro_structure = _json.loads(json_payload)
macro_key_mapping = {}
for bind_key, macro_list in macro_structure.items():
macro_objects = []
for macro_dict in macro_list:
macro_obj = _daemon_macro.macro_dict_to_obj(macro_dict)
macro_objects.append(macro_obj)
macro_key_mapping[bind_key] = macro_objects
return macro_key_mapping
def add_macro(self, bind_key: str, macro_object_sequence: list):
"""
Add macro to specified bind key
:param bind_key: Bind Key (has to be in openrazer.keyboard.KEY_MAPPING)
:type bind_key: str
:param macro_object_sequence: List of macro objects
:type macro_object_sequence: list or tuple or __daemon_macro.MacroObject
"""
if isinstance(macro_object_sequence, _daemon_macro.MacroObject):
macro_object_sequence = [macro_object_sequence]
if not isinstance(macro_object_sequence, (tuple, list)):
raise ValueError("macro_object_sequence is not iterable")
macro_list = []
for macro_obj in macro_object_sequence:
if not isinstance(macro_obj, _daemon_macro.MacroObject):
raise ValueError("{0} is not a macro object".format(str(macro_obj)))
macro_list.append(macro_obj.to_dict())
json_payload = _json.dumps(macro_list)
self._macro_dbus.addMacro(bind_key, json_payload)
def del_macro(self, bind_key: str):
key_map = keyboard.KEY_MAPPING
map_str = "keyboard.KEY_MAPPING"
if self.name in ["Razer Orbweaver", "Razer Orbweaver Chroma", "Razer Tartarus V2", "Razer Tartarus Chroma V2"]:
key_map = keyboard.ORBWEAVER_KEY_MAPPING
map_str = "keyboard.ORBWEAVER_KEY_MAPPING"
elif self.name in ["Razer Tartarus", "Razer Tartarus Chroma", "Razer Nostromo"]:
key_map = keyboard.TARTARUS_KEY_MAPPING
map_str = "keyboard.TARTARUS_KEY_MAPPING"
elif self.name in ["Razer Naga Hex V2", "Razer Naga Chroma"]:
key_map = keyboard.NAGA_HEX_V2_KEY_MAPPING
map_str = "keyboard.NAGA_HEX_V2_KEY_MAPPING"
if bind_key not in key_map:
raise ValueError("Key {0} is not in openrazer.{1}".format(bind_key, map_str))
else:
self._macro_dbus.deleteMacro(bind_key)
@property
def mode_modifier(self):
if 'macro_mode_modifier' in self._capabilities:
return self._macro_dbus.getModeModifier()
return False
@mode_modifier.setter
def mode_modifier(self, value):
if 'macro_mode_modifier' in self._capabilities and isinstance(value, bool):
            self._macro_dbus.setModeModifier(value)
@staticmethod
def create_url_macro_item(url: str) -> _daemon_macro.MacroURL:
"""
Create a macro object that opens a URL in a browser
:param url: URL
:type url: str
:return: Macro object
:rtype: _daemon_macro.MacroURL
"""
return _daemon_macro.MacroURL(url)
@staticmethod
def create_script_macro_item(script_path: str, script_args: str = None) -> _daemon_macro.MacroScript:
"""
Create a macro object that runs a script
        The arguments to the script should be a string containing all the arguments; if any values contain spaces, they should be quoted accordingly.
:param script_path: Script filepath, includes script name
:type script_path: str
:param script_args: Script arguments
:type script_args: str or None
:return: Macro object
:rtype: _daemon_macro.MacroScript
"""
return _daemon_macro.MacroScript(script_path, script_args)
@staticmethod
def create_keypress_up_macro_item(key_name: str, pre_pause: int = 0) -> _daemon_macro.MacroKey:
"""
Create a macro action that consists of a key release event
:param key_name: Key Name, compatible with XTE
:type key_name: str
:param pre_pause: Optional delay before key is actioned (if turned on in daemon)
:type pre_pause: int
:return: Macro Key
:rtype: _daemon_macro.MacroKey
"""
return _daemon_macro.MacroKey(key_name, pre_pause, 'UP')
@staticmethod
def create_keypress_down_macro_item(key_name: str, pre_pause: int = 0) -> _daemon_macro.MacroKey:
"""
Create a macro action that consists of a key press event
:param key_name: Key Name, compatible with XTE
:type key_name: str
:param pre_pause: Optional delay before key is actioned (if turned on in daemon)
:type pre_pause: int
:return: Macro Key
:rtype: _daemon_macro.MacroKey
"""
return _daemon_macro.MacroKey(key_name, pre_pause, 'DOWN')
@classmethod
def create_keypress_macro_item(cls, key_name: str, pre_pause: int = 0) -> list:
"""
Create a macro action that consists of a key press and release event
The pre_pause delay will be applied to both key events
:param key_name: Key Name, compatible with XTE
:type key_name: str
:param pre_pause: Optional delay before key is actioned (if turned on in daemon)
:type pre_pause: int
:return: Macro Key
:rtype: list of _daemon_macro.MacroKey
"""
return [cls.create_keypress_down_macro_item(key_name, pre_pause), cls.create_keypress_up_macro_item(key_name, pre_pause)]
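# Hypothetical usage sketch for the macro API above. The serial, device name and the
# "M1" bind key are placeholders (the bind key must exist in the device's key mapping),
# and a running openrazer daemon is assumed.
def _example_bind_macro(serial: str, device_name: str):
    macros = RazerMacro(serial, device_name)
    # Press and release 'a', then open a URL, as one macro bound to "M1".
    sequence = RazerMacro.create_keypress_macro_item('a') + [
        RazerMacro.create_url_macro_item('https://example.com')
    ]
    macros.add_macro('M1', sequence)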
|
from datetime import timedelta
import pytest
import voluptuous as vol
import homeassistant.components.automation as automation
import homeassistant.components.homeassistant.triggers.time_pattern as time_pattern
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_mock_service, mock_component
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
async def test_if_fires_when_hour_matches(hass, calls):
"""Test for firing if hour is matching."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, hour=3
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": 0,
"minutes": "*",
"seconds": "*",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, now.replace(year=now.year + 2, hour=0))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, now.replace(year=now.year + 1, hour=0))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_when_minute_matches(hass, calls):
"""Test for firing if minutes are matching."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, minute=30
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": "*",
"minutes": 0,
"seconds": "*",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, now.replace(year=now.year + 2, minute=0))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_when_second_matches(hass, calls):
"""Test for firing if seconds are matching."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, second=30
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": "*",
"minutes": "*",
"seconds": 0,
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, now.replace(year=now.year + 2, second=0))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_when_second_as_string_matches(hass, calls):
"""Test for firing if seconds are matching."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, second=15
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": "*",
"minutes": "*",
"seconds": "30",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(
hass, time_that_will_not_match_right_away + timedelta(seconds=15)
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_when_all_matches(hass, calls):
"""Test for firing if everything matches."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, hour=4
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": 1,
"minutes": 2,
"seconds": 3,
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=1, minute=2, second=3)
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_periodic_seconds(hass, calls):
"""Test for firing periodically every second."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, second=1
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": "*",
"minutes": "*",
"seconds": "/10",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=0, minute=0, second=10)
)
await hass.async_block_till_done()
assert len(calls) >= 1
async def test_if_fires_periodic_minutes(hass, calls):
"""Test for firing periodically every minute."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, minute=1
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": "*",
"minutes": "/2",
"seconds": "*",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=0, minute=2, second=0)
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_periodic_hours(hass, calls):
"""Test for firing periodically every hour."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, hour=1
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "time_pattern",
"hours": "/2",
"minutes": "*",
"seconds": "*",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=2, minute=0, second=0)
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_default_values(hass, calls):
"""Test for firing at 2 minutes every hour."""
now = dt_util.utcnow()
time_that_will_not_match_right_away = dt_util.utcnow().replace(
year=now.year + 1, minute=1
)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "time_pattern", "minutes": "2"},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=1, minute=2, second=0)
)
await hass.async_block_till_done()
assert len(calls) == 1
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=1, minute=2, second=1)
)
await hass.async_block_till_done()
assert len(calls) == 1
async_fire_time_changed(
hass, now.replace(year=now.year + 2, hour=2, minute=2, second=0)
)
await hass.async_block_till_done()
assert len(calls) == 2
async def test_invalid_schemas(hass, calls):
"""Test invalid schemas."""
schemas = (
None,
{},
{"platform": "time_pattern"},
{"platform": "time_pattern", "minutes": "/"},
{"platform": "time_pattern", "minutes": "*/5"},
{"platform": "time_pattern", "minutes": "/90"},
{"platform": "time_pattern", "hours": 12, "minutes": 0, "seconds": 100},
)
for value in schemas:
with pytest.raises(vol.Invalid):
time_pattern.TRIGGER_SCHEMA(value)
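def test_valid_schemas():
    """Hedged addition: counterpart configs the trigger schema should accept."""
    schemas = (
        {"platform": "time_pattern", "minutes": "/5"},
        {"platform": "time_pattern", "hours": 1, "minutes": 2, "seconds": 3},
    )
    for value in schemas:
        time_pattern.TRIGGER_SCHEMA(value)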
|
from datetime import timedelta
import logging
from typing import List
from aioazuredevops.builds import DevOpsBuild
from aioazuredevops.client import DevOpsClient
import aiohttp
from homeassistant.components.azure_devops import AzureDevOpsDeviceEntity
from homeassistant.components.azure_devops.const import (
CONF_ORG,
CONF_PROJECT,
DATA_AZURE_DEVOPS_CLIENT,
DATA_ORG,
DATA_PROJECT,
DOMAIN,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=300)
PARALLEL_UPDATES = 4
BUILDS_QUERY = "?queryOrder=queueTimeDescending&maxBuildsPerDefinition=1"
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up Azure DevOps sensor based on a config entry."""
instance_key = f"{DOMAIN}_{entry.data[CONF_ORG]}_{entry.data[CONF_PROJECT]}"
client = hass.data[instance_key][DATA_AZURE_DEVOPS_CLIENT]
organization = entry.data[DATA_ORG]
project = entry.data[DATA_PROJECT]
sensors = []
try:
builds: List[DevOpsBuild] = await client.get_builds(
organization, project, BUILDS_QUERY
)
except aiohttp.ClientError as exception:
_LOGGER.warning(exception)
raise PlatformNotReady from exception
for build in builds:
sensors.append(
AzureDevOpsLatestBuildSensor(client, organization, project, build)
)
async_add_entities(sensors, True)
class AzureDevOpsSensor(AzureDevOpsDeviceEntity):
    """Defines an Azure DevOps sensor."""
def __init__(
self,
client: DevOpsClient,
organization: str,
project: str,
key: str,
name: str,
icon: str,
measurement: str = "",
unit_of_measurement: str = "",
) -> None:
"""Initialize Azure DevOps sensor."""
self._state = None
self._attributes = None
self._available = False
self._unit_of_measurement = unit_of_measurement
self.measurement = measurement
self.client = client
self.organization = organization
self.project = project
self.key = key
super().__init__(organization, project, name, icon)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return "_".join([self.organization, self.key])
@property
def state(self) -> str:
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self) -> object:
"""Return the attributes of the sensor."""
return self._attributes
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
class AzureDevOpsLatestBuildSensor(AzureDevOpsSensor):
    """Defines an Azure DevOps latest build sensor."""
def __init__(
self, client: DevOpsClient, organization: str, project: str, build: DevOpsBuild
):
"""Initialize Azure DevOps sensor."""
self.build: DevOpsBuild = build
super().__init__(
client,
organization,
project,
f"{build.project.id}_{build.definition.id}_latest_build",
f"{build.project.name} {build.definition.name} Latest Build",
"mdi:pipe",
)
async def _azure_devops_update(self) -> bool:
"""Update Azure DevOps entity."""
try:
build: DevOpsBuild = await self.client.get_build(
self.organization, self.project, self.build.id
)
except aiohttp.ClientError as exception:
_LOGGER.warning(exception)
self._available = False
return False
self._state = build.build_number
self._attributes = {
"definition_id": build.definition.id,
"definition_name": build.definition.name,
"id": build.id,
"reason": build.reason,
"result": build.result,
"source_branch": build.source_branch,
"source_version": build.source_version,
"status": build.status,
"url": build.links.web,
"queue_time": build.queue_time,
"start_time": build.start_time,
"finish_time": build.finish_time,
}
self._available = True
return True
|
import numpy as np
from mock import Mock, sentinel, patch
from pytest import raises
# Do not remove PandasStore
from arctic.store._pandas_ndarray_store import PandasDataFrameStore, PandasPanelStore, PandasStore
from tests.util import read_str_as_pandas
def test_panel_converted_to_dataframe_and_stacked_to_write():
store = PandasPanelStore()
panel = Mock(shape=(1, 2, 3), axes=[Mock(names=['n%d' % i]) for i in range(3)])
panel.to_frame.return_value.dtypes = [sentinel.dtype]
with patch.object(PandasDataFrameStore, 'write') as mock_write:
with patch('arctic.store._pandas_ndarray_store.DataFrame') as DF:
store.write(sentinel.mlib, sentinel.version, sentinel.symbol, panel, sentinel.prev)
panel.to_frame.assert_called_with(filter_observations=False)
DF.assert_called_with(panel.to_frame.return_value.stack.return_value)
mock_write.assert_called_with(sentinel.mlib, sentinel.version, sentinel.symbol,
DF.return_value, sentinel.prev)
def test_panel_append_not_supported():
store = PandasPanelStore()
panel = Mock(shape=(1, 2, 3), axes=[Mock(names=['n%d' % i]) for i in range(3)], dtypes=['a'])
with raises(ValueError):
store.append(sentinel.mlib, sentinel.version, sentinel.symbol, panel, sentinel.prev)
def test_panel_converted_from_dataframe_for_reading():
store = PandasPanelStore()
with patch.object(PandasDataFrameStore, 'read') as mock_read:
res = store.read(sentinel.mlib, sentinel.version, sentinel.symbol)
mock_read.assert_called_with(sentinel.mlib, sentinel.version, sentinel.symbol)
assert res == mock_read.return_value.to_panel.return_value
def test_raises_upon_empty_panel_write():
store = PandasPanelStore()
panel = Mock(shape=(1, 0, 3))
with raises(ValueError):
store.write(sentinel.mlib, sentinel.version, sentinel.symbol, panel, sentinel.prev)
def test_read_multi_index_with_no_ts_info():
# github #81: old multi-index ts would not have tz info in metadata. Ensure read is not broken
df = read_str_as_pandas("""index 1 | index 2 | SPAM
2012-09-08 | 2015-01-01 | 1.0
2012-09-09 | 2015-01-02 | 1.1
2012-10-08 | 2015-01-03 | 2.0""", num_index=2)
store = PandasDataFrameStore()
record = store.SERIALIZER.serialize(df)[0]
# now take away timezone info from metadata
record = np.array(record.tolist(), dtype=np.dtype([('index 1', '<M8[ns]'), ('index 2', '<M8[ns]'), ('SPAM', '<f8')],
metadata={'index': ['index 1', 'index 2'], 'columns': ['SPAM']}))
assert store.SERIALIZER._index_from_records(record).equals(df.index)
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.experimental.links.model.fcis import FCIS
from chainercv.utils import assert_is_instance_segmentation_link
from chainercv.utils import generate_random_bbox
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class DummyExtractor(chainer.Link):
def __init__(self, feat_stride):
super(DummyExtractor, self).__init__()
self.feat_stride = feat_stride
def forward(self, x):
_, _, H, W = x.shape
rpn_features = _random_array(
self.xp, (1, 8, H // self.feat_stride, W // self.feat_stride))
roi_features = _random_array(
self.xp, (1, 8, H // self.feat_stride, W // self.feat_stride))
return rpn_features, roi_features
class DummyHead(chainer.Chain):
def __init__(self, n_class, roi_size):
super(DummyHead, self).__init__()
self.n_class = n_class
self.roi_size = roi_size
def forward(self, x, rois, roi_indices, img_size, gt_roi_label=None):
n_roi = len(rois)
ag_locs = chainer.Variable(
_random_array(self.xp, (n_roi, 2, 4)))
# For each bbox, the score for a selected class is
# overwhelmingly higher than the scores for the other classes.
ag_seg_scores = chainer.Variable(
_random_array(
self.xp, (n_roi, 2, self.roi_size, self.roi_size)))
score_idx = np.random.randint(
low=0, high=self.n_class, size=(n_roi,))
cls_scores = self.xp.zeros((n_roi, self.n_class), dtype=np.float32)
cls_scores[np.arange(n_roi), score_idx] = 100
cls_scores = chainer.Variable(cls_scores)
return ag_seg_scores, ag_locs, cls_scores, rois, roi_indices
class DummyRegionProposalNetwork(chainer.Chain):
def __init__(self, n_anchor_base, n_roi):
super(DummyRegionProposalNetwork, self).__init__()
self.n_anchor_base = n_anchor_base
self.n_roi = n_roi
def forward(self, x, img_size, scale):
B, _, H, W = x.shape
n_anchor = self.n_anchor_base * H * W
rpn_locs = _random_array(self.xp, (B, n_anchor, 4))
rpn_cls_scores = _random_array(self.xp, (B, n_anchor, 2))
rois = self.xp.asarray(generate_random_bbox(
self.n_roi, img_size, 16, min(img_size)))
roi_indices = self.xp.zeros((len(rois),), dtype=np.int32)
anchor = self.xp.asarray(generate_random_bbox(
n_anchor, img_size, 16, min(img_size)))
return (chainer.Variable(rpn_locs),
chainer.Variable(rpn_cls_scores), rois, roi_indices, anchor)
class DummyFCIS(FCIS):
def __init__(
self, n_anchor_base, feat_stride,
n_fg_class, n_roi, roi_size,
min_size, max_size
):
super(DummyFCIS, self).__init__(
DummyExtractor(feat_stride),
DummyRegionProposalNetwork(n_anchor_base, n_roi),
DummyHead(n_fg_class + 1, roi_size),
mean=np.array([[[123.15]], [[115.90]], [[103.06]]]),
min_size=min_size,
max_size=max_size,
loc_normalize_mean=(0.0, 0.0, 0.0, 0.0),
loc_normalize_std=(0.2, 0.2, 0.5, 0.5))
class TestFCIS(unittest.TestCase):
def setUp(self):
self.n_anchor_base = 6
self.feat_stride = 4
n_fg_class = 4
self.n_class = n_fg_class + 1
self.n_roi = 24
self.roi_size = 21
self.link = DummyFCIS(
n_anchor_base=self.n_anchor_base,
feat_stride=self.feat_stride,
n_fg_class=n_fg_class,
n_roi=self.n_roi,
roi_size=21,
min_size=600,
max_size=1000,
)
def check_call(self):
xp = self.link.xp
x1 = chainer.Variable(_random_array(xp, (1, 3, 600, 800)))
roi_ag_seg_scores, roi_ag_locs, roi_cls_scores, rois, roi_indices = \
self.link(x1)
self.assertIsInstance(roi_ag_seg_scores, chainer.Variable)
self.assertIsInstance(roi_ag_seg_scores.array, xp.ndarray)
self.assertEqual(
roi_ag_seg_scores.shape,
(self.n_roi, 2, self.roi_size, self.roi_size))
self.assertIsInstance(roi_ag_locs, chainer.Variable)
self.assertIsInstance(roi_ag_locs.array, xp.ndarray)
self.assertEqual(roi_ag_locs.shape, (self.n_roi, 2, 4))
self.assertIsInstance(roi_cls_scores, chainer.Variable)
self.assertIsInstance(roi_cls_scores.array, xp.ndarray)
self.assertEqual(roi_cls_scores.shape, (self.n_roi, self.n_class))
self.assertIsInstance(rois, xp.ndarray)
self.assertEqual(rois.shape, (self.n_roi, 4))
self.assertIsInstance(roi_indices, xp.ndarray)
self.assertEqual(roi_indices.shape, (self.n_roi,))
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
def test_predict_cpu(self):
assert_is_instance_segmentation_link(self.link, self.n_class - 1)
@attr.gpu
def test_predict_gpu(self):
self.link.to_gpu()
assert_is_instance_segmentation_link(self.link, self.n_class - 1)
@testing.parameterize(
{'in_shape': (3, 100, 100), 'expected_shape': (3, 200, 200)},
{'in_shape': (3, 200, 50), 'expected_shape': (3, 400, 100)},
{'in_shape': (3, 400, 100), 'expected_shape': (3, 400, 100)},
{'in_shape': (3, 300, 600), 'expected_shape': (3, 200, 400)},
{'in_shape': (3, 600, 900), 'expected_shape': (3, 200, 300)}
)
class TestFCISPrepare(unittest.TestCase):
min_size = 200
max_size = 400
def setUp(self):
self.link = DummyFCIS(
n_anchor_base=1,
feat_stride=16,
n_fg_class=20,
n_roi=1,
roi_size=21,
min_size=self.min_size,
max_size=self.max_size
)
def check_prepare(self):
x = _random_array(np, self.in_shape)
out = self.link.prepare(x)
self.assertIsInstance(out, np.ndarray)
self.assertEqual(out.shape, self.expected_shape)
def test_prepare_cpu(self):
self.check_prepare()
@attr.gpu
def test_prepare_gpu(self):
self.link.to_gpu()
self.check_prepare()
testing.run_module(__name__, __file__)
|