import logging
class AbstractPlugin(object):
""" Plugin interface
Parent class for all plugins """
SECTION = 'DEFAULT'
@staticmethod
def get_key():
""" Get dictionary key for plugin,
should point to __file__ magic constant """
raise TypeError("Abstract method needs to be overridden")
# TODO: do we really need cfg_updater here?
def __init__(self, core, cfg, name):
"""
:param name:
:type core: yandextank.core.TankCore
:type cfg: dict
"""
super(AbstractPlugin, self).__init__()
self._cleanup_actions = []
self.log = logging.getLogger(__name__)
self.core = core
self.cfg = cfg
self.cfg_section_name = name
self.interrupted = self.core.interrupted
def set_option(self, option, value):
self.cfg[option] = value
def configure(self):
""" A stage to read config values and instantiate objects """
pass
def prepare_test(self):
""" Test preparation tasks """
pass
def start_test(self):
""" Launch test process """
pass
def is_test_finished(self):
"""
Polling call; if the result differs from -1, test end
will be triggered
"""
return -1
def add_cleanup(self, action):
"""
:type action: function
"""
assert callable(action)
self._cleanup_actions.append(action)
def cleanup(self):
for action in reversed(self._cleanup_actions):
try:
action()
except Exception:
logging.error('Exception occurred during plugin cleanup {}'.format(self.__module__), exc_info=True)
def end_test(self, retcode):
"""
Stop processes launched at 'start_test',
change return code if necessary
"""
return retcode
def post_process(self, retcode):
""" Post-process test data """
return retcode
def get_option(self, option_name, default_value=None):
""" Wrapper to get option from plugins' section """
return self.cfg.get(option_name, default_value)
def get_available_options(self):
""" returns array containing known options for plugin """
return []
def get_multiline_option(self, option_name, default_value=None):
if default_value is not None:
default = ' '.join(default_value)
else:
default = None
value = self.get_option(option_name, default)
if value:
return (' '.join(value.split("\n"))).split(' ')
else:
return ()
def publish(self, key, value):
"""publish value to status"""
self.log.debug(
"Publishing status: %s/%s: %s", self.__class__.SECTION, key, value)
self.core.publish(self.__class__.SECTION, key, value)
def close(self):
"""
Release allocated resources here.
Warning: don't do any logic or potentially dangerous operations
"""
pass
class MonitoringDataListener(object):
""" Monitoring listener interface
parent class for Monitoring data listeners"""
def __init__(self):
pass
def monitoring_data(self, data):
"""Notification about new monitoring data lines"""
raise NotImplementedError("Abstract method needs to be overridden")
class AggregateResultListener(object):
""" Listener interface
parent class for Aggregate results listeners"""
def on_aggregated_data(self, data, stats):
"""
notification about new aggregated data and stats.
data contains aggregated metrics, and stats contains non-aggregated
metrics from the gun (like instances count, for example).
data and stats are cached and synchronized by timestamp. Stat items
are held until the corresponding data item is received and vice versa.
"""
raise NotImplementedError("Abstract method should be overridden")
class AbstractInfoWidget(object):
""" InfoWidgets interface
parent class for all InfoWidgets"""
def __init__(self):
pass
def render(self, screen):
raise NotImplementedError("Abstract method should be overridden")
def on_aggregated_data(self, data, stats):
raise NotImplementedError("Abstract method should be overridden")
def get_index(self):
""" get vertical priority index """
return 0
class AbstractCriterion(object):
""" Criterions interface,
parent class for all criterions """
RC_TIME = 21
RC_HTTP = 22
RC_NET = 23
RC_STEADY = 33
def __init__(self):
self.cause_second = None
@staticmethod
def count_matched_codes(codes_regex, codes_dict):
""" helper to aggregate codes by mask """
total = 0
for code, count in codes_dict.items():
if codes_regex.match(str(code)):
total += count
return total
def notify(self, data, stat):
""" notification about aggregate data goes here """
raise NotImplementedError("Abstract methods requires overriding")
def get_rc(self):
""" get return code for test """
raise NotImplementedError("Abstract methods requires overriding")
def explain(self):
""" long explanation to show after test stop """
raise NotImplementedError("Abstract methods requires overriding")
def get_criterion_parameters(self):
""" returns dict with all criterion parameters """
raise NotImplementedError("Abstract methods requires overriding")
def widget_explain(self):
""" short explanation to display in right panel """
return self.explain(), 0
@staticmethod
def get_type_string():
""" returns string that used as config name for criterion """
raise NotImplementedError("Abstract methods requires overriding")
class GeneratorPlugin(AbstractPlugin):
DEFAULT_INFO = {
'address': '',
'port': 80,
'instances': 1,
'ammo_file': '',
'rps_schedule': [],
'duration': 0,
'loop_count': 0
}
def __init__(self, core, cfg, name):
super(GeneratorPlugin, self).__init__(core, cfg, name)
self.stats_reader = None
self.reader = None
self.process = None
self.process_stderr = None
self.start_time = None
self.affinity = None
self.buffered_seconds = 2
class Info(object):
def __init__(
self, address, port, instances, ammo_file, rps_schedule,
duration, loop_count):
self.address = address
self.port = port
self.instances = instances
self.ammo_file = ammo_file
self.rps_schedule = rps_schedule
self.duration = duration
self.loop_count = loop_count
def get_info(self):
"""
:rtype: GeneratorPlugin.Info
"""
return self.Info(**self.DEFAULT_INFO)
def get_reader(self):
"""
:rtype: collections.Iterable
"""
pass
def get_stats_reader(self):
"""
:rtype: collections.Iterable
"""
pass
def end_test(self, retcode):
pass
class StatsReader(object):
@staticmethod
def stats_item(ts, instances, rps):
return {
'ts': ts,
'metrics': {
'instances': instances,
'reqps': rps
}
}
class MonitoringPlugin(AbstractPlugin):
def __init__(self, core, cfg, name):
super(MonitoringPlugin, self).__init__(core, cfg, name)
self.listeners = set()
def add_listener(self, plugin):
self.listeners.add(plugin)
class TankInfo(object):
def __init__(self, info):
self._info = info
def get_info_dict(self):
return self._info.copy()
def _set_info(self, new_info_dict):
raise NotImplementedError
def update(self, keys, value):
if len(keys) > 1:
self._info[keys[0]] = self._update_dict(self.get_value([keys[0]], {}), keys[1:], value)
else:
self._info[keys[0]] = value
def get_value(self, keys, default=None):
value = self.get_info_dict()
for key in keys:
value = value.get(key, {})
return value or default
@classmethod
def _update_dict(cls, status_dict, keys, value):
if len(keys) > 1:
cls._update_dict(status_dict.setdefault(keys[0], {}), keys[1:], value)
else:
status_dict[keys[0]] = value
return status_dict
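# A minimal sketch, not part of yandextank itself, showing how a concrete plugin could
# implement the AbstractPlugin interface above. The class name, the 'example' section
# and the 'delay' option are hypothetical.
class ExamplePlugin(AbstractPlugin):
    SECTION = 'example'

    @staticmethod
    def get_key():
        return __file__

    def configure(self):
        # read an option with a default and register a cleanup action
        self.delay = int(self.get_option('delay', 0))
        self.add_cleanup(lambda: self.log.info('example plugin cleaned up'))

    def is_test_finished(self):
        # publish a status value and keep the test running
        self.publish('delay', self.delay)
        return -1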
|
try:
import redis
except ImportError:
redis = None
from django.conf import settings
from django.dispatch import Signal
customer_recognized = Signal(providing_args=['customer', 'request'])
if redis and hasattr(settings, 'SESSION_REDIS'):
redis_con = dict((key, settings.SESSION_REDIS[key]) for key in ['host', 'port', 'db', 'socket_timeout'])
pool = redis.ConnectionPool(**redis_con)
redis_con = redis.Redis(connection_pool=pool)
else:
redis_con = type(str('Redis'), (), {'publish': lambda *args: None})()
def email_queued():
"""
If SESSION_REDIS is configured, inform a separately running worker engine that
emails are ready for delivery. Call this function every time an email has been
handed over to the post office.
"""
redis_con.publish('django-SHOP', 'send_queued_mail')
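# A hedged sketch of the worker side that would consume the notification published above.
# The channel name 'django-SHOP' and the payload match email_queued(); the redis-py
# pubsub loop and the send_queued() placeholder are illustrative assumptions, not part
# of this module.
#
#     import redis
#     connection = redis.Redis(host='localhost', port=6379, db=0)
#     pubsub = connection.pubsub()
#     pubsub.subscribe('django-SHOP')
#     for message in pubsub.listen():
#         if message['type'] == 'message' and message['data'] == b'send_queued_mail':
#             pass  # e.g. trigger django-post_office's send_queued() here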
|
import asyncio
import http.server
import threading
from typing import List
import mock
from behave import given
from behave import then
from behave import when
from paasta_tools import drain_lib
@given("a fake HTTP server")
def make_fake_http_server(context):
context.fake_http_server = FakeHTTPServer()
context.fake_http_server.start()
@given("a HTTPDrainMethod configured to point at that server")
def make_http_drain_method(context):
context.http_drain_method = drain_lib.HTTPDrainMethod(
service="fake_service",
instance="fake_instance",
registrations=["fake_nerve_ns"],
drain={
"url_format": "http://localhost:%d/drain"
% context.fake_http_server.server.server_port,
"success_codes": "200",
},
stop_draining={},
is_draining={
"url_format": "http://localhost:%d/is_draining"
% context.fake_http_server.server.server_port,
"success_codes": "200",
},
is_safe_to_kill={},
)
@when("we call {method}() and get status code {status_code:d}")
def call_drain(context, method, status_code):
fake_task = mock.Mock(host="fake_host", ports=[654321])
FakeHTTPRequestHandler.status_code = status_code
func = {
"drain": context.http_drain_method.drain,
"is_draining": context.http_drain_method.is_draining,
}[method]
context.retval = asyncio.get_event_loop().run_until_complete(func(fake_task))
@then("the server should see a request to {path}")
def check_http_server(context, path):
assert context.fake_http_server.paths == [path]
@then("the return value should be {expected_retval}")
def check_retval(context, expected_retval):
assert repr(context.retval) == expected_retval
class FakeHTTPServer:
paths: List[str] = []
def start(self):
self.server = http.server.HTTPServer(("localhost", 0), FakeHTTPRequestHandler)
self.server_thread = threading.Thread(target=self.serve)
self.server_thread.daemon = True
self.server_thread.start()
def serve(self):
self.server.serve_forever()
def shutdown(self):
FakeHTTPServer.paths = []
self.server.shutdown()
self.server_thread.join()
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
status_code = 200
def do_GET(self):
print("Got GET for %s" % self.path)
try:
FakeHTTPServer.paths.append(self.path)
self.send_response(self.status_code)
self.end_headers()
except Exception as e:
print(e)
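# For orientation, a hedged sketch of the kind of behave feature these steps could back;
# the scenario wording mirrors the step decorators above but is illustrative, not copied
# from the paasta_tools feature files.
#
#     Feature: HTTPDrainMethod talks to a drain endpoint
#       Scenario: draining hits the /drain URL
#         Given a fake HTTP server
#         And a HTTPDrainMethod configured to point at that server
#         When we call drain() and get status code 200
#         Then the server should see a request to /drain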
|
import json
from django.contrib import messages as django_messages
from django.utils.encoding import force_str
def add_message(request, level, message, title=None, delay=None):
if title is None:
title = django_messages.DEFAULT_TAGS[level].capitalize()
extra_tags = {'title': force_str(title), 'delay': delay}
django_messages.add_message(request, level, force_str(message), extra_tags=json.dumps(extra_tags))
def success(request, message, title=None, delay=0):
add_message(request, django_messages.SUCCESS, message, title, delay)
def warning(request, message, title=None, delay=0):
add_message(request, django_messages.WARNING, message, title, delay)
def error(request, message, title=None, delay=0):
add_message(request, django_messages.ERROR, message, title, delay)
def info(request, message, title=None, delay=0):
add_message(request, django_messages.INFO, message, title, delay)
def debug(request, message, title=None, delay=0):
add_message(request, django_messages.DEBUG, message, title, delay)
def get_messages_as_json(request):
data = []
for message in django_messages.get_messages(request):
try:
extra_tags = json.loads(message.extra_tags)
except (TypeError, json.JSONDecodeError):
extra_tags = {}
heading = extra_tags.get('title', message.level_tag.capitalize())
try:
delay = int(float(extra_tags['delay']) * 1000)
except (KeyError, ValueError):
delay = None
data.append({
'level': message.level_tag,
'heading': heading,
'body': message.message,
'delay': delay,
})
return data
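# A hedged usage sketch, not part of this module: one view that queues a toast-style
# message and one that exposes the queued messages as JSON. The view names and the
# JsonResponse wiring are illustrative assumptions.
def example_checkout_done(request):
    from django.shortcuts import redirect
    success(request, "Your order has been placed.", title="Thank you", delay=2)
    return redirect("/")


def example_messages_endpoint(request):
    from django.http import JsonResponse
    return JsonResponse(get_messages_as_json(request), safe=False)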
|
import sys
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy.test import helper
class WSGIGraftTests(helper.CPWebCase):
@staticmethod
def setup_server():
def test_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
output = ['Hello, world!\n',
'This is a wsgi app running within CherryPy!\n\n']
keys = list(environ.keys())
keys.sort()
for k in keys:
output.append('%s: %s\n' % (k, environ[k]))
return [ntob(x, 'utf-8') for x in output]
def test_empty_string_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [
b'Hello', b'', b' ', b'', b'world',
]
class WSGIResponse(object):
def __init__(self, appresults):
self.appresults = appresults
self.iter = iter(appresults)
def __iter__(self):
return self
if sys.version_info >= (3, 0):
def __next__(self):
return next(self.iter)
else:
def next(self):
return self.iter.next()
def close(self):
if hasattr(self.appresults, 'close'):
self.appresults.close()
class ReversingMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
results = self.app(environ, start_response)
class Reverser(WSGIResponse):
if sys.version_info >= (3, 0):
def __next__(this):
line = list(next(this.iter))
line.reverse()
return bytes(line)
else:
def next(this):
line = list(this.iter.next())
line.reverse()
return ''.join(line)
return Reverser(results)
class Root:
@cherrypy.expose
def index(self):
return ntob("I'm a regular CherryPy page handler!")
cherrypy.tree.mount(Root())
cherrypy.tree.graft(test_app, '/hosted/app1')
cherrypy.tree.graft(test_empty_string_app, '/hosted/app3')
# Set script_name explicitly to None to signal CP that it should
# be pulled from the WSGI environ each time.
app = cherrypy.Application(Root(), script_name=None)
cherrypy.tree.graft(ReversingMiddleware(app), '/hosted/app2')
wsgi_output = '''Hello, world!
This is a wsgi app running within CherryPy!'''
def test_01_standard_app(self):
self.getPage('/')
self.assertBody("I'm a regular CherryPy page handler!")
def test_04_pure_wsgi(self):
if not cherrypy.server.using_wsgi:
return self.skip('skipped (not using WSGI)... ')
self.getPage('/hosted/app1')
self.assertHeader('Content-Type', 'text/plain')
self.assertInBody(self.wsgi_output)
def test_05_wrapped_cp_app(self):
if not cherrypy.server.using_wsgi:
return self.skip('skipped (not using WSGI)... ')
self.getPage('/hosted/app2/')
body = list("I'm a regular CherryPy page handler!")
body.reverse()
body = ''.join(body)
self.assertInBody(body)
def test_06_empty_string_app(self):
if not cherrypy.server.using_wsgi:
return self.skip('skipped (not using WSGI)... ')
self.getPage('/hosted/app3')
self.assertHeader('Content-Type', 'text/plain')
self.assertInBody('Hello world')
|
from homeassistant.components.switch import DOMAIN, SwitchEntity
from homeassistant.util import convert
from . import FIBARO_DEVICES, FibaroDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fibaro switches."""
if discovery_info is None:
return
add_entities(
[FibaroSwitch(device) for device in hass.data[FIBARO_DEVICES]["switch"]], True
)
class FibaroSwitch(FibaroDevice, SwitchEntity):
"""Representation of a Fibaro Switch."""
def __init__(self, fibaro_device):
"""Initialize the Fibaro device."""
self._state = False
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
def turn_on(self, **kwargs):
"""Turn device on."""
self.call_turn_on()
self._state = True
def turn_off(self, **kwargs):
"""Turn device off."""
self.call_turn_off()
self._state = False
@property
def current_power_w(self):
"""Return the current power usage in W."""
if "power" in self.fibaro_device.interfaces:
return convert(self.fibaro_device.properties.power, float, 0.0)
return None
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
if "energy" in self.fibaro_device.interfaces:
return convert(self.fibaro_device.properties.energy, float, 0.0)
return None
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def update(self):
"""Update device state."""
self._state = self.current_binary_state
|
from datetime import timedelta
from django.db.models import Q, Sum
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_DAILY
from weblate.addons.forms import RemoveForm, RemoveSuggestionForm
from weblate.trans.models import Comment, Suggestion
class RemovalAddon(BaseAddon):
project_scope = True
events = (EVENT_DAILY,)
settings_form = RemoveForm
icon = "delete.svg"
def get_cutoff(self):
age = self.instance.configuration["age"]
return timezone.now() - timedelta(days=age)
def delete_older(self, objects, component):
count = objects.filter(timestamp__lt=self.get_cutoff()).delete()[0]
if count:
component.invalidate_stats_deep()
class RemoveComments(RemovalAddon):
name = "weblate.removal.comments"
verbose = _("Stale comment removal")
description = _("Set a timeframe for removal of comments.")
def daily(self, component):
self.delete_older(
Comment.objects.filter(
unit__translation__component__project=component.project
),
component,
)
class RemoveSuggestions(RemovalAddon):
name = "weblate.removal.suggestions"
verbose = _("Stale suggestion removal")
description = _("Set a timeframe for removal of suggestions.")
settings_form = RemoveSuggestionForm
def daily(self, component):
self.delete_older(
Suggestion.objects.filter(
unit__translation__component__project=component.project
)
.annotate(Sum("vote__value"))
.filter(
Q(vote__value__sum__lte=self.instance.configuration.get("votes", 0))
| Q(vote__value__sum=None)
),
component,
)
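# For orientation, a hedged sketch of the configuration dictionaries these add-ons read;
# the numbers are illustrative. RemoveComments only uses "age" (days), while
# RemoveSuggestions (via RemoveSuggestionForm) also reads "votes":
#
#     {"age": 30}                # drop comments older than 30 days
#     {"age": 30, "votes": 0}    # drop suggestions older than 30 days with a vote sum <= 0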
|
import logging
from kalliope.core.Cortex import Cortex
from kalliope.core.NeuronLauncher import NeuronLauncher
from kalliope.core.Models.APIResponse import APIResponse
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Serialize(Exception):
"""
When raised, the LIFO class return the current API response to the caller
"""
pass
class SynapseListAddedToLIFO(Exception):
"""
When raised, a synapse list to process has been added to the LIFO list.
The LIFO must start over and process the last synapse list added
"""
pass
class LIFOBuffer(object):
"""
This class is a LIFO list of synapse lists to process: the last synapse list to enter is the first
one to be processed.
This design is needed in order to use Kalliope from the API, because we want to return information
while a Neuron is still processing and waiting for an answer from the user,
as with the Neurotransmitter neuron.
"""
def __init__(self):
logger.debug("[LIFOBuffer] LIFO buffer created")
self.api_response = APIResponse()
self.lifo_list = list()
self.answer = None
self.is_api_call = False
self.is_running = False
self.reset_lifo = False
def set_answer(self, value):
self.answer = value
def set_api_call(self, value):
self.is_api_call = value
def add_synapse_list_to_lifo(self, matched_synapse_list, high_priority=False):
"""
Add a synapse list to process to the lifo
:param matched_synapse_list: List of Matched Synapse
:param high_priority: If True, the synapse list added is executed directly
:return:
"""
logger.debug("[LIFOBuffer] Add a new synapse list to process to the LIFO")
self.lifo_list.append(matched_synapse_list)
self.reset_lifo = high_priority
def clean(self):
"""
Clean the LIFO by creating a new list
"""
self.lifo_list = list()
self.api_response = APIResponse()
def _return_serialized_api_response(self):
"""
Serialize Exception has been raised by the execute process somewhere, return the serialized API response
to the caller. Clean up the APIResponse object for the next call
:return:
"""
# we prepare a json response
returned_api_response = self.api_response.serialize()
# we clean up the API response object for the next call
self.api_response = APIResponse()
return returned_api_response
def execute(self, answer=None, is_api_call=False):
"""
Process the LIFO list.
The LIFO list contains multiple list of matched synapses.
For each list of matched synapse we process synapses inside
For each synapses we process neurons.
If a neuron add a Synapse list to the lifo, this synapse list is processed before executing the first list
in which we were in.
:param answer: String answer to give to the last neuron which was waiting for an answer
:param is_api_call: Boolean passed to all neurons in order to let them know if the current call comes from the API
:return: serialized APIResponse object
"""
# store the answer if present
self.answer = answer
self.is_api_call = is_api_call
try:
if not self.is_running:
self.is_running = True
# we keep looping over the LIFO while there are synapse lists to process in it
while self.lifo_list:
logger.debug("[LIFOBuffer] number of synapse list to process: %s" % len(self.lifo_list))
try:
# get the last list of matched synapse in the LIFO
last_synapse_fifo_list = self.lifo_list[-1]
self._process_synapse_list(last_synapse_fifo_list)
except SynapseListAddedToLIFO:
continue
# remove the synapse list from the LIFO
self.lifo_list.remove(last_synapse_fifo_list)
# clean the cortex from value loaded from order as all synapses have been processed
Cortex.clean_parameter_from_order()
self.is_running = False
raise Serialize
except Serialize:
return self._return_serialized_api_response()
def _process_synapse_list(self, synapse_list):
"""
Process a list of matched synapse.
Execute each neuron list for each synapse.
Add info in the API response object after each processed synapse
Remove the synapse from the synapse_list when it has been fully executed
:param synapse_list: List of MatchedSynapse
"""
# we keep processing while there are synapses in the list to process
while synapse_list:
# get the first matched synapse in the list
matched_synapse = synapse_list[0]
# add the synapse to the API response so the user will get the status if the synapse was not already
# in the response
if matched_synapse not in self.api_response.list_processed_matched_synapse:
self.api_response.list_processed_matched_synapse.append(matched_synapse)
self._process_neuron_list(matched_synapse=matched_synapse)
# The synapse has been processed we can remove it from the list.
synapse_list.remove(matched_synapse)
def _process_neuron_list(self, matched_synapse):
"""
Process the neuron list of the matched_synapse
Execute the Neuron
Executing a Neuron creates a NeuronModule object, which can have 3 statuses:
- waiting for an answer: the neuron waits for an answer from the caller. The API response object is returned.
The neuron is not removed from the matched synapse so it can be executed again
- wants to execute a synapse: the neuron adds a list of synapses to execute to the LIFO.
The LIFO starts over to process it. The neuron is removed from the matched synapse
- complete: the neuron has been executed, is not waiting for an answer and doesn't want to start a synapse.
The neuron is removed from the matched synapse
:param matched_synapse: MatchedSynapse object to process
"""
logger.debug("[LIFOBuffer] number of neuron to process: %s" % len(matched_synapse.neuron_fifo_list))
# while there are neurons to process in the FIFO list
while matched_synapse.neuron_fifo_list:
# get the first neuron in the FIFO neuron list
neuron = matched_synapse.neuron_fifo_list[0]
# from here, we are back into the last neuron we were processing.
if self.answer is not None:  # we give the answer, if it exists, to the first neuron
neuron.parameters["answer"] = self.answer
# the next neuron should not get this answer
self.answer = None
# todo fix this when we have a full client/server call. The client would be the voice or api call
neuron.parameters["is_api_call"] = self.is_api_call
logger.debug("[LIFOBuffer] process_neuron_list: is_api_call: %s" % (self.is_api_call))
# execute the neuron
instantiated_neuron = NeuronLauncher.start_neuron(neuron=neuron,
parameters_dict=matched_synapse.parameters)
# the status of an execution is "complete" if no neuron are waiting for an answer
self.api_response.status = "complete"
if instantiated_neuron is not None:
if instantiated_neuron.is_waiting_for_answer: # the neuron is waiting for an answer
logger.debug("[LIFOBuffer] Wait for answer mode")
self.api_response.status = "waiting_for_answer"
self.is_running = False
raise Serialize
else:
logger.debug("[LIFOBuffer] complete mode")
# we add the instantiated neuron to the neuron_module_list.
# This one contains info about generated text
matched_synapse.neuron_module_list.append(instantiated_neuron)
# the neuron is fully processed we can remove it from the list
matched_synapse.neuron_fifo_list.remove(neuron)
if self.reset_lifo:  # the last executed neuron wants to run a synapse
logger.debug("[LIFOBuffer] Last executed neuron wants to run a synapse. Restart the LIFO")
# we have added a list of synapse to the LIFO ! this one must start over.
# break all while loop until the execution is back to the LIFO loop
self.reset_lifo = False
raise SynapseListAddedToLIFO
else:
# the neuron has not been processed but we still need to remove it from the list
matched_synapse.neuron_fifo_list.remove(neuron)
|
import logging
import voluptuous as vol
import voluptuous_serialize
from homeassistant import data_entry_flow
from homeassistant.components import websocket_api
from homeassistant.core import HomeAssistant, callback
WS_TYPE_SETUP_MFA = "auth/setup_mfa"
SCHEMA_WS_SETUP_MFA = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_SETUP_MFA,
vol.Exclusive("mfa_module_id", "module_or_flow_id"): str,
vol.Exclusive("flow_id", "module_or_flow_id"): str,
vol.Optional("user_input"): object,
}
)
WS_TYPE_DEPOSE_MFA = "auth/depose_mfa"
SCHEMA_WS_DEPOSE_MFA = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_DEPOSE_MFA, vol.Required("mfa_module_id"): str}
)
DATA_SETUP_FLOW_MGR = "auth_mfa_setup_flow_manager"
_LOGGER = logging.getLogger(__name__)
class MfaFlowManager(data_entry_flow.FlowManager):
"""Manage multi factor authentication flows."""
async def async_create_flow(self, handler_key, *, context, data):
"""Create a setup flow. handler is a mfa module."""
mfa_module = self.hass.auth.get_auth_mfa_module(handler_key)
if mfa_module is None:
raise ValueError(f"Mfa module {handler_key} is not found")
user_id = data.pop("user_id")
return await mfa_module.async_setup_flow(user_id)
async def async_finish_flow(self, flow, result):
"""Complete an mfs setup flow."""
_LOGGER.debug("flow_result: %s", result)
return result
async def async_setup(hass):
"""Init mfa setup flow manager."""
hass.data[DATA_SETUP_FLOW_MGR] = MfaFlowManager(hass)
hass.components.websocket_api.async_register_command(
WS_TYPE_SETUP_MFA, websocket_setup_mfa, SCHEMA_WS_SETUP_MFA
)
hass.components.websocket_api.async_register_command(
WS_TYPE_DEPOSE_MFA, websocket_depose_mfa, SCHEMA_WS_DEPOSE_MFA
)
@callback
@websocket_api.ws_require_user(allow_system_user=False)
def websocket_setup_mfa(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return a setup flow for mfa auth module."""
async def async_setup_flow(msg):
"""Return a setup flow for mfa auth module."""
flow_manager = hass.data[DATA_SETUP_FLOW_MGR]
flow_id = msg.get("flow_id")
if flow_id is not None:
result = await flow_manager.async_configure(flow_id, msg.get("user_input"))
connection.send_message(
websocket_api.result_message(msg["id"], _prepare_result_json(result))
)
return
mfa_module_id = msg.get("mfa_module_id")
mfa_module = hass.auth.get_auth_mfa_module(mfa_module_id)
if mfa_module is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "no_module", f"MFA module {mfa_module_id} is not found"
)
)
return
result = await flow_manager.async_init(
mfa_module_id, data={"user_id": connection.user.id}
)
connection.send_message(
websocket_api.result_message(msg["id"], _prepare_result_json(result))
)
hass.async_create_task(async_setup_flow(msg))
@callback
@websocket_api.ws_require_user(allow_system_user=False)
def websocket_depose_mfa(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Remove user from mfa module."""
async def async_depose(msg):
"""Remove user from mfa auth module."""
mfa_module_id = msg["mfa_module_id"]
try:
await hass.auth.async_disable_user_mfa(
connection.user, msg["mfa_module_id"]
)
except ValueError as err:
connection.send_message(
websocket_api.error_message(
msg["id"],
"disable_failed",
f"Cannot disable MFA Module {mfa_module_id}: {err}",
)
)
return
connection.send_message(websocket_api.result_message(msg["id"], "done"))
hass.async_create_task(async_depose(msg))
def _prepare_result_json(result):
"""Convert result to JSON."""
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
data = result.copy()
return data
if result["type"] != data_entry_flow.RESULT_TYPE_FORM:
return result
data = result.copy()
schema = data["data_schema"]
if schema is None:
data["data_schema"] = []
else:
data["data_schema"] = voluptuous_serialize.convert(schema)
return data
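# A hedged sketch of the client messages accepted by the schemas above; the "id" values
# and the "totp" module name are illustrative.
#
#     {"id": 12, "type": "auth/setup_mfa", "mfa_module_id": "totp"}
#     {"id": 13, "type": "auth/setup_mfa", "flow_id": "<flow_id from the previous result>",
#      "user_input": {"code": "123456"}}
#     {"id": 14, "type": "auth/depose_mfa", "mfa_module_id": "totp"}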
|
import contextlib
import datetime
import functools
import glob
import os
import os.path
import random
import re
import shlex
import sys
import types
import pytest
from unittest_mixins import (
EnvironmentAwareMixin, StdStreamCapturingMixin, TempDirMixin,
DelayedAssertionMixin,
)
import coverage
from coverage import env
from coverage.backunittest import TestCase, unittest
from coverage.backward import StringIO, import_local_file, string_class, shlex_quote
from coverage.cmdline import CoverageScript
from coverage.misc import StopEverything
from tests.helpers import arcs_to_arcz_repr, arcz_to_arcs
from tests.helpers import run_command, SuperModuleCleaner
# Status returns for the command line.
OK, ERR = 0, 1
# The coverage/tests directory, for all sorts of finding test helping things.
TESTS_DIR = os.path.dirname(__file__)
def convert_skip_exceptions(method):
"""A decorator for test methods to convert StopEverything to SkipTest."""
@functools.wraps(method)
def _wrapper(*args, **kwargs):
try:
result = method(*args, **kwargs)
except StopEverything:
raise unittest.SkipTest("StopEverything!")
return result
return _wrapper
class SkipConvertingMetaclass(type):
"""Decorate all test methods to convert StopEverything to SkipTest."""
def __new__(cls, name, bases, attrs):
for attr_name, attr_value in attrs.items():
if attr_name.startswith('test_') and isinstance(attr_value, types.FunctionType):
attrs[attr_name] = convert_skip_exceptions(attr_value)
return super(SkipConvertingMetaclass, cls).__new__(cls, name, bases, attrs)
CoverageTestMethodsMixin = SkipConvertingMetaclass('CoverageTestMethodsMixin', (), {})
class CoverageTest(
EnvironmentAwareMixin,
StdStreamCapturingMixin,
TempDirMixin,
DelayedAssertionMixin,
CoverageTestMethodsMixin,
TestCase,
):
"""A base class for coverage.py test cases."""
# Standard unittest setting: show me diffs even if they are very long.
maxDiff = None
# Tell newer unittest implementations to print long helpful messages.
longMessage = True
# Let stderr go to stderr, pytest will capture it for us.
show_stderr = True
# Temp dirs go to $TMPDIR/coverage_test/*
temp_dir_prefix = "coverage_test/"
if os.getenv('COVERAGE_ENV_ID'):
temp_dir_prefix += "{}/".format(os.getenv('COVERAGE_ENV_ID'))
# Keep the temp directories if the env says to.
# $set_env.py: COVERAGE_KEEP_TMP - Keep the temp directories made by tests.
keep_temp_dir = bool(int(os.getenv("COVERAGE_KEEP_TMP", "0")))
def setUp(self):
super(CoverageTest, self).setUp()
self.module_cleaner = SuperModuleCleaner()
# Attributes for getting info about what happened.
self.last_command_status = None
self.last_command_output = None
self.last_module_name = None
def clean_local_file_imports(self):
"""Clean up the results of calls to `import_local_file`.
Use this if you need to `import_local_file` the same file twice in
one test.
"""
self.module_cleaner.clean_local_file_imports()
def start_import_stop(self, cov, modname, modfile=None):
"""Start coverage, import a file, then stop coverage.
`cov` is started and stopped, with an `import_local_file` of
`modname` in the middle. `modfile` is the file to import as `modname`
if it isn't in the current directory.
The imported module is returned.
"""
cov.start()
try: # pragma: nested
# Import the Python file, executing it.
mod = import_local_file(modname, modfile)
finally: # pragma: nested
# Stop coverage.py.
cov.stop()
return mod
def get_module_name(self):
"""Return a random module name to use for this test run."""
self.last_module_name = 'coverage_test_' + str(random.random())[2:]
return self.last_module_name
def assert_equal_arcs(self, a1, a2, msg=None):
"""Assert that the arc lists `a1` and `a2` are equal."""
# Make them into multi-line strings so we can see what's going wrong.
s1 = arcs_to_arcz_repr(a1)
s2 = arcs_to_arcz_repr(a2)
self.assertMultiLineEqual(s1, s2, msg)
def check_coverage(
self, text, lines=None, missing="", report="",
excludes=None, partials="",
arcz=None, arcz_missing="", arcz_unpredicted="",
arcs=None, arcs_missing=None, arcs_unpredicted=None,
):
"""Check the coverage measurement of `text`.
The source `text` is run and measured. `lines` are the line numbers
that are executable, or a list of possible line numbers, any of which
could match. `missing` are the lines not executed, `excludes` are
regexes to match against for excluding lines, and `report` is the text
of the measurement report.
For arc measurement, `arcz` is a string that can be decoded into arcs
in the code (see `arcz_to_arcs` for the encoding scheme).
`arcz_missing` are the arcs that are not executed, and
`arcz_unpredicted` are the arcs executed in the code, but not deducible
from the code. These last two default to "", meaning we explicitly
check that there are no missing or unpredicted arcs.
Returns the Coverage object, in case you want to poke at it some more.
"""
# We write the code into a file so that we can import it.
# Coverage.py wants to deal with things as modules with file names.
modname = self.get_module_name()
self.make_file(modname + ".py", text)
if arcs is None and arcz is not None:
arcs = arcz_to_arcs(arcz)
if arcs_missing is None:
arcs_missing = arcz_to_arcs(arcz_missing)
if arcs_unpredicted is None:
arcs_unpredicted = arcz_to_arcs(arcz_unpredicted)
# Start up coverage.py.
cov = coverage.Coverage(branch=True)
cov.erase()
for exc in excludes or []:
cov.exclude(exc)
for par in partials or []:
cov.exclude(par, which='partial')
mod = self.start_import_stop(cov, modname)
# Clean up our side effects
del sys.modules[modname]
# Get the analysis results, and check that they are right.
analysis = cov._analyze(mod)
statements = sorted(analysis.statements)
if lines is not None:
if isinstance(lines[0], int):
# lines is just a list of numbers, it must match the statements
# found in the code.
self.assertEqual(statements, lines)
else:
# lines is a list of possible line number lists, one of them
# must match.
for line_list in lines:
if statements == line_list:
break
else:
self.fail("None of the lines choices matched %r" % statements)
missing_formatted = analysis.missing_formatted()
if isinstance(missing, string_class):
self.assertEqual(missing_formatted, missing)
else:
for missing_list in missing:
if missing_formatted == missing_list:
break
else:
self.fail("None of the missing choices matched %r" % missing_formatted)
if arcs is not None:
with self.delayed_assertions():
self.assert_equal_arcs(
arcs, analysis.arc_possibilities(),
"Possible arcs differ: minus is expected, plus is actual"
)
self.assert_equal_arcs(
arcs_missing, analysis.arcs_missing(),
"Missing arcs differ: minus is expected, plus is actual"
)
self.assert_equal_arcs(
arcs_unpredicted, analysis.arcs_unpredicted(),
"Unpredicted arcs differ: minus is expected, plus is actual"
)
if report:
frep = StringIO()
cov.report(mod, file=frep, show_missing=True)
rep = " ".join(frep.getvalue().split("\n")[2].split()[1:])
self.assertEqual(report, rep)
return cov
@contextlib.contextmanager
def assert_warnings(self, cov, warnings, not_warnings=()):
"""A context manager to check that particular warnings happened in `cov`.
`cov` is a Coverage instance. `warnings` is a list of regexes. Every
regex must match a warning that was issued by `cov`. It is OK for
extra warnings to be issued by `cov` that are not matched by any regex.
Warnings that are disabled are still considered issued by this function.
`not_warnings` is a list of regexes that must not appear in the
warnings. This is only checked if there are some positive warnings to
test for in `warnings`.
If `warnings` is empty, then `cov` is not allowed to issue any
warnings.
"""
saved_warnings = []
def capture_warning(msg, slug=None, once=False): # pylint: disable=unused-argument
"""A fake implementation of Coverage._warn, to capture warnings."""
# NOTE: we don't implement `once`.
if slug:
msg = "%s (%s)" % (msg, slug)
saved_warnings.append(msg)
original_warn = cov._warn
cov._warn = capture_warning
try:
yield
except: # pylint: disable=try-except-raise
raise
else:
if warnings:
for warning_regex in warnings:
for saved in saved_warnings:
if re.search(warning_regex, saved):
break
else:
self.fail("Didn't find warning %r in %r" % (warning_regex, saved_warnings))
for warning_regex in not_warnings:
for saved in saved_warnings:
if re.search(warning_regex, saved):
self.fail("Found warning %r in %r" % (warning_regex, saved_warnings))
else:
# No warnings expected. Raise if any warnings happened.
if saved_warnings:
self.fail("Unexpected warnings: %r" % (saved_warnings,))
finally:
cov._warn = original_warn
def nice_file(self, *fparts):
"""Canonicalize the file name composed of the parts in `fparts`."""
fname = os.path.join(*fparts)
return os.path.normcase(os.path.abspath(os.path.realpath(fname)))
def assert_same_files(self, flist1, flist2):
"""Assert that `flist1` and `flist2` are the same set of file names."""
flist1_nice = [self.nice_file(f) for f in flist1]
flist2_nice = [self.nice_file(f) for f in flist2]
self.assertCountEqual(flist1_nice, flist2_nice)
def assert_exists(self, fname):
"""Assert that `fname` is a file that exists."""
msg = "File %r should exist" % fname
self.assertTrue(os.path.exists(fname), msg)
def assert_doesnt_exist(self, fname):
"""Assert that `fname` is a file that doesn't exist."""
msg = "File %r shouldn't exist" % fname
self.assertTrue(not os.path.exists(fname), msg)
def assert_file_count(self, pattern, count):
"""Assert that there are `count` files matching `pattern`."""
files = sorted(glob.glob(pattern))
msg = "There should be {} files matching {!r}, but there are these: {}"
msg = msg.format(count, pattern, files)
self.assertEqual(len(files), count, msg)
def assert_starts_with(self, s, prefix, msg=None):
"""Assert that `s` starts with `prefix`."""
if not s.startswith(prefix):
self.fail(msg or ("%r doesn't start with %r" % (s, prefix)))
def assert_recent_datetime(self, dt, seconds=10, msg=None):
"""Assert that `dt` marks a time at most `seconds` seconds ago."""
age = datetime.datetime.now() - dt
self.assertGreaterEqual(age.total_seconds(), 0, msg)
self.assertLessEqual(age.total_seconds(), seconds, msg)
def command_line(self, args, ret=OK):
"""Run `args` through the command line.
Use this when you want to run the full coverage machinery, but in the
current process. Exceptions may be thrown from deep in the code.
Asserts that `ret` is returned by `CoverageScript.command_line`.
Compare with `run_command`.
Returns None.
"""
ret_actual = command_line(args)
self.assertEqual(ret_actual, ret)
# Some distros rename the coverage command, and need a way to indicate
# their new command name to the tests. This is here for them to override,
# for example:
# https://salsa.debian.org/debian/pkg-python-coverage/-/blob/master/debian/patches/02.rename-public-programs.patch
coverage_command = "coverage"
def run_command(self, cmd):
"""Run the command-line `cmd` in a sub-process.
`cmd` is the command line to invoke in a sub-process. Returns the
combined content of `stdout` and `stderr` output streams from the
sub-process.
See `run_command_status` for complete semantics.
Use this when you need to test the process behavior of coverage.
Compare with `command_line`.
"""
_, output = self.run_command_status(cmd)
return output
def run_command_status(self, cmd):
"""Run the command-line `cmd` in a sub-process, and print its output.
Use this when you need to test the process behavior of coverage.
Compare with `command_line`.
Handles the following command names specially:
* "python" is replaced with the command name of the current
Python interpreter.
* "coverage" is replaced with the command name for the main
coverage.py program.
Returns a pair: the process' exit status and its stdout/stderr text,
which are also stored as `self.last_command_status` and
`self.last_command_output`.
"""
# Make sure "python" and "coverage" mean specifically what we want
# them to mean.
split_commandline = cmd.split()
command_name = split_commandline[0]
command_args = split_commandline[1:]
if command_name == "python":
# Running a Python interpreter in a sub-processes can be tricky.
# Use the real name of our own executable. So "python foo.py" might
# get executed as "python3.3 foo.py". This is important because
# Python 3.x doesn't install as "python", so you might get a Python
# 2 executable instead if you don't use the executable's basename.
command_words = [os.path.basename(sys.executable)]
elif command_name == "coverage":
if env.JYTHON: # pragma: only jython
# Jython can't do reporting, so let's skip the test now.
if command_args and command_args[0] in ('report', 'html', 'xml', 'annotate'):
self.skipTest("Can't run reporting commands in Jython")
# Jython can't run "coverage" as a command because the shebang
# refers to another shebang'd Python script. So run them as
# modules.
command_words = "jython -m coverage".split()
else:
# The invocation requests the coverage.py program. Substitute the
# actual coverage.py main command name.
command_words = [self.coverage_command]
else:
command_words = [command_name]
cmd = " ".join([shlex_quote(w) for w in command_words] + command_args)
# Add our test modules directory to PYTHONPATH. I'm sure there's too
# much path munging here, but...
pythonpath_name = "PYTHONPATH"
if env.JYTHON:
pythonpath_name = "JYTHONPATH" # pragma: only jython
testmods = self.nice_file(self.working_root(), 'tests/modules')
zipfile = self.nice_file(self.working_root(), 'tests/zipmods.zip')
pypath = os.getenv(pythonpath_name, '')
if pypath:
pypath += os.pathsep
pypath += testmods + os.pathsep + zipfile
self.set_environ(pythonpath_name, pypath)
self.last_command_status, self.last_command_output = run_command(cmd)
print(self.last_command_output)
return self.last_command_status, self.last_command_output
def working_root(self):
"""Where is the root of the coverage.py working tree?"""
return os.path.dirname(self.nice_file(coverage.__file__, ".."))
def report_from_command(self, cmd):
"""Return the report from the `cmd`, with some convenience added."""
report = self.run_command(cmd).replace('\\', '/')
self.assertNotIn("error", report.lower())
return report
def report_lines(self, report):
"""Return the lines of the report, as a list."""
lines = report.split('\n')
self.assertEqual(lines[-1], "")
return lines[:-1]
def line_count(self, report):
"""How many lines are in `report`?"""
return len(self.report_lines(report))
def squeezed_lines(self, report):
"""Return a list of the lines in report, with the spaces squeezed."""
lines = self.report_lines(report)
return [re.sub(r"\s+", " ", l.strip()) for l in lines]
def last_line_squeezed(self, report):
"""Return the last line of `report` with the spaces squeezed down."""
return self.squeezed_lines(report)[-1]
def get_measured_filenames(self, coverage_data):
"""Get paths to measured files.
Returns a dict of {filename: absolute path to file}
for given CoverageData.
"""
return {os.path.basename(filename): filename
for filename in coverage_data.measured_files()}
class UsingModulesMixin(object):
"""A mixin for importing modules from tests/modules and tests/moremodules."""
def setUp(self):
super(UsingModulesMixin, self).setUp()
# Parent class saves and restores sys.path, we can just modify it.
sys.path.append(self.nice_file(TESTS_DIR, 'modules'))
sys.path.append(self.nice_file(TESTS_DIR, 'moremodules'))
def command_line(args):
"""Run `args` through the CoverageScript command line.
Returns the return code from CoverageScript.command_line.
"""
script = CoverageScript()
ret = script.command_line(shlex.split(args))
return ret
def xfail(condition, reason):
"""A decorator to mark as test as expected to fail."""
return pytest.mark.xfail(condition, reason=reason, strict=True)
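# A hedged illustration, not part of the coverage.py test suite, of how a test case built
# on CoverageTest typically uses check_coverage; the class name and the measured snippet
# are made up for the example (and assume make_file dedents the source text, as the
# helpers above rely on).
class ExampleCoverageUsage(CoverageTest):
    """Show the typical shape of a statement-coverage check."""

    def test_simple_if(self):
        self.check_coverage("""\
            a = 1
            if a == 2:
                a = 3
            b = 4
            """,
            lines=[1, 2, 3, 4],
            missing="3",
        )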
|
import unittest
import time
from common import gpu_test
class TestJAX(unittest.TestCase):
def tanh(self, x):
import jax.numpy as np
y = np.exp(-2.0 * x)
return (1.0 - y) / (1.0 + y)
@gpu_test
def test_JAX(self):
# importing inside the gpu-only test because these packages can't be
# imported on the CPU image since they are not present there.
from jax import grad, jit
grad_tanh = grad(self.tanh)
ag = grad_tanh(1.0)
self.assertEqual(0.4199743, ag)
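# Where 0.4199743 comes from: d/dx tanh(x) = 1 - tanh(x)**2, and tanh(1.0) ~= 0.7615942,
# so the gradient at x = 1.0 is ~= 1 - 0.7615942**2 ~= 0.4199743. A pure-math cross-check
# (no GPU or JAX needed), added here purely for illustration:
import math

def _reference_tanh_grad(x):
    """Analytic derivative of tanh, used only to sanity-check the expected constant."""
    return 1.0 - math.tanh(x) ** 2

assert abs(_reference_tanh_grad(1.0) - 0.4199743) < 1e-6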
|
from datetime import timedelta
import logging
from netdata import Netdata
from netdata.exceptions import NetdataError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_ICON,
CONF_NAME,
CONF_PORT,
CONF_RESOURCES,
PERCENTAGE,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
CONF_DATA_GROUP = "data_group"
CONF_ELEMENT = "element"
CONF_INVERT = "invert"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Netdata"
DEFAULT_PORT = 19999
DEFAULT_ICON = "mdi:desktop-classic"
RESOURCE_SCHEMA = vol.Any(
{
vol.Required(CONF_DATA_GROUP): cv.string,
vol.Required(CONF_ELEMENT): cv.string,
vol.Optional(CONF_ICON, default=DEFAULT_ICON): cv.icon,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_RESOURCES): vol.Schema({cv.string: RESOURCE_SCHEMA}),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Netdata sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
resources = config.get(CONF_RESOURCES)
session = async_get_clientsession(hass)
netdata = NetdataData(Netdata(host, hass.loop, session, port=port))
await netdata.async_update()
if netdata.api.metrics is None:
raise PlatformNotReady
dev = []
for entry, data in resources.items():
icon = data[CONF_ICON]
sensor = data[CONF_DATA_GROUP]
element = data[CONF_ELEMENT]
invert = data[CONF_INVERT]
sensor_name = entry
try:
resource_data = netdata.api.metrics[sensor]
unit = (
PERCENTAGE
if resource_data["units"] == "percentage"
else resource_data["units"]
)
except KeyError:
_LOGGER.error("Sensor is not available: %s", sensor)
continue
dev.append(
NetdataSensor(
netdata, name, sensor, sensor_name, element, icon, unit, invert
)
)
dev.append(NetdataAlarms(netdata, name, host, port))
async_add_entities(dev, True)
class NetdataSensor(Entity):
"""Implementation of a Netdata sensor."""
def __init__(self, netdata, name, sensor, sensor_name, element, icon, unit, invert):
"""Initialize the Netdata sensor."""
self.netdata = netdata
self._state = None
self._sensor = sensor
self._element = element
self._sensor_name = self._sensor if sensor_name is None else sensor_name
self._name = name
self._icon = icon
self._unit_of_measurement = unit
self._invert = invert
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} {self._sensor_name}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def state(self):
"""Return the state of the resources."""
return self._state
@property
def available(self):
"""Could the resource be accessed during the last update call."""
return self.netdata.available
async def async_update(self):
"""Get the latest data from Netdata REST API."""
await self.netdata.async_update()
resource_data = self.netdata.api.metrics.get(self._sensor)
self._state = round(resource_data["dimensions"][self._element]["value"], 2) * (
-1 if self._invert else 1
)
class NetdataAlarms(Entity):
"""Implementation of a Netdata alarm sensor."""
def __init__(self, netdata, name, host, port):
"""Initialize the Netdata alarm sensor."""
self.netdata = netdata
self._state = None
self._name = name
self._host = host
self._port = port
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} Alarms"
@property
def state(self):
"""Return the state of the resources."""
return self._state
@property
def icon(self):
"""Status symbol if type is symbol."""
if self._state == "ok":
return "mdi:check"
if self._state == "warning":
return "mdi:alert-outline"
if self._state == "critical":
return "mdi:alert"
return "mdi:crosshairs-question"
@property
def available(self):
"""Could the resource be accessed during the last update call."""
return self.netdata.available
async def async_update(self):
"""Get the latest alarms from Netdata REST API."""
await self.netdata.async_update()
alarms = self.netdata.api.alarms["alarms"]
self._state = None
number_of_alarms = len(alarms)
number_of_relevant_alarms = number_of_alarms
_LOGGER.debug("Host %s has %s alarms", self.name, number_of_alarms)
for alarm in alarms:
if alarms[alarm]["recipient"] == "silent":
number_of_relevant_alarms = number_of_relevant_alarms - 1
elif alarms[alarm]["status"] == "CLEAR":
number_of_relevant_alarms = number_of_relevant_alarms - 1
elif alarms[alarm]["status"] == "UNDEFINED":
number_of_relevant_alarms = number_of_relevant_alarms - 1
elif alarms[alarm]["status"] == "UNINITIALIZED":
number_of_relevant_alarms = number_of_relevant_alarms - 1
elif alarms[alarm]["status"] == "CRITICAL":
self._state = "critical"
return
self._state = "ok" if number_of_relevant_alarms == 0 else "warning"
class NetdataData:
"""The class for handling the data retrieval."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
self.available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from the Netdata REST API."""
try:
await self.api.get_allmetrics()
await self.api.get_alarms()
self.available = True
except NetdataError:
_LOGGER.error("Unable to retrieve data from Netdata")
self.available = False
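# A hedged configuration sketch matching PLATFORM_SCHEMA above; "system.cpu"/"user" and
# "system.load"/"load15" are typical Netdata chart/dimension names used for illustration,
# and any data_group/element pair exposed by your Netdata instance works.
#
#     sensor:
#       - platform: netdata
#         host: localhost
#         port: 19999
#         resources:
#           cpu_user:
#             data_group: system.cpu
#             element: user
#           load_15min:
#             data_group: system.load
#             element: load15
#             invert: false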
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from celerymon import CelerymonCollector
###############################################################################
class TestCelerymonCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('CelerymonCollector', {
})
self.collector = CelerymonCollector(config, None)
def test_import(self):
self.assertTrue(CelerymonCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
from django.db import transaction
class atomic_if_using_transaction:
"""Context manager wraps `atomic` if `using_transactions`.
Replaces code::
if using_transactions:
with transaction.atomic():
return something()
return something()
"""
def __init__(self, using_transactions):
self.using_transactions = using_transactions
if using_transactions:
self.context_manager = transaction.atomic()
def __enter__(self):
if self.using_transactions:
self.context_manager.__enter__()
def __exit__(self, *args):
if self.using_transactions:
self.context_manager.__exit__(*args)
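# A short usage sketch mirroring the docstring above; `rows`, `row_saver` and
# `using_transactions` are placeholders for whatever iterable, callable and flag the
# caller already has.
def example_save_rows(rows, row_saver, using_transactions):
    with atomic_if_using_transaction(using_transactions):
        for row in rows:
            row_saver(row)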
|
import diamond.collector
import os
import subprocess
from diamond.collector import str_to_bool
class UPSCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(UPSCollector, self).get_default_config_help()
config_help.update({
'ups_name': 'The name of the ups to collect data for',
'bin': 'The path to the upsc binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns default collector settings.
"""
config = super(UPSCollector, self).get_default_config()
config.update({
'path': 'ups',
'ups_name': 'cyberpower',
'bin': '/bin/upsc',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
if not os.access(self.config['bin'], os.X_OK):
self.log.error("%s is not executable", self.config['bin'])
return False
command = [self.config['bin'], self.config['ups_name']]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
for ln in p.strip().splitlines():
datapoint = ln.split(": ")
try:
val = float(datapoint[1])
except (IndexError, ValueError):
continue
if len(datapoint[0].split(".")) == 2:
# If the metric name is the same as the subfolder
# double it so it's visible.
name = ".".join([datapoint[0], datapoint[0].split(".")[1]])
else:
name = datapoint[0]
self.publish(name, val)
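# For reference, a hedged sketch of how collect() maps typical `upsc` output to metric
# names; the sample lines are common NUT variables, not captured from a real device.
#
#     $ upsc cyberpower
#     battery.charge: 100
#     battery.runtime: 960
#     input.voltage: 229.0
#     ups.load: 23
#     ups.status: OL
#
# Keys with exactly two dot-separated parts get their last part doubled so the metric
# stays visible under the per-device path, e.g.:
#     "battery.charge: 100" -> publish("battery.charge.charge", 100.0)
#     "ups.load: 23"        -> publish("ups.load.load", 23.0)
# Non-numeric values such as "ups.status: OL" fail the float() conversion and are skipped.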
|
import logging
import threading
from pushbullet import InvalidKeyError, Listener, PushBullet
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"application_name": ["Application name"],
"body": ["Body"],
"notification_id": ["Notification ID"],
"notification_tag": ["Notification tag"],
"package_name": ["Package name"],
"receiver_email": ["Receiver email"],
"sender_email": ["Sender email"],
"source_device_iden": ["Sender device ID"],
"title": ["Title"],
"type": ["Type"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["title", "body"]): vol.All(
cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pushbullet Sensor platform."""
try:
pushbullet = PushBullet(config.get(CONF_API_KEY))
except InvalidKeyError:
_LOGGER.error("Wrong API key for Pushbullet supplied")
return False
pbprovider = PushBulletNotificationProvider(pushbullet)
devices = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
devices.append(PushBulletNotificationSensor(pbprovider, sensor_type))
add_entities(devices)
class PushBulletNotificationSensor(Entity):
"""Representation of a Pushbullet Sensor."""
def __init__(self, pb, element):
"""Initialize the Pushbullet sensor."""
self.pushbullet = pb
self._element = element
self._state = None
self._state_attributes = None
def update(self):
"""Fetch the latest data from the sensor.
This will fetch the 'sensor reading' into self._state but also all
attributes into self._state_attributes.
"""
try:
self._state = self.pushbullet.data[self._element]
self._state_attributes = self.pushbullet.data
except (KeyError, TypeError):
pass
@property
def name(self):
"""Return the name of the sensor."""
return f"Pushbullet {self._element}"
@property
def state(self):
"""Return the current state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return all known attributes of the sensor."""
return self._state_attributes
class PushBulletNotificationProvider:
"""Provider for an account, leading to one or more sensors."""
def __init__(self, pb):
"""Start to retrieve pushes from the given Pushbullet instance."""
self.pushbullet = pb
self._data = None
self.listener = None
self.thread = threading.Thread(target=self.retrieve_pushes)
self.thread.daemon = True
self.thread.start()
def on_push(self, data):
"""Update the current data.
Currently only monitors pushes but might be extended to monitor
different kinds of Pushbullet events.
"""
if data["type"] == "push":
self._data = data["push"]
@property
def data(self):
"""Return the current data stored in the provider."""
return self._data
def retrieve_pushes(self):
"""Retrieve_pushes.
Spawn a new Listener and links it to self.on_push.
"""
self.listener = Listener(account=self.pushbullet, on_push=self.on_push)
_LOGGER.debug("Getting pushes")
try:
self.listener.run_forever()
finally:
self.listener.close()
|
import diamond.collector
import os
import re
import subprocess
from diamond.collector import str_to_bool
class PassengerCollector(diamond.collector.Collector):
"""
Collect Memory and CPU Utilization for Passenger
"""
def get_default_config_help(self):
"""
Return help text
"""
config_help = super(PassengerCollector, self).get_default_config_help()
config_help.update({
"bin": "The path to the binary",
"use_sudo": "Use sudo?",
"sudo_cmd": "Path to sudo",
"passenger_status_bin":
"The path to the binary passenger-status",
"passenger_memory_stats_bin":
"The path to the binary passenger-memory-stats",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PassengerCollector, self).get_default_config()
config.update({
"path": "passenger_stats",
"bin": "/usr/lib/ruby-flo/bin/passenger-memory-stats",
"use_sudo": False,
"sudo_cmd": "/usr/bin/sudo",
"passenger_status_bin": "/usr/bin/passenger-status",
"passenger_memory_stats_bin": "/usr/bin/passenger-memory-stats",
})
return config
def get_passenger_memory_stats(self):
"""
Execute passenger-memory-stats, parse its output, return dictionary with
stats.
"""
command = [self.config["passenger_memory_stats_bin"]]
if str_to_bool(self.config["use_sudo"]):
command.insert(0, self.config["sudo_cmd"])
try:
proc1 = subprocess.Popen(command, stdout=subprocess.PIPE)
(std_out, std_err) = proc1.communicate()
except OSError:
return {}
if std_out is None:
return {}
dict_stats = {
"apache_procs": [],
"nginx_procs": [],
"passenger_procs": [],
"apache_mem_total": 0.0,
"nginx_mem_total": 0.0,
"passenger_mem_total": 0.0,
}
#
        re_colour = re.compile(r"\x1B\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]")
        re_digit = re.compile(r"^\d")
#
apache_flag = 0
nginx_flag = 0
passenger_flag = 0
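        # passenger-memory-stats prints a per-process table for Apache, Nginx
        # and Passenger; these flags track which table the parser is currently
        # in so PID and memory columns get attributed to the right group.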
for raw_line in std_out.splitlines():
line = re_colour.sub("", raw_line)
if "Apache processes" in line:
apache_flag = 1
elif "Nginx processes" in line:
nginx_flag = 1
elif "Passenger processes" in line:
passenger_flag = 1
elif re_digit.match(line):
# If line starts with digit, then store PID and memory consumed
line_splitted = line.split()
if apache_flag == 1:
dict_stats["apache_procs"].append(line_splitted[0])
dict_stats["apache_mem_total"] += float(line_splitted[4])
elif nginx_flag == 1:
dict_stats["nginx_procs"].append(line_splitted[0])
dict_stats["nginx_mem_total"] += float(line_splitted[4])
elif passenger_flag == 1:
dict_stats["passenger_procs"].append(line_splitted[0])
dict_stats["passenger_mem_total"] += float(line_splitted[3])
elif "Processes:" in line:
passenger_flag = 0
apache_flag = 0
nginx_flag = 0
return dict_stats
def get_passenger_cpu_usage(self, dict_stats):
"""
        Execute top and return the overall CPU usage of the processes
        collected in dict_stats.
"""
try:
proc1 = subprocess.Popen(
["top", "-b", "-n", "2"],
stdout=subprocess.PIPE)
(std_out, std_err) = proc1.communicate()
except OSError:
return (-1)
        re_lspaces = re.compile(r"^\s*")
        re_digit = re.compile(r"^\d")
overall_cpu = 0
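        # In `top -b` output each process line starts with the PID; column 9
        # (index 8 after splitting) holds the %CPU value that is summed here.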
for raw_line in std_out.splitlines():
line = re_lspaces.sub("", raw_line)
if not re_digit.match(line):
continue
line_splitted = line.split()
if line_splitted[0] in dict_stats["apache_procs"]:
overall_cpu += float(line_splitted[8])
elif line_splitted[0] in dict_stats["nginx_procs"]:
overall_cpu += float(line_splitted[8])
elif line_splitted[0] in dict_stats["passenger_procs"]:
overall_cpu += float(line_splitted[8])
return overall_cpu
def get_passenger_queue_stats(self):
"""
        Execute passenger-status, parse its output, and return the requests
        queued at the top level and inside the application group.
"""
queue_stats = {
"top_level_queue_size": 0.0,
"passenger_queue_size": 0.0,
}
command = [self.config["passenger_status_bin"]]
if str_to_bool(self.config["use_sudo"]):
command.insert(0, self.config["sudo_cmd"])
try:
proc1 = subprocess.Popen(command, stdout=subprocess.PIPE)
(std_out, std_err) = proc1.communicate()
except OSError:
return {}
if std_out is None:
return {}
        re_colour = re.compile(r"\x1B\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]")
re_requests = re.compile(r"Requests")
re_topqueue = re.compile(r"^top-level")
gen_info_flag = 0
app_groups_flag = 0
for raw_line in std_out.splitlines():
line = re_colour.sub("", raw_line)
if "General information" in line:
gen_info_flag = 1
if "Application groups" in line:
app_groups_flag = 1
elif re_requests.match(line) and re_topqueue.search(line):
# If line starts with Requests and line has top-level queue then
# store queue size
line_splitted = line.split()
if gen_info_flag == 1 and line_splitted:
queue_stats["top_level_queue_size"] = float(
line_splitted[5])
elif re_requests.search(line) and not re_topqueue.search(line):
# If line has Requests and nothing else special
line_splitted = line.split()
if app_groups_flag == 1 and line_splitted:
queue_stats["passenger_queue_size"] = float(
line_splitted[3])
return queue_stats
def collect(self):
"""
        Collect Passenger stats
"""
if not os.access(self.config["bin"], os.X_OK):
self.log.error("Path %s does not exist or is not executable",
self.config["bin"])
return {}
dict_stats = self.get_passenger_memory_stats()
if len(dict_stats.keys()) == 0:
return {}
queue_stats = self.get_passenger_queue_stats()
if len(queue_stats.keys()) == 0:
return {}
overall_cpu = self.get_passenger_cpu_usage(dict_stats)
if overall_cpu >= 0:
self.publish("phusion_passenger_cpu", overall_cpu)
self.publish("total_passenger_procs", len(
dict_stats["passenger_procs"]))
self.publish("total_nginx_procs", len(dict_stats["nginx_procs"]))
self.publish("total_apache_procs", len(dict_stats["apache_procs"]))
self.publish("total_apache_memory", dict_stats["apache_mem_total"])
self.publish("total_nginx_memory", dict_stats["nginx_mem_total"])
self.publish("total_passenger_memory",
dict_stats["passenger_mem_total"])
self.publish("top_level_queue_size", queue_stats[
"top_level_queue_size"])
self.publish("passenger_queue_size", queue_stats[
"passenger_queue_size"])
|
from datetime import timedelta
import logging
import pyiss
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_ISS_NEXT_RISE = "next_rise"
ATTR_ISS_NUMBER_PEOPLE_SPACE = "number_of_people_in_space"
DEFAULT_NAME = "ISS"
DEFAULT_DEVICE_CLASS = "visible"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ISS sensor."""
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
try:
iss_data = IssData(hass.config.latitude, hass.config.longitude)
iss_data.update()
except requests.exceptions.HTTPError as error:
_LOGGER.error(error)
return False
name = config.get(CONF_NAME)
show_on_map = config.get(CONF_SHOW_ON_MAP)
add_entities([IssBinarySensor(iss_data, name, show_on_map)], True)
class IssBinarySensor(BinarySensorEntity):
"""Implementation of the ISS binary sensor."""
def __init__(self, iss_data, name, show):
"""Initialize the sensor."""
self.iss_data = iss_data
self._state = None
self._name = name
self._show_on_map = show
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.iss_data.is_above if self.iss_data else False
@property
def device_class(self):
"""Return the class of this sensor."""
return DEFAULT_DEVICE_CLASS
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.iss_data:
attrs = {
ATTR_ISS_NUMBER_PEOPLE_SPACE: self.iss_data.number_of_people_in_space,
ATTR_ISS_NEXT_RISE: self.iss_data.next_rise,
}
if self._show_on_map:
attrs[ATTR_LONGITUDE] = self.iss_data.position.get("longitude")
attrs[ATTR_LATITUDE] = self.iss_data.position.get("latitude")
else:
attrs["long"] = self.iss_data.position.get("longitude")
attrs["lat"] = self.iss_data.position.get("latitude")
return attrs
def update(self):
"""Get the latest data from ISS API and updates the states."""
self.iss_data.update()
class IssData:
"""Get data from the ISS API."""
def __init__(self, latitude, longitude):
"""Initialize the data object."""
self.is_above = None
self.next_rise = None
self.number_of_people_in_space = None
self.position = None
self.latitude = latitude
self.longitude = longitude
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the ISS API."""
try:
iss = pyiss.ISS()
self.is_above = iss.is_ISS_above(self.latitude, self.longitude)
self.next_rise = iss.next_rise(self.latitude, self.longitude)
self.number_of_people_in_space = iss.number_of_people_in_space()
self.position = iss.current_location()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
_LOGGER.error("Unable to retrieve data")
return False
|
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_benchmarks import mnist_benchmark
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'tensor2tensor'
BENCHMARK_CONFIG = """
tensor2tensor:
description: Runs a benchmark using the Tensor2Tensor framework.
vm_groups:
default:
os_type: ubuntu1604
vm_spec:
GCP:
machine_type: n1-standard-8
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
flags.DEFINE_string('t2t_model', None, 'Tensor2Tensor model to run')
flags.DEFINE_string('t2t_problem', None, 'Tensor2Tensor problem to run')
flags.DEFINE_string('t2t_hparams_set', None,
'Tensor2Tensor hyperparameters set')
flags.DEFINE_integer('t2t_train_steps', 1000, 'Number of train steps')
flags.DEFINE_integer('t2t_eval_steps', 1, 'Number of eval steps')
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.model = FLAGS.t2t_model
benchmark_spec.problem = FLAGS.t2t_problem
benchmark_spec.train_steps = FLAGS.t2t_train_steps
benchmark_spec.eval_steps = FLAGS.t2t_eval_steps
benchmark_spec.data_dir = FLAGS.t2t_data_dir
benchmark_spec.hparams_set = FLAGS.t2t_hparams_set
def Prepare(benchmark_spec):
"""Install and set up the Tensor2Tensor benchmark on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
mnist_benchmark.Prepare(benchmark_spec)
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
def _CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
metadata.update({
'model': benchmark_spec.model,
'problem': benchmark_spec.problem,
'hparams_set': benchmark_spec.hparams_set,
'data_dir': benchmark_spec.data_dir,
'model_dir': benchmark_spec.model_dir,
'train_steps': benchmark_spec.train_steps,
'eval_steps': benchmark_spec.eval_steps})
return metadata
def _MakeSamplesFromOutput(metadata, output):
"""Create a sample continaing the measured tensor2tensor throughput.
Args:
metadata: dict contains all the metadata that reports.
output: tensor2tensor output
Returns:
a Sample containing the tensor2tensor throughput
"""
samples = []
samples.extend(
mnist_benchmark.ExtractThroughput(r'global_step/sec: (\S+)', output,
metadata, 'Global Steps Per Second',
'global_steps/sec'))
# TODO(user) Workaround until t2t can use TPUEstimator on a GPU
try:
samples.extend(
mnist_benchmark.ExtractThroughput(r'examples/sec: (\S+)', output,
metadata, 'Examples Per Second',
'examples/sec'))
except regex_util.NoMatchError:
logging.info('examples/sec sample not collected')
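  # Each eval run logs a line of the form
  #   "Saving dict for global step N: ..., accuracy = X, ..., loss = Y, ..."
  # from which per-step eval metrics are extracted below.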
pattern = (r'Saving dict for global step \d+: .*global_step = (\d+), '
r'.*loss = (\d+\.\d+), '
r'.*accuracy = (\d+\.\d+), '
r'.*accuracy_per_sequence = (\d+\.\d+), '
r'.*accuracy_top5 = (\d+\.\d+), '
r'.*neg_log_perplexity = (-?\d+\.\d+)')
for (step, loss, accuracy, accuracy_per_sequence, accuracy_top5,
neg_log_perplexity) in (
regex_util.ExtractAllMatches(pattern, output)):
metadata_copy = metadata.copy()
metadata_copy['step'] = int(step)
samples.append(sample.Sample('Eval Loss', float(loss), '', metadata_copy))
samples.append(
sample.Sample('Accuracy',
float(accuracy) * 100, '%', metadata_copy))
samples.append(
sample.Sample('Accuracy Per Sequence',
float(accuracy_per_sequence) * 100, '%', metadata_copy))
samples.append(
sample.Sample('Negative Log Perplexity', float(neg_log_perplexity),
'perplexity', metadata_copy))
samples.append(
sample.Sample('Top 5 Accuracy',
float(accuracy_top5) * 100, '%', metadata_copy))
return samples
def Run(benchmark_spec):
"""Run the Tensor2Tensor model on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
t2t_benchmark_cmd = ('t2t-trainer '
'--model={model} '
'--problem={problem} '
'--hparams_set={hparams_set} '
'--train_steps={train_steps} --eval_steps={eval_steps} '
'--data_dir={data_dir} --output_dir={model_dir}'.format(
model=benchmark_spec.model,
problem=benchmark_spec.problem,
train_steps=benchmark_spec.train_steps,
eval_steps=benchmark_spec.eval_steps,
data_dir=benchmark_spec.data_dir,
model_dir=benchmark_spec.model_dir,
hparams_set=benchmark_spec.hparams_set,
))
if benchmark_spec.tpus:
t2t_benchmark_cmd += (
' --use_tpu=True '
'--master={master}'.format(
master=benchmark_spec.tpu_groups['train'].GetMasterGrpcAddress()))
stdout, stderr = vm.RobustRemoteCommand(t2t_benchmark_cmd, should_log=True)
# TODO(user) Add timestamp to tensor2tensor output to enable samples like
# resnet_benchmark
return _MakeSamplesFromOutput(
_CreateMetadataDict(benchmark_spec), stdout + stderr)
def Cleanup(benchmark_spec):
"""Cleanup the Tensor2Tensor workload on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
mnist_benchmark.Cleanup(benchmark_spec)
|
from homeassistant.components import mysensors
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASSES,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.const import STATE_ON
SENSORS = {
"S_DOOR": "door",
"S_MOTION": DEVICE_CLASS_MOTION,
"S_SMOKE": "smoke",
"S_SPRINKLER": DEVICE_CLASS_SAFETY,
"S_WATER_LEAK": DEVICE_CLASS_SAFETY,
"S_SOUND": DEVICE_CLASS_SOUND,
"S_VIBRATION": DEVICE_CLASS_VIBRATION,
"S_MOISTURE": DEVICE_CLASS_MOISTURE,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors platform for binary sensors."""
mysensors.setup_mysensors_platform(
hass,
DOMAIN,
discovery_info,
MySensorsBinarySensor,
async_add_entities=async_add_entities,
)
class MySensorsBinarySensor(mysensors.device.MySensorsEntity, BinarySensorEntity):
"""Representation of a MySensors Binary Sensor child node."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._values.get(self.value_type) == STATE_ON
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
pres = self.gateway.const.Presentation
device_class = SENSORS.get(pres(self.child_type).name)
if device_class in DEVICE_CLASSES:
return device_class
return None
|
import pytest
from qutebrowser.utils import usertypes
@pytest.fixture
def question():
return usertypes.Question()
def test_attributes(question):
"""Test setting attributes."""
question.default = True
question.text = "foo"
def test_mode(question):
"""Test setting mode to valid members."""
question.mode = usertypes.PromptMode.yesno
assert question.mode == usertypes.PromptMode.yesno
@pytest.mark.parametrize('mode, answer, signal_names', [
(usertypes.PromptMode.text, 'foo', ['answered', 'completed']),
(usertypes.PromptMode.yesno, True, ['answered', 'answered_yes',
'completed']),
(usertypes.PromptMode.yesno, False, ['answered', 'answered_no',
'completed']),
])
def test_done(mode, answer, signal_names, question, qtbot):
"""Test the 'done' method and completed/answered signals."""
question.mode = mode
question.answer = answer
signals = [getattr(question, name) for name in signal_names]
blockers = [qtbot.waitSignal(signal) for signal in signals]
question.done()
for blocker in blockers:
blocker.wait()
assert not question.is_aborted
def test_cancel(question, qtbot):
"""Test Question.cancel()."""
with qtbot.waitSignal(question.cancelled), qtbot.waitSignal(question.completed):
question.cancel()
assert not question.is_aborted
def test_abort(question, qtbot):
"""Test Question.abort()."""
with qtbot.waitSignal(question.aborted), qtbot.waitSignal(question.completed):
question.abort()
assert question.is_aborted
def test_abort_twice(question, qtbot):
"""Abort a question twice."""
with qtbot.wait_signal(question.aborted):
question.abort()
assert question.is_aborted
with qtbot.assert_not_emitted(question.aborted):
question.abort()
|
import os
from perfkitbenchmarker import object_storage_service
AZURE_CREDENTIAL_DIRECTORY = os.path.join('~', '.azure')
AZURE_CREDENTIAL_TOKENS_FILE = os.path.join(AZURE_CREDENTIAL_DIRECTORY,
'accessTokens.json')
AZURE_CREDENTIAL_PROFILE_FILE = os.path.join(AZURE_CREDENTIAL_DIRECTORY,
'azureProfile.json')
def Install(vm):
"""Copies Azure credentials to the VM."""
vm.RemoteCommand('mkdir -p {0}'.format(AZURE_CREDENTIAL_DIRECTORY))
vm.PushFile(
object_storage_service.FindCredentialFile(AZURE_CREDENTIAL_TOKENS_FILE),
AZURE_CREDENTIAL_TOKENS_FILE)
vm.PushFile(
object_storage_service.FindCredentialFile(AZURE_CREDENTIAL_PROFILE_FILE),
AZURE_CREDENTIAL_PROFILE_FILE)
|
import asyncio
import logging
from typing import Any, Awaitable, Dict, List, Optional
from homeassistant.core import CALLBACK_TYPE, Event, callback
from homeassistant.helpers import entity
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
from .core.const import (
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
DATA_ZHA,
DATA_ZHA_BRIDGE_ID,
DOMAIN,
SIGNAL_GROUP_ENTITY_REMOVED,
SIGNAL_GROUP_MEMBERSHIP_CHANGE,
SIGNAL_REMOVE,
)
from .core.helpers import LogMixin
from .core.typing import CALLABLE_T, ChannelType, ZhaDeviceType
_LOGGER = logging.getLogger(__name__)
ENTITY_SUFFIX = "entity_suffix"
class BaseZhaEntity(LogMixin, entity.Entity):
"""A base class for ZHA entities."""
def __init__(self, unique_id: str, zha_device: ZhaDeviceType, **kwargs):
"""Init ZHA entity."""
self._name: str = ""
self._force_update: bool = False
self._should_poll: bool = False
self._unique_id: str = unique_id
self._state: Any = None
self._device_state_attributes: Dict[str, Any] = {}
self._zha_device: ZhaDeviceType = zha_device
self._unsubs: List[CALLABLE_T] = []
self.remove_future: Awaitable[None] = None
@property
def name(self) -> str:
"""Return Entity's default name."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def zha_device(self) -> ZhaDeviceType:
"""Return the zha device this entity is attached to."""
return self._zha_device
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return device specific state attributes."""
return self._device_state_attributes
@property
def force_update(self) -> bool:
"""Force update this entity."""
return self._force_update
@property
def should_poll(self) -> bool:
"""Poll state from device."""
return self._should_poll
@property
def device_info(self) -> Dict[str, Any]:
"""Return a device description for device registry."""
zha_device_info = self._zha_device.device_info
ieee = zha_device_info["ieee"]
return {
"connections": {(CONNECTION_ZIGBEE, ieee)},
"identifiers": {(DOMAIN, ieee)},
ATTR_MANUFACTURER: zha_device_info[ATTR_MANUFACTURER],
ATTR_MODEL: zha_device_info[ATTR_MODEL],
ATTR_NAME: zha_device_info[ATTR_NAME],
"via_device": (DOMAIN, self.hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]),
}
@callback
def async_state_changed(self) -> None:
"""Entity state changed."""
self.async_write_ha_state()
@callback
def async_update_state_attribute(self, key: str, value: Any) -> None:
"""Update a single device state attribute."""
self._device_state_attributes.update({key: value})
self.async_write_ha_state()
@callback
def async_set_state(self, attr_id: int, attr_name: str, value: Any) -> None:
"""Set the entity state."""
async def async_will_remove_from_hass(self) -> None:
"""Disconnect entity object when removed."""
for unsub in self._unsubs[:]:
unsub()
self._unsubs.remove(unsub)
@callback
def async_accept_signal(
self, channel: ChannelType, signal: str, func: CALLABLE_T, signal_override=False
):
"""Accept a signal from a channel."""
unsub = None
if signal_override:
unsub = async_dispatcher_connect(self.hass, signal, func)
else:
unsub = async_dispatcher_connect(
self.hass, f"{channel.unique_id}_{signal}", func
)
self._unsubs.append(unsub)
def log(self, level: int, msg: str, *args):
"""Log a message."""
msg = f"%s: {msg}"
args = (self.entity_id,) + args
_LOGGER.log(level, msg, *args)
class ZhaEntity(BaseZhaEntity, RestoreEntity):
"""A base class for non group ZHA entities."""
def __init__(
self,
unique_id: str,
zha_device: ZhaDeviceType,
channels: List[ChannelType],
**kwargs,
):
"""Init ZHA entity."""
super().__init__(unique_id, zha_device, **kwargs)
ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
ch_names = [ch.cluster.ep_attribute for ch in channels]
ch_names = ", ".join(sorted(ch_names))
self._name: str = f"{zha_device.name} {ieeetail} {ch_names}"
self.cluster_channels: Dict[str, ChannelType] = {}
for channel in channels:
self.cluster_channels[channel.name] = channel
@property
def available(self) -> bool:
"""Return entity availability."""
return self._zha_device.available
async def async_added_to_hass(self) -> None:
"""Run when about to be added to hass."""
self.remove_future = asyncio.Future()
self.async_accept_signal(
None,
f"{SIGNAL_REMOVE}_{self.zha_device.ieee}",
self.async_remove,
signal_override=True,
)
if not self.zha_device.is_mains_powered:
# mains powered devices will get real time state
last_state = await self.async_get_last_state()
if last_state:
self.async_restore_last_state(last_state)
self.async_accept_signal(
None,
f"{self.zha_device.available_signal}_entity",
self.async_state_changed,
signal_override=True,
)
self._zha_device.gateway.register_entity_reference(
self._zha_device.ieee,
self.entity_id,
self._zha_device,
self.cluster_channels,
self.device_info,
self.remove_future,
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect entity object when removed."""
await super().async_will_remove_from_hass()
self.zha_device.gateway.remove_entity_reference(self)
self.remove_future.set_result(True)
@callback
def async_restore_last_state(self, last_state) -> None:
"""Restore previous state."""
async def async_update(self) -> None:
"""Retrieve latest state."""
tasks = [
channel.async_update()
for channel in self.cluster_channels.values()
if hasattr(channel, "async_update")
]
if tasks:
await asyncio.gather(*tasks)
class ZhaGroupEntity(BaseZhaEntity):
"""A base class for ZHA group entities."""
def __init__(
self, entity_ids: List[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> None:
"""Initialize a light group."""
super().__init__(unique_id, zha_device, **kwargs)
self._available = False
self._group = zha_device.gateway.groups.get(group_id)
self._name = f"{self._group.name}_zha_group_0x{group_id:04x}"
self._group_id: int = group_id
self._entity_ids: List[str] = entity_ids
self._async_unsub_state_changed: Optional[CALLBACK_TYPE] = None
self._handled_group_membership = False
@property
def available(self) -> bool:
"""Return entity availability."""
return self._available
async def _handle_group_membership_changed(self):
"""Handle group membership changed."""
# Make sure we don't call remove twice as members are removed
if self._handled_group_membership:
return
self._handled_group_membership = True
await self.async_remove()
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
self.async_accept_signal(
None,
f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{self._group_id:04x}",
self._handle_group_membership_changed,
signal_override=True,
)
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self._entity_ids, self.async_state_changed_listener
)
def send_removed_signal():
async_dispatcher_send(
self.hass, SIGNAL_GROUP_ENTITY_REMOVED, self._group_id
)
self.async_on_remove(send_removed_signal)
@callback
def async_state_changed_listener(self, event: Event):
"""Handle child updates."""
self.async_schedule_update_ha_state(True)
async def async_will_remove_from_hass(self) -> None:
"""Handle removal from Home Assistant."""
await super().async_will_remove_from_hass()
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self) -> None:
"""Update the state of the group entity."""
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import getopt
import os
import re
import sys
import types
import warnings
from absl.flags import _argument_parser
from absl.flags import _defines
from absl.flags import _exceptions
from absl.flags import _flag
from absl.flags import _flagvalues
from absl.flags import _helpers
from absl.flags import _validators
import six
# Initialize the FLAGS_MODULE as early as possible.
# It's only used by adopt_module_key_flags to take SPECIAL_FLAGS into account.
_helpers.FLAGS_MODULE = sys.modules[__name__]
# Add current module to disclaimed module ids.
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
# DEFINE functions. They are explained in more details in the module doc string.
# pylint: disable=invalid-name
DEFINE = _defines.DEFINE
DEFINE_flag = _defines.DEFINE_flag
DEFINE_string = _defines.DEFINE_string
DEFINE_boolean = _defines.DEFINE_boolean
DEFINE_bool = DEFINE_boolean # Match C++ API.
DEFINE_float = _defines.DEFINE_float
DEFINE_integer = _defines.DEFINE_integer
DEFINE_enum = _defines.DEFINE_enum
DEFINE_enum_class = _defines.DEFINE_enum_class
DEFINE_list = _defines.DEFINE_list
DEFINE_spaceseplist = _defines.DEFINE_spaceseplist
DEFINE_multi = _defines.DEFINE_multi
DEFINE_multi_string = _defines.DEFINE_multi_string
DEFINE_multi_integer = _defines.DEFINE_multi_integer
DEFINE_multi_float = _defines.DEFINE_multi_float
DEFINE_multi_enum = _defines.DEFINE_multi_enum
DEFINE_multi_enum_class = _defines.DEFINE_multi_enum_class
DEFINE_alias = _defines.DEFINE_alias
# pylint: enable=invalid-name
# Flag validators.
register_validator = _validators.register_validator
validator = _validators.validator
register_multi_flags_validator = _validators.register_multi_flags_validator
multi_flags_validator = _validators.multi_flags_validator
mark_flag_as_required = _validators.mark_flag_as_required
mark_flags_as_required = _validators.mark_flags_as_required
mark_flags_as_mutual_exclusive = _validators.mark_flags_as_mutual_exclusive
mark_bool_flags_as_mutual_exclusive = _validators.mark_bool_flags_as_mutual_exclusive
# Key flag related functions.
declare_key_flag = _defines.declare_key_flag
adopt_module_key_flags = _defines.adopt_module_key_flags
disclaim_key_flags = _defines.disclaim_key_flags
# Module exceptions.
# pylint: disable=invalid-name
Error = _exceptions.Error
CantOpenFlagFileError = _exceptions.CantOpenFlagFileError
DuplicateFlagError = _exceptions.DuplicateFlagError
IllegalFlagValueError = _exceptions.IllegalFlagValueError
UnrecognizedFlagError = _exceptions.UnrecognizedFlagError
UnparsedFlagAccessError = _exceptions.UnparsedFlagAccessError
ValidationError = _exceptions.ValidationError
FlagNameConflictsWithMethodError = _exceptions.FlagNameConflictsWithMethodError
# Public classes.
Flag = _flag.Flag
BooleanFlag = _flag.BooleanFlag
EnumFlag = _flag.EnumFlag
EnumClassFlag = _flag.EnumClassFlag
MultiFlag = _flag.MultiFlag
MultiEnumClassFlag = _flag.MultiEnumClassFlag
FlagHolder = _flagvalues.FlagHolder
FlagValues = _flagvalues.FlagValues
ArgumentParser = _argument_parser.ArgumentParser
BooleanParser = _argument_parser.BooleanParser
EnumParser = _argument_parser.EnumParser
EnumClassParser = _argument_parser.EnumClassParser
ArgumentSerializer = _argument_parser.ArgumentSerializer
FloatParser = _argument_parser.FloatParser
IntegerParser = _argument_parser.IntegerParser
BaseListParser = _argument_parser.BaseListParser
ListParser = _argument_parser.ListParser
ListSerializer = _argument_parser.ListSerializer
CsvListSerializer = _argument_parser.CsvListSerializer
WhitespaceSeparatedListParser = _argument_parser.WhitespaceSeparatedListParser
# pylint: enable=invalid-name
# Helper functions.
get_help_width = _helpers.get_help_width
text_wrap = _helpers.text_wrap
flag_dict_to_args = _helpers.flag_dict_to_args
doc_to_help = _helpers.doc_to_help
# Special flags.
_helpers.SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', '',
'Insert flag definitions from the given file into the command line.',
_helpers.SPECIAL_FLAGS) # pytype: disable=wrong-arg-types
DEFINE_string('undefok', '',
'comma-separated list of flag names that it is okay to specify '
'on the command line even if the program does not define a flag '
'with that name. IMPORTANT: flags in this list that have '
'arguments MUST use the --flag=value format.',
_helpers.SPECIAL_FLAGS) # pytype: disable=wrong-arg-types
# The global FlagValues instance.
FLAGS = _flagvalues.FLAGS
|
import json
import os.path
import requests
import socket
import sys
from base64 import b64decode
from bs4 import BeautifulSoup as bs
from httpobs.conf import (SCANNER_ALLOW_LOCALHOST,
SCANNER_PINNED_DOMAINS)
from requests.structures import CaseInsensitiveDict
HSTS_URL = ('https://chromium.googlesource.com/chromium'
'/src/net/+/master/http/transport_security_state_static.json?format=TEXT')
def parse_http_equiv_headers(html: str) -> CaseInsensitiveDict:
http_equiv_headers = CaseInsensitiveDict()
# Try to parse the HTML
try:
soup = bs(html, 'html.parser')
except:
return http_equiv_headers
# Find all the meta tags
metas = soup.find_all('meta')
for meta in metas:
if meta.has_attr('http-equiv') and meta.has_attr('content'):
# Add support for multiple CSP policies specified via http-equiv
# See issue: https://github.com/mozilla/http-observatory/issues/266
# Note that this is so far only done for CSP and not for other types
# of http-equiv
if (meta.get('http-equiv', '').lower().strip() == 'content-security-policy' and
'Content-Security-Policy' in http_equiv_headers):
http_equiv_headers['Content-Security-Policy'] += '; ' + meta.get('content')
else:
http_equiv_headers[meta.get('http-equiv')] = meta.get('content')
# Technically not HTTP Equiv, but I'm treating it that way
elif meta.get('name', '').lower().strip() == 'referrer' and meta.has_attr('content'):
http_equiv_headers['Referrer-Policy'] = meta.get('content')
return http_equiv_headers
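# Illustrative use of parse_http_equiv_headers (hypothetical HTML snippet):
#   html = '<meta http-equiv="Content-Security-Policy" content="default-src \'none\'">'
#   parse_http_equiv_headers(html)['Content-Security-Policy']
#   # -> "default-src 'none'"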
def retrieve_store_hsts_preload_list():
# Download the Google HSTS Preload List
try:
r = b64decode(requests.get(HSTS_URL).text).decode('utf-8').split('\n')
# Remove all the comments
r = ''.join([line.split('// ')[0] for line in r if line.strip() != '//'])
r = json.loads(r)
# Mapping of site -> whether it includes subdomains
hsts = {site['name']: {
'includeSubDomains': site.get('include_subdomains', False),
'includeSubDomainsForPinning':
site.get('include_subdomains', False) or site.get('include_subdomains_for_pinning', False),
'mode': site.get('mode'),
'pinned': True if 'pins' in site else False,
} for site in r['entries']}
# Add in the manually pinned domains
for pinned_domain in SCANNER_PINNED_DOMAINS:
hsts[pinned_domain] = {
'includeSubDomains': True,
'includeSubDomainsForPinning': True,
'mode': 'force-https',
'pinned': True
}
# Write json file to disk
__dirname = os.path.abspath(os.path.dirname(__file__))
__filename = os.path.join(__dirname, '..', 'conf', 'hsts-preload.json')
with open(__filename, 'w') as f:
json.dump(hsts, f, indent=2, sort_keys=True)
except:
print('Unable to download the Chromium HSTS preload list.', file=sys.stderr)
def sanitize_headers(headers: dict) -> dict:
"""
:param headers: raw headers object from a request's response
:return: that same header, after sanitization
"""
try:
if len(str(headers)) <= 16384:
return dict(headers)
else:
return None
except:
return None
def valid_hostname(hostname: str):
"""
:param hostname: The hostname requested in the scan
:return: Hostname if it's valid, None if it's an IP address, otherwise False
"""
# Block attempts to scan things like 'localhost' if not allowed
if ('.' not in hostname or 'localhost' in hostname) and not SCANNER_ALLOW_LOCALHOST:
return False
# First, let's try to see if it's an IPv4 address
try:
socket.inet_aton(hostname) # inet_aton() will throw an exception if hostname is not a valid IP address
return None # If we get this far, it's an IP address and therefore not a valid fqdn
except:
pass
# And IPv6
try:
socket.inet_pton(socket.AF_INET6, hostname) # same as inet_aton(), but for IPv6
return None
except:
pass
# Then, try to do a lookup on the hostname; this should return at least one entry and should be the first time
# that the validator is making a network connection -- the same that requests would make.
try:
hostname_ips = socket.getaddrinfo(hostname, 443)
        # This shouldn't trigger, since getaddrinfo should raise socket.gaierror if there are no A records.
        # Nevertheless, I want to be careful in case of edge cases. This does make it hard to test.
if len(hostname_ips) < 1:
return False
except:
return False
# If we've made it this far, then everything is good to go! Woohoo!
return hostname
|
import numpy as np
from tensornetwork.block_sparse.charge import BaseCharge, fuse_charges
import copy
from typing import List, Union
class Index:
"""
An index class to store indices of a symmetric tensor.
"""
def __init__(self, charges: Union[List[BaseCharge], BaseCharge],
flow: Union[List[bool], bool]) -> None:
"""
Initialize an `Index` object.
"""
if isinstance(charges, BaseCharge):
charges = [charges]
self._charges = charges
if np.isscalar(flow):
flow = [flow]
    if not all([isinstance(f, (np.bool_, bool)) for f in flow]):
raise TypeError("flows have to be boolean. Found flow = {}".format(flow))
self.flow = flow
def __len__(self) -> int:
return self.dim
def __repr__(self) -> str:
dense_shape = f"Dimension: {str(self.dim)} \n"
charge_str = str(self._charges).replace('\n,', ',\n')
charge_str = charge_str.replace('\n', '\n ')
charges = f"Charges: {charge_str} \n"
flow_info = f"Flows: {str(self.flow)} \n"
return f"Index:\n {dense_shape} {charges} {flow_info} "
@property
def dim(self) -> int:
return np.prod([i.dim for i in self._charges])
def __eq__(self, other) -> bool:
if len(other._charges) != len(self._charges):
return False
for n in range(len(self._charges)):
if not np.array_equal(self._charges[n].unique_charges,
other._charges[n].unique_charges):
return False
if not np.array_equal(self._charges[n].charge_labels,
other._charges[n].charge_labels):
return False
if not np.all(np.asarray(self.flow) == np.asarray(other.flow)):
return False
return True
def copy(self) -> "Index":
"""
Returns:
Index: A deep copy of `Index`. Note that all children of
`Index` are copied as well.
"""
index_copy = Index(
charges=[c.copy() for c in self._charges],
flow=copy.deepcopy(self.flow))
return index_copy
@property
def flat_charges(self) -> List:
"""
Returns:
      List: A list containing the elementary charges
        of `Index`.
"""
return self._charges
@property
def flat_flows(self) -> List:
"""
Returns:
      List: A list containing the flows of the elementary
        charges of `Index`.
"""
return list(self.flow)
def flip_flow(self) -> "Index":
"""
    Flip the flow of `Index`.
Returns:
Index
"""
return Index(
charges=[c.copy() for c in self._charges],
flow=list(np.logical_not(self.flow)))
def __mul__(self, index: "Index") -> "Index":
"""
Merge `index` and self into a single larger index.
The flow of the resulting index is set to 1.
Flows of `self` and `index` are multiplied into
    the charges upon fusing.
"""
return fuse_index_pair(self, index)
@property
def charges(self) -> BaseCharge:
"""
Return the fused charges of the index. Note that
flows are merged into the charges.
"""
return fuse_charges(self.flat_charges, self.flat_flows)
def fuse_index_pair(left_index: Index, right_index: Index) -> Index:
"""
Fuse two consecutive indices (legs) of a symmetric tensor.
Args:
left_index: A tensor Index.
right_index: A tensor Index.
  Returns:
    Index: The result of fusing `left_index` and `right_index`.
"""
return Index(
charges=left_index.flat_charges + right_index.flat_charges,
flow=left_index.flat_flows + right_index.flat_flows)
def fuse_indices(indices: List[Index]) -> Index:
"""
Fuse a list of indices (legs) of a symmetric tensor.
  Args:
    indices: A list of tensor Index objects.
Returns:
Index: The result of fusing `indices`.
"""
index = indices[0]
for n in range(1, len(indices)):
index = fuse_index_pair(index, indices[n])
return index
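# Minimal illustrative sketch (assumes U1Charge from
# tensornetwork.block_sparse.charge):
#   q = U1Charge(np.array([-1, 0, 1]))
#   left = Index(q, flow=False)
#   right = Index(q, flow=True)
#   fused = left * right   # same as fuse_index_pair(left, right); fused.dim == 9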
|
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
{}
if empty
else {
device_class: MockBinarySensor(
name=f"{device_class} sensor",
is_on=True,
unique_id=f"unique_{device_class}",
device_class=device_class,
)
for device_class in DEVICE_CLASSES
}
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(list(ENTITIES.values()))
class MockBinarySensor(MockEntity, BinarySensorEntity):
"""Mock Binary Sensor class."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._handle("is_on")
@property
def device_class(self):
"""Return the class of this sensor."""
return self._handle("device_class")
|
import re
from typing import List, Optional
from xbox.webapi.api.client import XboxLiveClient
from xbox.webapi.api.provider.catalog.models import Image
from xbox.webapi.api.provider.smartglass.models import (
PlaybackState,
PowerState,
SmartglassConsole,
SmartglassConsoleList,
VolumeDirection,
)
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_APP,
MEDIA_TYPE_GAME,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import STATE_OFF, STATE_ON, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import ConsoleData, XboxUpdateCoordinator
from .browse_media import build_item_response
from .const import DOMAIN
SUPPORT_XBOX = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_BROWSE_MEDIA
| SUPPORT_PLAY_MEDIA
)
XBOX_STATE_MAP = {
PlaybackState.Playing: STATE_PLAYING,
PlaybackState.Paused: STATE_PAUSED,
PowerState.On: STATE_ON,
PowerState.SystemUpdate: STATE_OFF,
PowerState.ConnectedStandby: STATE_OFF,
PowerState.Off: STATE_OFF,
PowerState.Unknown: None,
}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Xbox media_player from a config entry."""
client: XboxLiveClient = hass.data[DOMAIN][entry.entry_id]["client"]
consoles: SmartglassConsoleList = hass.data[DOMAIN][entry.entry_id]["consoles"]
coordinator: XboxUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
"coordinator"
]
async_add_entities(
[XboxMediaPlayer(client, console, coordinator) for console in consoles.result]
)
class XboxMediaPlayer(CoordinatorEntity, MediaPlayerEntity):
"""Representation of an Xbox Media Player."""
def __init__(
self,
client: XboxLiveClient,
console: SmartglassConsole,
coordinator: XboxUpdateCoordinator,
) -> None:
"""Initialize the Xbox Media Player."""
super().__init__(coordinator)
self.client: XboxLiveClient = client
self._console: SmartglassConsole = console
@property
def name(self):
"""Return the device name."""
return self._console.name
@property
def unique_id(self):
"""Console device ID."""
return self._console.id
@property
def data(self) -> ConsoleData:
"""Return coordinator data for this console."""
return self.coordinator.data.consoles[self._console.id]
@property
def state(self):
"""State of the player."""
status = self.data.status
if status.playback_state in XBOX_STATE_MAP:
return XBOX_STATE_MAP[status.playback_state]
return XBOX_STATE_MAP[status.power_state]
@property
def supported_features(self):
"""Flag media player features that are supported."""
active_support = SUPPORT_XBOX
if self.state not in [STATE_PLAYING, STATE_PAUSED]:
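            # Clear the track-skip bits when nothing is playing or paused;
            # all other feature flags stay set.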
active_support &= ~SUPPORT_NEXT_TRACK & ~SUPPORT_PREVIOUS_TRACK
return active_support
@property
def media_content_type(self):
"""Media content type."""
app_details = self.data.app_details
if app_details and app_details.product_family == "Games":
return MEDIA_TYPE_GAME
return MEDIA_TYPE_APP
@property
def media_title(self):
"""Title of current playing media."""
app_details = self.data.app_details
if not app_details:
return None
return (
app_details.localized_properties[0].product_title
or app_details.localized_properties[0].short_title
)
@property
def media_image_url(self):
"""Image url of current playing media."""
app_details = self.data.app_details
if not app_details:
return None
image = _find_media_image(app_details.localized_properties[0].images)
if not image:
return None
url = image.uri
if url[0] == "/":
url = f"http:{url}"
return url
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return True
async def async_turn_on(self):
"""Turn the media player on."""
await self.client.smartglass.wake_up(self._console.id)
async def async_turn_off(self):
"""Turn the media player off."""
await self.client.smartglass.turn_off(self._console.id)
async def async_mute_volume(self, mute):
"""Mute the volume."""
if mute:
await self.client.smartglass.mute(self._console.id)
else:
await self.client.smartglass.unmute(self._console.id)
async def async_volume_up(self):
"""Turn volume up for media player."""
await self.client.smartglass.volume(self._console.id, VolumeDirection.Up)
async def async_volume_down(self):
"""Turn volume down for media player."""
await self.client.smartglass.volume(self._console.id, VolumeDirection.Down)
async def async_media_play(self):
"""Send play command."""
await self.client.smartglass.play(self._console.id)
async def async_media_pause(self):
"""Send pause command."""
await self.client.smartglass.pause(self._console.id)
async def async_media_previous_track(self):
"""Send previous track command."""
await self.client.smartglass.previous(self._console.id)
async def async_media_next_track(self):
"""Send next track command."""
await self.client.smartglass.next(self._console.id)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
return await build_item_response(
self.client,
self._console.id,
self.data.status.is_tv_configured,
media_content_type,
media_content_id,
)
async def async_play_media(self, media_type, media_id, **kwargs):
"""Launch an app on the Xbox."""
if media_id == "Home":
await self.client.smartglass.go_home(self._console.id)
elif media_id == "TV":
await self.client.smartglass.show_tv_guide(self._console.id)
else:
await self.client.smartglass.launch_app(self._console.id, media_id)
@property
def device_info(self):
"""Return a device description for device registry."""
# Turns "XboxOneX" into "Xbox One X" for display
matches = re.finditer(
".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)",
self._console.console_type,
)
model = " ".join([m.group(0) for m in matches])
return {
"identifiers": {(DOMAIN, self._console.id)},
"name": self._console.name,
"manufacturer": "Microsoft",
"model": model,
}
def _find_media_image(images: List[Image]) -> Optional[Image]:
purpose_order = ["FeaturePromotionalSquareArt", "Tile", "Logo", "BoxArt"]
for purpose in purpose_order:
for image in images:
if (
image.image_purpose == purpose
and image.width == image.height
and image.width >= 300
):
return image
return None
|
from datetime import datetime, timezone
import humanize
from dateutil.tz import tzlocal
class Tweet:
"""A :class:`Tweet` represents a single tweet.
:param str text: text of the tweet in raw format
:param ~datetime.datetime created_at: (optional) when the tweet was created, defaults to :meth:`~datetime.datetime.now` when no value is given
:param Source source: (optional) the :class:`Source` the tweet is from
"""
def __init__(self, text, created_at=None, source=None):
if text:
self.text = text
else:
raise ValueError("empty text")
if created_at is None:
created_at = datetime.now(tzlocal())
try:
self.created_at = created_at.replace(microsecond=0)
except AttributeError:
raise TypeError("created_at is of invalid type")
self.source = source
@staticmethod
def _is_valid_operand(other):
return (hasattr(other, "text") and
hasattr(other, "created_at"))
def __lt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at < other.created_at
def __le__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at < other.created_at or (self.created_at == other.created_at and self.text == other.text)
def __gt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at > other.created_at
def __ge__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at > other.created_at or (self.created_at == other.created_at and self.text == other.text)
def __eq__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at == other.created_at and self.text == other.text
def __str__(self):
return "{created_at}\t{text}".format(created_at=self.created_at.isoformat(), text=self.text)
@property
def relative_datetime(self):
"""Return human-readable relative time string."""
now = datetime.now(timezone.utc)
tense = "from now" if self.created_at > now else "ago"
return "{0} {1}".format(humanize.naturaldelta(now - self.created_at), tense)
@property
def absolute_datetime(self):
"""Return human-readable absolute time string."""
return self.created_at.strftime("%a, %d %b %Y %H:%M:%S")
class Source:
"""A :class:`Source` represents a twtxt feed, remote as well as local.
:param str nick: nickname of twtxt user
:param str url: URL to remote twtxt file
:param str file: path to local twtxt file
"""
def __init__(self, nick, url=None, file=None):
self.nick = nick.lower()
self.url = url
self.file = file
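# Minimal illustrative usage (names are placeholders):
#   alice = Source("Alice", url="https://example.org/twtxt.txt")
#   tweet = Tweet("Hello twtxt!", source=alice)
#   str(tweet)  # "<ISO 8601 timestamp>\tHello twtxt!"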
|
import logging
import gammu # pylint: disable=import-error, no-member
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_DEVICE
from .const import DOMAIN # pylint:disable=unused-import
from .gateway import create_sms_gateway
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({vol.Required(CONF_DEVICE): str})
async def get_imei_from_config(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
device = data[CONF_DEVICE]
config = {"Device": device, "Connection": "at"}
gateway = await create_sms_gateway(config, hass)
if not gateway:
raise CannotConnect
try:
imei = await gateway.get_imei_async()
except gammu.GSMError as err: # pylint: disable=no-member
raise CannotConnect from err
finally:
await gateway.terminate_async()
# Return info that you want to store in the config entry.
return imei
class SMSFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for SMS integration."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
if user_input is not None:
try:
imei = await get_imei_from_config(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if not errors:
await self.async_set_unique_id(imei)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=imei, data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan.architectures import arch_ops
import gin
import numpy as np
import tensorflow as tf
class ArchOpsTpuTest(tf.test.TestCase):
def setUp(self):
# Construct input for batch norm tests:
# 4 images with resolution 2x1 and 3 channels.
x1 = np.asarray([[[5, 7, 2]], [[5, 8, 8]]], dtype=np.float32)
x2 = np.asarray([[[1, 2, 0]], [[4, 0, 4]]], dtype=np.float32)
x3 = np.asarray([[[6, 2, 6]], [[5, 0, 5]]], dtype=np.float32)
x4 = np.asarray([[[2, 4, 2]], [[6, 4, 1]]], dtype=np.float32)
self._inputs = np.stack([x1, x2, x3, x4])
self.assertAllEqual(self._inputs.shape, [4, 2, 1, 3])
# And the expected output for applying batch norm (without additional
# scaling/shifting).
self._expected_outputs = np.asarray(
[[[[0.4375205, 1.30336881, -0.58830315]],
[[0.4375205, 1.66291881, 1.76490951]]],
[[[-1.89592218, -0.49438119, -1.37270737]],
[[-0.14584017, -1.21348119, 0.19610107]]],
[[[1.02088118, -0.49438119, 0.98050523]],
[[0.4375205, -1.21348119, 0.58830321]]],
[[[-1.31256151, 0.22471881, -0.58830315]],
[[1.02088118, 0.22471881, -0.98050523]]]],
dtype=np.float32)
self.assertAllEqual(self._expected_outputs.shape, [4, 2, 1, 3])
def testRunsOnTpu(self):
"""Verify that the test cases runs on a TPU chip and has 2 cores."""
expected_device_names = [
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:1",
"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
]
with self.session() as sess:
devices = sess.list_devices()
tf.logging.info("devices:\n%s", "\n".join([str(d) for d in devices]))
self.assertAllEqual([d.name for d in devices], expected_device_names)
def testBatchNormOneCore(self):
def computation(x):
core_bn = tf.layers.batch_normalization(x, training=True)
contrib_bn = tf.contrib.layers.batch_norm(x, is_training=True)
custom_bn = arch_ops.batch_norm(x, is_training=True)
tf.logging.info("custom_bn tensor: %s", custom_bn)
return core_bn, contrib_bn, custom_bn
with tf.Graph().as_default():
x = tf.constant(self._inputs)
core_bn, contrib_bn, custom_bn = tf.contrib.tpu.batch_parallel(
computation, [x], num_shards=1)
with self.session() as sess:
sess.run(tf.contrib.tpu.initialize_system())
sess.run(tf.global_variables_initializer())
core_bn, contrib_bn, custom_bn = sess.run(
[core_bn, contrib_bn, custom_bn])
logging.info("core_bn: %s", core_bn)
logging.info("contrib_bn: %s", contrib_bn)
logging.info("custom_bn: %s", custom_bn)
self.assertAllClose(core_bn, self._expected_outputs)
self.assertAllClose(contrib_bn, self._expected_outputs)
self.assertAllClose(custom_bn, self._expected_outputs)
def testBatchNormTwoCoresCoreAndContrib(self):
def computation(x):
core_bn = tf.layers.batch_normalization(x, training=True)
contrib_bn = tf.contrib.layers.batch_norm(x, is_training=True)
return core_bn, contrib_bn
with tf.Graph().as_default():
x = tf.constant(self._inputs)
core_bn, contrib_bn = tf.contrib.tpu.batch_parallel(
computation, [x], num_shards=2)
with self.session() as sess:
sess.run(tf.contrib.tpu.initialize_system())
sess.run(tf.global_variables_initializer())
core_bn, contrib_bn = sess.run([core_bn, contrib_bn])
logging.info("core_bn: %s", core_bn)
logging.info("contrib_bn: %s", contrib_bn)
self.assertNotAllClose(core_bn, self._expected_outputs)
self.assertNotAllClose(contrib_bn, self._expected_outputs)
def testBatchNormTwoCoresCustom(self):
def computation(x):
custom_bn = arch_ops.batch_norm(x, is_training=True, name="custom_bn")
gin.bind_parameter("cross_replica_moments.parallel", False)
custom_bn_seq = arch_ops.batch_norm(x, is_training=True,
name="custom_bn_seq")
return custom_bn, custom_bn_seq
with tf.Graph().as_default():
x = tf.constant(self._inputs)
custom_bn, custom_bn_seq = tf.contrib.tpu.batch_parallel(
computation, [x], num_shards=2)
with self.session() as sess:
sess.run(tf.contrib.tpu.initialize_system())
sess.run(tf.global_variables_initializer())
custom_bn, custom_bn_seq = sess.run(
[custom_bn, custom_bn_seq])
logging.info("custom_bn: %s", custom_bn)
logging.info("custom_bn_seq: %s", custom_bn_seq)
self.assertAllClose(custom_bn, self._expected_outputs)
self.assertAllClose(custom_bn_seq, self._expected_outputs)
if __name__ == "__main__":
tf.test.main()
|
from pygal.graph.graph import Graph
from pygal.util import alter, cached_property, decorate
class Line(Graph):
"""Line graph class"""
def __init__(self, *args, **kwargs):
"""Set _self_close as False, it's True for Radar like Line"""
self._self_close = False
super(Line, self).__init__(*args, **kwargs)
@cached_property
def _values(self):
"""Getter for series values (flattened)"""
return [
val[1] for serie in self.series for val in
(serie.interpolated if self.interpolate else serie.points)
if val[1] is not None and (not self.logarithmic or val[1] > 0)
]
@cached_property
def _secondary_values(self):
"""Getter for secondary series values (flattened)"""
return [
val[1] for serie in self.secondary_series for val in
(serie.interpolated if self.interpolate else serie.points)
if val[1] is not None and (not self.logarithmic or val[1] > 0)
]
def _fill(self, values):
"""Add extra values to fill the line"""
zero = self.view.y(min(max(self.zero, self._box.ymin), self._box.ymax))
# Check to see if the data has been padded with "none's"
# Fill doesn't work correctly otherwise
end = len(values) - 1
while end > 0:
x, y = values[end]
if self.missing_value_fill_truncation == "either":
if x is not None and y is not None:
break
elif self.missing_value_fill_truncation == "x":
if x is not None:
break
elif self.missing_value_fill_truncation == "y":
if y is not None:
break
else:
raise ValueError(
"Invalid value ({}) for config key "
"'missing_value_fill_truncation';"
" Use 'x', 'y' or 'either'".format(
self.missing_value_fill_truncation
)
)
end -= 1
return ([(values[0][0], zero)] + values + [(values[end][0], zero)])
def line(self, serie, rescale=False):
"""Draw the line serie"""
serie_node = self.svg.serie(serie)
if rescale and self.secondary_series:
points = self._rescale(serie.points)
else:
points = serie.points
view_values = list(map(self.view, points))
if serie.show_dots:
for i, (x, y) in enumerate(view_values):
if None in (x, y):
continue
if self.logarithmic:
if points[i][1] is None or points[i][1] <= 0:
continue
if (serie.show_only_major_dots and self.x_labels
and i < len(self.x_labels)
and self.x_labels[i] not in self._x_labels_major):
continue
metadata = serie.metadata.get(i)
classes = []
if x > self.view.width / 2:
classes.append('left')
if y > self.view.height / 2:
classes.append('top')
classes = ' '.join(classes)
self._confidence_interval(
serie_node['overlay'], x, y, serie.values[i], metadata
)
dots = decorate(
self.svg,
self.svg.node(serie_node['overlay'], class_="dots"),
metadata
)
val = self._format(serie, i)
alter(
self.svg.transposable_node(
dots,
'circle',
cx=x,
cy=y,
r=serie.dots_size,
class_='dot reactive tooltip-trigger'
), metadata
)
self._tooltip_data(
dots, val, x, y, xlabel=self._get_x_label(i)
)
self._static_value(
serie_node, val, x + self.style.value_font_size,
y + self.style.value_font_size, metadata
)
if serie.stroke:
if self.interpolate:
points = serie.interpolated
if rescale and self.secondary_series:
points = self._rescale(points)
view_values = list(map(self.view, points))
if serie.fill:
view_values = self._fill(view_values)
if serie.allow_interruptions:
# view_values are in form [(x1, y1), (x2, y2)]. We
# need to split that into multiple sequences if a
# None is present here
sequences = []
cur_sequence = []
for x, y in view_values:
if y is None and len(cur_sequence) > 0:
# emit current subsequence
sequences.append(cur_sequence)
cur_sequence = []
elif y is None: # just discard
continue
else:
cur_sequence.append((x, y)) # append the element
if len(cur_sequence) > 0: # emit last possible sequence
sequences.append(cur_sequence)
else:
# plain vanilla rendering
sequences = [view_values]
if self.logarithmic:
for seq in sequences:
for ele in seq[::-1]:
y = points[seq.index(ele)][1]
if y is None or y <= 0:
del seq[seq.index(ele)]
for seq in sequences:
self.svg.line(
serie_node['plot'],
seq,
close=self._self_close,
class_='line reactive' +
(' nofill' if not serie.fill else '')
)
def _compute(self):
"""Compute y min and max and y scale and set labels"""
# X Labels
if self.horizontal:
self._x_pos = [
x / (self._len - 1) for x in range(self._len)
][::-1] if self._len != 1 else [.5] # Center if only one value
else:
self._x_pos = [
x / (self._len - 1) for x in range(self._len)
] if self._len != 1 else [.5] # Center if only one value
self._points(self._x_pos)
if self.include_x_axis:
# Y Label
self._box.ymin = min(self._min or 0, 0)
self._box.ymax = max(self._max or 0, 0)
else:
self._box.ymin = self._min
self._box.ymax = self._max
def _plot(self):
"""Plot the serie lines and secondary serie lines"""
for serie in self.series:
self.line(serie)
for serie in self.secondary_series:
self.line(serie, True)
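# Hedged usage sketch (not part of pygal itself): how the Line class above is
# normally driven through pygal's public API. The series values are made up; the
# `fill`, `interpolate` and `allow_interruptions` options map to the config keys
# read by _fill() and line() above.
if __name__ == '__main__':
    import pygal

    chart = pygal.Line(fill=True, interpolate='cubic', allow_interruptions=True)
    chart.add('Series A', [1, 3, None, 5, 2])  # None leaves a gap when allow_interruptions is set
    chart.add('Series B', [2, 4, 6, 8, 10])
    svg = chart.render()  # render() returns the SVG document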
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from monit import MonitCollector
##########################################################################
class TestMonitCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MonitCollector',
{'byte_unit': 'kilobyte', })
self.collector = MonitCollector(config, None)
def test_import(self):
self.assertTrue(MonitCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('status.xml')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'app_thin_8101.cpu.percent': 0.9,
'app_thin_8101.memory.kilobyte_usage': 216104,
'app_thin_8102.cpu.percent': 1.1,
'app_thin_8102.memory.kilobyte_usage': 212736,
'app_thin_8103.cpu.percent': 0.9,
'app_thin_8103.memory.kilobyte_usage': 204948,
'app_thin_8104.cpu.percent': 0.9,
'app_thin_8104.memory.kilobyte_usage': 212464,
'sshd.cpu.percent': 0.0,
'sshd.memory.kilobyte_usage': 2588,
'rsyslogd.cpu.percent': 0.0,
'rsyslogd.memory.kilobyte_usage': 2664,
'postfix.cpu.percent': 0.0,
'postfix.memory.kilobyte_usage': 2304,
'nginx.cpu.percent': 0.0,
'nginx.memory.kilobyte_usage': 18684,
'haproxy.cpu.percent': 0.0,
'haproxy.memory.kilobyte_usage': 4040,
'cron.cpu.percent': 0.0,
'cron.memory.kilobyte_usage': 1036,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch(
'urllib2.urlopen',
Mock(
return_value=self.getFixture(
'status_blank.xml')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import inspect
from django.db import models
from django.utils.encoding import force_text
from django.utils.html import strip_tags
def skip_model_member(app, what, name, obj, skip, options):
# These fields always fail!
if name in ('tags', 'image'):
return True
return skip
def process_model_docstring(app, what, name, obj, options, lines):
if inspect.isclass(obj) and issubclass(obj, models.Model):
for field in obj._meta.fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_text(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_text(field.verbose_name).capitalize()
if help_text:
lines.append(':param %s: %s' % (field.attname, help_text))
else:
lines.append(':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(':type %s: %s' % (field.attname,
type(field).__name__))
# Return the extended docstring
return lines
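# Illustrative (hypothetical) output: for a model field ``title`` declared as
# ``models.CharField(help_text="The entry title")``, the two lines appended to the
# class docstring by process_model_docstring would be:
#
#     :param title: The entry title
#     :type title: CharField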
def setup(app):
app.add_crossref_type(
directivename='setting',
rolename='setting',
indextemplate='pair: %s; setting',
)
app.add_crossref_type(
directivename='templatetag',
rolename='ttag',
indextemplate='pair: %s; template tag'
)
app.add_crossref_type(
directivename='templatefilter',
rolename='tfilter',
indextemplate='pair: %s; template filter'
)
app.connect('autodoc-process-docstring',
process_model_docstring)
app.connect('autodoc-skip-member',
skip_model_member)
|
import numpy as np
import pandas as pd
from pdpbox import pdp
import unittest
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
class TestPdpbox(unittest.TestCase):
def test_simple_pdp(self):
# set up data
data = pd.read_csv("/input/tests/data/fifa_2018_stats.csv")
y = (data['Man of the Match'] == "Yes")
feature_names = [i for i in data.columns if data[i].dtype in [np.int64]]
X = data[feature_names]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Build simple model
tree_model = DecisionTreeClassifier(random_state=0,
max_depth=3).fit(train_X, train_y)
# Set up pdp as table
pdp_goals = pdp.pdp_isolate(model=tree_model,
dataset=val_X,
model_features=feature_names,
feature='Goal Scored')
# make plot
pdp.pdp_plot(pdp_goals, 'Goal Scored')
|
import numpy as np
from scipy import linalg
from .. import EvokedArray, Evoked
from ..cov import Covariance, _regularized_covariance
from ..decoding import TransformerMixin, BaseEstimator
from ..epochs import BaseEpochs
from ..io import BaseRaw
from ..io.pick import _pick_data_channels, pick_info
from ..utils import logger, _check_option
def _construct_signal_from_epochs(epochs, events, sfreq, tmin):
"""Reconstruct pseudo continuous signal from epochs."""
n_epochs, n_channels, n_times = epochs.shape
tmax = tmin + n_times / float(sfreq)
start = (np.min(events[:, 0]) + int(tmin * sfreq))
stop = (np.max(events[:, 0]) + int(tmax * sfreq) + 1)
n_samples = stop - start
n_epochs, n_channels, n_times = epochs.shape
events_pos = events[:, 0] - events[0, 0]
raw = np.zeros((n_channels, n_samples))
for idx in range(n_epochs):
onset = events_pos[idx]
offset = onset + n_times
raw[:, onset:offset] = epochs[idx]
return raw
def _least_square_evoked(epochs_data, events, tmin, sfreq):
"""Least square estimation of evoked response from epochs data.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The epochs data used to estimate the evoked responses.
events : array, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be ignored.
tmin : float
Start time before event.
sfreq : float
Sampling frequency.
Returns
-------
evokeds : array, shape (n_class, n_components, n_times)
A concatenated array of evoked data for each event type.
toeplitz : array, shape (n_class * n_components, n_channels)
A concatenated array of Toeplitz matrices, one for each event type.
"""
n_epochs, n_channels, n_times = epochs_data.shape
tmax = tmin + n_times / float(sfreq)
# Deal with shuffled epochs
events = events.copy()
events[:, 0] -= events[0, 0] + int(tmin * sfreq)
# Construct raw signal
raw = _construct_signal_from_epochs(epochs_data, events, sfreq, tmin)
# Compute the independent evoked responses per condition, while correcting
# for event overlaps.
n_min, n_max = int(tmin * sfreq), int(tmax * sfreq)
window = n_max - n_min
n_samples = raw.shape[1]
toeplitz = list()
classes = np.unique(events[:, 2])
for ii, this_class in enumerate(classes):
# select events by type
sel = events[:, 2] == this_class
# build toeplitz matrix
trig = np.zeros((n_samples, 1))
ix_trig = (events[sel, 0]) + n_min
trig[ix_trig] = 1
toeplitz.append(linalg.toeplitz(trig[0:window], trig))
# Concatenate toeplitz
toeplitz = np.array(toeplitz)
X = np.concatenate(toeplitz)
# least square estimation
predictor = np.dot(linalg.pinv(np.dot(X, X.T)), X)
evokeds = np.dot(predictor, raw.T)
evokeds = np.transpose(np.vsplit(evokeds, len(classes)), (0, 2, 1))
return evokeds, toeplitz
def _fit_xdawn(epochs_data, y, n_components, reg=None, signal_cov=None,
events=None, tmin=0., sfreq=1., method_params=None, info=None):
"""Fit filters and coefs using Xdawn Algorithm.
Xdawn is a spatial filtering method designed to improve the signal
to signal + noise ratio (SSNR) of the event related responses. Xdawn was
originally designed for P300 evoked potential by enhancing the target
response with respect to the non-target response. This implementation is a
generalization to any type of event related response.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The epochs data.
y : array, shape (n_epochs)
The epochs class.
n_components : int (default 2)
The number of components to decompose the signals.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
signal_cov : None | Covariance | array, shape (n_channels, n_channels)
The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
events : array, shape (n_epochs, 3)
The epochs events, used to correct for epochs overlap.
tmin : float
Epochs starting time. Only used if events is passed to correct for
epochs overlap.
sfreq : float
Sampling frequency. Only used if events is passed to correct for
epochs overlap.
Returns
-------
filters : array, shape (n_channels, n_channels)
The Xdawn components used to decompose the data for each event type.
Each row corresponds to one component.
patterns : array, shape (n_channels, n_channels)
The Xdawn patterns used to restore the signals for each event type.
evokeds : array, shape (n_class, n_components, n_times)
The independent evoked responses per condition.
"""
if not isinstance(epochs_data, np.ndarray) or epochs_data.ndim != 3:
raise ValueError('epochs_data must be 3D ndarray')
classes = np.unique(y)
# XXX Eventually this could be made to deal with rank deficiency properly
# by exposing this "rank" parameter, but this will require refactoring
# the linalg.eigh call to operate in the lower-dimension
# subspace, then project back out.
# Retrieve or compute whitening covariance
if signal_cov is None:
signal_cov = _regularized_covariance(
np.hstack(epochs_data), reg, method_params, info, rank='full')
elif isinstance(signal_cov, Covariance):
signal_cov = signal_cov.data
if not isinstance(signal_cov, np.ndarray) or (
not np.array_equal(signal_cov.shape,
np.tile(epochs_data.shape[1], 2))):
raise ValueError('signal_cov must be None, a covariance instance, '
'or an array of shape (n_chans, n_chans)')
# Get prototype events
if events is not None:
evokeds, toeplitzs = _least_square_evoked(
epochs_data, events, tmin, sfreq)
else:
evokeds, toeplitzs = list(), list()
for c in classes:
# Prototyped response for each class
evokeds.append(np.mean(epochs_data[y == c, :, :], axis=0))
toeplitzs.append(1.)
filters = list()
patterns = list()
for evo, toeplitz in zip(evokeds, toeplitzs):
# Estimate covariance matrix of the prototype response
evo = np.dot(evo, toeplitz)
evo_cov = _regularized_covariance(evo, reg, method_params, info,
rank='full')
# Fit spatial filters
try:
evals, evecs = linalg.eigh(evo_cov, signal_cov)
except np.linalg.LinAlgError as exp:
raise ValueError('Could not compute eigenvalues, ensure '
'proper regularization (%s)' % (exp,))
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
_patterns = np.linalg.pinv(evecs.T)
filters.append(evecs[:, :n_components].T)
patterns.append(_patterns[:, :n_components].T)
filters = np.concatenate(filters, axis=0)
patterns = np.concatenate(patterns, axis=0)
evokeds = np.array(evokeds)
return filters, patterns, evokeds
class _XdawnTransformer(BaseEstimator, TransformerMixin):
"""Implementation of the Xdawn Algorithm compatible with scikit-learn.
Xdawn is a spatial filtering method designed to improve the signal
to signal + noise ratio (SSNR) of the event related responses. Xdawn was
originally designed for P300 evoked potential by enhancing the target
response with respect to the non-target response. This implementation is a
generalization to any type of event related response.
.. note:: _XdawnTransformer does not correct for epochs overlap. To correct
overlaps see ``Xdawn``.
Parameters
----------
n_components : int (default 2)
The number of components to decompose the signals.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
signal_cov : None | Covariance | array, shape (n_channels, n_channels)
The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
method_params : dict | None
Parameters to pass to :func:`mne.compute_covariance`.
.. versionadded:: 0.16
Attributes
----------
classes_ : array, shape (n_classes)
The event indices of the classes.
filters_ : array, shape (n_channels, n_channels)
The Xdawn components used to decompose the data for each event type.
patterns_ : array, shape (n_channels, n_channels)
The Xdawn patterns used to restore the signals for each event type.
"""
def __init__(self, n_components=2, reg=None, signal_cov=None,
method_params=None):
"""Init."""
self.n_components = n_components
self.signal_cov = signal_cov
self.reg = reg
self.method_params = method_params
def fit(self, X, y=None):
"""Fit Xdawn spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_samples)
The target data.
y : array, shape (n_epochs,) | None
The target labels. If None, Xdawn fit on the average evoked.
Returns
-------
self : Xdawn instance
The Xdawn instance.
"""
X, y = self._check_Xy(X, y)
# Main function
self.classes_ = np.unique(y)
self.filters_, self.patterns_, _ = _fit_xdawn(
X, y, n_components=self.n_components, reg=self.reg,
signal_cov=self.signal_cov, method_params=self.method_params)
return self
def transform(self, X):
"""Transform data with spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_samples)
The target data.
Returns
-------
X : array, shape (n_epochs, n_components * n_classes, n_samples)
The transformed data.
"""
X, _ = self._check_Xy(X)
# Check size
if self.filters_.shape[1] != X.shape[1]:
raise ValueError('X must have %i channels, got %i instead.' % (
self.filters_.shape[1], X.shape[1]))
# Transform
X = np.dot(self.filters_, X)
X = X.transpose((1, 0, 2))
return X
def inverse_transform(self, X):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data, zero out components,
and inverse transform the data. This procedure will reconstruct
the signals from which the dynamics described by the excluded
components is subtracted.
Parameters
----------
X : array, shape (n_epochs, n_components * n_classes, n_times)
The transformed data.
Returns
-------
X : array, shape (n_epochs, n_channels * n_classes, n_times)
The inverse transform data.
"""
# Check size
X, _ = self._check_Xy(X)
n_epochs, n_comp, n_times = X.shape
if n_comp != (self.n_components * len(self.classes_)):
raise ValueError('X must have %i components, got %i instead' % (
self.n_components * len(self.classes_), n_comp))
# Transform
return np.dot(self.patterns_.T, X).transpose(1, 0, 2)
def _check_Xy(self, X, y=None):
"""Check X and y types and dimensions."""
# Check data
if not isinstance(X, np.ndarray) or X.ndim != 3:
raise ValueError('X must be an array of shape (n_epochs, '
'n_channels, n_samples).')
if y is None:
y = np.ones(len(X))
y = np.asarray(y)
if len(X) != len(y):
raise ValueError('X and y must have the same length')
return X, y
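# Hedged usage sketch for _XdawnTransformer (shapes and variable names below are
# illustrative assumptions, not taken from this module):
#
#     xdt = _XdawnTransformer(n_components=2)
#     xdt.fit(epochs_data, y)                 # epochs_data: (n_epochs, n_channels, n_times)
#     filtered = xdt.transform(epochs_data)   # (n_epochs, n_components * n_classes, n_times)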
class Xdawn(_XdawnTransformer):
"""Implementation of the Xdawn Algorithm.
Xdawn [1]_ [2]_ is a spatial filtering method designed to improve the
signal to signal + noise ratio (SSNR) of the ERP responses. Xdawn was
originally designed for P300 evoked potential by enhancing the target
response with respect to the non-target response. This implementation
is a generalization to any type of ERP.
Parameters
----------
n_components : int, (default 2)
The number of components to decompose the signals.
signal_cov : None | Covariance | ndarray, shape (n_channels, n_channels)
(default None). The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
correct_overlap : 'auto' or bool (default 'auto')
Compute the independent evoked responses per condition, while
correcting for event overlaps if any. If 'auto', then
overlap_correction = True if the events do overlap.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
Attributes
----------
filters_ : dict of ndarray
If fit, the Xdawn components used to decompose the data for each event
type, else empty. For each event type, the filters are in the rows of
the corresponding array.
patterns_ : dict of ndarray
If fit, the Xdawn patterns used to restore the signals for each event
type, else empty.
evokeds_ : dict of Evoked
If fit, the evoked response for each event type.
event_id_ : dict
The event id.
correct_overlap_ : bool
Whether overlap correction was applied.
See Also
--------
mne.decoding.CSP, mne.decoding.SPoC
Notes
-----
.. versionadded:: 0.10
References
----------
.. [1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to
brain-computer interface. Biomedical Engineering, IEEE Transactions
on, 56(8), 2035-2043.
.. [2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J.
(2011, August). Theoretical analysis of xDAWN algorithm:
application to an efficient sensor selection in a P300 BCI. In
Signal Processing Conference, 2011 19th European (pp. 1382-1386).
IEEE.
"""
def __init__(self, n_components=2, signal_cov=None, correct_overlap='auto',
reg=None):
"""Init."""
super(Xdawn, self).__init__(n_components=n_components,
signal_cov=signal_cov, reg=reg)
self.correct_overlap = _check_option('correct_overlap',
correct_overlap,
['auto', True, False])
def fit(self, epochs, y=None):
"""Fit Xdawn from epochs.
Parameters
----------
epochs : instance of Epochs
An instance of Epoch on which Xdawn filters will be fitted.
y : ndarray | None (default None)
If None, epochs.events[:, 2] is used.
Returns
-------
self : instance of Xdawn
The Xdawn instance.
"""
# Check data
if not isinstance(epochs, BaseEpochs):
raise ValueError('epochs must be an Epochs object.')
picks = _pick_data_channels(epochs.info)
use_info = pick_info(epochs.info, picks)
X = epochs.get_data()[:, picks, :]
y = epochs.events[:, 2] if y is None else y
self.event_id_ = epochs.event_id
# Check that no baseline was applied with correct overlap
correct_overlap = self.correct_overlap
if correct_overlap == 'auto':
# Events are overlapped if the minimal inter-stimulus
# interval is smaller than the time window.
isi = np.diff(np.sort(epochs.events[:, 0]))
window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])
correct_overlap = isi.min() < window
if epochs.baseline and correct_overlap:
raise ValueError('Cannot apply correct_overlap if epochs'
' were baselined.')
events, tmin, sfreq = None, 0., 1.
if correct_overlap:
events = epochs.events
tmin = epochs.tmin
sfreq = epochs.info['sfreq']
self.correct_overlap_ = correct_overlap
# Note: In this original version of Xdawn we compute and keep all
# components. The selection comes at transform().
n_components = X.shape[1]
# Main fitting function
filters, patterns, evokeds = _fit_xdawn(
X, y, n_components=n_components, reg=self.reg,
signal_cov=self.signal_cov, events=events, tmin=tmin, sfreq=sfreq,
method_params=self.method_params, info=use_info)
# Re-order filters and patterns according to event_id
filters = filters.reshape(-1, n_components, filters.shape[-1])
patterns = patterns.reshape(-1, n_components, patterns.shape[-1])
self.filters_, self.patterns_, self.evokeds_ = dict(), dict(), dict()
idx = np.argsort([value for _, value in epochs.event_id.items()])
for eid, this_filter, this_pattern, this_evo in zip(
epochs.event_id, filters[idx], patterns[idx], evokeds[idx]):
self.filters_[eid] = this_filter
self.patterns_[eid] = this_pattern
n_events = len(epochs[eid])
evoked = EvokedArray(this_evo, use_info, tmin=epochs.tmin,
comment=eid, nave=n_events)
self.evokeds_[eid] = evoked
return self
def transform(self, inst):
"""Apply Xdawn dim reduction.
Parameters
----------
inst : Epochs | Evoked | ndarray, shape ([n_epochs, ]n_channels, n_times)
Data on which Xdawn filters will be applied.
Returns
-------
X : ndarray, shape ([n_epochs, ]n_components * n_event_types, n_times)
Spatially filtered signals.
""" # noqa: E501
if isinstance(inst, BaseEpochs):
X = inst.get_data()
elif isinstance(inst, Evoked):
X = inst.data
elif isinstance(inst, np.ndarray):
X = inst
if X.ndim not in (2, 3):
raise ValueError('X must be 2D or 3D, got %s' % (X.ndim,))
else:
raise ValueError('Data input must be of Epoch type or numpy array')
filters = [filt[:self.n_components]
for filt in self.filters_.values()]
filters = np.concatenate(filters, axis=0)
X = np.dot(filters, X)
if X.ndim == 3:
X = X.transpose((1, 0, 2))
return X
def apply(self, inst, event_id=None, include=None, exclude=None):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data,
zero out components, and inverse transform the data.
This procedure will reconstruct the signals from which
the dynamics described by the excluded components is subtracted.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
The data to be processed.
event_id : dict | list of str | None (default None)
The kind of event to apply. If None, a dict of instances is returned,
one for each event type Xdawn has been fitted on.
include : array_like of int | None (default None)
The indices referring to columns in the unmixing matrix. The
components to be kept. If None, the first n_components (as defined
in the Xdawn constructor) will be kept.
exclude : array_like of int | None (default None)
The indices referring to columns in the unmixing matrix. The
components to be zeroed out. If None, all the components except the
first n_components will be excluded.
Returns
-------
out : dict
A dict of instance (from the same type as inst input) for each
event type in event_id.
"""
if event_id is None:
event_id = self.event_id_
if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):
raise ValueError('Data input must be Raw, Epochs or Evoked type')
picks = _pick_data_channels(inst.info)
# Define the components to keep
default_exclude = list(range(self.n_components, len(inst.ch_names)))
if exclude is None:
exclude = default_exclude
else:
exclude = list(set(list(default_exclude) + list(exclude)))
if isinstance(inst, BaseRaw):
out = self._apply_raw(raw=inst, include=include, exclude=exclude,
event_id=event_id, picks=picks)
elif isinstance(inst, BaseEpochs):
out = self._apply_epochs(epochs=inst, include=include, picks=picks,
exclude=exclude, event_id=event_id)
elif isinstance(inst, Evoked):
out = self._apply_evoked(evoked=inst, include=include, picks=picks,
exclude=exclude, event_id=event_id)
return out
def _apply_raw(self, raw, include, exclude, event_id, picks):
"""Aux method."""
if not raw.preload:
raise ValueError('Raw data must be preloaded to apply Xdawn')
raws = dict()
for eid in event_id:
data = raw[picks, :][0]
data = self._pick_sources(data, include, exclude, eid)
raw_r = raw.copy()
raw_r[picks, :] = data
raws[eid] = raw_r
return raws
def _apply_epochs(self, epochs, include, exclude, event_id, picks):
"""Aux method."""
if not epochs.preload:
raise ValueError('Epochs must be preloaded to apply Xdawn')
# special case where epochs come picked but fit was 'unpicked'.
epochs_dict = dict()
data = np.hstack(epochs.get_data()[:, picks])
for eid in event_id:
data_r = self._pick_sources(data, include, exclude, eid)
data_r = np.array(np.split(data_r, len(epochs.events), 1))
epochs_r = epochs.copy().load_data()
epochs_r._data[:, picks, :] = data_r
epochs_dict[eid] = epochs_r
return epochs_dict
def _apply_evoked(self, evoked, include, exclude, event_id, picks):
"""Aux method."""
data = evoked.data[picks]
evokeds = dict()
for eid in event_id:
data_r = self._pick_sources(data, include, exclude, eid)
evokeds[eid] = evoked.copy()
# restore evoked
evokeds[eid].data[picks] = data_r
return evokeds
def _pick_sources(self, data, include, exclude, eid):
"""Aux method."""
logger.info('Transforming to Xdawn space')
# Apply unmixing
sources = np.dot(self.filters_[eid], data)
if include not in (None, list()):
mask = np.ones(len(sources), dtype=bool)
mask[np.unique(include)] = False
sources[mask] = 0.
logger.info('Zeroing out %i Xdawn components' % mask.sum())
elif exclude not in (None, list()):
exclude_ = np.unique(exclude)
sources[exclude_] = 0.
logger.info('Zeroing out %i Xdawn components' % len(exclude_))
logger.info('Inverse transforming to sensor space')
data = np.dot(self.patterns_[eid].T, sources)
return data
def inverse_transform(self):
"""Not implemented, see Xdawn.apply() instead."""
# Exists because of _XdawnTransformer
raise NotImplementedError('See Xdawn.apply()')
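# Hedged usage sketch for Xdawn (assumes an existing, non-baselined `epochs`
# instance of mne.Epochs; variable names are illustrative):
#
#     xd = Xdawn(n_components=2)
#     xd.fit(epochs)
#     denoised = xd.apply(epochs)     # dict of Epochs, one entry per event type
#     reduced = xd.transform(epochs)  # spatially filtered array, see transform()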
|
from unittest import TestCase
from scattertext.Formatter import large_int_format
class TestLarge_int_format(TestCase):
def test_large_int_format(self):
self.assertEqual(large_int_format(1), '1')
self.assertEqual(large_int_format(6), '6')
self.assertEqual(large_int_format(10), '10')
self.assertEqual(large_int_format(19), '10')
self.assertEqual(large_int_format(88), '80')
self.assertEqual(large_int_format(999), '900')
self.assertEqual(large_int_format(1001), '1k')
self.assertEqual(large_int_format(205001), '200k')
self.assertEqual(large_int_format(2050010), '2mm')
self.assertEqual(large_int_format(205000010), '200mm')
self.assertEqual(large_int_format(2050000010), '2b')
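# Hedged reference sketch (hypothetical, not scattertext's implementation): a
# reimplementation consistent with the assertions above, i.e. floor the number to
# one significant digit and suffix thousands/millions/billions as k/mm/b.
def _large_int_format_sketch(x):
    """Hypothetical re-creation of large_int_format's observed behaviour."""
    if x < 10:
        return str(x)
    magnitude = 10 ** (len(str(x)) - 1)
    lead = (x // magnitude) * magnitude  # keep only the leading digit
    for divisor, suffix in ((10 ** 9, 'b'), (10 ** 6, 'mm'), (10 ** 3, 'k')):
        if lead >= divisor:
            return '{:d}{}'.format(lead // divisor, suffix)
    return str(lead)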
|
from paasta_tools.autoscaling import load_boost
from paasta_tools.cli.utils import execute_paasta_cluster_boost_on_remote_master
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_clusters
from paasta_tools.utils import load_system_paasta_config
def add_subparser(subparsers):
boost_parser = subparsers.add_parser(
"boost",
help="Set, print the status, or clear a capacity boost for a given region in a PaaSTA cluster",
description=(
"'paasta boost' is used to temporarily provision more capacity in a given cluster "
"It operates by ssh'ing to a Mesos master of a remote cluster, and "
"interacting with the boost in the local zookeeper cluster. If you set or clear "
"a boost, you may want to run the cluster autoscaler manually afterward."
),
epilog=(
"The boost command may time out during heavy load. When that happens "
"users may execute the ssh command directly, in order to bypass the timeout."
),
)
boost_parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbose",
default=0,
help="""Print out more output regarding the state of the cluster.
Multiple v options increase verbosity. Maximum is 3.""",
)
boost_parser.add_argument(
"-c",
"--cluster",
type=str,
required=True,
help="""Paasta cluster(s) to boost. This option can take comma separated values.
If auto-completion doesn't work, you can get a list of clusters with `paasta list-clusters'"""
).completer = lazy_choices_completer(list_clusters)
boost_parser.add_argument(
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
boost_parser.add_argument(
"-p",
"--pool",
type=str,
default="default",
help="Name of the pool you want to increase the capacity. Default is 'default' pool.",
)
boost_parser.add_argument(
"-b",
"--boost",
type=float,
default=load_boost.DEFAULT_BOOST_FACTOR,
help="Boost factor to apply. Default is 1.5. A big failover should be 2, 3 is the max.",
)
boost_parser.add_argument(
"-d",
"--duration",
type=int,
default=load_boost.DEFAULT_BOOST_DURATION,
help="Duration of the capacity boost in minutes. Default is 40",
)
boost_parser.add_argument(
"-f",
"--force",
action="store_true",
dest="override",
help="Replace an existing boost. Default is false",
)
boost_parser.add_argument(
"action",
choices=["set", "status", "clear"],
help="You can view the status, set or clear a boost.",
)
boost_parser.set_defaults(command=paasta_boost)
def paasta_boost(args):
soa_dir = args.soa_dir
system_paasta_config = load_system_paasta_config()
all_clusters = list_clusters(soa_dir=soa_dir)
clusters = args.cluster.split(",")
for cluster in clusters:
if cluster not in all_clusters:
print(
f"Error: {cluster} doesn't look like a valid cluster. "
+ "Here is a list of valid paasta clusters:\n"
+ "\n".join(all_clusters)
)
return 1
return_code, output = execute_paasta_cluster_boost_on_remote_master(
clusters=clusters,
system_paasta_config=system_paasta_config,
action=args.action,
pool=args.pool,
duration=args.duration if args.action == "set" else None,
override=args.override if args.action == "set" else None,
boost=args.boost if args.action == "set" else None,
verbose=args.verbose,
)
print(output)
return return_code
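# Example invocations (illustrative only; cluster and pool names are made up):
#
#   paasta boost -c mycluster -p default -b 2 -d 60 set
#   paasta boost -c mycluster -p default status
#   paasta boost -c mycluster clear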
|
import logging
import pytest
import voluptuous as vol
from homeassistant.helpers import collection, entity, entity_component, storage
from tests.common import flush_store
_LOGGER = logging.getLogger(__name__)
def track_changes(coll: collection.ObservableCollection):
"""Create helper to track changes in a collection."""
changes = []
async def listener(*args):
changes.append(args)
coll.async_add_listener(listener)
return changes
class MockEntity(entity.Entity):
"""Entity that is config based."""
def __init__(self, config):
"""Initialize entity."""
self._config = config
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._config["id"]
@property
def name(self):
"""Return name of entity."""
return self._config["name"]
@property
def state(self):
"""Return state of entity."""
return self._config["state"]
async def async_update_config(self, config):
"""Update entity config."""
self._config = config
self.async_write_ha_state()
class MockStorageCollection(collection.StorageCollection):
"""Mock storage collection."""
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
if "name" not in data:
raise ValueError("invalid")
return data
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
return info["name"]
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
return {**data, **update_data}
def test_id_manager():
"""Test the ID manager."""
id_manager = collection.IDManager()
assert not id_manager.has_id("some_id")
data = {}
id_manager.add_collection(data)
assert not id_manager.has_id("some_id")
data["some_id"] = 1
assert id_manager.has_id("some_id")
assert id_manager.generate_id("some_id") == "some_id_2"
assert id_manager.generate_id("bla") == "bla"
async def test_observable_collection():
"""Test observerable collection."""
coll = collection.ObservableCollection(_LOGGER)
assert coll.async_items() == []
coll.data["bla"] = 1
assert coll.async_items() == [1]
changes = track_changes(coll)
await coll.notify_changes(
[collection.CollectionChangeSet("mock_type", "mock_id", {"mock": "item"})]
)
assert len(changes) == 1
assert changes[0] == ("mock_type", "mock_id", {"mock": "item"})
async def test_yaml_collection():
"""Test a YAML collection."""
id_manager = collection.IDManager()
coll = collection.YamlCollection(_LOGGER, id_manager)
changes = track_changes(coll)
await coll.async_load(
[{"id": "mock-1", "name": "Mock 1"}, {"id": "mock-2", "name": "Mock 2"}]
)
assert id_manager.has_id("mock-1")
assert id_manager.has_id("mock-2")
assert len(changes) == 2
assert changes[0] == (
collection.CHANGE_ADDED,
"mock-1",
{"id": "mock-1", "name": "Mock 1"},
)
assert changes[1] == (
collection.CHANGE_ADDED,
"mock-2",
{"id": "mock-2", "name": "Mock 2"},
)
# Test loading new data. Mock 1 is updated, 2 removed, 3 added.
await coll.async_load(
[{"id": "mock-1", "name": "Mock 1-updated"}, {"id": "mock-3", "name": "Mock 3"}]
)
assert len(changes) == 5
assert changes[2] == (
collection.CHANGE_UPDATED,
"mock-1",
{"id": "mock-1", "name": "Mock 1-updated"},
)
assert changes[3] == (
collection.CHANGE_ADDED,
"mock-3",
{"id": "mock-3", "name": "Mock 3"},
)
assert changes[4] == (
collection.CHANGE_REMOVED,
"mock-2",
{"id": "mock-2", "name": "Mock 2"},
)
async def test_yaml_collection_skipping_duplicate_ids():
"""Test YAML collection skipping duplicate IDs."""
id_manager = collection.IDManager()
id_manager.add_collection({"existing": True})
coll = collection.YamlCollection(_LOGGER, id_manager)
changes = track_changes(coll)
await coll.async_load(
[{"id": "mock-1", "name": "Mock 1"}, {"id": "existing", "name": "Mock 2"}]
)
assert len(changes) == 1
assert changes[0] == (
collection.CHANGE_ADDED,
"mock-1",
{"id": "mock-1", "name": "Mock 1"},
)
async def test_storage_collection(hass):
"""Test storage collection."""
store = storage.Store(hass, 1, "test-data")
await store.async_save(
{
"items": [
{"id": "mock-1", "name": "Mock 1", "data": 1},
{"id": "mock-2", "name": "Mock 2", "data": 2},
]
}
)
id_manager = collection.IDManager()
coll = MockStorageCollection(store, _LOGGER, id_manager)
changes = track_changes(coll)
await coll.async_load()
assert id_manager.has_id("mock-1")
assert id_manager.has_id("mock-2")
assert len(changes) == 2
assert changes[0] == (
collection.CHANGE_ADDED,
"mock-1",
{"id": "mock-1", "name": "Mock 1", "data": 1},
)
assert changes[1] == (
collection.CHANGE_ADDED,
"mock-2",
{"id": "mock-2", "name": "Mock 2", "data": 2},
)
item = await coll.async_create_item({"name": "Mock 3"})
assert item["id"] == "mock_3"
assert len(changes) == 3
assert changes[2] == (
collection.CHANGE_ADDED,
"mock_3",
{"id": "mock_3", "name": "Mock 3"},
)
updated_item = await coll.async_update_item("mock-2", {"name": "Mock 2 updated"})
assert id_manager.has_id("mock-2")
assert updated_item == {"id": "mock-2", "name": "Mock 2 updated", "data": 2}
assert len(changes) == 4
assert changes[3] == (collection.CHANGE_UPDATED, "mock-2", updated_item)
with pytest.raises(ValueError):
await coll.async_update_item("mock-2", {"id": "mock-2-updated"})
assert id_manager.has_id("mock-2")
assert not id_manager.has_id("mock-2-updated")
assert len(changes) == 4
await flush_store(store)
assert await storage.Store(hass, 1, "test-data").async_load() == {
"items": [
{"id": "mock-1", "name": "Mock 1", "data": 1},
{"id": "mock-2", "name": "Mock 2 updated", "data": 2},
{"id": "mock_3", "name": "Mock 3"},
]
}
async def test_attach_entity_component_collection(hass):
"""Test attaching collection to entity component."""
ent_comp = entity_component.EntityComponent(_LOGGER, "test", hass)
coll = collection.ObservableCollection(_LOGGER)
collection.attach_entity_component_collection(ent_comp, coll, MockEntity)
await coll.notify_changes(
[
collection.CollectionChangeSet(
collection.CHANGE_ADDED,
"mock_id",
{"id": "mock_id", "state": "initial", "name": "Mock 1"},
)
],
)
assert hass.states.get("test.mock_1").name == "Mock 1"
assert hass.states.get("test.mock_1").state == "initial"
await coll.notify_changes(
[
collection.CollectionChangeSet(
collection.CHANGE_UPDATED,
"mock_id",
{"id": "mock_id", "state": "second", "name": "Mock 1 updated"},
)
],
)
assert hass.states.get("test.mock_1").name == "Mock 1 updated"
assert hass.states.get("test.mock_1").state == "second"
await coll.notify_changes(
[collection.CollectionChangeSet(collection.CHANGE_REMOVED, "mock_id", None)],
)
assert hass.states.get("test.mock_1") is None
async def test_storage_collection_websocket(hass, hass_ws_client):
"""Test exposing a storage collection via websockets."""
store = storage.Store(hass, 1, "test-data")
coll = MockStorageCollection(store, _LOGGER)
changes = track_changes(coll)
collection.StorageCollectionWebsocket(
coll,
"test_item/collection",
"test_item",
{vol.Required("name"): str, vol.Required("immutable_string"): str},
{vol.Optional("name"): str},
).async_setup(hass)
client = await hass_ws_client(hass)
# Create invalid
await client.send_json(
{
"id": 1,
"type": "test_item/collection/create",
"name": 1,
# Forgot to add immutable_string
}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "invalid_format"
assert len(changes) == 0
# Create
await client.send_json(
{
"id": 2,
"type": "test_item/collection/create",
"name": "Initial Name",
"immutable_string": "no-changes",
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {
"id": "initial_name",
"name": "Initial Name",
"immutable_string": "no-changes",
}
assert len(changes) == 1
assert changes[0] == (collection.CHANGE_ADDED, "initial_name", response["result"])
# List
await client.send_json({"id": 3, "type": "test_item/collection/list"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == [
{
"id": "initial_name",
"name": "Initial Name",
"immutable_string": "no-changes",
}
]
assert len(changes) == 1
# Update invalid data
await client.send_json(
{
"id": 4,
"type": "test_item/collection/update",
"test_item_id": "initial_name",
"immutable_string": "no-changes",
}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "invalid_format"
assert len(changes) == 1
# Update invalid item
await client.send_json(
{
"id": 5,
"type": "test_item/collection/update",
"test_item_id": "non-existing",
"name": "Updated name",
}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "not_found"
assert len(changes) == 1
# Update
await client.send_json(
{
"id": 6,
"type": "test_item/collection/update",
"test_item_id": "initial_name",
"name": "Updated name",
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {
"id": "initial_name",
"name": "Updated name",
"immutable_string": "no-changes",
}
assert len(changes) == 2
assert changes[1] == (collection.CHANGE_UPDATED, "initial_name", response["result"])
# Delete invalid ID
await client.send_json(
{"id": 7, "type": "test_item/collection/update", "test_item_id": "non-existing"}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "not_found"
assert len(changes) == 2
# Delete
await client.send_json(
{"id": 8, "type": "test_item/collection/delete", "test_item_id": "initial_name"}
)
response = await client.receive_json()
assert response["success"]
assert len(changes) == 3
assert changes[2] == (
collection.CHANGE_REMOVED,
"initial_name",
{
"id": "initial_name",
"immutable_string": "no-changes",
"name": "Updated name",
},
)
|
import socket
from contextlib import contextmanager
from functools import partial
from itertools import count
from time import sleep
from .common import ignore_errors
from .messaging import Consumer, Producer
from .log import get_logger
from .utils.compat import nested
from .utils.encoding import safe_repr
from .utils.limits import TokenBucket
from .utils.objects import cached_property
__all__ = ('ConsumerMixin', 'ConsumerProducerMixin')
logger = get_logger(__name__)
debug, info, warn, error = (
logger.debug,
logger.info,
logger.warning,
logger.error
)
W_CONN_LOST = """\
Connection to broker lost, trying to re-establish connection...\
"""
W_CONN_ERROR = """\
Broker connection error, trying again in %s seconds: %r.\
"""
class ConsumerMixin:
"""Convenience mixin for implementing consumer programs.
It can be used outside of threads, with threads, or greenthreads
(eventlet/gevent) too.
The basic class would need a :attr:`connection` attribute
which must be a :class:`~kombu.Connection` instance,
and define a :meth:`get_consumers` method that returns a list
of :class:`kombu.Consumer` instances to use.
Supporting multiple consumers is important so that multiple
channels can be used for different QoS requirements.
Example:
.. code-block:: python
class Worker(ConsumerMixin):
task_queue = Queue('tasks', Exchange('tasks'), 'tasks')
def __init__(self, connection):
self.connection = connection
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[self.task_queue],
callbacks=[self.on_task])]
def on_task(self, body, message):
print('Got task: {0!r}'.format(body))
message.ack()
Methods:
* :meth:`extra_context`
Optional extra context manager that will be entered
after the connection and consumers have been set up.
Takes arguments ``(connection, channel)``.
* :meth:`on_connection_error`
Handler called if the connection is lost or
is unavailable.
Takes arguments ``(exc, interval)``, where interval
is the time in seconds when the connection will be retried.
The default handler will log the exception.
* :meth:`on_connection_revived`
Handler called as soon as the connection is re-established
after connection failure.
Takes no arguments.
* :meth:`on_consume_ready`
Handler called when the consumer is ready to accept
messages.
Takes arguments ``(connection, channel, consumers)``.
Also keyword arguments to ``consume`` are forwarded
to this handler.
* :meth:`on_consume_end`
Handler called after the consumers are canceled.
Takes arguments ``(connection, channel)``.
* :meth:`on_iteration`
Handler called for every iteration while draining
events.
Takes no arguments.
* :meth:`on_decode_error`
Handler called if a consumer was unable to decode
the body of a message.
Takes arguments ``(message, exc)`` where message is the
original message object.
The default handler will log the error and
acknowledge the message, so if you override make
sure to call super, or perform these steps yourself.
"""
#: maximum number of retries trying to re-establish the connection,
#: if the connection is lost/unavailable.
connect_max_retries = None
#: When this is set to true the consumer should stop consuming
#: and return, so that it can be joined if it is the implementation
#: of a thread.
should_stop = False
def get_consumers(self, Consumer, channel):
raise NotImplementedError('Subclass responsibility')
def on_connection_revived(self):
pass
def on_consume_ready(self, connection, channel, consumers, **kwargs):
pass
def on_consume_end(self, connection, channel):
pass
def on_iteration(self):
pass
def on_decode_error(self, message, exc):
error("Can't decode message body: %r (type:%r encoding:%r raw:%r')",
exc, message.content_type, message.content_encoding,
safe_repr(message.body))
message.ack()
def on_connection_error(self, exc, interval):
warn(W_CONN_ERROR, interval, exc, exc_info=1)
@contextmanager
def extra_context(self, connection, channel):
yield
def run(self, _tokens=1, **kwargs):
restart_limit = self.restart_limit
errors = (self.connection.connection_errors +
self.connection.channel_errors)
while not self.should_stop:
try:
if restart_limit.can_consume(_tokens): # pragma: no cover
for _ in self.consume(limit=None, **kwargs):
pass
else:
sleep(restart_limit.expected_time(_tokens))
except errors:
warn(W_CONN_LOST, exc_info=1)
@contextmanager
def consumer_context(self, **kwargs):
with self.Consumer() as (connection, channel, consumers):
with self.extra_context(connection, channel):
self.on_consume_ready(connection, channel, consumers, **kwargs)
yield connection, channel, consumers
def consume(self, limit=None, timeout=None, safety_interval=1, **kwargs):
elapsed = 0
with self.consumer_context(**kwargs) as (conn, channel, consumers):
for i in limit and range(limit) or count():
if self.should_stop:
break
self.on_iteration()
try:
conn.drain_events(timeout=safety_interval)
except socket.timeout:
conn.heartbeat_check()
elapsed += safety_interval
if timeout and elapsed >= timeout:
raise
except OSError:
if not self.should_stop:
raise
else:
yield
elapsed = 0
debug('consume exiting')
def maybe_conn_error(self, fun):
"""Use :func:`kombu.common.ignore_errors` instead."""
return ignore_errors(self, fun)
def create_connection(self):
return self.connection.clone()
@contextmanager
def establish_connection(self):
with self.create_connection() as conn:
conn.ensure_connection(self.on_connection_error,
self.connect_max_retries)
yield conn
@contextmanager
def Consumer(self):
with self.establish_connection() as conn:
self.on_connection_revived()
info('Connected to %s', conn.as_uri())
channel = conn.default_channel
cls = partial(Consumer, channel,
on_decode_error=self.on_decode_error)
with self._consume_from(*self.get_consumers(cls, channel)) as c:
yield conn, channel, c
debug('Consumers canceled')
self.on_consume_end(conn, channel)
debug('Connection closed')
def _consume_from(self, *consumers):
return nested(*consumers)
@cached_property
def restart_limit(self):
return TokenBucket(1)
@cached_property
def connection_errors(self):
return self.connection.connection_errors
@cached_property
def channel_errors(self):
return self.connection.channel_errors
class ConsumerProducerMixin(ConsumerMixin):
"""Consumer and Producer mixin.
Version of ConsumerMixin that maintains a separate connection used for
publishing messages.
Example:
.. code-block:: python
class Worker(ConsumerProducerMixin):
def __init__(self, connection):
self.connection = connection
def get_consumers(self, Consumer, channel):
return [Consumer(queues=Queue('foo'),
on_message=self.handle_message,
accept='application/json',
prefetch_count=10)]
def handle_message(self, message):
self.producer.publish(
{'message': 'hello to you'},
exchange='',
routing_key=message.properties['reply_to'],
correlation_id=message.properties['correlation_id'],
retry=True,
)
"""
_producer_connection = None
def on_consume_end(self, connection, channel):
if self._producer_connection is not None:
self._producer_connection.close()
self._producer_connection = None
@property
def producer(self):
return Producer(self.producer_connection)
@property
def producer_connection(self):
if self._producer_connection is None:
conn = self.connection.clone()
conn.ensure_connection(self.on_connection_error,
self.connect_max_retries)
self._producer_connection = conn
return self._producer_connection
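# Hedged usage sketch: running the Worker example from the ConsumerMixin docstring
# against a broker (the broker URL is an illustrative assumption):
#
#     from kombu import Connection
#
#     with Connection('amqp://guest:guest@localhost//') as conn:
#         Worker(conn).run()  # drains events until should_stop is set to True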
|
from homeassistant.components.ring import DOMAIN
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def setup_platform(hass, platform):
"""Set up the ring platform and prerequisites."""
MockConfigEntry(domain=DOMAIN, data={"username": "foo", "token": {}}).add_to_hass(
hass
)
with patch("homeassistant.components.ring.PLATFORMS", [platform]):
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
|
from datetime import datetime as dt
import tzlocal
from mock import patch
from pytest import raises
from arctic.date import mktz, TimezoneError
DEFAULT_TIME_ZONE_NAME = tzlocal.get_localzone().zone # 'Europe/London'
def test_mktz():
tz = mktz("Europe/London")
d = dt(2012, 2, 2, tzinfo=tz)
assert d.tzname() == 'GMT'
d = dt(2012, 7, 2, tzinfo=tz)
assert d.tzname() == 'BST'
tz = mktz('UTC')
d = dt(2012, 2, 2, tzinfo=tz)
assert d.tzname() == 'UTC'
d = dt(2012, 7, 2, tzinfo=tz)
assert d.tzname() == 'UTC'
# -------- replace_empty_timezones_with_default --------
def test_mktz_noarg():
tz = mktz()
assert DEFAULT_TIME_ZONE_NAME in str(tz)
def test_mktz_zone():
tz = mktz('UTC')
assert tz.zone == "UTC"
tz = mktz('/usr/share/zoneinfo/UTC')
assert tz.zone == "UTC"
def test_mktz_fails_if_invalid_timezone():
with patch('os.path.exists') as file_exists:
file_exists.return_value = False
with raises(TimezoneError):
mktz('junk')
|
from weblate.trans.models import ContributorAgreement
from weblate.trans.tests.test_views import FixtureTestCase
class AgreementTest(FixtureTestCase):
def test_basic(self):
self.assertFalse(
ContributorAgreement.objects.has_agreed(self.user, self.component)
)
ContributorAgreement.objects.create(self.user, self.component)
self.assertTrue(
ContributorAgreement.objects.has_agreed(self.user, self.component)
)
def test_perms(self):
self.assertTrue(self.user.has_perm("unit.edit", self.component))
self.component.agreement = "CLA"
self.user.clear_cache()
self.assertFalse(self.user.has_perm("unit.edit", self.component))
ContributorAgreement.objects.create(self.user, self.component)
self.user.clear_cache()
self.assertTrue(self.user.has_perm("unit.edit", self.component))
|
from os import path
from homeassistant import config as hass_config
import homeassistant.components.notify as notify
from homeassistant.components.telegram import DOMAIN
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_reload_notify(hass):
"""Verify we can reload the notify service."""
with patch("homeassistant.components.telegram_bot.async_setup", return_value=True):
assert await async_setup_component(
hass,
notify.DOMAIN,
{
notify.DOMAIN: [
{
"name": DOMAIN,
"platform": DOMAIN,
"chat_id": 1,
},
]
},
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, DOMAIN)
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"telegram/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert not hass.services.has_service(notify.DOMAIN, DOMAIN)
assert hass.services.has_service(notify.DOMAIN, "telegram_reloaded")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
import logging
import numpy as np
import numpy.ma as ma
import pandas as pd
from bson import Binary, SON
from .._compression import compress, decompress, compress_array
from ._serializer import Serializer
try:
from pandas.api.types import infer_dtype
except ImportError:
from pandas.lib import infer_dtype
try:
# pandas >= 0.23.0
from pandas._libs.writers import max_len_string_array
except ImportError:
try:
# pandas [0.20.0, 0.22.x]
from pandas._libs.lib import max_len_string_array
except ImportError:
# pandas <= 0.19.x
from pandas.lib import max_len_string_array
if int(pd.__version__.split('.')[1]) > 22:
from functools import partial
pd.concat = partial(pd.concat, sort=False)
DATA = 'd'
MASK = 'm'
TYPE = 't'
DTYPE = 'dt'
COLUMNS = 'c'
INDEX = 'i'
METADATA = 'md'
LENGTHS = 'ln'
class FrameConverter(object):
"""
Converts a Pandas Dataframe to and from PyMongo SON representation:
{
METADATA: {
COLUMNS: [col1, col2, ...] list of str
MASKS: {col1: mask, col2: mask, ...} dict of str: Binary
INDEX: [idx1, idx2, ...] list of str
TYPE: 'series' or 'dataframe'
LENGTHS: {col1: len, col2: len, ...} dict of str: int
}
DATA: BINARY(....) Compressed columns concatenated together
}
"""
def _convert_types(self, a):
"""
Converts object arrays of strings to numpy string arrays
"""
# No conversion for scalar type
if a.dtype != 'object':
return a, None
# We can't infer the type of an empty array, so just
# assume strings
if len(a) == 0:
return a.astype('U1'), None
# Compute a mask of missing values. Replace NaNs and Nones with
# empty strings so that type inference has a chance.
mask = pd.isnull(a)
if mask.sum() > 0:
a = a.copy()
np.putmask(a, mask, '')
else:
mask = None
if infer_dtype(a, skipna=False) == 'mixed':
# assume it's a string, otherwise raise an error
try:
a = np.array([s.encode('ascii') for s in a])
a = a.astype('O')
except:
raise ValueError("Column of type 'mixed' cannot be converted to string")
type_ = infer_dtype(a, skipna=False)
if type_ in ['unicode', 'string']:
max_len = max_len_string_array(a)
return a.astype('U{:d}'.format(max_len)), mask
else:
raise ValueError('Cannot store arrays with {} dtype'.format(type_))
def docify(self, df):
"""
Convert a Pandas DataFrame to SON.
Parameters
----------
df: DataFrame
The Pandas DataFrame to encode
"""
dtypes = {}
masks = {}
lengths = {}
columns = []
data = Binary(b'')
start = 0
arrays = []
for c in df:
try:
columns.append(str(c))
arr, mask = self._convert_types(df[c].values)
dtypes[str(c)] = arr.dtype.str
if mask is not None:
masks[str(c)] = Binary(compress(mask.tostring()))
arrays.append(arr.tostring())
except Exception as e:
typ = infer_dtype(df[c], skipna=False)
msg = "Column '{}' type is {}".format(str(c), typ)
logging.warning(msg)
raise e
arrays = compress_array(arrays)
for index, c in enumerate(df):
d = Binary(arrays[index])
lengths[str(c)] = (start, start + len(d) - 1)
start += len(d)
data += d
doc = SON({DATA: data, METADATA: {}})
doc[METADATA] = {COLUMNS: columns,
MASK: masks,
LENGTHS: lengths,
DTYPE: dtypes
}
return doc
def objify(self, doc, columns=None):
"""
Decode a PyMongo SON object into a Pandas DataFrame
"""
cols = columns or doc[METADATA][COLUMNS]
data = {}
for col in cols:
# if there is missing data in a chunk, we can default to NaN
# and pandas will autofill the missing values to the correct length
if col not in doc[METADATA][LENGTHS]:
d = np.array(np.nan)
else:
d = decompress(doc[DATA][doc[METADATA][LENGTHS][col][0]: doc[METADATA][LENGTHS][col][1] + 1])
# d is read-only, but that's not an issue since DataFrame will copy the data anyway.
d = np.frombuffer(d, doc[METADATA][DTYPE][col])
if MASK in doc[METADATA] and col in doc[METADATA][MASK]:
mask_data = decompress(doc[METADATA][MASK][col])
mask = np.frombuffer(mask_data, 'bool')
d = ma.masked_array(d, mask)
data[col] = d
# Copy into
return pd.DataFrame(data, columns=cols, copy=True)[cols]
class FrametoArraySerializer(Serializer):
TYPE = 'FrameToArray'
def __init__(self):
self.converter = FrameConverter()
def serialize(self, df):
if isinstance(df, pd.Series):
dtype = 'series'
df = df.to_frame()
else:
dtype = 'dataframe'
if (len(df.index.names) > 1 and None in df.index.names) or None in list(df.columns.values):
raise Exception("All columns and indexes must be named")
if df.index.names != [None]:
index = df.index.names
df = df.reset_index()
ret = self.converter.docify(df)
ret[METADATA][INDEX] = index
ret[METADATA][TYPE] = dtype
return ret
ret = self.converter.docify(df)
ret[METADATA][TYPE] = dtype
return ret
def deserialize(self, data, columns=None):
"""
Deserializes SON to a DataFrame
Parameters
----------
data: SON data
columns: None, or list of strings
optionally you can deserialize a subset of the data in the SON. Index
columns are ALWAYS deserialized, and should not be specified
Returns
-------
pandas dataframe or series
"""
if not data:
return pd.DataFrame()
meta = data[0][METADATA] if isinstance(data, list) else data[METADATA]
index = INDEX in meta
if columns:
if index:
columns = columns[:]
columns.extend(meta[INDEX])
if len(columns) > len(set(columns)):
raise Exception("Duplicate columns specified, cannot de-serialize")
if not isinstance(data, list):
df = self.converter.objify(data, columns)
else:
df = pd.concat([self.converter.objify(d, columns) for d in data], ignore_index=not index)
if index:
df = df.set_index(meta[INDEX])
if meta[TYPE] == 'series':
return df[df.columns[0]]
return df
def combine(self, a, b):
if a.index.names != [None]:
return pd.concat([a, b]).sort_index()
return pd.concat([a, b])
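# Hedged round-trip sketch (the DataFrame below is made up; note that serialize()
# requires all columns and index levels to be named):
#
#     s = FrametoArraySerializer()
#     df = pd.DataFrame({'price': [1.0, 2.0]}, index=pd.Index([0, 1], name='idx'))
#     doc = s.serialize(df)          # SON with compressed, concatenated column data
#     restored = s.deserialize(doc)  # DataFrame re-indexed on 'idx'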
|
import asyncio
from datetime import timedelta
import logging
from typing import Any, Dict
from wled import WLED, Device as WLEDDevice, WLEDConnectionError, WLEDError
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
DOMAIN,
)
SCAN_INTERVAL = timedelta(seconds=5)
WLED_COMPONENTS = (LIGHT_DOMAIN, SENSOR_DOMAIN, SWITCH_DOMAIN)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the WLED components."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up WLED from a config entry."""
# Create WLED instance for this entry
coordinator = WLEDDataUpdateCoordinator(hass, host=entry.data[CONF_HOST])
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
# For backwards compat, set unique ID
if entry.unique_id is None:
hass.config_entries.async_update_entry(
entry, unique_id=coordinator.data.info.mac_address
)
# Set up all platforms for this device/entry.
for component in WLED_COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload WLED config entry."""
# Unload entities for this entry/device.
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_unload(entry, component)
for component in WLED_COMPONENTS
)
)
# Cleanup
del hass.data[DOMAIN][entry.entry_id]
if not hass.data[DOMAIN]:
del hass.data[DOMAIN]
return True
def wled_exception_handler(func):
"""Decorate WLED calls to handle WLED exceptions.
A decorator that wraps the passed in function, catches WLED errors,
and handles the availability of the device in the data coordinator.
"""
async def handler(self, *args, **kwargs):
try:
await func(self, *args, **kwargs)
self.coordinator.update_listeners()
except WLEDConnectionError as error:
_LOGGER.error("Error communicating with API: %s", error)
self.coordinator.last_update_success = False
self.coordinator.update_listeners()
except WLEDError as error:
_LOGGER.error("Invalid response from API: %s", error)
return handler
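# Usage sketch for the decorator above (hypothetical entity method, for
# illustration only; the class name and the wled.master call are assumptions):
#
#   class ExampleWLEDLight(WLEDEntity, LightEntity):
#       @wled_exception_handler
#       async def async_turn_off(self, **kwargs):
#           await self.coordinator.wled.master(on=False)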
class WLEDDataUpdateCoordinator(DataUpdateCoordinator[WLEDDevice]):
"""Class to manage fetching WLED data from single endpoint."""
def __init__(
self,
hass: HomeAssistant,
*,
host: str,
):
"""Initialize global WLED data updater."""
self.wled = WLED(host, session=async_get_clientsession(hass))
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=SCAN_INTERVAL,
)
def update_listeners(self) -> None:
"""Call update on all listeners."""
for update_callback in self._listeners:
update_callback()
async def _async_update_data(self) -> WLEDDevice:
"""Fetch data from WLED."""
try:
return await self.wled.update(full_update=not self.last_update_success)
except WLEDError as error:
raise UpdateFailed(f"Invalid response from API: {error}") from error
class WLEDEntity(CoordinatorEntity):
"""Defines a base WLED entity."""
def __init__(
self,
*,
entry_id: str,
coordinator: WLEDDataUpdateCoordinator,
name: str,
icon: str,
enabled_default: bool = True,
) -> None:
"""Initialize the WLED entity."""
super().__init__(coordinator)
self._enabled_default = enabled_default
self._entry_id = entry_id
self._icon = icon
self._name = name
self._unsub_dispatcher = None
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
class WLEDDeviceEntity(WLEDEntity):
"""Defines a WLED device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this WLED device."""
return {
ATTR_IDENTIFIERS: {(DOMAIN, self.coordinator.data.info.mac_address)},
ATTR_NAME: self.coordinator.data.info.name,
ATTR_MANUFACTURER: self.coordinator.data.info.brand,
ATTR_MODEL: self.coordinator.data.info.product,
ATTR_SOFTWARE_VERSION: self.coordinator.data.info.version,
}
|
from homeassistant import config_entries, setup
from homeassistant.components.ozw.config_flow import TITLE
from homeassistant.components.ozw.const import DOMAIN
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_user_create_entry(hass):
"""Test the user step creates an entry."""
hass.config.components.add("mqtt")
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.ozw.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TITLE
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_mqtt_not_setup(hass):
"""Test that mqtt is required."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "mqtt_required"
async def test_one_instance_allowed(hass):
"""Test that only one instance is allowed."""
entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
|
import functools
import io
import os
from distutils.version import LooseVersion
import numpy as np
from ..core import indexing
from ..core.utils import FrozenDict, is_remote_uri, read_magic_number
from ..core.variable import Variable
from .common import BackendEntrypoint, WritableCFDataStore, find_root_and_group
from .file_manager import CachingFileManager, DummyFileManager
from .locks import HDF5_LOCK, combine_locks, ensure_lock, get_write_lock
from .netCDF4_ import (
BaseNetCDF4Array,
_encode_nc4_variable,
_extract_nc4_variable_encoding,
_get_datatype,
_nc4_require_group,
)
from .store import open_backend_dataset_store
class H5NetCDFArrayWrapper(BaseNetCDF4Array):
def get_array(self, needs_lock=True):
ds = self.datastore._acquire(needs_lock)
variable = ds.variables[self.variable_name]
return variable
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem
)
def _getitem(self, key):
# h5py requires using lists for fancy indexing:
# https://github.com/h5py/h5py/issues/992
key = tuple(list(k) if isinstance(k, np.ndarray) else k for k in key)
with self.datastore.lock:
array = self.get_array(needs_lock=False)
return array[key]
def maybe_decode_bytes(txt):
if isinstance(txt, bytes):
return txt.decode("utf-8")
else:
return txt
def _read_attributes(h5netcdf_var):
# GH451
# to ensure conventions decoding works properly on Python 3, decode all
# bytes attributes to strings
attrs = {}
for k, v in h5netcdf_var.attrs.items():
if k not in ["_FillValue", "missing_value"]:
v = maybe_decode_bytes(v)
attrs[k] = v
return attrs
_extract_h5nc_encoding = functools.partial(
_extract_nc4_variable_encoding, lsd_okay=False, h5py_okay=True, backend="h5netcdf"
)
def _h5netcdf_create_group(dataset, name):
return dataset.create_group(name)
class H5NetCDFStore(WritableCFDataStore):
"""Store for reading and writing data via h5netcdf"""
__slots__ = (
"autoclose",
"format",
"is_remote",
"lock",
"_filename",
"_group",
"_manager",
"_mode",
)
def __init__(self, manager, group=None, mode=None, lock=HDF5_LOCK, autoclose=False):
import h5netcdf
if isinstance(manager, (h5netcdf.File, h5netcdf.Group)):
if group is None:
root, group = find_root_and_group(manager)
else:
if type(manager) is not h5netcdf.File:
raise ValueError(
"must supply a h5netcdf.File if the group "
"argument is provided"
)
root = manager
manager = DummyFileManager(root)
self._manager = manager
self._group = group
self._mode = mode
self.format = None
# todo: utilizing find_root_and_group seems a bit clunky
# making filename available on h5netcdf.Group seems better
self._filename = find_root_and_group(self.ds)[0].filename
self.is_remote = is_remote_uri(self._filename)
self.lock = ensure_lock(lock)
self.autoclose = autoclose
@classmethod
def open(
cls,
filename,
mode="r",
format=None,
group=None,
lock=None,
autoclose=False,
invalid_netcdf=None,
phony_dims=None,
):
import h5netcdf
if isinstance(filename, bytes):
raise ValueError(
"can't open netCDF4/HDF5 as bytes "
"try passing a path or file-like object"
)
elif isinstance(filename, io.IOBase):
magic_number = read_magic_number(filename)
if not magic_number.startswith(b"\211HDF\r\n\032\n"):
raise ValueError(
f"{magic_number} is not the signature of a valid netCDF file"
)
if format not in [None, "NETCDF4"]:
raise ValueError("invalid format for h5netcdf backend")
kwargs = {"invalid_netcdf": invalid_netcdf}
if phony_dims is not None:
if LooseVersion(h5netcdf.__version__) >= LooseVersion("0.8.0"):
kwargs["phony_dims"] = phony_dims
else:
raise ValueError(
"h5netcdf backend keyword argument 'phony_dims' needs "
"h5netcdf >= 0.8.0."
)
if lock is None:
if mode == "r":
lock = HDF5_LOCK
else:
lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])
manager = CachingFileManager(h5netcdf.File, filename, mode=mode, kwargs=kwargs)
return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)
def _acquire(self, needs_lock=True):
with self._manager.acquire_context(needs_lock) as root:
ds = _nc4_require_group(
root, self._group, self._mode, create_group=_h5netcdf_create_group
)
return ds
@property
def ds(self):
return self._acquire()
def open_store_variable(self, name, var):
import h5py
dimensions = var.dimensions
data = indexing.LazilyOuterIndexedArray(H5NetCDFArrayWrapper(name, self))
attrs = _read_attributes(var)
# netCDF4 specific encoding
encoding = {
"chunksizes": var.chunks,
"fletcher32": var.fletcher32,
"shuffle": var.shuffle,
}
# Convert h5py-style compression options to NetCDF4-Python
# style, if possible
if var.compression == "gzip":
encoding["zlib"] = True
encoding["complevel"] = var.compression_opts
elif var.compression is not None:
encoding["compression"] = var.compression
encoding["compression_opts"] = var.compression_opts
# save source so __repr__ can detect if it's local or not
encoding["source"] = self._filename
encoding["original_shape"] = var.shape
vlen_dtype = h5py.check_dtype(vlen=var.dtype)
if vlen_dtype is str:
encoding["dtype"] = str
elif vlen_dtype is not None: # pragma: no cover
# xarray doesn't support writing arbitrary vlen dtypes yet.
pass
else:
encoding["dtype"] = var.dtype
return Variable(dimensions, data, attrs, encoding)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return FrozenDict(_read_attributes(self.ds))
def get_dimensions(self):
return self.ds.dimensions
def get_encoding(self):
encoding = {}
encoding["unlimited_dims"] = {
k for k, v in self.ds.dimensions.items() if v is None
}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
if is_unlimited:
self.ds.dimensions[name] = None
self.ds.resize_dimension(name, length)
else:
self.ds.dimensions[name] = length
def set_attribute(self, key, value):
self.ds.attrs[key] = value
def encode_variable(self, variable):
return _encode_nc4_variable(variable)
def prepare_variable(
self, name, variable, check_encoding=False, unlimited_dims=None
):
import h5py
attrs = variable.attrs.copy()
dtype = _get_datatype(variable, raise_on_invalid_encoding=check_encoding)
fillvalue = attrs.pop("_FillValue", None)
if dtype is str and fillvalue is not None:
raise NotImplementedError(
"h5netcdf does not yet support setting a fill value for "
"variable-length strings "
"(https://github.com/shoyer/h5netcdf/issues/37). "
"Either remove '_FillValue' from encoding on variable %r "
"or set {'dtype': 'S1'} in encoding to use the fixed width "
"NC_CHAR type." % name
)
if dtype is str:
dtype = h5py.special_dtype(vlen=str)
encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding)
kwargs = {}
# Convert from NetCDF4-Python style compression settings to h5py style
# If both styles are used together, h5py takes precedence
# If set_encoding=True, raise ValueError in case of mismatch
if encoding.pop("zlib", False):
if check_encoding and encoding.get("compression") not in (None, "gzip"):
raise ValueError("'zlib' and 'compression' encodings mismatch")
encoding.setdefault("compression", "gzip")
if (
check_encoding
and "complevel" in encoding
and "compression_opts" in encoding
and encoding["complevel"] != encoding["compression_opts"]
):
raise ValueError("'complevel' and 'compression_opts' encodings mismatch")
complevel = encoding.pop("complevel", 0)
if complevel != 0:
encoding.setdefault("compression_opts", complevel)
encoding["chunks"] = encoding.pop("chunksizes", None)
# Do not apply compression, filters or chunking to scalars.
if variable.shape:
for key in [
"compression",
"compression_opts",
"shuffle",
"chunks",
"fletcher32",
]:
if key in encoding:
kwargs[key] = encoding[key]
if name not in self.ds:
nc4_var = self.ds.create_variable(
name,
dtype=dtype,
dimensions=variable.dims,
fillvalue=fillvalue,
**kwargs,
)
else:
nc4_var = self.ds[name]
for k, v in attrs.items():
nc4_var.attrs[k] = v
target = H5NetCDFArrayWrapper(name, self)
return target, variable.data
def sync(self):
self.ds.sync()
def close(self, **kwargs):
self._manager.close(**kwargs)
def guess_can_open_h5netcdf(store_spec):
try:
return read_magic_number(store_spec).startswith(b"\211HDF\r\n\032\n")
except TypeError:
pass
try:
_, ext = os.path.splitext(store_spec)
except TypeError:
return False
return ext in {".nc", ".nc4", ".cdf"}
def open_backend_dataset_h5netcdf(
filename_or_obj,
*,
mask_and_scale=True,
decode_times=None,
concat_characters=None,
decode_coords=None,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
format=None,
group=None,
lock=None,
invalid_netcdf=None,
phony_dims=None,
):
store = H5NetCDFStore.open(
filename_or_obj,
format=format,
group=group,
lock=lock,
invalid_netcdf=invalid_netcdf,
phony_dims=phony_dims,
)
ds = open_backend_dataset_store(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
h5netcdf_backend = BackendEntrypoint(
open_dataset=open_backend_dataset_h5netcdf, guess_can_open=guess_can_open_h5netcdf
)
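# With this entrypoint registered, an HDF5-backed netCDF file can be opened
# through xarray's normal API, e.g. (the path is a placeholder):
#
#   import xarray as xr
#   ds = xr.open_dataset("example.nc", engine="h5netcdf")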
|
from datetime import datetime, timedelta
import logging
from typing import Callable, Dict, Optional, Union
import herepy
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_MODE,
CONF_MODE,
CONF_NAME,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
EVENT_HOMEASSISTANT_START,
TIME_MINUTES,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers import location
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import DiscoveryInfoType
import homeassistant.util.dt as dt
_LOGGER = logging.getLogger(__name__)
CONF_DESTINATION_LATITUDE = "destination_latitude"
CONF_DESTINATION_LONGITUDE = "destination_longitude"
CONF_DESTINATION_ENTITY_ID = "destination_entity_id"
CONF_ORIGIN_LATITUDE = "origin_latitude"
CONF_ORIGIN_LONGITUDE = "origin_longitude"
CONF_ORIGIN_ENTITY_ID = "origin_entity_id"
CONF_API_KEY = "api_key"
CONF_TRAFFIC_MODE = "traffic_mode"
CONF_ROUTE_MODE = "route_mode"
CONF_ARRIVAL = "arrival"
CONF_DEPARTURE = "departure"
DEFAULT_NAME = "HERE Travel Time"
TRAVEL_MODE_BICYCLE = "bicycle"
TRAVEL_MODE_CAR = "car"
TRAVEL_MODE_PEDESTRIAN = "pedestrian"
TRAVEL_MODE_PUBLIC = "publicTransport"
TRAVEL_MODE_PUBLIC_TIME_TABLE = "publicTransportTimeTable"
TRAVEL_MODE_TRUCK = "truck"
TRAVEL_MODE = [
TRAVEL_MODE_BICYCLE,
TRAVEL_MODE_CAR,
TRAVEL_MODE_PEDESTRIAN,
TRAVEL_MODE_PUBLIC,
TRAVEL_MODE_PUBLIC_TIME_TABLE,
TRAVEL_MODE_TRUCK,
]
TRAVEL_MODES_PUBLIC = [TRAVEL_MODE_PUBLIC, TRAVEL_MODE_PUBLIC_TIME_TABLE]
TRAVEL_MODES_VEHICLE = [TRAVEL_MODE_CAR, TRAVEL_MODE_TRUCK]
TRAVEL_MODES_NON_VEHICLE = [TRAVEL_MODE_BICYCLE, TRAVEL_MODE_PEDESTRIAN]
TRAFFIC_MODE_ENABLED = "traffic_enabled"
TRAFFIC_MODE_DISABLED = "traffic_disabled"
ROUTE_MODE_FASTEST = "fastest"
ROUTE_MODE_SHORTEST = "shortest"
ROUTE_MODE = [ROUTE_MODE_FASTEST, ROUTE_MODE_SHORTEST]
ICON_BICYCLE = "mdi:bike"
ICON_CAR = "mdi:car"
ICON_PEDESTRIAN = "mdi:walk"
ICON_PUBLIC = "mdi:bus"
ICON_TRUCK = "mdi:truck"
UNITS = [CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL]
ATTR_DURATION = "duration"
ATTR_DISTANCE = "distance"
ATTR_ROUTE = "route"
ATTR_ORIGIN = "origin"
ATTR_DESTINATION = "destination"
ATTR_UNIT_SYSTEM = CONF_UNIT_SYSTEM
ATTR_TRAFFIC_MODE = CONF_TRAFFIC_MODE
ATTR_DURATION_IN_TRAFFIC = "duration_in_traffic"
ATTR_ORIGIN_NAME = "origin_name"
ATTR_DESTINATION_NAME = "destination_name"
SCAN_INTERVAL = timedelta(minutes=5)
NO_ROUTE_ERROR_MESSAGE = "HERE could not find a route based on the input"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Inclusive(
CONF_DESTINATION_LATITUDE, "destination_coordinates"
): cv.latitude,
vol.Inclusive(
CONF_DESTINATION_LONGITUDE, "destination_coordinates"
): cv.longitude,
vol.Exclusive(CONF_DESTINATION_LATITUDE, "destination"): cv.latitude,
vol.Exclusive(CONF_DESTINATION_ENTITY_ID, "destination"): cv.entity_id,
vol.Inclusive(CONF_ORIGIN_LATITUDE, "origin_coordinates"): cv.latitude,
vol.Inclusive(CONF_ORIGIN_LONGITUDE, "origin_coordinates"): cv.longitude,
vol.Exclusive(CONF_ORIGIN_LATITUDE, "origin"): cv.latitude,
vol.Exclusive(CONF_ORIGIN_ENTITY_ID, "origin"): cv.entity_id,
vol.Optional(CONF_DEPARTURE): cv.time,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MODE, default=TRAVEL_MODE_CAR): vol.In(TRAVEL_MODE),
vol.Optional(CONF_ROUTE_MODE, default=ROUTE_MODE_FASTEST): vol.In(ROUTE_MODE),
vol.Optional(CONF_TRAFFIC_MODE, default=False): cv.boolean,
vol.Optional(CONF_UNIT_SYSTEM): vol.In(UNITS),
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_DESTINATION_LATITUDE, CONF_DESTINATION_ENTITY_ID),
cv.has_at_least_one_key(CONF_ORIGIN_LATITUDE, CONF_ORIGIN_ENTITY_ID),
cv.key_value_schemas(
CONF_MODE,
{
None: PLATFORM_SCHEMA,
TRAVEL_MODE_BICYCLE: PLATFORM_SCHEMA,
TRAVEL_MODE_CAR: PLATFORM_SCHEMA,
TRAVEL_MODE_PEDESTRIAN: PLATFORM_SCHEMA,
TRAVEL_MODE_PUBLIC: PLATFORM_SCHEMA,
TRAVEL_MODE_TRUCK: PLATFORM_SCHEMA,
TRAVEL_MODE_PUBLIC_TIME_TABLE: PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_ARRIVAL, "arrival_departure"): cv.time,
vol.Exclusive(CONF_DEPARTURE, "arrival_departure"): cv.time,
}
),
},
),
)
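# Example configuration.yaml entry accepted by the schema above (all values
# are placeholders):
#
#   sensor:
#     - platform: here_travel_time
#       api_key: YOUR_API_KEY
#       origin_latitude: 40.7128
#       origin_longitude: -74.0060
#       destination_entity_id: device_tracker.example
#       mode: car
#       traffic_mode: true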
async def async_setup_platform(
hass: HomeAssistant,
config: Dict[str, Union[str, bool]],
async_add_entities: Callable,
discovery_info: Optional[DiscoveryInfoType] = None,
) -> None:
"""Set up the HERE travel time platform."""
api_key = config[CONF_API_KEY]
here_client = herepy.RoutingApi(api_key)
if not await hass.async_add_executor_job(
_are_valid_client_credentials, here_client
):
_LOGGER.error(
"Invalid credentials. This error is returned if the specified token was invalid or no contract could be found for this token"
)
return
if config.get(CONF_ORIGIN_LATITUDE) is not None:
origin = f"{config[CONF_ORIGIN_LATITUDE]},{config[CONF_ORIGIN_LONGITUDE]}"
origin_entity_id = None
else:
origin = None
origin_entity_id = config[CONF_ORIGIN_ENTITY_ID]
if config.get(CONF_DESTINATION_LATITUDE) is not None:
destination = (
f"{config[CONF_DESTINATION_LATITUDE]},{config[CONF_DESTINATION_LONGITUDE]}"
)
destination_entity_id = None
else:
destination = None
destination_entity_id = config[CONF_DESTINATION_ENTITY_ID]
travel_mode = config[CONF_MODE]
traffic_mode = config[CONF_TRAFFIC_MODE]
route_mode = config[CONF_ROUTE_MODE]
name = config[CONF_NAME]
units = config.get(CONF_UNIT_SYSTEM, hass.config.units.name)
arrival = config.get(CONF_ARRIVAL)
departure = config.get(CONF_DEPARTURE)
here_data = HERETravelTimeData(
here_client, travel_mode, traffic_mode, route_mode, units, arrival, departure
)
sensor = HERETravelTimeSensor(
name, origin, destination, origin_entity_id, destination_entity_id, here_data
)
async_add_entities([sensor])
def _are_valid_client_credentials(here_client: herepy.RoutingApi) -> bool:
"""Check if the provided credentials are correct using defaults."""
known_working_origin = [38.9, -77.04833]
known_working_destination = [39.0, -77.1]
try:
here_client.car_route(
known_working_origin,
known_working_destination,
[
herepy.RouteMode[ROUTE_MODE_FASTEST],
herepy.RouteMode[TRAVEL_MODE_CAR],
herepy.RouteMode[TRAFFIC_MODE_DISABLED],
],
)
except herepy.InvalidCredentialsError:
return False
return True
class HERETravelTimeSensor(Entity):
"""Representation of a HERE travel time sensor."""
def __init__(
self,
name: str,
origin: str,
destination: str,
origin_entity_id: str,
destination_entity_id: str,
here_data: "HERETravelTimeData",
) -> None:
"""Initialize the sensor."""
self._name = name
self._origin_entity_id = origin_entity_id
self._destination_entity_id = destination_entity_id
self._here_data = here_data
self._unit_of_measurement = TIME_MINUTES
self._attrs = {
ATTR_UNIT_SYSTEM: self._here_data.units,
ATTR_MODE: self._here_data.travel_mode,
ATTR_TRAFFIC_MODE: self._here_data.traffic_mode,
}
if self._origin_entity_id is None:
self._here_data.origin = origin
if self._destination_entity_id is None:
self._here_data.destination = destination
async def async_added_to_hass(self) -> None:
"""Delay the sensor update to avoid entity not found warnings."""
@callback
def delayed_sensor_update(event):
"""Update sensor after Home Assistant started."""
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, delayed_sensor_update
)
@property
def state(self) -> Optional[str]:
"""Return the state of the sensor."""
if self._here_data.traffic_mode:
if self._here_data.traffic_time is not None:
return str(round(self._here_data.traffic_time / 60))
if self._here_data.base_time is not None:
return str(round(self._here_data.base_time / 60))
return None
@property
def name(self) -> str:
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(
self,
) -> Optional[Dict[str, Union[None, float, str, bool]]]:
"""Return the state attributes."""
if self._here_data.base_time is None:
return None
res = self._attrs
if self._here_data.attribution is not None:
res[ATTR_ATTRIBUTION] = self._here_data.attribution
res[ATTR_DURATION] = self._here_data.base_time / 60
res[ATTR_DISTANCE] = self._here_data.distance
res[ATTR_ROUTE] = self._here_data.route
res[ATTR_DURATION_IN_TRAFFIC] = self._here_data.traffic_time / 60
res[ATTR_ORIGIN] = self._here_data.origin
res[ATTR_DESTINATION] = self._here_data.destination
res[ATTR_ORIGIN_NAME] = self._here_data.origin_name
res[ATTR_DESTINATION_NAME] = self._here_data.destination_name
return res
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def icon(self) -> str:
"""Icon to use in the frontend depending on travel_mode."""
if self._here_data.travel_mode == TRAVEL_MODE_BICYCLE:
return ICON_BICYCLE
if self._here_data.travel_mode == TRAVEL_MODE_PEDESTRIAN:
return ICON_PEDESTRIAN
if self._here_data.travel_mode in TRAVEL_MODES_PUBLIC:
return ICON_PUBLIC
if self._here_data.travel_mode == TRAVEL_MODE_TRUCK:
return ICON_TRUCK
return ICON_CAR
async def async_update(self) -> None:
"""Update Sensor Information."""
# Convert device_trackers to HERE friendly location
if self._origin_entity_id is not None:
self._here_data.origin = await self._get_location_from_entity(
self._origin_entity_id
)
if self._destination_entity_id is not None:
self._here_data.destination = await self._get_location_from_entity(
self._destination_entity_id
)
await self.hass.async_add_executor_job(self._here_data.update)
async def _get_location_from_entity(self, entity_id: str) -> Optional[str]:
"""Get the location from the entity state or attributes."""
entity = self.hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self.hass.states.get(f"zone.{entity.state}")
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location", entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# Check if state is valid coordinate set
if self._entity_state_is_valid_coordinate_set(entity.state):
return entity.state
_LOGGER.error(
"The state of %s is not a valid set of coordinates: %s",
entity_id,
entity.state,
)
return None
@staticmethod
def _entity_state_is_valid_coordinate_set(state: str) -> bool:
"""Check that the given string is a valid set of coordinates."""
schema = vol.Schema(cv.gps)
try:
coordinates = state.split(",")
schema(coordinates)
return True
except vol.MultipleInvalid:
return False
@staticmethod
def _get_location_from_attributes(entity: State) -> str:
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return f"{attr.get(ATTR_LATITUDE)},{attr.get(ATTR_LONGITUDE)}"
class HERETravelTimeData:
"""HERETravelTime data object."""
def __init__(
self,
here_client: herepy.RoutingApi,
travel_mode: str,
traffic_mode: bool,
route_mode: str,
units: str,
arrival: datetime,
departure: datetime,
) -> None:
"""Initialize herepy."""
self.origin = None
self.destination = None
self.travel_mode = travel_mode
self.traffic_mode = traffic_mode
self.route_mode = route_mode
self.arrival = arrival
self.departure = departure
self.attribution = None
self.traffic_time = None
self.distance = None
self.route = None
self.base_time = None
self.origin_name = None
self.destination_name = None
self.units = units
self._client = here_client
self.combine_change = True
def update(self) -> None:
"""Get the latest data from HERE."""
if self.traffic_mode:
traffic_mode = TRAFFIC_MODE_ENABLED
else:
traffic_mode = TRAFFIC_MODE_DISABLED
if self.destination is not None and self.origin is not None:
# Convert location to HERE friendly location
destination = self.destination.split(",")
origin = self.origin.split(",")
arrival = self.arrival
if arrival is not None:
arrival = convert_time_to_isodate(arrival)
departure = self.departure
if departure is not None:
departure = convert_time_to_isodate(departure)
if departure is None and arrival is None:
departure = "now"
_LOGGER.debug(
"Requesting route for origin: %s, destination: %s, route_mode: %s, mode: %s, traffic_mode: %s, arrival: %s, departure: %s",
origin,
destination,
herepy.RouteMode[self.route_mode],
herepy.RouteMode[self.travel_mode],
herepy.RouteMode[traffic_mode],
arrival,
departure,
)
try:
response = self._client.public_transport_timetable(
origin,
destination,
self.combine_change,
[
herepy.RouteMode[self.route_mode],
herepy.RouteMode[self.travel_mode],
herepy.RouteMode[traffic_mode],
],
arrival=arrival,
departure=departure,
)
except herepy.NoRouteFoundError:
# Better error message for cryptic no route error codes
_LOGGER.error(NO_ROUTE_ERROR_MESSAGE)
return
_LOGGER.debug("Raw response is: %s", response.response)
# pylint: disable=no-member
source_attribution = response.response.get("sourceAttribution")
if source_attribution is not None:
self.attribution = self._build_hass_attribution(source_attribution)
# pylint: disable=no-member
route = response.response["route"]
summary = route[0]["summary"]
waypoint = route[0]["waypoint"]
self.base_time = summary["baseTime"]
if self.travel_mode in TRAVEL_MODES_VEHICLE:
self.traffic_time = summary["trafficTime"]
else:
self.traffic_time = self.base_time
distance = summary["distance"]
if self.units == CONF_UNIT_SYSTEM_IMPERIAL:
# Convert to miles.
self.distance = distance / 1609.344
else:
# Convert to kilometers
self.distance = distance / 1000
# pylint: disable=no-member
self.route = response.route_short
self.origin_name = waypoint[0]["mappedRoadName"]
self.destination_name = waypoint[1]["mappedRoadName"]
@staticmethod
def _build_hass_attribution(source_attribution: Dict) -> Optional[str]:
"""Build a hass frontend ready string out of the sourceAttribution."""
suppliers = source_attribution.get("supplier")
if suppliers is not None:
supplier_titles = []
for supplier in suppliers:
title = supplier.get("title")
if title is not None:
supplier_titles.append(title)
joined_supplier_titles = ",".join(supplier_titles)
attribution = f"With the support of {joined_supplier_titles}. All information is provided without warranty of any kind."
return attribution
def convert_time_to_isodate(timestr: str) -> str:
"""Take a string like 08:00:00 and combine it with the current date."""
combined = datetime.combine(dt.start_of_local_day(), dt.parse_time(timestr))
if combined < datetime.now():
combined = combined + timedelta(days=1)
return combined.isoformat()
|
from datetime import datetime as dt
from mock import create_autospec
from pandas.util.testing import assert_frame_equal
from arctic.store.bitemporal_store import BitemporalStore
from tests.util import read_str_as_pandas
ts1 = read_str_as_pandas(""" sample_dt | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
def test_add_observe_dt_index():
self = create_autospec(BitemporalStore, observe_column='col_a')
assert_frame_equal(BitemporalStore._add_observe_dt_index(self, ts1, as_of=dt(2001, 1, 1)),
read_str_as_pandas("""sample_dt | col_a | near
2012-09-08 17:06:11.040 | 2001-01-01 | 1.0
2012-10-08 17:06:11.040 | 2001-01-01 | 2.0
2012-10-09 17:06:11.040 | 2001-01-01 | 2.5
2012-11-08 17:06:11.040 | 2001-01-01 | 3.0""", num_index=2))
|
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DEVICE_CLASS_GARAGE,
CoverEntity,
)
from .const import ATTR_DEVICE_TYPE, ATTR_DISCOVER_DEVICES
from .entity import HMDevice
HM_GARAGE = ("IPGarage",)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform."""
if discovery_info is None:
return
devices = []
for conf in discovery_info[ATTR_DISCOVER_DEVICES]:
if conf[ATTR_DEVICE_TYPE] in HM_GARAGE:
new_device = HMGarage(conf)
else:
new_device = HMCover(conf)
devices.append(new_device)
add_entities(devices, True)
class HMCover(HMDevice, CoverEntity):
"""Representation a HomeMatic Cover."""
@property
def current_cover_position(self):
"""
Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return int(self._hm_get_state() * 100)
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
if ATTR_POSITION in kwargs:
position = float(kwargs[ATTR_POSITION])
position = min(100, max(0, position))
level = position / 100.0
self._hmdevice.set_level(level, self._channel)
@property
def is_closed(self):
"""Return whether the cover is closed."""
if self.current_cover_position is not None:
return self.current_cover_position == 0
return None
def open_cover(self, **kwargs):
"""Open the cover."""
self._hmdevice.move_up(self._channel)
def close_cover(self, **kwargs):
"""Close the cover."""
self._hmdevice.move_down(self._channel)
def stop_cover(self, **kwargs):
"""Stop the device if in motion."""
self._hmdevice.stop(self._channel)
def _init_data_struct(self):
"""Generate a data dictionary (self._data) from metadata."""
self._state = "LEVEL"
self._data.update({self._state: None})
if "LEVEL_2" in self._hmdevice.WRITENODE:
self._data.update({"LEVEL_2": None})
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
if "LEVEL_2" not in self._data:
return None
return int(self._data.get("LEVEL_2", 0) * 100)
def set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
if "LEVEL_2" in self._data and ATTR_TILT_POSITION in kwargs:
position = float(kwargs[ATTR_TILT_POSITION])
position = min(100, max(0, position))
level = position / 100.0
self._hmdevice.set_cover_tilt_position(level, self._channel)
def open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
if "LEVEL_2" in self._data:
self._hmdevice.open_slats()
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
if "LEVEL_2" in self._data:
self._hmdevice.close_slats()
def stop_cover_tilt(self, **kwargs):
"""Stop cover tilt."""
if "LEVEL_2" in self._data:
self.stop_cover(**kwargs)
class HMGarage(HMCover):
"""Represents a Homematic Garage cover. Homematic garage covers do not support position attributes."""
@property
def current_cover_position(self):
"""
Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
# Garage covers do not support position; always return None
return None
@property
def is_closed(self):
"""Return whether the cover is closed."""
return self._hmdevice.is_closed(self._hm_get_state())
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_GARAGE
def _init_data_struct(self):
"""Generate a data dictionary (self._data) from metadata."""
self._state = "DOOR_STATE"
self._data.update({self._state: None})
|
import mock
from kubernetes.client import V1DeleteOptions
from pysensu_yelp import Status
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import (
evicted_pods_per_service,
)
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import EvictedPod
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import get_evicted_pods
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import get_pod_service
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import (
notify_service_owners,
)
from paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods import remove_pods
def test_get_evicted_pods():
pod1 = mock.MagicMock(
status=mock.MagicMock(reason="Evicted", phase="Failed"),
metadata=mock.MagicMock(),
)
pod1.metadata.name = "pod-id-1"
pod2 = mock.MagicMock(
status=mock.MagicMock(reason=None, phase="Running"), metadata=mock.MagicMock()
)
pod2.metadata.name = "pod-id-2"
pod3 = mock.MagicMock(
status=mock.MagicMock(reason=None, phase="Running"), metadata=mock.MagicMock()
)
pod3.metadata.name = "pod-id-3"
evicted_pods = get_evicted_pods([pod1, pod2, pod3])
assert len(evicted_pods) == 1
assert evicted_pods[0].metadata.name == "pod-id-1"
def test_get_pod_service():
pod1 = mock.MagicMock(
metadata=mock.MagicMock(labels={"paasta.yelp.com/service": "my-service"})
)
pod_service = get_pod_service(pod1)
assert pod_service == "my-service"
def test_get_pod_service_no_labels():
pod1 = mock.MagicMock(metadata=mock.MagicMock(labels=None))
pod_service = get_pod_service(pod1)
assert pod_service is None
def test_notify_service_owners():
service_map = {
"service1": [
EvictedPod("pod1", "namespace1", "Ran out of disk"),
EvictedPod("pod2", "namespace1", "Ran out of mem"),
]
}
check_output = "The following pods have been evicted and will be removed from the cluster:\n- pod1: Ran out of disk\n- pod2: Ran out of mem\n"
with mock.patch(
"paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.send_event",
autospec=True,
) as mock_send_event:
notify_service_owners(service_map, "/soa_dir", False)
mock_send_event.assert_called_with(
"service1",
"pod-eviction.service1",
mock.ANY,
Status.CRITICAL,
check_output,
"/soa_dir",
)
def test_notify_service_owners_dry_run():
service_map = {
"service1": [
EvictedPod("pod1", "namespace1", "Ran out of disk"),
EvictedPod("pod2", "namespace1", "Ran out of mem"),
]
}
with mock.patch(
"paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.send_event",
autospec=True,
) as mock_send_event, mock.patch(
"paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.log", autospec=True
) as mock_logging:
notify_service_owners(service_map, "/soa_dir", True)
assert mock_send_event.call_count == 0
mock_logging.info.assert_called_once_with(
"Would have notified owners for service service1"
)
def test_remove_pods():
service_map = {
"service1": [
EvictedPod("pod1", "namespace1", "Ran out of disk"),
EvictedPod("pod2", "namespace1", "Ran out of mem"),
EvictedPod("pod3", "namespace1", "Ran out of disk"),
]
}
mock_client = mock.MagicMock()
remove_pods(mock_client, service_map, False)
assert mock_client.core.delete_namespaced_pod.call_count == 2
assert mock_client.core.delete_namespaced_pod.mock_calls == [
mock.call(
"pod1",
"namespace1",
body=V1DeleteOptions(),
grace_period_seconds=0,
propagation_policy="Background",
),
mock.call(
"pod2",
"namespace1",
body=V1DeleteOptions(),
grace_period_seconds=0,
propagation_policy="Background",
),
]
def test_remove_pods_dry_run():
service_map = {
"service1": [
EvictedPod("pod1", "namespace1", "Ran out of disk"),
EvictedPod("pod2", "namespace1", "Ran out of mem"),
EvictedPod("pod3", "namespace1", "Ran out of disk"),
]
}
mock_client = mock.MagicMock()
with mock.patch(
"paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.log", autospec=True
) as mock_logging:
remove_pods(mock_client, service_map, True)
assert mock_client.core.delete_namespaced_pod.call_count == 0
assert mock_logging.info.mock_calls == [
mock.call("Would have removed pod pod1"),
mock.call("Would have removed pod pod2"),
]
def test_evicted_pods_per_service():
pod1 = mock.MagicMock(
status=mock.MagicMock(
reason="Evicted", phase="Failed", message="Ran out of disk"
),
metadata=mock.MagicMock(
labels={"paasta.yelp.com/service": "my-service"}, namespace="namespace1"
),
)
pod1.metadata.name = "pod-id-1"
pod2 = mock.MagicMock(
status=mock.MagicMock(reason=None, phase="Running", message=None),
metadata=mock.MagicMock(
labels={"paasta.yelp.com/service": "my-service"}, namespace="namespace1"
),
)
pod2.metadata.name = "pod-id-2"
pod3 = mock.MagicMock(
status=mock.MagicMock(reason=None, phase="Running", message=None),
metadata=mock.MagicMock(
labels={"paasta.yelp.com/service": "my-service"}, namespace="namespace1"
),
)
pod3.metadata.name = "pod-id-3"
mock_client = mock.MagicMock()
with mock.patch(
"paasta_tools.kubernetes.bin.kubernetes_remove_evicted_pods.get_all_pods",
autospec=True,
) as mock_get_all_pods:
mock_get_all_pods.return_value = [pod1, pod2, pod3]
evicted_pods = evicted_pods_per_service(mock_client)
assert evicted_pods == {
"my-service": [EvictedPod("pod-id-1", "namespace1", "Ran out of disk")]
}
|
import asyncio
from unittest.mock import Mock
import pytest
from homeassistant.setup import async_setup_component
from tests.common import mock_coro
@pytest.fixture
def mock_system_info(hass):
"""Mock system info."""
hass.helpers.system_info.async_get_system_info = Mock(
return_value=mock_coro({"hello": True})
)
async def test_info_endpoint_return_info(hass, hass_ws_client, mock_system_info):
"""Test that the info endpoint works."""
assert await async_setup_component(hass, "system_health", {})
client = await hass_ws_client(hass)
resp = await client.send_json({"id": 6, "type": "system_health/info"})
resp = await client.receive_json()
assert resp["success"]
data = resp["result"]
assert len(data) == 1
data = data["homeassistant"]
assert data == {"hello": True}
async def test_info_endpoint_register_callback(hass, hass_ws_client, mock_system_info):
"""Test that the info endpoint allows registering callbacks."""
async def mock_info(hass):
return {"storage": "YAML"}
hass.components.system_health.async_register_info("lovelace", mock_info)
assert await async_setup_component(hass, "system_health", {})
client = await hass_ws_client(hass)
resp = await client.send_json({"id": 6, "type": "system_health/info"})
resp = await client.receive_json()
assert resp["success"]
data = resp["result"]
assert len(data) == 2
data = data["lovelace"]
assert data == {"storage": "YAML"}
async def test_info_endpoint_register_callback_timeout(
hass, hass_ws_client, mock_system_info
):
"""Test that the info endpoint timing out."""
async def mock_info(hass):
raise asyncio.TimeoutError
hass.components.system_health.async_register_info("lovelace", mock_info)
assert await async_setup_component(hass, "system_health", {})
client = await hass_ws_client(hass)
resp = await client.send_json({"id": 6, "type": "system_health/info"})
resp = await client.receive_json()
assert resp["success"]
data = resp["result"]
assert len(data) == 2
data = data["lovelace"]
assert data == {"error": "Fetching info timed out"}
async def test_info_endpoint_register_callback_exc(
hass, hass_ws_client, mock_system_info
):
"""Test that the info endpoint requires auth."""
async def mock_info(hass):
raise Exception("TEST ERROR")
hass.components.system_health.async_register_info("lovelace", mock_info)
assert await async_setup_component(hass, "system_health", {})
client = await hass_ws_client(hass)
resp = await client.send_json({"id": 6, "type": "system_health/info"})
resp = await client.receive_json()
assert resp["success"]
data = resp["result"]
assert len(data) == 2
data = data["lovelace"]
assert data == {"error": "TEST ERROR"}
|
from sense_hat import SenseHat
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
SUPPORT_SENSEHAT = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
DEFAULT_NAME = "sensehat"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Sense Hat Light platform."""
sensehat = SenseHat()
name = config.get(CONF_NAME)
add_entities([SenseHatLight(sensehat, name)])
class SenseHatLight(LightEntity):
"""Representation of an Sense Hat Light."""
def __init__(self, sensehat, name):
"""Initialize an Sense Hat Light.
Full brightness and white color.
"""
self._sensehat = sensehat
self._name = name
self._is_on = False
self._brightness = 255
self._hs_color = [0, 0]
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def brightness(self):
"""Read back the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Read back the color of the light."""
return self._hs_color
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_SENSEHAT
@property
def is_on(self):
"""Return true if light is on."""
return self._is_on
@property
def should_poll(self):
"""Return if we should poll this device."""
return False
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return True
def turn_on(self, **kwargs):
"""Instruct the light to turn on and set correct brightness & color."""
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_HS_COLOR in kwargs:
self._hs_color = kwargs[ATTR_HS_COLOR]
rgb = color_util.color_hsv_to_RGB(
self._hs_color[0], self._hs_color[1], self._brightness / 255 * 100
)
self._sensehat.clear(*rgb)
self._is_on = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._sensehat.clear()
self._is_on = False
self.schedule_update_ha_state()
|
import diamond.collector
import subprocess
import os
from diamond.collector import str_to_bool
class EximCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(EximCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the exim binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
'sudo_user': 'User to sudo as',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EximCollector, self).get_default_config()
config.update({
'path': 'exim',
'bin': '/usr/sbin/exim',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'sudo_user': 'root',
})
return config
def collect(self):
if not os.access(self.config['bin'], os.X_OK):
return
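# 'exim -bpc' prints a single number: the count of messages currently in
# the mail queue, which is what gets published below.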
command = [self.config['bin'], '-bpc']
if str_to_bool(self.config['use_sudo']):
# list.extend() returns None, so build the sudo-prefixed command by
# concatenating lists instead of assigning the result of extend().
command = [
self.config['sudo_cmd'],
'-u',
self.config['sudo_user'],
] + command
queuesize = subprocess.Popen(
command, stdout=subprocess.PIPE).communicate()[0].split()
if not len(queuesize):
return
queuesize = queuesize[-1]
self.publish('queuesize', queuesize)
|
from typing import Dict, Tuple
import cairo
from gi.repository import Gdk, GdkPixbuf, GObject, Gtk
class EmblemCellRenderer(Gtk.CellRenderer):
__gtype_name__ = "EmblemCellRenderer"
icon_cache: Dict[Tuple[str, int], GdkPixbuf.Pixbuf] = {}
icon_name = GObject.Property(
type=str,
nick='Named icon',
blurb='Name for base icon',
default='text-x-generic',
)
emblem_name = GObject.Property(
type=str,
nick='Named emblem icon',
blurb='Name for emblem icon to overlay',
)
secondary_emblem_name = GObject.Property(
type=str,
nick='Named secondary emblem icon',
blurb='Name for secondary emblem icon to overlay',
)
icon_tint = GObject.Property(
type=Gdk.RGBA,
nick='Icon tint',
blurb='GDK-parseable color to be used to tint icon',
)
def __init__(self):
super().__init__()
self._state = None
# FIXME: hardcoded sizes
self._icon_size = 16
self._emblem_size = 8
def _get_pixbuf(self, name, size):
if (name, size) not in self.icon_cache:
icon_theme = Gtk.IconTheme.get_default()
pixbuf = icon_theme.load_icon(name, size, 0).copy()
self.icon_cache[(name, size)] = pixbuf
return self.icon_cache[(name, size)]
def do_render(self, context, widget, background_area, cell_area, flags):
context.translate(cell_area.x, cell_area.y)
context.rectangle(0, 0, cell_area.width, cell_area.height)
context.clip()
# TODO: Incorporate padding
context.push_group()
if self.icon_name:
pixbuf = self._get_pixbuf(self.icon_name, self._icon_size)
context.set_operator(cairo.OPERATOR_SOURCE)
# Assumes square icons; may break if we don't get the requested
# size
height_offset = int((cell_area.height - pixbuf.get_height()) / 2)
Gdk.cairo_set_source_pixbuf(context, pixbuf, 0, height_offset)
context.rectangle(0, height_offset,
pixbuf.get_width(), pixbuf.get_height())
context.fill()
if self.icon_tint:
c = self.icon_tint
r, g, b = c.red, c.green, c.blue
# Figure out the difference between our tint colour and an
# empirically determined (i.e., guessed) satisfying luma and
# adjust the base colours accordingly
luma = (r + r + b + g + g + g) / 6.
extra_luma = (1.2 - luma) / 3.
r, g, b = [min(x + extra_luma, 1.) for x in (r, g, b)]
context.set_source_rgba(r, g, b, 0.4)
context.set_operator(cairo.OPERATOR_ATOP)
context.paint()
if self.emblem_name:
pixbuf = self._get_pixbuf(self.emblem_name, self._emblem_size)
x_offset = self._icon_size - self._emblem_size
context.set_operator(cairo.OPERATOR_OVER)
Gdk.cairo_set_source_pixbuf(context, pixbuf, x_offset, 0)
context.rectangle(x_offset, 0,
cell_area.width, self._emblem_size)
context.fill()
if self.secondary_emblem_name:
pixbuf = self._get_pixbuf(
self.secondary_emblem_name, self._emblem_size)
x_offset = self._icon_size - self._emblem_size
y_offset = self._icon_size - self._emblem_size + height_offset
context.set_operator(cairo.OPERATOR_OVER)
Gdk.cairo_set_source_pixbuf(
context, pixbuf, x_offset, y_offset)
context.rectangle(
x_offset, y_offset, cell_area.width, self._emblem_size)
context.fill()
context.pop_group_to_source()
context.set_operator(cairo.OPERATOR_OVER)
context.paint()
def do_get_size(self, widget, cell_area):
# TODO: Account for cell_area if we have alignment set
x_offset, y_offset = 0, 0
width, height = self._icon_size, self._icon_size
# TODO: Account for padding
return (x_offset, y_offset, width, height)
|
import errno
from contextlib import contextmanager
from queue import Empty
from time import sleep
from types import GeneratorType as generator # noqa
from kombu.log import get_logger
from kombu.utils.compat import fileno
from kombu.utils.eventio import ERR, READ, WRITE, poll
from kombu.utils.objects import cached_property
from vine import Thenable, promise
from .timer import Timer
__all__ = ('Hub', 'get_event_loop', 'set_event_loop')
logger = get_logger(__name__)
_current_loop = None
W_UNKNOWN_EVENT = """\
Received unknown event %r for fd %r, please contact support!\
"""
class Stop(BaseException):
"""Stops the event loop."""
def _raise_stop_error():
raise Stop()
@contextmanager
def _dummy_context(*args, **kwargs):
yield
def get_event_loop():
"""Get current event loop object."""
return _current_loop
def set_event_loop(loop):
"""Set the current event loop object."""
global _current_loop
_current_loop = loop
return loop
class Hub:
"""Event loop object.
Arguments:
timer (kombu.asynchronous.Timer): Specify custom timer instance.
"""
#: Flag set if reading from an fd will not block.
READ = READ
#: Flag set if writing to an fd will not block.
WRITE = WRITE
#: Flag set on error, and the fd should be read from asap.
ERR = ERR
#: List of callbacks to be called when the loop is exiting,
#: applied with the hub instance as sole argument.
on_close = None
def __init__(self, timer=None):
self.timer = timer if timer is not None else Timer()
self.readers = {}
self.writers = {}
self.on_tick = set()
self.on_close = set()
self._ready = set()
self._running = False
self._loop = None
# The eventloop (in celery.worker.loops)
# will merge fds in this set and then instead of calling
# the callback for each ready fd it will call the
# :attr:`consolidate_callback` with the list of ready_fds
# as an argument. This API is internal and is only
# used by the multiprocessing pool to find inqueues
# that are ready to write.
self.consolidate = set()
self.consolidate_callback = None
self.propagate_errors = ()
self._create_poller()
@property
def poller(self):
if not self._poller:
self._create_poller()
return self._poller
@poller.setter
def poller(self, value):
self._poller = value
def reset(self):
self.close()
self._create_poller()
def _create_poller(self):
self._poller = poll()
self._register_fd = self._poller.register
self._unregister_fd = self._poller.unregister
def _close_poller(self):
if self._poller is not None:
self._poller.close()
self._poller = None
self._register_fd = None
self._unregister_fd = None
def stop(self):
self.call_soon(_raise_stop_error)
def __repr__(self):
return '<Hub@{:#x}: R:{} W:{}>'.format(
id(self), len(self.readers), len(self.writers),
)
def fire_timers(self, min_delay=1, max_delay=10, max_timers=10,
propagate=()):
timer = self.timer
delay = None
if timer and timer._queue:
for i in range(max_timers):
delay, entry = next(self.scheduler)
if entry is None:
break
try:
entry()
except propagate:
raise
except (MemoryError, AssertionError):
raise
except OSError as exc:
if exc.errno == errno.ENOMEM:
raise
logger.error('Error in timer: %r', exc, exc_info=1)
except Exception as exc:
logger.error('Error in timer: %r', exc, exc_info=1)
return min(delay or min_delay, max_delay)
def _remove_from_loop(self, fd):
try:
self._unregister(fd)
finally:
self._discard(fd)
def add(self, fd, callback, flags, args=(), consolidate=False):
fd = fileno(fd)
try:
self.poller.register(fd, flags)
except ValueError:
self._remove_from_loop(fd)
raise
else:
dest = self.readers if flags & READ else self.writers
if consolidate:
self.consolidate.add(fd)
dest[fd] = None
else:
dest[fd] = callback, args
def remove(self, fd):
fd = fileno(fd)
self._remove_from_loop(fd)
def run_forever(self):
self._running = True
try:
while 1:
try:
self.run_once()
except Stop:
break
finally:
self._running = False
def run_once(self):
try:
next(self.loop)
except StopIteration:
self._loop = None
def call_soon(self, callback, *args):
if not isinstance(callback, Thenable):
callback = promise(callback, args)
self._ready.add(callback)
return callback
def call_later(self, delay, callback, *args):
return self.timer.call_after(delay, callback, args)
def call_at(self, when, callback, *args):
return self.timer.call_at(when, callback, args)
def call_repeatedly(self, delay, callback, *args):
return self.timer.call_repeatedly(delay, callback, args)
def add_reader(self, fds, callback, *args):
return self.add(fds, callback, READ | ERR, args)
def add_writer(self, fds, callback, *args):
return self.add(fds, callback, WRITE, args)
def remove_reader(self, fd):
writable = fd in self.writers
on_write = self.writers.get(fd)
try:
self._remove_from_loop(fd)
finally:
if writable:
cb, args = on_write
self.add(fd, cb, WRITE, args)
def remove_writer(self, fd):
readable = fd in self.readers
on_read = self.readers.get(fd)
try:
self._remove_from_loop(fd)
finally:
if readable:
cb, args = on_read
self.add(fd, cb, READ | ERR, args)
def _unregister(self, fd):
try:
self.poller.unregister(fd)
except (AttributeError, KeyError, OSError):
pass
def close(self, *args):
[self._unregister(fd) for fd in self.readers]
self.readers.clear()
[self._unregister(fd) for fd in self.writers]
self.writers.clear()
self.consolidate.clear()
self._close_poller()
for callback in self.on_close:
callback(self)
# Complete remaining todo before Hub close
# Eg: Acknowledge message
# To avoid infinite loop where one of the callables adds items
# to self._ready (via call_soon or otherwise).
# we create new list with current self._ready
todos = list(self._ready)
self._ready = set()
for item in todos:
item()
def _discard(self, fd):
fd = fileno(fd)
self.readers.pop(fd, None)
self.writers.pop(fd, None)
self.consolidate.discard(fd)
def on_callback_error(self, callback, exc):
logger.error(
'Callback %r raised exception: %r', callback, exc, exc_info=1,
)
def create_loop(self,
generator=generator, sleep=sleep, min=min, next=next,
Empty=Empty, StopIteration=StopIteration,
KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR):
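# Builtins and the READ/WRITE/ERR flags are passed in as default arguments
# above so that lookups inside this hot loop hit fast local names instead of
# globals or attribute access.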
readers, writers = self.readers, self.writers
poll = self.poller.poll
fire_timers = self.fire_timers
hub_remove = self.remove
scheduled = self.timer._queue
consolidate = self.consolidate
consolidate_callback = self.consolidate_callback
on_tick = self.on_tick
propagate = self.propagate_errors
while 1:
todo = self._ready
self._ready = set()
for tick_callback in on_tick:
tick_callback()
for item in todo:
if item:
item()
poll_timeout = fire_timers(propagate=propagate) if scheduled else 1
# print('[[[HUB]]]: %s' % (self.repr_active(),))
if readers or writers:
to_consolidate = []
try:
events = poll(poll_timeout)
# print('[EVENTS]: %s' % (self.repr_events(events),))
except ValueError: # Issue celery/#882
return
for fd, event in events or ():
general_error = False
if fd in consolidate and \
writers.get(fd) is None:
to_consolidate.append(fd)
continue
cb = cbargs = None
if event & READ:
try:
cb, cbargs = readers[fd]
except KeyError:
self.remove_reader(fd)
continue
elif event & WRITE:
try:
cb, cbargs = writers[fd]
except KeyError:
self.remove_writer(fd)
continue
elif event & ERR:
general_error = True
else:
logger.info(W_UNKNOWN_EVENT, event, fd)
general_error = True
if general_error:
try:
cb, cbargs = (readers.get(fd) or
writers.get(fd))
except TypeError:
pass
if cb is None:
self.remove(fd)
continue
if isinstance(cb, generator):
try:
next(cb)
except OSError as exc:
if exc.errno != errno.EBADF:
raise
hub_remove(fd)
except StopIteration:
pass
except Exception:
hub_remove(fd)
raise
else:
try:
cb(*cbargs)
except Empty:
pass
if to_consolidate:
consolidate_callback(to_consolidate)
else:
# no sockets yet, startup is probably not done.
sleep(min(poll_timeout, 0.1))
yield
def repr_active(self):
from .debug import repr_active
return repr_active(self)
def repr_events(self, events):
from .debug import repr_events
return repr_events(self, events or [])
@cached_property
def scheduler(self):
return iter(self.timer)
@property
def loop(self):
if self._loop is None:
self._loop = self.create_loop()
return self._loop
|
import copy
from functools import wraps
import logging
import time
from bluepy.btle import ( # pylint: disable=import-error, no-member, no-name-in-module
BTLEException,
)
import decora # pylint: disable=import-error, no-member
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.const import CONF_API_KEY, CONF_DEVICES, CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util as util
_LOGGER = logging.getLogger(__name__)
SUPPORT_DECORA_LED = SUPPORT_BRIGHTNESS
def _name_validator(config):
"""Validate the name."""
config = copy.deepcopy(config)
for address, device_config in config[CONF_DEVICES].items():
if CONF_NAME not in device_config:
device_config[CONF_NAME] = util.slugify(address)
return config
DEVICE_SCHEMA = vol.Schema(
{vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_API_KEY): cv.string}
)
PLATFORM_SCHEMA = vol.Schema(
vol.All(
PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}}
),
_name_validator,
)
)
def retry(method):
"""Retry bluetooth commands."""
@wraps(method)
def wrapper_retry(device, *args, **kwargs):
"""Try send command and retry on error."""
initial = time.monotonic()
while True:
if time.monotonic() - initial >= 10:
return None
try:
return method(device, *args, **kwargs)
except (decora.decoraException, AttributeError, BTLEException):
_LOGGER.warning(
"Decora connect error for device %s. Reconnecting...",
device.name,
)
# pylint: disable=protected-access
device._switch.connect()
return wrapper_retry
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up an Decora switch."""
lights = []
for address, device_config in config[CONF_DEVICES].items():
device = {}
device["name"] = device_config[CONF_NAME]
device["key"] = device_config[CONF_API_KEY]
device["address"] = address
light = DecoraLight(device)
lights.append(light)
add_entities(lights)
class DecoraLight(LightEntity):
"""Representation of an Decora light."""
def __init__(self, device):
"""Initialize the light."""
self._name = device["name"]
self._address = device["address"]
self._key = device["key"]
self._switch = decora.decora(self._address, self._key)
self._brightness = 0
self._state = False
@property
def unique_id(self):
"""Return the ID of this light."""
return self._address
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_DECORA_LED
@property
def assumed_state(self):
"""We can read the actual state."""
return False
@retry
def set_state(self, brightness):
"""Set the state of this lamp to the provided brightness."""
self._switch.set_brightness(int(brightness / 2.55))
self._brightness = brightness
@retry
def turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
self._switch.on()
self._state = True
if brightness is not None:
self.set_state(brightness)
@retry
def turn_off(self, **kwargs):
"""Turn the specified or all lights off."""
self._switch.off()
self._state = False
@retry
def update(self):
"""Synchronise internal state with the actual light state."""
self._brightness = self._switch.get_brightness() * 2.55
self._state = self._switch.get_on()
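# A configuration sketch matching PLATFORM_SCHEMA above; the MAC address and
# API key below are placeholders, not real values:
#   light:
#     - platform: decora
#       devices:
#         "AA:BB:CC:DD:EE:FF":
#           name: bedside_lamp
#           api_key: "0123abcd"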
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import subprocess
from absl import flags
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import parameterized
FLAGS = flags.FLAGS
class TestOrderRandomizationTest(parameterized.TestCase):
"""Integration tests: Runs a py_test binary with randomization.
This is done by setting flags and environment variables.
"""
def setUp(self):
super(TestOrderRandomizationTest, self).setUp()
self._test_name = 'absl/testing/tests/absltest_randomization_testcase'
def _run_test(self, extra_argv, extra_env):
"""Runs the py_test binary in a subprocess, with the given args or env.
Args:
extra_argv: extra args to pass to the test
extra_env: extra env vars to set when running the test
Returns:
(stdout, test_cases, exit_code) tuple of (str, list of strs, int).
"""
env = dict(os.environ)
# If *this* test is being run with this flag, we don't want to
# automatically set it for all tests we run.
env.pop('TEST_RANDOMIZE_ORDERING_SEED', '')
if extra_env is not None:
env.update(extra_env)
command = (
[_bazelize_command.get_executable_path(self._test_name)] + extra_argv)
proc = subprocess.Popen(
args=command,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
stdout, _ = proc.communicate()
test_lines = [l for l in stdout.splitlines() if l.startswith('class ')]
return stdout, test_lines, proc.wait()
def test_no_args(self):
output, tests, exit_code = self._run_test([], None)
self.assertEqual(0, exit_code, msg='command output: ' + output)
self.assertNotIn('Randomizing test order with seed:', output)
cases = ['class A test ' + t for t in ('A', 'B', 'C')]
self.assertEqual(cases, tests)
@parameterized.parameters(
{
'argv': ['--test_randomize_ordering_seed=random'],
'env': None,
},
{
'argv': [],
'env': {
'TEST_RANDOMIZE_ORDERING_SEED': 'random',
},
},)
def test_simple_randomization(self, argv, env):
output, tests, exit_code = self._run_test(argv, env)
self.assertEqual(0, exit_code, msg='command output: ' + output)
self.assertIn('Randomizing test order with seed: ', output)
cases = ['class A test ' + t for t in ('A', 'B', 'C')]
# This may come back in any order; we just know it'll be the same
# set of elements.
self.assertSameElements(cases, tests)
@parameterized.parameters(
{
'argv': ['--test_randomize_ordering_seed=1'],
'env': None,
},
{
'argv': [],
'env': {
'TEST_RANDOMIZE_ORDERING_SEED': '1'
},
},
{
'argv': [],
'env': {
'LATE_SET_TEST_RANDOMIZE_ORDERING_SEED': '1'
},
},
)
def test_fixed_seed(self, argv, env):
output, tests, exit_code = self._run_test(argv, env)
self.assertEqual(0, exit_code, msg='command output: ' + output)
self.assertIn('Randomizing test order with seed: 1', output)
    # Even though the seed is fixed, we compute the expected order by
    # shuffling here, because random.shuffle behaves differently on
    # Python 2 and Python 3.
shuffled_cases = ['A', 'B', 'C']
random.Random(1).shuffle(shuffled_cases)
cases = ['class A test ' + t for t in shuffled_cases]
# We know what order this will come back for the random seed we've
# specified.
self.assertEqual(cases, tests)
@parameterized.parameters(
{
'argv': ['--test_randomize_ordering_seed=0'],
'env': {
'TEST_RANDOMIZE_ORDERING_SEED': 'random'
},
},
{
'argv': [],
'env': {
'TEST_RANDOMIZE_ORDERING_SEED': '0'
},
},)
def test_disabling_randomization(self, argv, env):
output, tests, exit_code = self._run_test(argv, env)
self.assertEqual(0, exit_code, msg='command output: ' + output)
self.assertNotIn('Randomizing test order with seed:', output)
cases = ['class A test ' + t for t in ('A', 'B', 'C')]
self.assertEqual(cases, tests)
if __name__ == '__main__':
absltest.main()
|
try:
import ast
from _markerlib.markers import default_environment, compile, interpret
except ImportError:
if 'ast' in globals():
raise
def default_environment():
return {}
def compile(marker):
def marker_fn(environment=None, override=None):
# 'empty markers are True' heuristic won't install extra deps.
return not marker.strip()
marker_fn.__doc__ = marker
return marker_fn
def interpret(marker, environment=None, override=None):
return compile(marker)()
|
from math import log, pi, sqrt
def erfinv(x, a=.147):
"""Approximation of the inverse error function
https://en.wikipedia.org/wiki/Error_function
#Approximation_with_elementary_functions
"""
lnx = log(1 - x * x)
part1 = (2 / (a * pi) + lnx / 2)
part2 = lnx / a
sgn = 1 if x > 0 else -1
return sgn * sqrt(sqrt(part1 * part1 - part2) - part1)
def norm_ppf(x):
if not 0 < x < 1:
raise ValueError("Can't compute the percentage point for value %d" % x)
return sqrt(2) * erfinv(2 * x - 1)
def ppf(x, n):
try:
from scipy import stats
except ImportError:
stats = None
if stats:
if n < 30:
return stats.t.ppf(x, n)
return stats.norm.ppf(x)
else:
if n < 30:
# TODO: implement power series:
# http://eprints.maths.ox.ac.uk/184/1/tdist.pdf
raise ImportError(
'You must have scipy installed to use t-student '
'when sample_size is below 30'
)
return norm_ppf(x)
# According to http://sphweb.bumc.bu.edu/otlt/MPH-Modules/BS/
# BS704_Confidence_Intervals/BS704_Confidence_Intervals_print.html
def confidence_interval_continuous(
point_estimate, stddev, sample_size, confidence=.95, **kwargs
):
"""Continuous confidence interval from sample size and standard error"""
alpha = ppf((confidence + 1) / 2, sample_size - 1)
margin = stddev / sqrt(sample_size)
return (point_estimate - alpha * margin, point_estimate + alpha * margin)
def confidence_interval_dichotomous(
point_estimate,
sample_size,
confidence=.95,
bias=False,
percentage=True,
**kwargs
):
"""Dichotomous confidence interval from sample size and maybe a bias"""
alpha = ppf((confidence + 1) / 2, sample_size - 1)
p = point_estimate
if percentage:
p /= 100
margin = sqrt(p * (1 - p) / sample_size)
if bias:
margin += .5 / sample_size
if percentage:
margin *= 100
return (point_estimate - alpha * margin, point_estimate + alpha * margin)
def confidence_interval_manual(point_estimate, low, high):
return (low, high)
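# A minimal usage sketch (illustrative, not part of the original module):
# norm_ppf(0.975) should come out close to the familiar 1.96, and a 95%
# confidence interval for a sample mean follows from the standard deviation
# and the sample size.
if __name__ == '__main__':
    print(norm_ppf(0.975))  # ~1.96
    print(confidence_interval_continuous(10.0, 2.0, 100))  # ~(9.61, 10.39)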
|
from datetime import timedelta
import logging
from typing import Dict
from pycfdns import CloudflareUpdater
from pycfdns.exceptions import (
CloudflareAuthenticationException,
CloudflareConnectionException,
CloudflareException,
)
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_API_TOKEN, CONF_EMAIL, CONF_ZONE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from .const import (
CONF_RECORDS,
DATA_UNDO_UPDATE_INTERVAL,
DEFAULT_UPDATE_INTERVAL,
DOMAIN,
SERVICE_UPDATE_RECORDS,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.deprecated(CONF_EMAIL, invalidation_version="0.119"),
cv.deprecated(CONF_API_KEY, invalidation_version="0.119"),
cv.deprecated(CONF_ZONE, invalidation_version="0.119"),
cv.deprecated(CONF_RECORDS, invalidation_version="0.119"),
vol.Schema(
{
vol.Optional(CONF_EMAIL): cv.string,
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_ZONE): cv.string,
vol.Optional(CONF_RECORDS): vol.All(cv.ensure_list, [cv.string]),
}
),
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: Dict) -> bool:
"""Set up the component."""
hass.data.setdefault(DOMAIN, {})
if len(hass.config_entries.async_entries(DOMAIN)) > 0:
return True
if DOMAIN in config and CONF_API_KEY in config[DOMAIN]:
persistent_notification.async_create(
hass,
"Cloudflare integration now requires an API Token. Please go to the integrations page to setup.",
"Cloudflare Setup",
"cloudflare_setup",
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Cloudflare from a config entry."""
cfupdate = CloudflareUpdater(
async_get_clientsession(hass),
entry.data[CONF_API_TOKEN],
entry.data[CONF_ZONE],
entry.data[CONF_RECORDS],
)
try:
zone_id = await cfupdate.get_zone_id()
except CloudflareAuthenticationException:
_LOGGER.error("API access forbidden. Please reauthenticate")
return False
except CloudflareConnectionException as error:
raise ConfigEntryNotReady from error
async def update_records(now):
"""Set up recurring update."""
try:
await _async_update_cloudflare(cfupdate, zone_id)
except CloudflareException as error:
_LOGGER.error("Error updating zone %s: %s", entry.data[CONF_ZONE], error)
async def update_records_service(call):
"""Set up service for manual trigger."""
try:
await _async_update_cloudflare(cfupdate, zone_id)
except CloudflareException as error:
_LOGGER.error("Error updating zone %s: %s", entry.data[CONF_ZONE], error)
update_interval = timedelta(minutes=DEFAULT_UPDATE_INTERVAL)
undo_interval = async_track_time_interval(hass, update_records, update_interval)
hass.data[DOMAIN][entry.entry_id] = {
DATA_UNDO_UPDATE_INTERVAL: undo_interval,
}
hass.services.async_register(DOMAIN, SERVICE_UPDATE_RECORDS, update_records_service)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Cloudflare config entry."""
hass.data[DOMAIN][entry.entry_id][DATA_UNDO_UPDATE_INTERVAL]()
hass.data[DOMAIN].pop(entry.entry_id)
return True
async def _async_update_cloudflare(cfupdate: CloudflareUpdater, zone_id: str):
_LOGGER.debug("Starting update for zone %s", cfupdate.zone)
records = await cfupdate.get_record_info(zone_id)
_LOGGER.debug("Records: %s", records)
await cfupdate.update_records(zone_id, records)
_LOGGER.debug("Update for zone %s is complete", cfupdate.zone)
|
import tensornetwork as tn
import pytest
import numpy as np
def assert_nodes_eq(a, b):
assert type(a) == type(b) #pylint: disable=unidiomatic-typecheck
assert getattr(a, 'name', None) == getattr(b, 'name', None)
assert getattr(a, 'axis_names', None) == getattr(b, 'axis_names', None)
assert getattr(a, 'backend', None) == getattr(b, 'backend', None)
assert getattr(a, 'shape', None) == getattr(b, 'shape', None)
assert getattr(a, 'rank', None) == getattr(b, 'rank', None)
assert getattr(a, 'dtype', None) == getattr(b, 'dtype', None)
assert getattr(a, 'dimension', None) == getattr(b, 'dimension', None)
ta = getattr(a, 'tensor', None)
if isinstance(ta, np.ndarray):
assert (ta == getattr(b, 'tensor', None)).all()
def assert_edges_eq(a, b):
assert isinstance(a, tn.Edge) and isinstance(b, tn.Edge)
assert a.name == b.name
assert a._axes == b._axes
def assert_graphs_eq(a_nodes, b_nodes):
assert len(a_nodes) == len(b_nodes)
a_nodes_dict = {}
b_nodes_dict = {}
for i, (a, b) in enumerate(zip(a_nodes, b_nodes)):
a_nodes_dict[a] = i
b_nodes_dict[b] = i
for a, b in zip(a_nodes, b_nodes):
for e1, e2 in zip(a.edges, b.edges):
assert_edges_eq(e1, e2)
assert a_nodes_dict.get(e1.node2,
None) == b_nodes_dict.get(e2.node2, None)
def create_basic_network():
np.random.seed(10)
a = tn.Node(np.random.normal(size=[8]), name='an', axis_names=['a1'])
b = tn.Node(np.random.normal(size=[8, 8, 8]),
name='bn',
axis_names=['b1', 'b2', 'b3'])
c = tn.Node(np.random.normal(size=[8, 8, 8]),
name='cn',
axis_names=['c1', 'c2', 'c3'])
d = tn.Node(np.random.normal(size=[8, 8, 8]),
name='dn',
axis_names=['d1', 'd2', 'd3'])
a[0] ^ b[0]
b[1] ^ c[0]
c[1] ^ d[0]
c[2] ^ b[2]
return [a, b, c, d]
def test_basic_serial():
nodes = create_basic_network()
s = tn.nodes_to_json(nodes)
new_nodes, _ = tn.nodes_from_json(s)
for x, y in zip(nodes, new_nodes):
assert_nodes_eq(x, y)
assert_graphs_eq(nodes, new_nodes)
c = tn.contractors.greedy(nodes, ignore_edge_order=True)
new_c = tn.contractors.greedy(new_nodes, ignore_edge_order=True)
np.testing.assert_allclose(c.tensor, new_c.tensor)
def test_excluded_node_serial():
nodes = create_basic_network()
s = tn.nodes_to_json(nodes[:-1])
new_nodes, _ = tn.nodes_from_json(s)
for x, y in zip(nodes, new_nodes):
assert_nodes_eq(x, y)
with pytest.raises(AssertionError):
assert_graphs_eq(nodes, new_nodes)
sub_graph = nodes[:-1]
sub_graph[-1][1].disconnect(sub_graph[-1][1].name)
assert_graphs_eq(sub_graph, new_nodes)
def test_serial_with_bindings():
a, b, c, d = create_basic_network()
bindings = {}
a[0].name = 'ea0'
bindings['ea'] = a[0]
for s, n in zip(['eb', 'ec', 'ed'], [b, c, d]):
for i, e in enumerate(n.edges):
e.name = s + str(i)
bindings[s] = bindings.get(s, ()) + (e,)
s = tn.nodes_to_json([a, b, c, d], edge_binding=bindings)
_, new_bindings = tn.nodes_from_json(s)
assert len(new_bindings) == len(bindings)
assert bindings['ea'].name == new_bindings['ea'][0].name
for k in ['eb', 'ec', 'ed']:
new_names = {e.name for e in new_bindings[k]}
names = {e.name for e in bindings[k]}
assert names == new_names
def test_serial_non_str_keys():
a, b, c, d = create_basic_network()
bindings = {}
bindings[1] = a[0]
with pytest.raises(TypeError):
_ = tn.nodes_to_json([a, b, c, d], edge_binding=bindings)
def test_serial_non_edge_values():
a, b, c, d = create_basic_network()
bindings = {}
bindings['non_edge'] = a
with pytest.raises(TypeError):
_ = tn.nodes_to_json([a, b, c, d], edge_binding=bindings)
def test_serial_exclude_non_network_edges():
a, b, c, d = create_basic_network() # pylint: disable=unused-variable
bindings = {'include': a[0], 'boundary': b[1], 'exclude': d[0]}
s = tn.nodes_to_json([a, b], edge_binding=bindings)
nodes, new_bindings = tn.nodes_from_json(s)
assert len(nodes) == 2
assert 'include' in new_bindings and 'boundary' in new_bindings
assert 'exclude' not in new_bindings
|
import logging
import unittest
import os
import numpy
from gensim.models.wrappers import wordrank
from gensim.test.utils import datapath, get_tmpfile
class TestWordrank(unittest.TestCase):
def setUp(self):
wr_home = os.environ.get('WR_HOME', None)
self.wr_path = wr_home if wr_home else None
self.corpus_file = datapath('lee.cor')
self.out_name = 'testmodel'
self.wr_file = datapath('test_glove.txt')
if not self.wr_path:
return
self.test_model = wordrank.Wordrank.train(
self.wr_path, self.corpus_file, self.out_name, iter=6,
dump_period=5, period=5, np=4, cleanup_files=True
)
def testLoadWordrankFormat(self):
"""Test model successfully loaded from Wordrank format file"""
model = wordrank.Wordrank.load_wordrank_model(self.wr_file)
vocab_size, dim = 76, 50
self.assertEqual(model.vectors.shape, (vocab_size, dim))
self.assertEqual(len(model), vocab_size)
def testEnsemble(self):
"""Test ensemble of two embeddings"""
if not self.wr_path:
return
new_emb = self.test_model.ensemble_embedding(self.wr_file, self.wr_file)
self.assertEqual(new_emb.shape, (76, 50))
def testPersistence(self):
"""Test storing/loading the entire model"""
if not self.wr_path:
return
tmpf = get_tmpfile('gensim_wordrank.test')
self.test_model.save(tmpf)
loaded = wordrank.Wordrank.load(tmpf)
self.models_equal(self.test_model, loaded)
def testSimilarity(self):
"""Test n_similarity for vocab words"""
if not self.wr_path:
return
self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
self.assertEqual(self.test_model.similarity('the', 'and'), self.test_model.similarity('the', 'and'))
def testLookup(self):
if not self.wr_path:
return
self.assertTrue(numpy.allclose(self.test_model['night'], self.test_model[['night']]))
def models_equal(self, model, model2):
self.assertEqual(len(model), len(model2))
self.assertEqual(set(model.index_to_key), set(model2.index_to_key))
self.assertTrue(numpy.allclose(model.syn0, model2.syn0))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
import urwid
class CursorPudding(urwid.Widget):
_sizing = frozenset(['flow'])
_selectable = True
def __init__(self):
self.cursor_col = 0
def rows(self, size, focus=False):
return 1
def render(self, size, focus=False):
(maxcol,) = size
        num_pudding = maxcol // len("Pudding")
cursor = None
if focus:
cursor = self.get_cursor_coords(size)
        return urwid.TextCanvas(
            ["Pudding" * num_pudding], [], cursor=cursor, maxcol=maxcol)
def get_cursor_coords(self, size):
(maxcol,) = size
col = min(self.cursor_col, maxcol - 1)
return col, 0
def keypress(self, size, key):
(maxcol, ) = size
if key == 'left':
col = self.cursor_col - 1
elif key == 'right':
col = self.cursor_col + 1
else:
return key
        self.cursor_col = max(0, min(maxcol - 1, col))
self._invalidate()
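# To try this widget interactively, one would typically wrap it in a Filler
# and hand it to a MainLoop, roughly (a hypothetical quick check, assuming a
# real terminal; left/right move the cursor):
#   urwid.MainLoop(urwid.Filler(CursorPudding(), 'top')).run()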
|
import warnings
from django.test import TestCase
from zinnia.spam_checker import get_spam_checker
from zinnia.spam_checker.backends.all_is_spam import backend
class SpamCheckerTestCase(TestCase):
"""Test cases for zinnia.spam_checker"""
def test_get_spam_checker(self):
with warnings.catch_warnings(record=True) as w:
self.assertEqual(get_spam_checker('mymodule.myclass'), None)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
'mymodule.myclass backend cannot be imported')
with warnings.catch_warnings(record=True) as w:
self.assertEqual(
get_spam_checker(
'zinnia.tests.implementations.custom_spam_checker'), None)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
'This backend only exists for testing')
self.assertEqual(
get_spam_checker('zinnia.spam_checker.backends.all_is_spam'),
backend)
|
import sys
from re import compile as re
from ._compat import StringIO
from .environment import env
encoding = re(r'#.*coding[:=]\s*([-\w.]+)')
def run_code():
""" Run python code in current buffer.
:returns: None
"""
errors, err = [], ''
line1, line2 = env.var('a:line1'), env.var('a:line2')
lines = __prepare_lines(line1, line2)
if encoding.match(lines[0]):
lines.pop(0)
if encoding.match(lines[0]):
lines.pop(0)
elif encoding.match(lines[1]):
lines.pop(1)
context = dict(
__name__='__main__',
__file__=env.var('expand("%:p")'),
input=env.user_input,
raw_input=env.user_input)
sys.stdout, stdout_ = StringIO(), sys.stdout
sys.stderr, stderr_ = StringIO(), sys.stderr
try:
code = compile('\n'.join(lines) + '\n', env.curbuf.name, 'exec')
sys.path.insert(0, env.curdir)
exec(code, context) # noqa
sys.path.pop(0)
except SystemExit as e:
if e.code:
# A non-false code indicates abnormal termination.
# A false code will be treated as a
# successful run, and the error will be hidden from Vim
env.error("Script exited with code %s" % e.code)
return env.stop()
except Exception:
import traceback
err = traceback.format_exc()
else:
err = sys.stderr.getvalue()
output = sys.stdout.getvalue()
output = env.prepare_value(output, dumps=False)
sys.stdout, sys.stderr = stdout_, stderr_
errors += [er for er in err.splitlines() if er and "<string>" not in er]
env.let('l:traceback', errors[2:])
env.let('l:output', [s for s in output.splitlines()])
def __prepare_lines(line1, line2):
lines = [l.rstrip() for l in env.lines[int(line1) - 1:int(line2)]]
indent = 0
for line in lines:
if line:
indent = len(line) - len(line.lstrip())
break
if len(lines) == 1:
lines.append('')
return [l[indent:] for l in lines]
|
import unittest, sys
from .common_imports import (
etree, html, BytesIO, _bytes, _str,
HelperTestCase, make_doctest, skipIf,
fileInTestDir, fileUrlInTestDir
)
class ETreeDtdTestCase(HelperTestCase):
def test_dtd(self):
pass
def test_dtd_file(self):
parse = etree.parse
tree = parse(fileInTestDir("test.xml"))
root = tree.getroot()
dtd = etree.DTD(fileInTestDir("test.dtd"))
self.assertTrue(dtd.validate(root))
def test_dtd_stringio(self):
root = etree.XML(_bytes("<b/>"))
dtd = etree.DTD(BytesIO("<!ELEMENT b EMPTY>"))
self.assertTrue(dtd.validate(root))
def test_dtd_parse_invalid(self):
fromstring = etree.fromstring
parser = etree.XMLParser(dtd_validation=True)
xml = _bytes('<!DOCTYPE b SYSTEM "%s"><b><a/></b>' %
fileInTestDir("test.dtd"))
self.assertRaises(etree.XMLSyntaxError,
fromstring, xml, parser=parser)
def test_dtd_parse_file_not_found(self):
fromstring = etree.fromstring
dtd_filename = fileUrlInTestDir("__nosuch.dtd")
parser = etree.XMLParser(dtd_validation=True)
xml = _bytes('<!DOCTYPE b SYSTEM "%s"><b><a/></b>' % dtd_filename)
self.assertRaises(etree.XMLSyntaxError,
fromstring, xml, parser=parser)
errors = None
try:
fromstring(xml, parser=parser)
except etree.XMLSyntaxError:
e = sys.exc_info()[1]
self.assertTrue(e.error_log)
self.assertTrue(parser.error_log)
errors = [entry.message for entry in e.error_log
if dtd_filename in entry.message]
self.assertTrue(errors)
def test_dtd_parse_valid(self):
parser = etree.XMLParser(dtd_validation=True)
xml = ('<!DOCTYPE a SYSTEM "%s"><a><b/></a>' %
fileUrlInTestDir("test.dtd"))
root = etree.fromstring(xml, parser=parser)
def test_dtd_parse_valid_file_url(self):
parser = etree.XMLParser(dtd_validation=True)
xml = ('<!DOCTYPE a SYSTEM "%s"><a><b/></a>' %
fileUrlInTestDir("test.dtd"))
root = etree.fromstring(xml, parser=parser)
def test_dtd_parse_valid_relative(self):
parser = etree.XMLParser(dtd_validation=True)
xml = '<!DOCTYPE a SYSTEM "test.dtd"><a><b/></a>'
root = etree.fromstring(
xml, parser=parser, base_url=fileUrlInTestDir("test.xml"))
def test_dtd_parse_valid_relative_file_url(self):
parser = etree.XMLParser(dtd_validation=True)
xml = '<!DOCTYPE a SYSTEM "test.dtd"><a><b/></a>'
root = etree.fromstring(
xml, parser=parser, base_url=fileUrlInTestDir("test.xml"))
def test_dtd_invalid(self):
root = etree.XML("<b><a/></b>")
dtd = etree.DTD(BytesIO("<!ELEMENT b EMPTY>"))
self.assertRaises(etree.DocumentInvalid, dtd.assertValid, root)
def test_dtd_assertValid(self):
root = etree.XML("<b><a/></b>")
dtd = etree.DTD(BytesIO("<!ELEMENT b (a)><!ELEMENT a EMPTY>"))
dtd.assertValid(root)
def test_dtd_internal(self):
root = etree.XML(_bytes('''
<!DOCTYPE b SYSTEM "none" [
<!ELEMENT b (a)>
<!ELEMENT a EMPTY>
]>
<b><a/></b>
'''))
dtd = etree.ElementTree(root).docinfo.internalDTD
self.assertTrue(dtd)
dtd.assertValid(root)
def test_dtd_internal_invalid(self):
root = etree.XML(_bytes('''
<!DOCTYPE b SYSTEM "none" [
<!ELEMENT b (a)>
<!ELEMENT a (c)>
<!ELEMENT c EMPTY>
]>
<b><a/></b>
'''))
dtd = etree.ElementTree(root).docinfo.internalDTD
self.assertTrue(dtd)
self.assertFalse(dtd.validate(root))
def test_dtd_invalid_duplicate_id(self):
root = etree.XML(_bytes('''
<a><b id="id1"/><b id="id2"/><b id="id1"/></a>
'''))
dtd = etree.DTD(BytesIO(_bytes("""
<!ELEMENT a (b*)>
<!ATTLIST b
id ID #REQUIRED
>
<!ELEMENT b EMPTY>
""")))
self.assertFalse(dtd.validate(root))
self.assertTrue(dtd.error_log)
self.assertTrue([error for error in dtd.error_log
if 'id1' in error.message])
def test_dtd_api_internal(self):
root = etree.XML(_bytes('''
<!DOCTYPE b SYSTEM "none" [
<!ATTLIST a
attr1 (x | y | z) "z"
attr2 CDATA #FIXED "X"
>
<!ELEMENT b (a)>
<!ELEMENT a EMPTY>
]>
<b><a/></b>
'''))
dtd = etree.ElementTree(root).docinfo.internalDTD
self.assertTrue(dtd)
dtd.assertValid(root)
seen = []
for el in dtd.iterelements():
if el.name == 'a':
self.assertEqual(2, len(el.attributes()))
for attr in el.iterattributes():
if attr.name == 'attr1':
self.assertEqual('enumeration', attr.type)
self.assertEqual('none', attr.default)
self.assertEqual('z', attr.default_value)
values = attr.values()
values.sort()
self.assertEqual(['x', 'y', 'z'], values)
else:
self.assertEqual('attr2', attr.name)
self.assertEqual('cdata', attr.type)
self.assertEqual('fixed', attr.default)
self.assertEqual('X', attr.default_value)
else:
self.assertEqual('b', el.name)
self.assertEqual(0, len(el.attributes()))
seen.append(el.name)
seen.sort()
self.assertEqual(['a', 'b'], seen)
self.assertEqual(2, len(dtd.elements()))
def test_internal_dtds(self):
for el_count in range(2, 5):
for attr_count in range(4):
root = etree.XML(_bytes('''
<!DOCTYPE el0 SYSTEM "none" [
''' + ''.join(['''
<!ATTLIST el%d
attr%d (x | y | z) "z"
>
''' % (e, a) for a in range(attr_count) for e in range(el_count)
]) + ''.join(['''
<!ELEMENT el%d EMPTY>
''' % e for e in range(1, el_count)
]) + '''
''' + '<!ELEMENT el0 (%s)>' % '|'.join([
'el%d' % e for e in range(1, el_count)]) + '''
]>
<el0><el1 %s /></el0>
''' % ' '.join(['attr%d="x"' % a for a in range(attr_count)])))
dtd = etree.ElementTree(root).docinfo.internalDTD
self.assertTrue(dtd)
dtd.assertValid(root)
e = -1
for e, el in enumerate(dtd.iterelements()):
self.assertEqual(attr_count, len(el.attributes()))
a = -1
for a, attr in enumerate(el.iterattributes()):
self.assertEqual('enumeration', attr.type)
self.assertEqual('none', attr.default)
self.assertEqual('z', attr.default_value)
values = sorted(attr.values())
self.assertEqual(['x', 'y', 'z'], values)
self.assertEqual(attr_count - 1, a)
self.assertEqual(el_count - 1, e)
self.assertEqual(el_count, len(dtd.elements()))
def test_dtd_broken(self):
self.assertRaises(etree.DTDParseError, etree.DTD,
BytesIO("<!ELEMENT b HONKEY>"))
def test_parse_file_dtd(self):
parser = etree.XMLParser(attribute_defaults=True)
tree = etree.parse(fileInTestDir('test.xml'), parser)
root = tree.getroot()
self.assertEqual(
"valueA",
root.get("default"))
self.assertEqual(
"valueB",
root[0].get("default"))
@skipIf(etree.LIBXML_VERSION == (2, 9, 0),
"DTD loading is broken for incremental parsing in libxml2 2.9.0")
def test_iterparse_file_dtd_start(self):
iterparse = etree.iterparse
iterator = iterparse(fileInTestDir("test.xml"), events=('start',),
attribute_defaults=True)
attributes = [ element.get("default")
for event, element in iterator ]
self.assertEqual(
["valueA", "valueB"],
attributes)
@skipIf(etree.LIBXML_VERSION == (2, 9, 0),
"DTD loading is broken for incremental parsing in libxml2 2.9.0")
def test_iterparse_file_dtd_end(self):
iterparse = etree.iterparse
iterator = iterparse(fileInTestDir("test.xml"), events=('end',),
attribute_defaults=True)
attributes = [ element.get("default")
for event, element in iterator ]
self.assertEqual(
["valueB", "valueA"],
attributes)
def test_dtd_attrs(self):
dtd = etree.DTD(fileUrlInTestDir("test.dtd"))
# Test DTD.system_url attribute
self.assertTrue(dtd.system_url.endswith("test.dtd"))
# Test elements and their attributes
a = dtd.elements()[0]
self.assertEqual(a.name, "a")
self.assertEqual(a.type, "element")
self.assertEqual(a.content.name, "b")
self.assertEqual(a.content.type, "element")
self.assertEqual(a.content.occur, "once")
aattr = a.attributes()[0]
self.assertEqual(aattr.name, "default")
self.assertEqual(aattr.type, "enumeration")
self.assertEqual(aattr.values(), ["valueA", "valueB"])
self.assertEqual(aattr.default_value, "valueA")
b = dtd.elements()[1]
self.assertEqual(b.name, "b")
self.assertEqual(b.type, "empty")
self.assertEqual(b.content, None)
# Test entities and their attributes
c = dtd.entities()[0]
self.assertEqual(c.name, "c")
self.assertEqual(c.orig, "*")
self.assertEqual(c.content, "*")
# Test DTD.name attribute
root = etree.XML(_bytes('''
<!DOCTYPE a SYSTEM "none" [
<!ELEMENT a EMPTY>
]>
<a/>
'''))
dtd = etree.ElementTree(root).docinfo.internalDTD
self.assertEqual(dtd.name, "a")
# Test DTD.name and DTD.systemID attributes
parser = etree.XMLParser(dtd_validation=True)
xml = '<!DOCTYPE a SYSTEM "test.dtd"><a><b/></a>'
root = etree.fromstring(xml, parser=parser,
base_url=fileUrlInTestDir("test.xml"))
dtd = root.getroottree().docinfo.internalDTD
self.assertEqual(dtd.name, "a")
self.assertEqual(dtd.system_url, "test.dtd")
def test_declaration_escape_quote_pid(self):
# Standard allows quotes in systemliteral, but in that case
# systemliteral must be escaped with single quotes.
# See http://www.w3.org/TR/REC-xml/#sec-prolog-dtd.
root = etree.XML('''<!DOCTYPE a PUBLIC 'foo' '"'><a/>''')
doc = root.getroottree()
self.assertEqual(doc.docinfo.doctype,
'''<!DOCTYPE a PUBLIC "foo" '"'>''')
self.assertEqual(etree.tostring(doc),
_bytes('''<!DOCTYPE a PUBLIC "foo" '"'>\n<a/>'''))
def test_declaration_quote_withoutpid(self):
root = etree.XML('''<!DOCTYPE a SYSTEM '"'><a/>''')
doc = root.getroottree()
self.assertEqual(doc.docinfo.doctype, '''<!DOCTYPE a SYSTEM '"'>''')
self.assertEqual(etree.tostring(doc),
_bytes('''<!DOCTYPE a SYSTEM '"'>\n<a/>'''))
def test_declaration_apos(self):
root = etree.XML('''<!DOCTYPE a SYSTEM "'"><a/>''')
doc = root.getroottree()
self.assertEqual(doc.docinfo.doctype, '''<!DOCTYPE a SYSTEM "'">''')
self.assertEqual(etree.tostring(doc),
_bytes('''<!DOCTYPE a SYSTEM "'">\n<a/>'''))
def test_ietf_decl(self):
html_data = (
'<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n'
'<html></html>')
root = etree.HTML(html_data)
doc = root.getroottree()
self.assertEqual(doc.docinfo.doctype,
'<!DOCTYPE html PUBLIC "-//IETF//DTD HTML//EN">')
self.assertEqual(etree.tostring(doc, method='html'), _bytes(html_data))
def test_set_decl_public(self):
doc = etree.Element('test').getroottree()
doc.docinfo.public_id = 'bar'
doc.docinfo.system_url = 'baz'
self.assertEqual(doc.docinfo.doctype,
'<!DOCTYPE test PUBLIC "bar" "baz">')
self.assertEqual(etree.tostring(doc),
_bytes('<!DOCTYPE test PUBLIC "bar" "baz">\n<test/>'))
def test_html_decl(self):
# Slightly different to one above: when we create an html element,
# we do not start with a blank slate.
doc = html.Element('html').getroottree()
doc.docinfo.public_id = 'bar'
doc.docinfo.system_url = 'baz'
self.assertEqual(doc.docinfo.doctype,
'<!DOCTYPE html PUBLIC "bar" "baz">')
self.assertEqual(etree.tostring(doc),
_bytes('<!DOCTYPE html PUBLIC "bar" "baz">\n<html/>'))
def test_clean_doctype(self):
doc = html.Element('html').getroottree()
self.assertTrue(doc.docinfo.doctype != '')
doc.docinfo.clear()
self.assertTrue(doc.docinfo.doctype == '')
def test_set_decl_system(self):
doc = etree.Element('test').getroottree()
doc.docinfo.system_url = 'baz'
self.assertEqual(doc.docinfo.doctype,
'<!DOCTYPE test SYSTEM "baz">')
self.assertEqual(etree.tostring(doc),
_bytes('<!DOCTYPE test SYSTEM "baz">\n<test/>'))
def test_empty_decl(self):
doc = etree.Element('test').getroottree()
doc.docinfo.public_id = None
self.assertEqual(doc.docinfo.doctype,
'<!DOCTYPE test>')
self.assertTrue(doc.docinfo.public_id is None)
self.assertTrue(doc.docinfo.system_url is None)
self.assertEqual(etree.tostring(doc),
_bytes('<!DOCTYPE test>\n<test/>'))
def test_invalid_decl_1(self):
docinfo = etree.Element('test').getroottree().docinfo
def set_public_id(value):
docinfo.public_id = value
self.assertRaises(ValueError, set_public_id, _str('ä'))
self.assertRaises(ValueError, set_public_id, _str('qwerty ä asdf'))
def test_invalid_decl_2(self):
docinfo = etree.Element('test').getroottree().docinfo
def set_system_url(value):
docinfo.system_url = value
self.assertRaises(ValueError, set_system_url, '\'"')
self.assertRaises(ValueError, set_system_url, '"\'')
self.assertRaises(ValueError, set_system_url, ' " \' ')
def test_comment_before_dtd(self):
data = '<!--comment--><!DOCTYPE test>\n<!-- --><test/>'
doc = etree.fromstring(data).getroottree()
self.assertEqual(etree.tostring(doc),
_bytes(data))
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeDtdTestCase)])
suite.addTests(
[make_doctest('../../../doc/validation.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
import json
import logging
import string
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import util
VOLUME_EXISTS_STATUSES = frozenset(['creating', 'available', 'in-use', 'error'])
VOLUME_DELETED_STATUSES = frozenset(['deleting', 'deleted'])
VOLUME_KNOWN_STATUSES = VOLUME_EXISTS_STATUSES | VOLUME_DELETED_STATUSES
STANDARD = 'standard'
GP2 = 'gp2'
IO1 = 'io1'
ST1 = 'st1'
SC1 = 'sc1'
DISK_TYPE = {
disk.STANDARD: STANDARD,
disk.REMOTE_SSD: GP2,
disk.PIOPS: IO1
}
DISK_METADATA = {
STANDARD: {
disk.MEDIA: disk.HDD,
disk.REPLICATION: disk.ZONE,
},
GP2: {
disk.MEDIA: disk.SSD,
disk.REPLICATION: disk.ZONE,
},
IO1: {
disk.MEDIA: disk.SSD,
disk.REPLICATION: disk.ZONE,
},
ST1: {
disk.MEDIA: disk.HDD,
disk.REPLICATION: disk.ZONE
},
SC1: {
disk.MEDIA: disk.HDD,
disk.REPLICATION: disk.ZONE
}
}
LOCAL_SSD_METADATA = {
disk.MEDIA: disk.SSD,
disk.REPLICATION: disk.NONE,
}
LOCAL_HDD_METADATA = {
disk.MEDIA: disk.HDD,
disk.REPLICATION: disk.NONE,
}
LOCAL_HDD_PREFIXES = ['d2', 'hs1', 'h1', 'c1', 'cc2', 'm1', 'm2']
# Following lists based on
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html
NON_EBS_NVME_TYPES = [
'c4', 'd2', 'f1', 'g3', 'h1', 'i3', 'm4', 'p2', 'p3', 'r4', 't2', 'x1',
'x1e', 'm1', 'm3', 'c1', 'cc2', 'c3', 'm2', 'cr1', 'r3', 'hs1', 'i2', 'g2',
't1'
]
NON_LOCAL_NVME_TYPES = LOCAL_HDD_PREFIXES + [
'c3', 'cr1', 'g2', 'i2', 'm3', 'r3', 'x1', 'x1e']
# Following dictionary based on
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
NUM_LOCAL_VOLUMES = {
'c1.medium': 1,
'c1.xlarge': 4,
'c3.large': 2,
'c3.xlarge': 2,
'c3.2xlarge': 2,
'c3.4xlarge': 2,
'c3.8xlarge': 2,
'cc2.8xlarge': 4,
'cg1.4xlarge': 2,
'cr1.8xlarge': 2,
'g2.2xlarge': 1,
'hi1.4xlarge': 2,
'hs1.8xlarge': 24,
'i2.xlarge': 1,
'i2.2xlarge': 2,
'i2.4xlarge': 4,
'i2.8xlarge': 8,
'm1.small': 1,
'm1.medium': 1,
'm1.large': 2,
'm1.xlarge': 4,
'm2.xlarge': 1,
'm2.2xlarge': 1,
'm2.4xlarge': 2,
'm3.medium': 1,
'm3.large': 1,
'm3.xlarge': 2,
'm3.2xlarge': 2,
'r3.large': 1,
'r3.xlarge': 1,
'r3.2xlarge': 1,
'r3.4xlarge': 1,
'r3.8xlarge': 2,
'd2.xlarge': 3,
'd2.2xlarge': 6,
'd2.4xlarge': 12,
'd2.8xlarge': 24,
'i3.large': 1,
'i3.xlarge': 1,
'i3.2xlarge': 1,
'i3.4xlarge': 2,
'i3.8xlarge': 4,
'i3.16xlarge': 8,
'i3.metal': 8,
'i3en.large': 1,
'i3en.xlarge': 1,
'i3en.2xlarge': 2,
'i3en.3xlarge': 1,
'i3en.6xlarge': 2,
'i3en.12xlarge': 4,
'i3en.24xlarge': 8,
'c5ad.large': 1,
'c5ad.xlarge': 1,
'c5ad.2xlarge': 1,
'c5ad.4xlarge': 2,
'c5ad.8xlarge': 2,
'c5ad.12xlarge': 2,
'c5ad.16xlarge': 2,
'c5ad.24xlarge': 2,
'c5d.large': 1,
'c5d.xlarge': 1,
'c5d.2xlarge': 1,
'c5d.4xlarge': 1,
'c5d.9xlarge': 1,
'c5d.18xlarge': 2,
'm5d.large': 1,
'm5d.xlarge': 1,
'm5d.2xlarge': 1,
'm5d.4xlarge': 2,
'm5d.12xlarge': 2,
'm5d.24xlarge': 4,
'm6gd.large': 1,
'm6gd.xlarge': 1,
'm6gd.2xlarge': 1,
'm6gd.4xlarge': 1,
'm6gd.8xlarge': 1,
'm6gd.12xlarge': 2,
'm6gd.16xlarge': 2,
'r5d.large': 1,
'r5d.xlarge': 1,
'r5d.2xlarge': 1,
'r5d.4xlarge': 2,
'r5d.12xlarge': 2,
'r5d.24xlarge': 4,
'z1d.large': 1,
'z1d.xlarge': 1,
'z1d.2xlarge': 1,
'z1d.3xlarge': 2,
'z1d.6xlarge': 1,
'z1d.12xlarge': 2,
'x1.16xlarge': 1,
'x1.32xlarge': 2,
'x1e.xlarge': 1,
'x1e.2xlarge': 1,
'x1e.4xlarge': 1,
'x1e.8xlarge': 1,
'x1e.16xlarge': 1,
'x1e.32xlarge': 2,
'f1.2xlarge': 1,
'f1.4xlarge': 1,
'f1.16xlarge': 4,
'p3dn.24xlarge': 2
}
def LocalDiskIsHDD(machine_type):
"""Check whether the local disks use spinning magnetic storage."""
return machine_type.split('.')[0].lower() in LOCAL_HDD_PREFIXES
def LocalDriveIsNvme(machine_type):
"""Check if the machine type uses NVMe driver."""
return machine_type.split('.')[0].lower() not in NON_LOCAL_NVME_TYPES
def EbsDriveIsNvme(machine_type):
"""Check if the machine type uses NVMe driver."""
instance_family = machine_type.split('.')[0].lower()
return (instance_family not in NON_EBS_NVME_TYPES or
'metal' in machine_type)
AWS = 'AWS'
disk.RegisterDiskTypeMap(AWS, DISK_TYPE)
class AwsDiskSpec(disk.BaseDiskSpec):
"""Object holding the information needed to create an AwsDisk.
Attributes:
iops: None or int. IOPS for Provisioned IOPS (SSD) volumes in AWS.
"""
CLOUD = aws.CLOUD
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(AwsDiskSpec, cls)._ApplyFlags(config_values, flag_values)
if flag_values['aws_provisioned_iops'].present:
config_values['iops'] = flag_values.aws_provisioned_iops
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(AwsDiskSpec, cls)._GetOptionDecoderConstructions()
result.update({'iops': (option_decoders.IntDecoder, {'default': None,
'none_ok': True})})
return result
class AwsDisk(disk.BaseDisk):
"""Object representing an Aws Disk."""
_lock = threading.Lock()
vm_devices = {}
def __init__(self, disk_spec, zone, machine_type):
super(AwsDisk, self).__init__(disk_spec)
self.iops = disk_spec.iops
self.id = None
self.zone = zone
self.region = util.GetRegionFromZone(zone)
self.device_letter = None
self.attached_vm_id = None
self.machine_type = machine_type
if self.disk_type != disk.LOCAL:
self.metadata.update(DISK_METADATA.get(self.disk_type, {}))
else:
self.metadata.update((LOCAL_HDD_METADATA
if LocalDiskIsHDD(machine_type)
else LOCAL_SSD_METADATA))
if self.iops:
self.metadata['iops'] = self.iops
def AssignDeviceLetter(self, letter_suggestion, nvme_boot_drive_index):
if LocalDriveIsNvme(self.machine_type) and \
EbsDriveIsNvme(self.machine_type):
first_device_letter = 'b'
local_drive_number = ord(letter_suggestion) - ord(first_device_letter)
logging.info('local drive number is: %d', local_drive_number)
if local_drive_number < nvme_boot_drive_index:
self.device_letter = letter_suggestion
else:
# skip the boot drive
self.device_letter = chr(ord(letter_suggestion) + 1)
else:
self.device_letter = letter_suggestion
def _Create(self):
"""Creates the disk."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-volume',
'--region=%s' % self.region,
'--size=%s' % self.disk_size,
'--volume-type=%s' % self.disk_type]
if not util.IsRegion(self.zone):
create_cmd.append('--availability-zone=%s' % self.zone)
if self.disk_type == IO1:
create_cmd.append('--iops=%s' % self.iops)
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['VolumeId']
util.AddDefaultTags(self.id, self.region)
def _Delete(self):
"""Deletes the disk."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-volume',
'--region=%s' % self.region,
'--volume-id=%s' % self.id]
logging.info('Deleting AWS volume %s. This may fail if the disk is not '
'yet detached, but will be retried.', self.id)
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _Exists(self):
"""Returns true if the disk exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-volumes',
'--region=%s' % self.region,
'--filter=Name=volume-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
volumes = response['Volumes']
assert len(volumes) < 2, 'Too many volumes.'
if not volumes:
return False
status = volumes[0]['State']
assert status in VOLUME_KNOWN_STATUSES, status
return status in VOLUME_EXISTS_STATUSES
def Attach(self, vm):
"""Attaches the disk to a VM.
Args:
vm: The AwsVirtualMachine instance to which the disk will be attached.
"""
with self._lock:
self.attached_vm_id = vm.id
if self.attached_vm_id not in AwsDisk.vm_devices:
AwsDisk.vm_devices[self.attached_vm_id] = set(
string.ascii_lowercase)
self.device_letter = min(AwsDisk.vm_devices[self.attached_vm_id])
AwsDisk.vm_devices[self.attached_vm_id].remove(self.device_letter)
device_name = '/dev/xvdb%s' % self.device_letter
attach_cmd = util.AWS_PREFIX + [
'ec2',
'attach-volume',
'--region=%s' % self.region,
'--instance-id=%s' % self.attached_vm_id,
'--volume-id=%s' % self.id,
'--device=%s' % device_name]
logging.info('Attaching AWS volume %s. This may fail if the disk is not '
'ready, but will be retried.', self.id)
util.IssueRetryableCommand(attach_cmd)
def Detach(self):
"""Detaches the disk from a VM."""
detach_cmd = util.AWS_PREFIX + [
'ec2',
'detach-volume',
'--region=%s' % self.region,
'--instance-id=%s' % self.attached_vm_id,
'--volume-id=%s' % self.id]
util.IssueRetryableCommand(detach_cmd)
with self._lock:
assert self.attached_vm_id in AwsDisk.vm_devices
AwsDisk.vm_devices[self.attached_vm_id].add(self.device_letter)
self.attached_vm_id = None
self.device_letter = None
def GetDevicePath(self):
"""Returns the path to the device inside the VM."""
if self.disk_type == disk.LOCAL:
if LocalDriveIsNvme(self.machine_type):
first_device_letter = 'b'
return '/dev/nvme%sn1' % str(
ord(self.device_letter) - ord(first_device_letter))
return '/dev/xvd%s' % self.device_letter
else:
if EbsDriveIsNvme(self.machine_type):
first_device_letter = 'a'
return '/dev/nvme%sn1' % (
1 + NUM_LOCAL_VOLUMES.get(self.machine_type, 0) +
ord(self.device_letter) - ord(first_device_letter))
else:
return '/dev/xvdb%s' % self.device_letter
|
from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import mask_head_loss_post
from chainercv.links.model.fpn import mask_head_loss_pre
from chainercv.links.model.fpn import MaskHead
from chainercv.utils import mask_to_bbox
try:
import cv2 # NOQA
_cv2_available = True
except ImportError:
_cv2_available = False
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
@testing.parameterize(
{'n_class': 1 + 1},
{'n_class': 5 + 1},
{'n_class': 20 + 1},
)
class TestMaskHead(unittest.TestCase):
def setUp(self):
self.link = MaskHead(
n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))
def _check_call(self):
hs = [
chainer.Variable(_random_array(self.link.xp, (2, 64, 32, 32))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 16, 16))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 8, 8))),
]
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
segs = self.link(hs, rois, roi_indices)
self.assertIsInstance(segs, chainer.Variable)
self.assertIsInstance(segs.array, self.link.xp.ndarray)
self.assertEqual(
segs.shape,
(4, self.n_class, self.link.segm_size, self.link.segm_size))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
def _check_distribute(self):
rois = self.link.xp.array((
(0, 0, 10, 10),
(0, 1000, 0, 1000),
(0, 0, 224, 224),
(100, 100, 224, 224),
), dtype=np.float32)
roi_indices = self.link.xp.array((0, 1, 0, 0), dtype=np.int32)
n_roi = len(roi_indices)
rois, roi_indices, order = self.link.distribute(rois, roi_indices)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
for l in range(3):
self.assertIsInstance(rois[l], self.link.xp.ndarray)
self.assertIsInstance(roi_indices[l], self.link.xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(sum(rois[l].shape[0] for l in range(3)), 4)
self.assertEqual(len(order), n_roi)
self.assertIsInstance(order, self.link.xp.ndarray)
def test_distribute_cpu(self):
self._check_distribute()
@attr.gpu
def test_distribute_gpu(self):
self.link.to_gpu()
self._check_distribute()
def _check_decode(self):
segms = [
_random_array(
self.link.xp,
(1, self.n_class, self.link.segm_size, self.link.segm_size)),
_random_array(
self.link.xp,
(2, self.n_class, self.link.segm_size, self.link.segm_size)),
_random_array(
self.link.xp,
(1, self.n_class, self.link.segm_size, self.link.segm_size))
]
bboxes = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
labels = [
self.link.xp.random.randint(
0, self.n_class - 1, size=(1,), dtype=np.int32),
self.link.xp.random.randint(
0, self.n_class - 1, size=(2,), dtype=np.int32),
self.link.xp.random.randint(
0, self.n_class - 1, size=(1,), dtype=np.int32),
]
sizes = [(56, 56), (48, 48), (72, 72)]
masks = self.link.decode(
segms, bboxes, labels, sizes)
self.assertEqual(len(masks), 3)
for n in range(3):
self.assertIsInstance(masks[n], self.link.xp.ndarray)
self.assertEqual(masks[n].shape[0], labels[n].shape[0])
self.assertEqual(masks[n].shape[1:], sizes[n])
@unittest.skipUnless(_cv2_available, 'cv2 is not installed')
def test_decode_cpu(self):
self._check_decode()
class TestMaskHeadLoss(unittest.TestCase):
def _check_mask_head_loss_pre(self, xp):
n_inst = 12
segm_size = 28
rois = [
xp.array(((4, 1, 6, 3),), dtype=np.float32),
xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
xp.array((0,), dtype=np.int32),
xp.array((1, 0), dtype=np.int32),
xp.array((1,), dtype=np.int32),
]
masks = [
_random_array(xp, (n_inst, 60, 70)),
_random_array(xp, (n_inst, 60, 70)),
]
bboxes = [mask_to_bbox(mask) for mask in masks]
labels = [
xp.array((1,), dtype=np.int32),
xp.array((10, 4), dtype=np.int32),
xp.array((3,), dtype=np.int32),
]
rois, roi_indices, gt_segms, gt_mask_labels = mask_head_loss_pre(
rois, roi_indices, masks, bboxes, labels, segm_size)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
self.assertEqual(len(gt_segms), 3)
self.assertEqual(len(gt_mask_labels), 3)
for l in range(3):
self.assertIsInstance(rois[l], xp.ndarray)
self.assertIsInstance(roi_indices[l], xp.ndarray)
self.assertIsInstance(gt_segms[l], xp.ndarray)
self.assertIsInstance(gt_mask_labels[l], xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_segms[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_mask_labels[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(gt_segms[l].shape[1:], (segm_size, segm_size))
self.assertEqual(gt_mask_labels[l].shape[1:], ())
self.assertEqual(gt_segms[l].dtype, np.float32)
self.assertEqual(gt_mask_labels[l].dtype, np.int32)
@unittest.skipUnless(_cv2_available, 'cv2 is not installed')
def test_mask_head_loss_pre_cpu(self):
self._check_mask_head_loss_pre(np)
@attr.gpu
@unittest.skipUnless(_cv2_available, 'cv2 is not installed')
def test_mask_head_loss_pre_gpu(self):
import cupy
self._check_mask_head_loss_pre(cupy)
def _check_head_loss_post(self, xp):
B = 2
segms = chainer.Variable(_random_array(xp, (20, 81, 28, 28)))
mask_roi_indices = [
xp.random.randint(0, B, size=5).astype(np.int32),
xp.random.randint(0, B, size=7).astype(np.int32),
xp.random.randint(0, B, size=8).astype(np.int32),
]
gt_segms = [
_random_array(xp, (5, 28, 28)),
_random_array(xp, (7, 28, 28)),
_random_array(xp, (8, 28, 28)),
]
gt_mask_labels = [
xp.random.randint(0, 80, size=5).astype(np.int32),
xp.random.randint(0, 80, size=7).astype(np.int32),
xp.random.randint(0, 80, size=8).astype(np.int32),
]
mask_head_loss = mask_head_loss_post(
segms, mask_roi_indices, gt_segms, gt_mask_labels, B)
self.assertIsInstance(mask_head_loss, chainer.Variable)
self.assertIsInstance(mask_head_loss.array, xp.ndarray)
self.assertEqual(mask_head_loss.shape, ())
def test_head_loss_post_cpu(self):
self._check_head_loss_post(np)
@attr.gpu
def test_head_loss_post_gpu(self):
import cupy
self._check_head_loss_post(cupy)
testing.run_module(__name__, __file__)
|
import os
from pylatex.utils import fix_filename
def test_no_dots():
fname = "aaa"
assert fix_filename(fname) == fname
def test_one_dot():
fname = "aa.a"
assert fix_filename(fname) == fname
def test_two_dots():
fname = "aa.a.a"
original_os_name = os.name
try:
os.name = 'posix'
assert fix_filename(fname) == "{aa.a}.a"
os.name = 'nt'
assert fix_filename(fname) == "aa.a.a"
finally:
os.name = original_os_name
def test_three_dots():
fname = "abc.def.fgh.ijk"
assert fix_filename(fname) == "{abc.def.fgh}.ijk"
def test_path_and_three_dots():
fname = "/auu/bcd/abc.def.fgh.ijk"
assert fix_filename(fname) == "/auu/bcd/{abc.def.fgh}.ijk"
def test_dots_in_path_none_in_filename():
fname = "/au.u/b.c.d/abc"
assert fix_filename(fname) == "/au.u/b.c.d/abc"
def test_dots_in_path_one_in_filename():
fname = "/au.u/b.c.d/abc.def"
assert fix_filename(fname) == "/au.u/b.c.d/abc.def"
def test_dots_in_path_and_multiple_in_filename():
fname = "/au.u/b.c.d/abc.def.fgh.ijk"
assert fix_filename(fname) == "/au.u/b.c.d/{abc.def.fgh}.ijk"
def test_tilde_in_filename():
fname = "/etc/local/foo.bar.baz/foo~1/document.pdf"
    assert (fix_filename(fname) ==
            r'\detokenize{/etc/local/foo.bar.baz/foo~1/document.pdf}')
|
from unittest import mock
import pytest
from meld.meldbuffer import BufferLines, MeldBuffer
text = ("""0
1
2
3
4
5
6
7
8
9
10
""")
@pytest.mark.parametrize("line_start, line_end, expected_text", [
(0, 1, ["0"],),
(0, 2, ["0", "1"],),
# zero-sized slice
(9, 9, [],),
(9, 10, ["9"],),
(9, 11, ["9", "10"],),
# Past the end of the buffer
(9, 12, ["9", "10"],),
# Waaaay past the end of the buffer
(9, 9999, ["9", "10"],),
# And sidling towards past-the-end start indices
(10, 12, ["10"],),
(11, 12, [],),
])
def test_filter_text(line_start, line_end, expected_text):
with mock.patch('meld.meldbuffer.bind_settings', mock.DEFAULT):
buf = MeldBuffer()
buf.set_text(text)
buffer_lines = BufferLines(buf)
assert buffer_lines[line_start:line_end] == expected_text
|
from homeassistant.components.broadlink.const import DOMAIN, SENSOR_DOMAIN
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import get_device
from tests.common import mock_device_registry, mock_registry
async def test_a1_sensor_setup(hass):
"""Test a successful e-Sensor setup."""
device = get_device("Bedroom")
mock_api = device.get_mock_api()
mock_api.check_sensors_raw.return_value = {
"temperature": 27.4,
"humidity": 59.3,
"air_quality": 3,
"light": 2,
"noise": 1,
}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_api.check_sensors_raw.call_count == 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 5
sensors_and_states = {
(sensor.original_name, hass.states.get(sensor.entity_id).state)
for sensor in sensors
}
assert sensors_and_states == {
(f"{device.name} Temperature", "27.4"),
(f"{device.name} Humidity", "59.3"),
(f"{device.name} Air Quality", "3"),
(f"{device.name} Light", "2"),
(f"{device.name} Noise", "1"),
}
async def test_a1_sensor_update(hass):
"""Test a successful e-Sensor update."""
device = get_device("Bedroom")
mock_api = device.get_mock_api()
mock_api.check_sensors_raw.return_value = {
"temperature": 22.4,
"humidity": 47.3,
"air_quality": 3,
"light": 2,
"noise": 1,
}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 5
mock_api.check_sensors_raw.return_value = {
"temperature": 22.5,
"humidity": 47.4,
"air_quality": 2,
"light": 3,
"noise": 2,
}
await hass.helpers.entity_component.async_update_entity(
next(iter(sensors)).entity_id
)
assert mock_api.check_sensors_raw.call_count == 2
sensors_and_states = {
(sensor.original_name, hass.states.get(sensor.entity_id).state)
for sensor in sensors
}
assert sensors_and_states == {
(f"{device.name} Temperature", "22.5"),
(f"{device.name} Humidity", "47.4"),
(f"{device.name} Air Quality", "2"),
(f"{device.name} Light", "3"),
(f"{device.name} Noise", "2"),
}
async def test_rm_pro_sensor_setup(hass):
"""Test a successful RM pro sensor setup."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.return_value = {"temperature": 18.2}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_api.check_sensors.call_count == 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 1
sensors_and_states = {
(sensor.original_name, hass.states.get(sensor.entity_id).state)
for sensor in sensors
}
assert sensors_and_states == {(f"{device.name} Temperature", "18.2")}
async def test_rm_pro_sensor_update(hass):
"""Test a successful RM pro sensor update."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.return_value = {"temperature": 25.7}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 1
mock_api.check_sensors.return_value = {"temperature": 25.8}
await hass.helpers.entity_component.async_update_entity(
next(iter(sensors)).entity_id
)
assert mock_api.check_sensors.call_count == 2
sensors_and_states = {
(sensor.original_name, hass.states.get(sensor.entity_id).state)
for sensor in sensors
}
assert sensors_and_states == {(f"{device.name} Temperature", "25.8")}
async def test_rm_mini3_no_sensor(hass):
"""Test we do not set up sensors for RM mini 3."""
device = get_device("Entrance")
mock_api = device.get_mock_api()
mock_api.check_sensors.return_value = {"temperature": 0}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_api.check_sensors.call_count <= 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 0
async def test_rm4_pro_hts2_sensor_setup(hass):
"""Test a successful RM4 pro sensor setup with HTS2 cable."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.return_value = {"temperature": 22.5, "humidity": 43.7}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_api.check_sensors.call_count == 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 2
sensors_and_states = {
(sensor.original_name, hass.states.get(sensor.entity_id).state)
for sensor in sensors
}
assert sensors_and_states == {
(f"{device.name} Temperature", "22.5"),
(f"{device.name} Humidity", "43.7"),
}
async def test_rm4_pro_hts2_sensor_update(hass):
"""Test a successful RM4 pro sensor update with HTS2 cable."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.return_value = {"temperature": 16.7, "humidity": 34.1}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 2
mock_api.check_sensors.return_value = {"temperature": 16.8, "humidity": 34.0}
await hass.helpers.entity_component.async_update_entity(
next(iter(sensors)).entity_id
)
assert mock_api.check_sensors.call_count == 2
sensors_and_states = {
(sensor.original_name, hass.states.get(sensor.entity_id).state)
for sensor in sensors
}
assert sensors_and_states == {
(f"{device.name} Temperature", "16.8"),
(f"{device.name} Humidity", "34.0"),
}
async def test_rm4_pro_no_sensor(hass):
"""Test we do not set up sensors for RM4 pro without HTS2 cable."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.return_value = {"temperature": 0, "humidity": 0}
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_api.check_sensors.call_count <= 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
entries = async_entries_for_device(entity_registry, device_entry.id)
sensors = {entry for entry in entries if entry.domain == SENSOR_DOMAIN}
assert len(sensors) == 0
|
import keras
import tensorflow as tf
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
class MVLSTM(BaseModel):
"""
MVLSTM Model.
Examples:
>>> model = MVLSTM()
>>> model.params['lstm_units'] = 32
>>> model.params['top_k'] = 50
>>> model.params['mlp_num_layers'] = 2
>>> model.params['mlp_num_units'] = 20
>>> model.params['mlp_num_fan_out'] = 10
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.params['dropout_rate'] = 0.5
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True, with_multi_layer_perceptron=True)
params.add(Param(name='lstm_units', value=32,
desc="Integer, the hidden size in the "
"bi-directional LSTM layer."))
params.add(Param(name='dropout_rate', value=0.0,
desc="Float, the dropout rate."))
params.add(Param(
'top_k', value=10,
hyper_space=hyper_spaces.quniform(low=2, high=100),
desc="Integer, the size of top-k pooling layer."
))
params['optimizer'] = 'adam'
return params
def build(self):
"""Build model structure."""
query, doc = self._make_inputs()
# Embedding layer
embedding = self._make_embedding_layer(mask_zero=True)
embed_query = embedding(query)
embed_doc = embedding(doc)
# Bi-directional LSTM layer
rep_query = keras.layers.Bidirectional(keras.layers.LSTM(
self._params['lstm_units'],
return_sequences=True,
dropout=self._params['dropout_rate']
))(embed_query)
rep_doc = keras.layers.Bidirectional(keras.layers.LSTM(
self._params['lstm_units'],
return_sequences=True,
dropout=self._params['dropout_rate']
))(embed_doc)
# Top-k matching layer
matching_matrix = keras.layers.Dot(
axes=[2, 2], normalize=False)([rep_query, rep_doc])
matching_signals = keras.layers.Reshape((-1,))(matching_matrix)
matching_topk = keras.layers.Lambda(
lambda x: tf.nn.top_k(x, k=self._params['top_k'], sorted=True)[0]
)(matching_signals)
# Multilayer perceptron layer.
mlp = self._make_multi_layer_perceptron_layer()(matching_topk)
mlp = keras.layers.Dropout(
rate=self._params['dropout_rate'])(mlp)
x_out = self._make_output_layer()(mlp)
self._backend = keras.Model(inputs=[query, doc], outputs=x_out)
|
from homeassistant import data_entry_flow
from homeassistant.components.poolsense.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from tests.async_mock import patch
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_invalid_credentials(hass):
"""Test we handle invalid credentials."""
with patch(
"poolsense.PoolSense.test_poolsense_credentials",
return_value=False,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"},
)
assert result["type"] == "form"
assert result["errors"] == {"base": "invalid_auth"}
async def test_valid_credentials(hass):
"""Test we handle invalid credentials."""
with patch(
"poolsense.PoolSense.test_poolsense_credentials", return_value=True
), patch(
"homeassistant.components.poolsense.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.poolsense.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"},
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "test-email"
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
|
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import dateutil.relativedelta
def date_from_isoformat(isoformat_date):
"""Convert an ISO-8601 date into a `datetime.date` object.
Argument:
isoformat_date (str): a date in ISO-8601 format (YYYY-MM-DD)
Returns:
~datetime.date: the object corresponding to the given ISO date.
Raises:
ValueError: when the date could not be converted successfully.
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
"""
year, month, day = isoformat_date.split('-')
return datetime.date(int(year), int(month), int(day))
def get_times_from_cli(cli_token):
"""Convert a CLI token to a datetime tuple.
Argument:
cli_token (str): an isoformat datetime token ([ISO date]:[ISO date])
or a special value among:
* thisday
* thisweek
* thismonth
* thisyear
Returns:
        tuple: a couple of datetime.date objects, where the first item is
            the start of the time frame and the second item is its end.
            Both elements can also be None, if no date was provided.
Raises:
ValueError: when the CLI token is not in the right format
(no colon in the token, not one of the special values, dates
are not in proper ISO-8601 format.)
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
"""
today = datetime.date.today()
if cli_token=="thisday":
return today, today
elif cli_token=="thisweek":
return today, today - dateutil.relativedelta.relativedelta(days=7)
elif cli_token=="thismonth":
return today, today - dateutil.relativedelta.relativedelta(months=1)
elif cli_token=="thisyear":
return today, today - dateutil.relativedelta.relativedelta(years=1)
else:
try:
start_date, stop_date = cli_token.split(':')
except ValueError:
raise ValueError("--time parameter must contain a colon (:)")
if not start_date and not stop_date: # ':', no start date, no stop date
return None, None
try:
start_date = date_from_isoformat(start_date) if start_date else None
stop_date = date_from_isoformat(stop_date) if stop_date else None
except ValueError:
raise ValueError("--time parameter was not provided ISO formatted dates")
if start_date is not None and stop_date is not None:
return max(start_date, stop_date), min(start_date, stop_date)
else:
return stop_date, start_date
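# Minimal usage sketch (added for illustration; not part of the original module).
# It shows the (later date, earlier date) convention returned by the parser,
# matching the "this*" shortcuts above.
if __name__ == '__main__':
    assert date_from_isoformat('2019-10-26') == datetime.date(2019, 10, 26)
    later, earlier = get_times_from_cli('2019-01-01:2019-03-01')
    assert (later, earlier) == (datetime.date(2019, 3, 1), datetime.date(2019, 1, 1))
    assert get_times_from_cli(':') == (None, None)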
|
import pytest
from homeassistant.components.pvpc_hourly_pricing import ATTR_TARIFF, DOMAIN
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CURRENCY_EURO,
ENERGY_KILO_WATT_HOUR,
)
from tests.common import load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
FIXTURE_JSON_DATA_2019_10_26 = "PVPC_CURV_DD_2019_10_26.json"
FIXTURE_JSON_DATA_2019_10_27 = "PVPC_CURV_DD_2019_10_27.json"
FIXTURE_JSON_DATA_2019_10_29 = "PVPC_CURV_DD_2019_10_29.json"
def check_valid_state(state, tariff: str, value=None, key_attr=None):
"""Ensure that sensor has a valid state and attributes."""
assert state
assert (
state.attributes[ATTR_UNIT_OF_MEASUREMENT]
== f"{CURRENCY_EURO}/{ENERGY_KILO_WATT_HOUR}"
)
try:
_ = float(state.state)
# safety margins for current electricity price (it shouldn't be out of [0, 0.2])
assert -0.1 < float(state.state) < 0.3
assert state.attributes[ATTR_TARIFF] == tariff
except ValueError:
pass
if value is not None and isinstance(value, str):
assert state.state == value
elif value is not None:
assert abs(float(state.state) - value) < 1e-6
if key_attr is not None:
assert abs(float(state.state) - state.attributes[key_attr]) < 1e-6
@pytest.fixture
def pvpc_aioclient_mock(aioclient_mock: AiohttpClientMocker):
"""Create a mock config entry."""
aioclient_mock.get(
"https://api.esios.ree.es/archives/70/download_json?locale=es&date=2019-10-26",
text=load_fixture(f"{DOMAIN}/{FIXTURE_JSON_DATA_2019_10_26}"),
)
aioclient_mock.get(
"https://api.esios.ree.es/archives/70/download_json?locale=es&date=2019-10-27",
text=load_fixture(f"{DOMAIN}/{FIXTURE_JSON_DATA_2019_10_27}"),
)
# missing day
aioclient_mock.get(
"https://api.esios.ree.es/archives/70/download_json?locale=es&date=2019-10-28",
text='{"message":"No values for specified archive"}',
)
aioclient_mock.get(
"https://api.esios.ree.es/archives/70/download_json?locale=es&date=2019-10-29",
text=load_fixture(f"{DOMAIN}/{FIXTURE_JSON_DATA_2019_10_29}"),
)
return aioclient_mock
|
import pytest
from yandextank.common.interfaces import TankInfo
class TestStatus(object):
@pytest.mark.parametrize('updates, result', [
([(['plugin', 'key1'], 'foo'), (['plugin', 'key2'], 42)], {'plugin': {'key1': 'foo', 'key2': 42}}),
([(['plugin1', 'key1'], 'foo'),
(['plugin1', 'key2'], 42),
(['plugin2', 'key1'], 'bar')], {'plugin1': {'key1': 'foo', 'key2': 42},
'plugin2': {'key1': 'bar'}})
])
def test_update(self, updates, result):
info = TankInfo(dict())
for args in updates:
info.update(*args)
assert info.get_info_dict() == result
|
from homeassistant.components.coronavirus.const import DOMAIN, OPTION_WORLDWIDE
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, mock_registry
async def test_migration(hass):
"""Test that we can migrate coronavirus to stable unique ID."""
nl_entry = MockConfigEntry(domain=DOMAIN, title="Netherlands", data={"country": 34})
nl_entry.add_to_hass(hass)
worldwide_entry = MockConfigEntry(
domain=DOMAIN, title="Worldwide", data={"country": OPTION_WORLDWIDE}
)
worldwide_entry.add_to_hass(hass)
mock_registry(
hass,
{
"sensor.netherlands_confirmed": entity_registry.RegistryEntry(
entity_id="sensor.netherlands_confirmed",
unique_id="34-confirmed",
platform="coronavirus",
config_entry_id=nl_entry.entry_id,
),
"sensor.worldwide_confirmed": entity_registry.RegistryEntry(
entity_id="sensor.worldwide_confirmed",
unique_id="__worldwide-confirmed",
platform="coronavirus",
config_entry_id=worldwide_entry.entry_id,
),
},
)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
ent_reg = await entity_registry.async_get_registry(hass)
sensor_nl = ent_reg.async_get("sensor.netherlands_confirmed")
assert sensor_nl.unique_id == "Netherlands-confirmed"
sensor_worldwide = ent_reg.async_get("sensor.worldwide_confirmed")
assert sensor_worldwide.unique_id == "__worldwide-confirmed"
assert hass.states.get("sensor.netherlands_confirmed").state == "10"
assert hass.states.get("sensor.worldwide_confirmed").state == "11"
assert nl_entry.unique_id == "Netherlands"
assert worldwide_entry.unique_id == OPTION_WORLDWIDE
|
import glob
import os.path as op
import numpy as np
import pytest
from mne import what, create_info
from mne.datasets import testing
from mne.io import RawArray
from mne.preprocessing import ICA
from mne.utils import run_tests_if_main, requires_sklearn
data_path = testing.data_path(download=False)
@pytest.mark.slowtest
@requires_sklearn
@testing.requires_testing_data
def test_what(tmpdir, verbose_debug):
"""Test mne.what."""
# ICA
ica = ICA(max_iter=1)
raw = RawArray(np.random.RandomState(0).randn(3, 10),
create_info(3, 1000., 'eeg'))
with pytest.warns(None): # convergence sometimes
ica.fit(raw)
fname = op.join(str(tmpdir), 'x-ica.fif')
ica.save(fname)
assert what(fname) == 'ica'
# test files
fnames = glob.glob(
op.join(data_path, 'MEG', 'sample', '*.fif'))
fnames += glob.glob(
op.join(data_path, 'subjects', 'sample', 'bem', '*.fif'))
fnames = sorted(fnames)
want_dict = dict(eve='events', ave='evoked', cov='cov', inv='inverse',
fwd='forward', trans='transform', proj='proj',
raw='raw', meg='raw', sol='bem solution',
bem='bem surfaces', src='src', dense='bem surfaces',
sparse='bem surfaces', head='bem surfaces',
fiducials='fiducials')
for fname in fnames:
kind = op.splitext(fname)[0].split('-')[-1]
if len(kind) > 5:
kind = kind.split('_')[-1]
this = what(fname)
assert this == want_dict[kind]
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave_xfit.dip')
assert what(fname) == 'unknown'
run_tests_if_main()
|
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import CONF_TYPE
from . import IHC_CONTROLLER, IHC_INFO
from .const import CONF_INVERTING
from .ihcdevice import IHCDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IHC binary sensor platform."""
if discovery_info is None:
return
devices = []
for name, device in discovery_info.items():
ihc_id = device["ihc_id"]
product_cfg = device["product_cfg"]
product = device["product"]
# Find controller that corresponds with device id
ctrl_id = device["ctrl_id"]
ihc_key = f"ihc{ctrl_id}"
info = hass.data[ihc_key][IHC_INFO]
ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
sensor = IHCBinarySensor(
ihc_controller,
name,
ihc_id,
info,
product_cfg.get(CONF_TYPE),
product_cfg[CONF_INVERTING],
product,
)
devices.append(sensor)
add_entities(devices)
class IHCBinarySensor(IHCDevice, BinarySensorEntity):
"""IHC Binary Sensor.
    The associated IHC resource can be any input or output from an IHC product
    or function block, but it must be a boolean ON/OFF resource.
"""
def __init__(
self,
ihc_controller,
name,
ihc_id: int,
info: bool,
sensor_type: str,
inverting: bool,
product=None,
) -> None:
"""Initialize the IHC binary sensor."""
super().__init__(ihc_controller, name, ihc_id, info, product)
self._state = None
self._sensor_type = sensor_type
self.inverting = inverting
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self._state
def on_ihc_change(self, ihc_id, value):
"""IHC resource has changed."""
if self.inverting:
self._state = not value
else:
self._state = value
self.schedule_update_ha_state()
|
import pytest
from tests.async_mock import patch
@pytest.fixture()
def mock_get_stations():
"""Mock aioeafm.get_stations."""
with patch("homeassistant.components.eafm.config_flow.get_stations") as patched:
yield patched
@pytest.fixture()
def mock_get_station():
"""Mock aioeafm.get_station."""
with patch("homeassistant.components.eafm.sensor.get_station") as patched:
yield patched
|
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from translate.storage.resx import RESXFile
from weblate.addons.cleanup import BaseCleanupAddon
class ResxUpdateAddon(BaseCleanupAddon):
name = "weblate.resx.update"
verbose = _("Update RESX files")
description = _(
"Update all translation files to match the monolingual upstream base file. "
"Unused strings are removed, and new ones added as copies of the source "
"string."
)
icon = "refresh.svg"
compat = {"file_format": {"resx"}}
@cached_property
def template_store(self):
return self.instance.component.template_store.store
@staticmethod
def build_index(storage):
index = {}
for unit in storage.units:
index[unit.getid()] = unit
return index
def build_indexes(self):
index = self.build_index(self.template_store)
if self.instance.component.intermediate:
intermediate = self.build_index(
self.instance.component.intermediate_store.store
)
else:
intermediate = {}
return index, intermediate
@staticmethod
def get_index(index, intermediate, translation):
if intermediate and translation.is_source:
return intermediate
return index
def update_resx(self, index, translation, storage, changes):
"""Filter obsolete units in RESX storage.
This removes the corresponding XML element and also adds newly added, and
changed units.
"""
sindex = self.build_index(storage.store)
changed = False
# Add missing units
for unit in self.template_store.units:
if unit.getid() not in sindex:
storage.store.addunit(unit, True)
changed = True
# Remove extra units and apply target changes
for unit in storage.store.units:
unitid = unit.getid()
if unitid not in index:
storage.store.body.remove(unit.xmlelement)
changed = True
if unitid in changes:
unit.target = index[unitid].target
changed = True
if changed:
storage.save()
@staticmethod
def find_changes(index, storage):
"""Find changed string IDs in upstream repository."""
result = set()
for unit in storage.units:
unitid = unit.getid()
if unitid not in index:
continue
if unit.target != index[unitid].target:
result.add(unitid)
return result
def update_translations(self, component, previous_head):
index, intermediate = self.build_indexes()
if previous_head:
content = component.repository.get_file(component.template, previous_head)
changes = self.find_changes(index, RESXFile.parsestring(content))
else:
# No previous revision, probably first commit
changes = set()
for translation in self.iterate_translations(component):
self.update_resx(
self.get_index(index, intermediate, translation),
translation,
translation.store,
changes,
)
|
from homeassistant.components import switch, tellduslive
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import ToggleEntity
from .entry import TelldusLiveEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tellduslive sensors dynamically."""
async def async_discover_switch(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveSwitch(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(switch.DOMAIN, tellduslive.DOMAIN),
async_discover_switch,
)
class TelldusLiveSwitch(TelldusLiveEntity, ToggleEntity):
"""Representation of a Tellstick switch."""
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.is_on
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.device.turn_on()
self._update_callback()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.device.turn_off()
self._update_callback()
|
import asyncio
from datetime import timedelta
import logging
import aiohttp
from pyjuicenet import Api, TokenError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR
from .device import JuiceNetApi
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "switch"]
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the JuiceNet component."""
conf = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not conf:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up JuiceNet from a config entry."""
config = entry.data
session = async_get_clientsession(hass)
access_token = config[CONF_ACCESS_TOKEN]
api = Api(access_token, session)
juicenet = JuiceNetApi(api)
try:
await juicenet.setup()
except TokenError as error:
_LOGGER.error("JuiceNet Error %s", error)
return False
except aiohttp.ClientError as error:
_LOGGER.error("Could not reach the JuiceNet API %s", error)
raise ConfigEntryNotReady from error
if not juicenet.devices:
_LOGGER.error("No JuiceNet devices found for this account")
return False
_LOGGER.info("%d JuiceNet device(s) found", len(juicenet.devices))
async def async_update_data():
"""Update all device states from the JuiceNet API."""
for device in juicenet.devices:
await device.update_state(True)
return True
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="JuiceNet",
update_method=async_update_data,
update_interval=timedelta(seconds=30),
)
hass.data[DOMAIN][entry.entry_id] = {
JUICENET_API: juicenet,
JUICENET_COORDINATOR: coordinator,
}
await coordinator.async_refresh()
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import asyncio
from functools import partial
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import CONF_WHITE_VALUE
import homeassistant.util.color as color_util
from . import CONF_COLOR, CONF_DIMMING, CONF_RESET_COLOR, FIBARO_DEVICES, FibaroDevice
def scaleto255(value):
"""Scale the input value from 0-100 to 0-255."""
# Fibaro has a funny way of storing brightness either 0-100 or 0-99
# depending on device type (e.g. dimmer vs led)
if value > 98:
value = 100
return max(0, min(255, ((value * 255.0) / 100.0)))
def scaleto100(value):
"""Scale the input value from 0-255 to 0-100."""
# Make sure a low but non-zero value is not rounded down to zero
if 0 < value < 3:
return 1
return max(0, min(100, ((value * 100.0) / 255.0)))
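# Quick illustration (added for clarity, not part of the original integration):
# the two scalers are deliberately asymmetric because Fibaro devices report
# brightness as 0-99/0-100 while Home Assistant uses 0-255, e.g.
#   scaleto255(99) == 255.0   (anything above 98 is treated as full brightness)
#   scaleto100(2) == 1        (low but non-zero values never collapse to 0)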
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Perform the setup for Fibaro controller devices."""
if discovery_info is None:
return
async_add_entities(
[FibaroLight(device) for device in hass.data[FIBARO_DEVICES]["light"]], True
)
class FibaroLight(FibaroDevice, LightEntity):
"""Representation of a Fibaro Light, including dimmable."""
def __init__(self, fibaro_device):
"""Initialize the light."""
self._brightness = None
self._color = (0, 0)
self._last_brightness = 0
self._supported_flags = 0
self._update_lock = asyncio.Lock()
self._white = 0
devconf = fibaro_device.device_config
self._reset_color = devconf.get(CONF_RESET_COLOR, False)
supports_color = (
"color" in fibaro_device.properties and "setColor" in fibaro_device.actions
)
supports_dimming = "levelChange" in fibaro_device.interfaces
supports_white_v = "setW" in fibaro_device.actions
# Configuration can override default capability detection
if devconf.get(CONF_DIMMING, supports_dimming):
self._supported_flags |= SUPPORT_BRIGHTNESS
if devconf.get(CONF_COLOR, supports_color):
self._supported_flags |= SUPPORT_COLOR
if devconf.get(CONF_WHITE_VALUE, supports_white_v):
self._supported_flags |= SUPPORT_WHITE_VALUE
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
@property
def brightness(self):
"""Return the brightness of the light."""
return scaleto255(self._brightness)
@property
def hs_color(self):
"""Return the color of the light."""
return self._color
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return self._white
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_flags
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
async with self._update_lock:
await self.hass.async_add_executor_job(partial(self._turn_on, **kwargs))
def _turn_on(self, **kwargs):
"""Really turn the light on."""
if self._supported_flags & SUPPORT_BRIGHTNESS:
target_brightness = kwargs.get(ATTR_BRIGHTNESS)
# No brightness specified, so we either restore it to
# last brightness or switch it on at maximum level
if target_brightness is None:
if self._brightness == 0:
if self._last_brightness:
self._brightness = self._last_brightness
else:
self._brightness = 100
else:
# We set it to the target brightness and turn it on
self._brightness = scaleto100(target_brightness)
if self._supported_flags & SUPPORT_COLOR:
if (
self._reset_color
and kwargs.get(ATTR_WHITE_VALUE) is None
and kwargs.get(ATTR_HS_COLOR) is None
and kwargs.get(ATTR_BRIGHTNESS) is None
):
self._color = (100, 0)
# Update based on parameters
self._white = kwargs.get(ATTR_WHITE_VALUE, self._white)
self._color = kwargs.get(ATTR_HS_COLOR, self._color)
rgb = color_util.color_hs_to_RGB(*self._color)
self.call_set_color(
round(rgb[0] * self._brightness / 100.0),
round(rgb[1] * self._brightness / 100.0),
round(rgb[2] * self._brightness / 100.0),
round(self._white * self._brightness / 100.0),
)
if self.state == "off":
self.set_level(int(self._brightness))
return
if self._reset_color:
bri255 = scaleto255(self._brightness)
self.call_set_color(bri255, bri255, bri255, bri255)
if self._supported_flags & SUPPORT_BRIGHTNESS:
self.set_level(int(self._brightness))
return
# The simplest case is left for last. No dimming, just switch on
self.call_turn_on()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
async with self._update_lock:
await self.hass.async_add_executor_job(partial(self._turn_off, **kwargs))
def _turn_off(self, **kwargs):
"""Really turn the light off."""
# Let's save the last brightness level before we switch it off
if (
(self._supported_flags & SUPPORT_BRIGHTNESS)
and self._brightness
and self._brightness > 0
):
self._last_brightness = self._brightness
self._brightness = 0
self.call_turn_off()
@property
def is_on(self):
"""Return true if device is on."""
return self.current_binary_state
async def async_update(self):
"""Update the state."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update)
def _update(self):
"""Really update the state."""
# Brightness handling
if self._supported_flags & SUPPORT_BRIGHTNESS:
self._brightness = float(self.fibaro_device.properties.value)
            # Fibaro might report 0-99 or 0-100 for brightness,
            # based on device type, so we cap it at 100 here
if self._brightness > 99:
self._brightness = 100
# Color handling
if (
self._supported_flags & SUPPORT_COLOR
and "color" in self.fibaro_device.properties
and "," in self.fibaro_device.properties.color
):
# Fibaro communicates the color as an 'R, G, B, W' string
rgbw_s = self.fibaro_device.properties.color
if rgbw_s == "0,0,0,0" and "lastColorSet" in self.fibaro_device.properties:
rgbw_s = self.fibaro_device.properties.lastColorSet
rgbw_list = [int(i) for i in rgbw_s.split(",")][:4]
if rgbw_list[0] or rgbw_list[1] or rgbw_list[2]:
self._color = color_util.color_RGB_to_hs(*rgbw_list[:3])
if (self._supported_flags & SUPPORT_WHITE_VALUE) and self.brightness != 0:
self._white = min(255, max(0, rgbw_list[3] * 100.0 / self._brightness))
|
import logging
from typing import List, Optional
from connect_box import ConnectBox
from connect_box.exceptions import ConnectBoxError, ConnectBoxLoginError
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_IP = "192.168.0.1"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_IP): cv.string,
}
)
async def async_get_scanner(hass, config):
"""Return the UPC device scanner."""
conf = config[DOMAIN]
session = hass.helpers.aiohttp_client.async_get_clientsession()
connect_box = ConnectBox(session, conf[CONF_PASSWORD], host=conf[CONF_HOST])
# Check login data
try:
await connect_box.async_initialize_token()
except ConnectBoxLoginError:
_LOGGER.error("ConnectBox login data error!")
return None
except ConnectBoxError:
pass
async def _shutdown(event):
"""Shutdown event."""
await connect_box.async_close_session()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
return UPCDeviceScanner(connect_box)
class UPCDeviceScanner(DeviceScanner):
"""This class queries a router running UPC ConnectBox firmware."""
def __init__(self, connect_box: ConnectBox):
"""Initialize the scanner."""
self.connect_box: ConnectBox = connect_box
async def async_scan_devices(self) -> List[str]:
"""Scan for new devices and return a list with found device IDs."""
try:
await self.connect_box.async_get_devices()
except ConnectBoxError:
return []
return [device.mac for device in self.connect_box.devices]
async def async_get_device_name(self, device: str) -> Optional[str]:
"""Get the device name (the name of the wireless device not used)."""
for connected_device in self.connect_box.devices:
if (
connected_device.mac == device
and connected_device.hostname.lower() != "unknown"
):
return connected_device.hostname
return None
|
from homeassistant.config_entries import ENTRY_STATE_LOADED
from tests.components.plugwise.common import async_init_integration
async def test_adam_climate_switch_entities(hass, mock_smile_adam):
"""Test creation of climate related switch entities."""
entry = await async_init_integration(hass, mock_smile_adam)
assert entry.state == ENTRY_STATE_LOADED
state = hass.states.get("switch.cv_pomp")
assert str(state.state) == "on"
state = hass.states.get("switch.fibaro_hc2")
assert str(state.state) == "on"
async def test_adam_climate_switch_changes(hass, mock_smile_adam):
"""Test changing of climate related switch entities."""
entry = await async_init_integration(hass, mock_smile_adam)
assert entry.state == ENTRY_STATE_LOADED
await hass.services.async_call(
"switch",
"turn_off",
{"entity_id": "switch.cv_pomp"},
blocking=True,
)
state = hass.states.get("switch.cv_pomp")
assert str(state.state) == "off"
await hass.services.async_call(
"switch",
"toggle",
{"entity_id": "switch.fibaro_hc2"},
blocking=True,
)
state = hass.states.get("switch.fibaro_hc2")
assert str(state.state) == "off"
await hass.services.async_call(
"switch",
"toggle",
{"entity_id": "switch.fibaro_hc2"},
blocking=True,
)
state = hass.states.get("switch.fibaro_hc2")
assert str(state.state) == "on"
|
from diamond.collector import Collector
import diamond.convertor
import os
class MemoryLxcCollector(Collector):
def get_default_config_help(self):
"""
Return help text for collector configuration.
"""
config_help = super(MemoryLxcCollector, self).get_default_config_help()
config_help.update({
"sys_path": "Defaults to '/sys/fs/cgroup/lxc'",
})
return config_help
def get_default_config(self):
"""
Returns default settings for collector.
"""
config = super(MemoryLxcCollector, self).get_default_config()
config.update({
"path": "lxc",
"sys_path": "/sys/fs/cgroup/lxc",
})
return config
def collect(self):
"""
Collect memory stats of LXCs.
"""
lxc_metrics = ["memory.usage_in_bytes", "memory.limit_in_bytes"]
if os.path.isdir(self.config["sys_path"]) is False:
self.log.debug("sys_path '%s' isn't directory.",
self.config["sys_path"])
return {}
collected = {}
for item in os.listdir(self.config["sys_path"]):
fpath = "%s/%s" % (self.config["sys_path"], item)
if os.path.isdir(fpath) is False:
continue
for lxc_metric in lxc_metrics:
filename = "%s/%s" % (fpath, lxc_metric)
metric_name = "%s.%s" % (
item.replace(".", "_"),
lxc_metric.replace("_in_bytes", ""))
self.log.debug("Trying to collect from %s", filename)
collected[metric_name] = self._read_file(filename)
for key in collected.keys():
if collected[key] is None:
continue
for unit in self.config["byte_unit"]:
value = diamond.convertor.binary.convert(
collected[key],
oldUnit="B",
newUnit=unit)
new_key = "%s_in_%ss" % (key, unit)
self.log.debug("Publishing '%s %s'", new_key, value)
self.publish(new_key, value, metric_type="GAUGE")
def _read_file(self, filename):
"""
Read contents of given file.
"""
try:
with open(filename, "r") as fhandle:
stats = float(fhandle.readline().rstrip("\n"))
except Exception:
stats = None
return stats
|
import diamond.collector
import os
class EntropyStatCollector(diamond.collector.Collector):
PROC = '/proc/sys/kernel/random/entropy_avail'
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EntropyStatCollector, self).get_default_config()
config.update({
'path': 'entropy'
})
return config
def collect(self):
if not os.access(self.PROC, os.R_OK):
return None
# open file
entropy_file = open(self.PROC)
# read value
entropy = entropy_file.read().strip()
# Close file
entropy_file.close()
# Publish value
self.publish_gauge("available", entropy)
|
import os
import shutil
from pylatex import Document
def test():
doc = Document('jobname_test', data=['Jobname test'])
doc.generate_pdf()
assert os.path.isfile('jobname_test.pdf')
os.remove('jobname_test.pdf')
folder = 'tmp_jobname'
os.makedirs(folder)
path = os.path.join(folder, 'jobname_test_dir')
doc = Document(path, data=['Jobname test dir'])
doc.generate_pdf()
assert os.path.isfile(path + '.pdf')
shutil.rmtree(folder)
folder = 'tmp_jobname2'
os.makedirs(folder)
path = os.path.join(folder, 'jobname_test_dir2')
doc = Document(path, data=['Jobname test dir'])
doc.generate_pdf(os.path.join(folder, ''))
assert os.path.isfile(path + '.pdf')
shutil.rmtree(folder)
|
import asyncio
from datetime import timedelta
import logging
import pymyq
from pymyq.errors import InvalidCredentialsError, MyQError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, MYQ_COORDINATOR, MYQ_GATEWAY, PLATFORMS, UPDATE_INTERVAL
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the MyQ component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up MyQ from a config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
conf = entry.data
try:
myq = await pymyq.login(conf[CONF_USERNAME], conf[CONF_PASSWORD], websession)
except InvalidCredentialsError as err:
_LOGGER.error("There was an error while logging in: %s", err)
return False
except MyQError as err:
raise ConfigEntryNotReady from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="myq devices",
update_method=myq.update_device_info,
update_interval=timedelta(seconds=UPDATE_INTERVAL),
)
hass.data[DOMAIN][entry.entry_id] = {MYQ_GATEWAY: myq, MYQ_COORDINATOR: coordinator}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import pytest
from rumps.utils import ListDict
class TestListDict(object):
def test_clear(self):
ld = ListDict()
ld[1] = 11
ld['b'] = 22
ld[object()] = 33
assert len(ld) == 3
ld.clear()
assert len(ld) == 0
assert ld.items() == []
|
import errno
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.wiffi.const import DOMAIN
from homeassistant.const import CONF_PORT, CONF_TIMEOUT
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_CONFIG = {CONF_PORT: 8765}
@pytest.fixture(name="dummy_tcp_server")
def mock_dummy_tcp_server():
"""Mock a valid WiffiTcpServer."""
class Dummy:
async def start_server(self):
pass
async def close_server(self):
pass
server = Dummy()
with patch(
"homeassistant.components.wiffi.config_flow.WiffiTcpServer", return_value=server
):
yield server
@pytest.fixture(name="addr_in_use")
def mock_addr_in_use_server():
"""Mock a WiffiTcpServer with addr_in_use."""
class Dummy:
async def start_server(self):
raise OSError(errno.EADDRINUSE, "")
async def close_server(self):
pass
server = Dummy()
with patch(
"homeassistant.components.wiffi.config_flow.WiffiTcpServer", return_value=server
):
yield server
@pytest.fixture(name="start_server_failed")
def mock_start_server_failed():
"""Mock a WiffiTcpServer with start_server_failed."""
class Dummy:
async def start_server(self):
raise OSError(errno.EACCES, "")
async def close_server(self):
pass
server = Dummy()
with patch(
"homeassistant.components.wiffi.config_flow.WiffiTcpServer", return_value=server
):
yield server
async def test_form(hass, dummy_tcp_server):
"""Test how we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == config_entries.SOURCE_USER
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=MOCK_CONFIG,
)
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
async def test_form_addr_in_use(hass, addr_in_use):
"""Test how we handle addr_in_use error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=MOCK_CONFIG,
)
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "addr_in_use"
async def test_form_start_server_failed(hass, start_server_failed):
"""Test how we handle start_server_failed error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=MOCK_CONFIG,
)
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "start_server_failed"
async def test_option_flow(hass):
"""Test option flow."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
entry.add_to_hass(hass)
assert not entry.options
result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_TIMEOUT: 9}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"][CONF_TIMEOUT] == 9
|
import scipy
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from scattertext.Scalers import stretch_0_to_1
from scattertext.categoryprojector.CategoryProjection import CategoryProjection, CategoryProjectionBase
class CategoryProjectionEvaluator(object):
def evaluate(self, category_projection):
raise NotImplementedError()
class RipleyKCategoryProjectorEvaluator(CategoryProjectionEvaluator):
def __init__(self, max_distance=np.sqrt(2)):
self.max_distance = max_distance
def evaluate(self, category_projection):
assert type(category_projection) == CategoryProjection
        try:
            from astropy.stats import RipleysKEstimator
        except ImportError:
            raise Exception("Please install astropy")
assert issubclass(type(category_projection), CategoryProjectionBase)
ripley_estimator = RipleysKEstimator(area=1., x_max=1., y_max=1., x_min=0., y_min=0.)
proj = category_projection.projection[:, [category_projection.x_dim, category_projection.y_dim]]
scaled_proj = np.array([stretch_0_to_1(proj.T[0]), stretch_0_to_1(proj.T[1])]).T
radii = np.linspace(0, self.max_distance, 1000)
deviances = np.abs(ripley_estimator(scaled_proj, radii, mode='ripley') - ripley_estimator.poisson(radii))
return np.trapz(deviances, x=radii)
class MeanMorisitaIndexEvaluator(CategoryProjectionEvaluator):
def __init__(self, num_bin_range=None):
self.num_bin_range = num_bin_range if num_bin_range is not None else [10, 1000]
def evaluate(self, category_projection):
assert issubclass(type(category_projection), CategoryProjectionBase)
proj = category_projection.projection[:, [category_projection.x_dim, category_projection.y_dim]]
scaled_proj = np.array([stretch_0_to_1(proj.T[0]), stretch_0_to_1(proj.T[1])]).T
        morisita_sum = 0
        N = scaled_proj.shape[0]
        for i in range(self.num_bin_range[0], self.num_bin_range[1]):
            bins, _, _ = np.histogram2d(scaled_proj.T[0], scaled_proj.T[1], i)
            # I_M = Q * (\sum_{k=1}^{Q}{n_k * (n_k - 1)})/(N * (N - 1))
            Q = len(bins)  # num_quadrants
            # Eqn 1.
            morisita_sum += Q * np.sum(np.ravel(bins) * (np.ravel(bins) - 1)) / (N * (N - 1))
        return morisita_sum / (self.num_bin_range[1] - self.num_bin_range[0])
class EmbeddingsProjectorEvaluator(CategoryProjectionEvaluator):
def __init__(self, get_vector):
self.get_vector = get_vector
#import spacy
#assert issubclass(type(nlp), spacy.language.Language)
#self.nlp = nlp
#self.vector_func = lambda: nlp(x)[0].vector
def evaluate(self, category_projection):
assert issubclass(type(category_projection), CategoryProjectionBase)
topics = category_projection.get_nearest_terms()
total_similarity = 0
for topic in topics.values():
topic_vectors = np.array([self.get_vector(term) for term in topic])
sim_matrix = cosine_similarity(topic_vectors)
tril_sim_matrix = np.tril(sim_matrix)
mean_similarity = tril_sim_matrix.sum()/(tril_sim_matrix.shape[0] ** 2 - tril_sim_matrix.shape[0]) / 2
total_similarity += mean_similarity
return total_similarity/len(topics)
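# Illustrative usage sketch (an assumption, not from the original source): the
# ``get_vector`` callable maps a term to an embedding vector, e.g. with spaCy:
#   nlp = spacy.load("en_core_web_md")
#   evaluator = EmbeddingsProjectorEvaluator(get_vector=lambda term: nlp(term)[0].vector)
#   score = evaluator.evaluate(category_projection)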
|
import logging
from hangups.auth import GoogleAuthError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.conversation.util import create_matcher
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import dispatcher, intent
import homeassistant.helpers.config_validation as cv
# We need an import from .config_flow, without it .config_flow is never loaded.
from .config_flow import HangoutsFlowHandler # noqa: F401
from .const import (
CONF_BOT,
CONF_DEFAULT_CONVERSATIONS,
CONF_ERROR_SUPPRESSED_CONVERSATIONS,
CONF_INTENTS,
CONF_MATCHERS,
CONF_REFRESH_TOKEN,
CONF_SENTENCES,
DOMAIN,
EVENT_HANGOUTS_CONNECTED,
EVENT_HANGOUTS_CONVERSATIONS_CHANGED,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
INTENT_HELP,
INTENT_SCHEMA,
MESSAGE_SCHEMA,
SERVICE_RECONNECT,
SERVICE_SEND_MESSAGE,
SERVICE_UPDATE,
TARGETS_SCHEMA,
)
from .hangouts_bot import HangoutsBot
from .intents import HelpIntent
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_INTENTS, default={}): vol.Schema(
{cv.string: INTENT_SCHEMA}
),
vol.Optional(CONF_DEFAULT_CONVERSATIONS, default=[]): [TARGETS_SCHEMA],
vol.Optional(CONF_ERROR_SUPPRESSED_CONVERSATIONS, default=[]): [
TARGETS_SCHEMA
],
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Hangouts bot component."""
config = config.get(DOMAIN)
if config is None:
hass.data[DOMAIN] = {
CONF_INTENTS: {},
CONF_DEFAULT_CONVERSATIONS: [],
CONF_ERROR_SUPPRESSED_CONVERSATIONS: [],
}
return True
hass.data[DOMAIN] = {
CONF_INTENTS: config[CONF_INTENTS],
CONF_DEFAULT_CONVERSATIONS: config[CONF_DEFAULT_CONVERSATIONS],
CONF_ERROR_SUPPRESSED_CONVERSATIONS: config[
CONF_ERROR_SUPPRESSED_CONVERSATIONS
],
}
if (
hass.data[DOMAIN][CONF_INTENTS]
and INTENT_HELP not in hass.data[DOMAIN][CONF_INTENTS]
):
hass.data[DOMAIN][CONF_INTENTS][INTENT_HELP] = {CONF_SENTENCES: ["HELP"]}
for data in hass.data[DOMAIN][CONF_INTENTS].values():
matchers = []
for sentence in data[CONF_SENTENCES]:
matchers.append(create_matcher(sentence))
data[CONF_MATCHERS] = matchers
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass, config):
"""Set up a config entry."""
try:
bot = HangoutsBot(
hass,
config.data.get(CONF_REFRESH_TOKEN),
hass.data[DOMAIN][CONF_INTENTS],
hass.data[DOMAIN][CONF_DEFAULT_CONVERSATIONS],
hass.data[DOMAIN][CONF_ERROR_SUPPRESSED_CONVERSATIONS],
)
hass.data[DOMAIN][CONF_BOT] = bot
except GoogleAuthError as exception:
_LOGGER.error("Hangouts failed to log in: %s", str(exception))
return False
dispatcher.async_dispatcher_connect(
hass, EVENT_HANGOUTS_CONNECTED, bot.async_handle_update_users_and_conversations
)
dispatcher.async_dispatcher_connect(
hass, EVENT_HANGOUTS_CONVERSATIONS_CHANGED, bot.async_resolve_conversations
)
dispatcher.async_dispatcher_connect(
hass,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
bot.async_update_conversation_commands,
)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, bot.async_handle_hass_stop)
await bot.async_connect()
hass.services.async_register(
DOMAIN,
SERVICE_SEND_MESSAGE,
bot.async_handle_send_message,
schema=MESSAGE_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_UPDATE,
bot.async_handle_update_users_and_conversations,
schema=vol.Schema({}),
)
hass.services.async_register(
DOMAIN, SERVICE_RECONNECT, bot.async_handle_reconnect, schema=vol.Schema({})
)
intent.async_register(hass, HelpIntent(hass))
return True
async def async_unload_entry(hass, _):
"""Unload a config entry."""
bot = hass.data[DOMAIN].pop(CONF_BOT)
await bot.async_disconnect()
return True
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('bar')
def test_hostname(host):
assert 'instance-1' == host.check_output('hostname -s')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
f = host.file('/etc/molecule/instance-1')
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
from stash.tests.stashtest import StashTestCase
class AliasTests(StashTestCase):
"""Tests for the 'alias' command."""
def test_help(self):
"""test 'alias --help'"""
output = self.run_command("alias --help", exitcode=0)
self.assertIn("alias", output)
self.assertIn("-h", output)
self.assertIn("--help", output)
self.assertIn("name=", output)
def test_la_alias(self):
"""tests the unmount alias"""
# assert existence
output = self.run_command("alias", exitcode=0)
self.assertIn("la=", output)
# assert output identical
output = self.run_command("la", exitcode=0)
output_full = self.run_command("ls -a", exitcode=0)
self.assertEqual(output, output_full)
def test_alias(self):
"""create and test alias"""
# ensure alias not yet defined
output = self.run_command("alias", exitcode=0)
self.assertNotIn("testalias", output)
# create alias
output = self.run_command("alias 'testalias=echo alias test successfull!'", exitcode=0)
# ensure alias is defined
output = self.run_command("alias", exitcode=0)
self.assertIn("testalias=", output)
# check output
output = self.run_command("testalias", exitcode=0)
self.assertIn("alias test successfull!", output)
|
import asynctest
import mock
import pytest
from paasta_tools.monitoring.check_mesos_duplicate_frameworks import (
check_mesos_no_duplicate_frameworks,
)
def test_check_mesos_no_duplicate_frameworks_ok(capfd):
with mock.patch(
"paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args",
autospec=True,
) as mock_parse_args, mock.patch(
"paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master",
autospec=True,
) as mock_get_mesos_master:
mock_opts = mock.MagicMock()
mock_opts.check = "marathon"
mock_parse_args.return_value = mock_opts
mock_master = mock.MagicMock()
mock_master.state = asynctest.CoroutineMock(
func=asynctest.CoroutineMock(), # https://github.com/notion/a_sync/pull/40
return_value={
"frameworks": [
{"name": "marathon"},
{"name": "marathon1"},
{"name": "foobar"},
{"name": "foobar"},
]
},
)
mock_get_mesos_master.return_value = mock_master
with pytest.raises(SystemExit) as error:
check_mesos_no_duplicate_frameworks()
out, err = capfd.readouterr()
assert "OK" in out
assert "Framework: marathon count: 2" in out
assert "foobar" not in out
assert error.value.code == 0
def test_check_mesos_no_duplicate_frameworks_critical(capfd):
with mock.patch(
"paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args",
autospec=True,
) as mock_parse_args, mock.patch(
"paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master",
autospec=True,
) as mock_get_mesos_master:
mock_opts = mock.MagicMock()
mock_opts.check = "marathon"
mock_parse_args.return_value = mock_opts
mock_master = mock.MagicMock()
mock_master.state = asynctest.CoroutineMock(
func=asynctest.CoroutineMock(), # https://github.com/notion/a_sync/pull/40
return_value={
"frameworks": [
{"name": "marathon"},
{"name": "marathon1"},
{"name": "marathon1"},
{"name": "foobar"},
{"name": "foobar"},
]
},
)
mock_get_mesos_master.return_value = mock_master
with pytest.raises(SystemExit) as error:
check_mesos_no_duplicate_frameworks()
out, err = capfd.readouterr()
assert (
"CRITICAL: There are 2 connected marathon1 frameworks! (Expected 1)" in out
)
assert "marathon" in out
assert "foobar" not in out
assert error.value.code == 2
|
from abc import ABC
from dataclasses import dataclass
from typing import List, Optional, Tuple
from homeassistant.components.media_player import BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_CHANNEL,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_CHANNELS,
)
from homeassistant.core import HomeAssistant, callback
from .const import DOMAIN, URI_SCHEME, URI_SCHEME_REGEX
@dataclass
class PlayMedia:
"""Represents a playable media."""
url: str
mime_type: str
class BrowseMediaSource(BrowseMedia):
"""Represent a browsable media file."""
children: Optional[List["BrowseMediaSource"]]
def __init__(self, *, domain: Optional[str], identifier: Optional[str], **kwargs):
"""Initialize media source browse media."""
media_content_id = f"{URI_SCHEME}{domain or ''}"
if identifier:
media_content_id += f"/{identifier}"
super().__init__(media_content_id=media_content_id, **kwargs)
self.domain = domain
self.identifier = identifier
@dataclass
class MediaSourceItem:
"""A parsed media item."""
hass: HomeAssistant
domain: Optional[str]
identifier: str
async def async_browse(self) -> BrowseMediaSource:
"""Browse this item."""
if self.domain is None:
base = BrowseMediaSource(
domain=None,
identifier=None,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type=MEDIA_TYPE_CHANNELS,
title="Media Sources",
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_CHANNEL,
)
base.children = [
BrowseMediaSource(
domain=source.domain,
identifier=None,
media_class=MEDIA_CLASS_CHANNEL,
media_content_type=MEDIA_TYPE_CHANNEL,
title=source.name,
can_play=False,
can_expand=True,
)
for source in self.hass.data[DOMAIN].values()
]
return base
return await self.async_media_source().async_browse_media(self)
async def async_resolve(self) -> PlayMedia:
"""Resolve to playable item."""
return await self.async_media_source().async_resolve_media(self)
@callback
def async_media_source(self) -> "MediaSource":
"""Return media source that owns this item."""
return self.hass.data[DOMAIN][self.domain]
@classmethod
def from_uri(cls, hass: HomeAssistant, uri: str) -> "MediaSourceItem":
"""Create an item from a uri."""
match = URI_SCHEME_REGEX.match(uri)
if not match:
raise ValueError("Invalid media source URI")
domain = match.group("domain")
identifier = match.group("identifier")
return cls(hass, domain, identifier)
class MediaSource(ABC):
"""Represents a source of media files."""
name: str = None
def __init__(self, domain: str):
"""Initialize a media source."""
self.domain = domain
if not self.name:
self.name = domain
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Resolve a media item to a playable item."""
raise NotImplementedError
async def async_browse_media(
self, item: MediaSourceItem, media_types: Tuple[str]
) -> BrowseMediaSource:
"""Browse media."""
raise NotImplementedError
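# A minimal, hypothetical subclass sketch (illustration only; "ExampleSource" and
# its URL are not part of the original module) showing how a concrete source is
# expected to implement the two abstract hooks above.
class ExampleSource(MediaSource):
    """Serve a single, hard-coded media file."""
    name = "Example"
    async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
        # Map every identifier to one playable URL.
        return PlayMedia("http://example.com/demo.mp3", "audio/mpeg")
    async def async_browse_media(
        self, item: MediaSourceItem, media_types: Tuple[str]
    ) -> BrowseMediaSource:
        # Expose a single non-expandable, playable node.
        return BrowseMediaSource(
            domain=self.domain,
            identifier="demo",
            media_class=MEDIA_CLASS_CHANNEL,
            media_content_type=MEDIA_TYPE_CHANNEL,
            title="Demo item",
            can_play=True,
            can_expand=False,
        )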
|