import os.path
from django import forms
from django.contrib.admin.helpers import ActionForm
from django.utils.translation import gettext_lazy as _
class ImportForm(forms.Form):
import_file = forms.FileField(
label=_('File to import')
)
input_format = forms.ChoiceField(
label=_('Format'),
choices=(),
)
def __init__(self, import_formats, *args, **kwargs):
super().__init__(*args, **kwargs)
choices = []
for i, f in enumerate(import_formats):
choices.append((str(i), f().get_title(),))
if len(import_formats) > 1:
choices.insert(0, ('', '---'))
self.fields['input_format'].choices = choices
class ConfirmImportForm(forms.Form):
import_file_name = forms.CharField(widget=forms.HiddenInput())
original_file_name = forms.CharField(widget=forms.HiddenInput())
input_format = forms.CharField(widget=forms.HiddenInput())
def clean_import_file_name(self):
data = self.cleaned_data['import_file_name']
data = os.path.basename(data)
return data
class ExportForm(forms.Form):
file_format = forms.ChoiceField(
label=_('Format'),
choices=(),
)
def __init__(self, formats, *args, **kwargs):
super().__init__(*args, **kwargs)
choices = []
for i, f in enumerate(formats):
choices.append((str(i), f().get_title(),))
if len(formats) > 1:
choices.insert(0, ('', '---'))
self.fields['file_format'].choices = choices
def export_action_form_factory(formats):
"""
Returns an ActionForm subclass containing a ChoiceField populated with
the given formats.
"""
class _ExportActionForm(ActionForm):
"""
Action form with export format ChoiceField.
"""
file_format = forms.ChoiceField(
label=_('Format'), choices=formats, required=False)
_ExportActionForm.__name__ = str('ExportActionForm')
return _ExportActionForm
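# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. It assumes a
# configured Django environment and django-import-export's base format classes;
# base_formats.CSV / base_formats.JSON below are assumptions for the example,
# nothing this module defines.
if __name__ == "__main__":
    from import_export.formats import base_formats

    formats = [base_formats.CSV, base_formats.JSON]
    # ImportForm/ExportForm build their choice lists from format classes.
    import_form = ImportForm(formats)
    export_form = ExportForm(formats)
    # The admin action form takes ready-made (value, label) choices instead.
    ExportActionForm = export_action_form_factory(
        [(str(i), fmt().get_title()) for i, fmt in enumerate(formats)]
    )
    print(import_form.fields["input_format"].choices)
    print(export_form.fields["file_format"].choices)
    print(ExportActionForm.base_fields["file_format"].choices)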
|
from test import CollectorTestCase
from test import get_collector_config
from netscalersnmp import NetscalerSNMPCollector
class TestNetscalerSNMPCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('NetscalerSNMPCollector', {
'allowed_names': allowed_names,
'interval': 1
})
self.collector = NetscalerSNMPCollector(config, None)
def test_import(self):
self.assertTrue(NetscalerSNMPCollector)
|
import hangups
from common import run_example
async def enable_group_link_sharing(client, args):
request = hangups.hangouts_pb2.SetGroupLinkSharingEnabledRequest(
request_header=client.get_request_header(),
event_request_header=hangups.hangouts_pb2.EventRequestHeader(
conversation_id=hangups.hangouts_pb2.ConversationId(
id=args.conversation_id
),
client_generated_id=client.get_client_generated_id(),
),
group_link_sharing_status=(
hangups.hangouts_pb2.GROUP_LINK_SHARING_STATUS_ON
),
)
await client.set_group_link_sharing_enabled(request)
print('enabled group link sharing for conversation {}'.format(
args.conversation_id
))
request = hangups.hangouts_pb2.GetGroupConversationUrlRequest(
request_header=client.get_request_header(),
conversation_id=hangups.hangouts_pb2.ConversationId(
id=args.conversation_id,
)
)
response = await client.get_group_conversation_url(request)
print(response.group_conversation_url)
if __name__ == '__main__':
run_example(enable_group_link_sharing, '--conversation-id')
|
import asyncio
from typing import Dict, List, Set
from async_timeout import timeout
from sharkiqpy import (
AylaApi,
SharkIqAuthError,
SharkIqAuthExpiringError,
SharkIqNotAuthedError,
SharkIqVacuum,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import _LOGGER, API_TIMEOUT, DOMAIN, UPDATE_INTERVAL
class SharkIqUpdateCoordinator(DataUpdateCoordinator):
"""Define a wrapper class to update Shark IQ data."""
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
ayla_api: AylaApi,
shark_vacs: List[SharkIqVacuum],
) -> None:
"""Set up the SharkIqUpdateCoordinator class."""
self.ayla_api = ayla_api
self.shark_vacs: Dict[str, SharkIqVacuum] = {
sharkiq.serial_number: sharkiq for sharkiq in shark_vacs
}
self._config_entry = config_entry
self._online_dsns = set()
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=UPDATE_INTERVAL)
@property
def online_dsns(self) -> Set[str]:
"""Get the set of all online DSNs."""
return self._online_dsns
def device_is_online(self, dsn: str) -> bool:
"""Return the online state of a given vacuum dsn."""
return dsn in self._online_dsns
@staticmethod
async def _async_update_vacuum(sharkiq: SharkIqVacuum) -> None:
"""Asynchronously update the data for a single vacuum."""
dsn = sharkiq.serial_number
_LOGGER.debug("Updating sharkiq data for device DSN %s", dsn)
with timeout(API_TIMEOUT):
await sharkiq.async_update()
async def _async_update_data(self) -> bool:
"""Update data device by device."""
try:
all_vacuums = await self.ayla_api.async_list_devices()
self._online_dsns = {
v["dsn"]
for v in all_vacuums
if v["connection_status"] == "Online" and v["dsn"] in self.shark_vacs
}
_LOGGER.debug("Updating sharkiq data")
online_vacs = (self.shark_vacs[dsn] for dsn in self.online_dsns)
await asyncio.gather(*[self._async_update_vacuum(v) for v in online_vacs])
except (
SharkIqAuthError,
SharkIqNotAuthedError,
SharkIqAuthExpiringError,
) as err:
_LOGGER.exception("Bad auth state")
flow_context = {
"source": "reauth",
"unique_id": self._config_entry.unique_id,
}
matching_flows = [
flow
for flow in self.hass.config_entries.flow.async_progress()
if flow["context"] == flow_context
]
if not matching_flows:
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context=flow_context,
data=self._config_entry.data,
)
)
raise UpdateFailed(err) from err
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error updating SharkIQ")
raise UpdateFailed(err) from err
return True
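# Illustrative sketch, not part of the original module: roughly how a config
# entry setup might build and prime this coordinator. How ayla_api and the
# vacuum list are obtained is elided; only the coordinator calls shown here
# come from the class above and DataUpdateCoordinator itself.
async def _example_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, ayla_api: AylaApi
) -> SharkIqUpdateCoordinator:
    shark_vacs: List[SharkIqVacuum] = []  # listed via the Ayla API in real setup
    coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs)
    # async_refresh() (inherited from DataUpdateCoordinator) runs the first
    # update, which populates the online DSN set used by device_is_online().
    await coordinator.async_refresh()
    return coordinator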
|
import json
import subprocess
from zipfile import BadZipfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.forms import HiddenInput
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.http import urlencode
from django.utils.translation import gettext as _
from django.views.generic.edit import CreateView
from weblate.formats.models import FILE_FORMATS
from weblate.trans.forms import (
ComponentBranchForm,
ComponentCreateForm,
ComponentDiscoverForm,
ComponentDocCreateForm,
ComponentInitCreateForm,
ComponentScratchCreateForm,
ComponentSelectForm,
ComponentZipCreateForm,
ProjectCreateForm,
)
from weblate.trans.models import Component, Project
from weblate.trans.tasks import perform_update
from weblate.trans.util import get_clean_env
from weblate.utils import messages
from weblate.utils.errors import report_error
from weblate.utils.licenses import LICENSE_URLS
from weblate.utils.views import create_component_from_doc, create_component_from_zip
from weblate.vcs.models import VCS_REGISTRY
def scratch_create_component(project, name, slug, source_language, file_format):
format_cls = FILE_FORMATS[file_format]
template = f"{source_language.code}.{format_cls.extension()}"
# Create component
return Component.objects.create(
file_format=file_format,
filemask=f"*.{format_cls.extension()}",
template=template,
vcs="local",
repo="local:",
project=project,
source_language=source_language,
name=name,
slug=slug,
)
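# Illustrative call, not part of the original module; `project` and
# `source_language` stand for existing Project and Language instances, and
# "po" is just one example FILE_FORMATS key:
#
#     component = scratch_create_component(
#         project=project,
#         name="Documentation",
#         slug="documentation",
#         source_language=source_language,
#         file_format="po",
#     )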
class BaseCreateView(CreateView):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.has_billing = "weblate.billing" in settings.INSTALLED_APPS
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["request"] = self.request
return kwargs
@method_decorator(login_required, name="dispatch")
class CreateProject(BaseCreateView):
model = Project
form_class = ProjectCreateForm
billings = None
def get_form(self, form_class=None):
form = super().get_form(form_class)
billing_field = form.fields["billing"]
if self.has_billing:
billing_field.queryset = self.billings
try:
billing_field.initial = int(self.request.GET["billing"])
except (ValueError, KeyError):
pass
billing_field.required = not self.request.user.is_superuser
if self.request.user.is_superuser:
billing_field.empty_label = "-- without billing --"
else:
billing_field.required = False
billing_field.widget = HiddenInput()
return form
def form_valid(self, form):
result = super().form_valid(form)
if self.has_billing and form.cleaned_data["billing"]:
billing = form.cleaned_data["billing"]
else:
billing = None
self.object.post_create(self.request.user, billing)
return result
def can_create(self):
return (self.has_billing and self.billings) or self.request.user.has_perm(
"project.add"
)
def post(self, request, *args, **kwargs):
if not self.can_create():
return redirect("create-project")
return super().post(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
kwargs["can_create"] = self.can_create()
if self.has_billing:
from weblate.billing.models import Billing
kwargs["user_billings"] = Billing.objects.for_user(
self.request.user
).exists()
return kwargs
def dispatch(self, request, *args, **kwargs):
if self.has_billing:
from weblate.billing.models import Billing
billings = Billing.objects.get_valid().for_user(request.user).prefetch()
pks = set()
for billing in billings:
limit = billing.plan.display_limit_projects
if limit == 0 or billing.count_projects < limit:
pks.add(billing.pk)
self.billings = Billing.objects.filter(pk__in=pks).prefetch()
return super().dispatch(request, *args, **kwargs)
@method_decorator(login_required, name="dispatch")
class CreateComponent(BaseCreateView):
model = Component
projects = None
stage = None
selected_project = ""
basic_fields = ("repo", "name", "slug", "vcs", "source_language")
empty_form = False
form_class = ComponentInitCreateForm
def get_form_class(self):
"""Return the form class to use."""
if self.stage == "create":
return ComponentCreateForm
if self.stage == "discover":
return ComponentDiscoverForm
return self.form_class
def get_form_kwargs(self):
result = super().get_form_kwargs()
if self.request.method != "POST":
if self.initial:
# When going from other form (for example ZIP import)
result.pop("data", None)
result.pop("files", None)
if self.has_all_fields() and not self.empty_form:
result["data"] = self.request.GET
return result
def get_success_url(self):
return reverse(
"component_progress", kwargs=self.object.get_reverse_url_kwargs()
)
def warn_outdated(self, form):
linked = form.instance.linked_component
if linked:
perform_update.delay("Component", linked.pk, auto=True)
if linked.repo_needs_merge():
messages.warning(
self.request,
_(
"The repository is outdated, you might not get "
"expected results until you update it."
),
)
def detect_license(self, form):
"""Automatic license detection based on licensee."""
try:
process_result = subprocess.run(
["licensee", "detect", "--json", form.instance.full_path],
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=get_clean_env(),
check=True,
)
except FileNotFoundError:
return
except (OSError, subprocess.CalledProcessError) as error:
if getattr(error, "returncode", 0) != 1:
report_error(cause="Failed licensee invocation")
return
result = json.loads(process_result.stdout)
for license_data in result["licenses"]:
spdx_id = license_data["spdx_id"]
for license in (f"{spdx_id}-or-later", f"{spdx_id}-only", spdx_id):
if license in LICENSE_URLS:
self.initial["license"] = license
messages.info(
self.request,
_("Detected license as %s, please check whether it is correct.")
% license,
)
return
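# For reference, a trimmed sketch of the licensee JSON consumed above; apart
# from "licenses" and "spdx_id", which the code relies on, the exact fields
# depend on the licensee version and are illustrative only:
#
#     {"licenses": [{"key": "mit", "spdx_id": "MIT"}], "matched_files": []}
#
# "MIT" would then be tried as "MIT-or-later", "MIT-only" and finally "MIT"
# against LICENSE_URLS before being stored as the initial license value.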
def form_valid(self, form):
if self.stage == "create":
result = super().form_valid(form)
self.object.post_create(self.request.user)
return result
if self.stage == "discover":
# Move to create
self.initial = form.cleaned_data
self.stage = "create"
self.request.method = "GET"
self.warn_outdated(form)
self.detect_license(form)
return self.get(self, self.request)
# Move to discover
self.stage = "discover"
self.request.method = "GET"
self.initial = form.cleaned_data
self.warn_outdated(form)
return self.get(self, self.request)
def get_form(self, form_class=None, empty=False):
self.empty_form = empty
form = super().get_form(form_class)
if "project" in form.fields:
project_field = form.fields["project"]
project_field.queryset = self.projects
project_field.empty_label = None
if self.selected_project:
project_field.initial = self.selected_project
try:
form.fields["source_language"].initial = Component.objects.filter(
project=self.selected_project
)[0].source_language_id
except IndexError:
pass
self.empty_form = False
return form
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
kwargs["projects"] = self.projects
kwargs["stage"] = self.stage
return kwargs
def fetch_params(self, request):
try:
self.selected_project = int(
request.POST.get("project", request.GET.get("project", ""))
)
except ValueError:
self.selected_project = ""
if request.user.is_superuser:
self.projects = Project.objects.order()
elif self.has_billing:
from weblate.billing.models import Billing
self.projects = request.user.owned_projects.filter(
billing__in=Billing.objects.get_valid()
).order()
else:
self.projects = request.user.owned_projects
self.initial = {}
for field in self.basic_fields:
if field in request.GET:
self.initial[field] = request.GET[field]
def has_all_fields(self):
return self.stage == "init" and all(
field in self.request.GET for field in self.basic_fields
)
def dispatch(self, request, *args, **kwargs):
if "filemask" in request.POST:
self.stage = "create"
elif "discovery" in request.POST:
self.stage = "discover"
else:
self.stage = "init"
self.fetch_params(request)
# Proceed to post if all params are present
if self.has_all_fields():
return self.post(request, *args, **kwargs)
return super().dispatch(request, *args, **kwargs)
class CreateFromZip(CreateComponent):
form_class = ComponentZipCreateForm
def form_valid(self, form):
if self.stage != "init":
return super().form_valid(form)
try:
create_component_from_zip(form.cleaned_data)
except BadZipfile:
form.add_error("zipfile", _("Failed to parse uploaded ZIP file."))
return self.form_invalid(form)
# Move to discover phase
self.stage = "discover"
self.initial = form.cleaned_data
self.initial["vcs"] = "local"
self.initial["repo"] = "local:"
self.initial["branch"] = "main"
self.initial.pop("zipfile")
self.request.method = "GET"
return self.get(self, self.request)
class CreateFromDoc(CreateComponent):
form_class = ComponentDocCreateForm
def form_valid(self, form):
if self.stage != "init":
return super().form_valid(form)
create_component_from_doc(form.cleaned_data)
# Move to discover phase
self.stage = "discover"
self.initial = form.cleaned_data
self.initial["vcs"] = "local"
self.initial["repo"] = "local:"
self.initial["branch"] = "main"
self.initial.pop("docfile")
self.request.method = "GET"
return self.get(self, self.request)
class CreateComponentSelection(CreateComponent):
template_name = "trans/component_create.html"
components = None
origin = None
@cached_property
def branch_data(self):
def branch_exists(repo, branch):
return Component.objects.filter(repo=repo, branch=branch).exists()
result = {}
for component in self.components:
repo = component.repo
branches = [
branch
for branch in component.repository.list_remote_branches()
if branch != component.branch and not branch_exists(repo, branch)
]
if branches:
result[component.pk] = branches
return result
def fetch_params(self, request):
super().fetch_params(request)
self.components = (
Component.objects.filter_access(request.user)
.with_repo()
.prefetch()
.filter(project__in=self.projects)
.order_project()
)
if self.selected_project:
self.components = self.components.filter(project__pk=self.selected_project)
self.origin = request.POST.get("origin")
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
kwargs["components"] = self.components
kwargs["selected_project"] = self.selected_project
kwargs["existing_form"] = self.get_form(ComponentSelectForm, empty=True)
kwargs["branch_form"] = self.get_form(ComponentBranchForm, empty=True)
kwargs["branch_data"] = json.dumps(self.branch_data)
kwargs["full_form"] = self.get_form(ComponentInitCreateForm, empty=True)
if "local" in VCS_REGISTRY:
kwargs["zip_form"] = self.get_form(ComponentZipCreateForm, empty=True)
kwargs["scratch_form"] = self.get_form(
ComponentScratchCreateForm, empty=True
)
kwargs["doc_form"] = self.get_form(ComponentDocCreateForm, empty=True)
if self.origin == "branch":
kwargs["branch_form"] = kwargs["form"]
elif self.origin == "scratch":
kwargs["scratch_form"] = kwargs["form"]
else:
kwargs["existing_form"] = kwargs["form"]
return kwargs
def get_form(self, form_class=None, empty=False):
form = super().get_form(form_class, empty=empty)
if isinstance(form, ComponentBranchForm):
form.fields["component"].queryset = Component.objects.filter(
pk__in=self.branch_data.keys()
).order_project()
form.branch_data = self.branch_data
elif isinstance(form, ComponentSelectForm):
form.fields["component"].queryset = self.components
return form
def get_form_class(self):
if self.origin == "branch":
return ComponentBranchForm
if self.origin == "scratch":
return ComponentScratchCreateForm
return ComponentSelectForm
def redirect_create(self, **kwargs):
return redirect(
"{}?{}".format(reverse("create-component-vcs"), urlencode(kwargs))
)
def form_valid(self, form):
if self.origin == "scratch":
component = scratch_create_component(**form.cleaned_data)
return redirect(
reverse("component_progress", kwargs=component.get_reverse_url_kwargs())
)
component = form.cleaned_data["component"]
if self.origin == "existing":
return self.redirect_create(
repo=component.get_repo_link_url(),
project=component.project.pk,
name=form.cleaned_data["name"],
slug=form.cleaned_data["slug"],
vcs=component.vcs,
source_language=component.source_language.pk,
)
if self.origin == "branch":
form.instance.save()
return redirect(
reverse(
"component_progress", kwargs=form.instance.get_reverse_url_kwargs()
)
)
return redirect("create-component")
def post(self, request, *args, **kwargs):
if self.origin == "vcs":
kwargs = {}
if self.selected_project:
kwargs["project"] = self.selected_project
return self.redirect_create(**kwargs)
return super().post(request, *args, **kwargs)
|
import re
import os
import imp
from kalliope.core.Utils.Utils import KalliopeModuleNotFoundError
from kalliope.core.ConfigurationManager.SettingLoader import SettingLoader
class InvalidSynapeName(Exception):
"""
The name of the synapse is not correct. It should only contain alphanumerics at the beginning and the end of
its name. It can also contain dashes between alphanumerics.
"""
pass
class NoSynapeName(Exception):
"""
A synapse needs a name
"""
pass
class NoSynapeNeurons(Exception):
"""
A synapse must contain at least one neuron
.. seealso:: Synapse, Neuron
"""
pass
class NoSynapeSignals(Exception):
"""
A synapse must contain at least an Event or an Order
.. seealso:: Event, Order
"""
pass
class NoValidSignal(Exception):
"""
A synapse must contain at least a valid Event or an Order
.. seealso:: Event, Order
"""
pass
class MultipleSameSynapseName(Exception):
"""
A synapse name must be unique
"""
pass
class NoValidOrder(Exception):
pass
class ConfigurationChecker:
"""
This class provides all the methods to check that the configuration files are properly set up.
"""
def __init__(self):
pass
@staticmethod
def check_synape_dict(synape_dict):
"""
Return True if the provided dict corresponds to a valid Synapse
:param synape_dict: The synapse Dictionary
:type synape_dict: Dict
:return: True if the synapse is ok
:rtype: Boolean
:Example:
ConfigurationChecker().check_synape_dict(synapses_dict):
.. seealso:: Synapse
.. raises:: NoSynapeName, InvalidSynapeName, NoSynapeNeurons, NoSynapeSignals
.. warnings:: Static and Public
"""
if 'name' not in synape_dict:
raise NoSynapeName("The Synapse does not have a name: %s" % synape_dict)
# check that the name conforms to the expected pattern
# Regex for [a-zA-Z0-9\-] with dashes allowed in between but not at the start or end
pattern = r'(?=[a-zA-Z0-9\-]{4,100}$)^[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*$'
prog = re.compile(pattern)
result = prog.match(synape_dict["name"])
if result is None:
raise InvalidSynapeName("Error with synapse name \"%s\". "
"Valid syntax: "
"at least 4 characters, "
"[a-zA-Z0-9\-] with dashes allowed in between but not at the start or end" %
synape_dict["name"])
if 'neurons' not in synape_dict:
raise NoSynapeNeurons("The Synapse does not have neurons: %s" % synape_dict)
if 'signals' not in synape_dict:
raise NoSynapeSignals("The Synapse does not have signals: %s" % synape_dict)
return True
@staticmethod
def check_neuron_dict(neuron_dict):
"""
Check that the received neuron dict is valid:
:param neuron_dict: The neuron Dictionary
:type neuron_dict: Dict
:return: True if neuron is ok
:rtype: Boolean
:Example:
ConfigurationChecker().check_neuron_dict(neurons_dict):
.. seealso:: Synapse
.. raises:: ModuleNotFoundError
.. warnings:: Static and Public
"""
def check_neuron_exist(neuron_module_name):
"""
Return True if the neuron_name Python class exists in the neurons package
:param neuron_module_name: Name of the neuron module to check
:type neuron_module_name: str
:return:
"""
sl = SettingLoader()
settings = sl.settings
package_name = "kalliope.neurons" + "." + neuron_module_name.lower() + "." + neuron_module_name.lower()
if settings.resources.neuron_folder is not None:
neuron_resource_path = settings.resources.neuron_folder + \
os.sep + neuron_module_name.lower() + os.sep + \
neuron_module_name.lower() + ".py"
if os.path.exists(neuron_resource_path):
imp.load_source(neuron_module_name.capitalize(), neuron_resource_path)
package_name = neuron_module_name.capitalize()
try:
mod = __import__(package_name, fromlist=[neuron_module_name.capitalize()])
getattr(mod, neuron_module_name.capitalize())
except AttributeError:
raise KalliopeModuleNotFoundError("[AttributeError] The module %s does not exist in the package %s " % (
neuron_module_name.capitalize(),
package_name))
except ImportError:
raise KalliopeModuleNotFoundError(
"[ImportError] The module %s does not exist in the package %s " % (neuron_module_name.capitalize(),
package_name))
return True
if isinstance(neuron_dict, dict):
for neuron_name in neuron_dict:
check_neuron_exist(neuron_name)
else:
check_neuron_exist(neuron_dict)
return True
@staticmethod
def check_signal_dict(signal_dict):
def check_signal_exist(signal_name):
"""
Return True if the signal_name Python class exists in the signals package
:param signal_name: Name of the signal module to check
:type signal_name: str
:return:
"""
sl = SettingLoader()
settings = sl.settings
package_name = "kalliope.signals" + "." + signal_name.lower() + "." + signal_name.lower()
if settings.resources.signal_folder is not None:
signal_resource_path = settings.resources.signal_folder + \
os.sep + signal_name.lower() + os.sep + \
signal_name.lower() + ".py"
if os.path.exists(signal_resource_path):
imp.load_source(signal_name.capitalize(), signal_resource_path)
package_name = signal_name.capitalize()
try:
mod = __import__(package_name, fromlist=[signal_name.capitalize()])
getattr(mod, signal_name.capitalize())
except AttributeError:
raise KalliopeModuleNotFoundError(
"[AttributeError] The module %s does not exist in the package %s " % (signal_name.capitalize(),
package_name))
except ImportError:
raise KalliopeModuleNotFoundError(
"[ImportError] The module %s does not exist in the package %s " % (signal_name.capitalize(),
package_name))
return True
if isinstance(signal_dict, dict):
for signal_name in signal_dict:
check_signal_exist(signal_name)
else:
check_signal_exist(signal_dict)
return True
@staticmethod
def check_order_dict(order_dict):
"""
Check that the received order dictionary is valid:
:param order_dict: The Order Dict
:type order_dict: Dict
:return: True if the order is ok
:rtype: Boolean
:Example:
ConfigurationChecker().check_order_dict(order_dict):
.. seealso:: Order
.. warnings:: Static and Public
"""
if order_dict is None or order_dict == "":
raise NoValidOrder("An order cannot be null or empty")
return True
@staticmethod
def check_synapes(synapses_list):
"""
Check that the synapse list is ok:
- no two synapses share the same name
:param synapses_list: The Synapse List
:type synapses_list: List
:return: True if the synapse list is ok
:rtype: Boolean
:Example:
ConfigurationChecker().check_synapes(order_dict):
.. seealso:: Synapse
.. raises:: MultipleSameSynapseName
.. warnings:: Static and Public
"""
seen = set()
for synapse in synapses_list:
# encode the synapse name to UTF-8 bytes before comparing
synapse_name = synapse.name.encode('utf-8')
if synapse_name in seen:
raise MultipleSameSynapseName("Multiple synapse found with the same name: %s" % synapse_name)
seen.add(synapse_name)
return True
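# Illustrative examples, not part of the original module, of what the synapse
# name pattern in check_synape_dict accepts: at least 4 characters, alphanumeric
# groups optionally separated by single dashes, no leading or trailing dash.
if __name__ == "__main__":
    example_pattern = r'(?=[a-zA-Z0-9\-]{4,100}$)^[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*$'
    for example_name in ("say-hello", "wake-up-2", "abc", "-hello", "hello-", "say--hello"):
        print(example_name, bool(re.match(example_pattern, example_name)))
    # Expected: say-hello True, wake-up-2 True, abc False (shorter than 4),
    # -hello False, hello- False, say--hello False (empty group between dashes)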
|
from http.client import HTTPException
import logging
from pycsspeechtts import pycsspeechtts
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import CONF_API_KEY, CONF_TYPE, PERCENTAGE
import homeassistant.helpers.config_validation as cv
CONF_GENDER = "gender"
CONF_OUTPUT = "output"
CONF_RATE = "rate"
CONF_VOLUME = "volume"
CONF_PITCH = "pitch"
CONF_CONTOUR = "contour"
CONF_REGION = "region"
_LOGGER = logging.getLogger(__name__)
SUPPORTED_LANGUAGES = [
"ar-eg",
"ar-sa",
"ca-es",
"cs-cz",
"da-dk",
"de-at",
"de-ch",
"de-de",
"el-gr",
"en-au",
"en-ca",
"en-gb",
"en-ie",
"en-in",
"en-us",
"es-es",
"es-mx",
"fi-fi",
"fr-ca",
"fr-ch",
"fr-fr",
"he-il",
"hi-in",
"hu-hu",
"id-id",
"it-it",
"ja-jp",
"ko-kr",
"nb-no",
"nl-nl",
"pl-pl",
"pt-br",
"pt-pt",
"ro-ro",
"ru-ru",
"sk-sk",
"sv-se",
"th-th",
"tr-tr",
"zh-cn",
"zh-hk",
"zh-tw",
]
GENDERS = ["Female", "Male"]
DEFAULT_LANG = "en-us"
DEFAULT_GENDER = "Female"
DEFAULT_TYPE = "ZiraRUS"
DEFAULT_OUTPUT = "audio-16khz-128kbitrate-mono-mp3"
DEFAULT_RATE = 0
DEFAULT_VOLUME = 0
DEFAULT_PITCH = "default"
DEFAULT_CONTOUR = ""
DEFAULT_REGION = "eastus"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(GENDERS),
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(
vol.Coerce(int), vol.Range(-100, 100)
),
vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME): vol.All(
vol.Coerce(int), vol.Range(-100, 100)
),
vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.string,
vol.Optional(CONF_CONTOUR, default=DEFAULT_CONTOUR): cv.string,
vol.Optional(CONF_REGION, default=DEFAULT_REGION): cv.string,
}
)
def get_engine(hass, config, discovery_info=None):
"""Set up Microsoft speech component."""
return MicrosoftProvider(
config[CONF_API_KEY],
config[CONF_LANG],
config[CONF_GENDER],
config[CONF_TYPE],
config[CONF_RATE],
config[CONF_VOLUME],
config[CONF_PITCH],
config[CONF_CONTOUR],
config[CONF_REGION],
)
class MicrosoftProvider(Provider):
"""The Microsoft speech API provider."""
def __init__(
self, apikey, lang, gender, ttype, rate, volume, pitch, contour, region
):
"""Init Microsoft TTS service."""
self._apikey = apikey
self._lang = lang
self._gender = gender
self._type = ttype
self._output = DEFAULT_OUTPUT
self._rate = f"{rate}{PERCENTAGE}"
self._volume = f"{volume}{PERCENTAGE}"
self._pitch = pitch
self._contour = contour
self._region = region
self.name = "Microsoft"
@property
def default_language(self):
"""Return the default language."""
return self._lang
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORTED_LANGUAGES
def get_tts_audio(self, message, language, options=None):
"""Load TTS from Microsoft."""
if language is None:
language = self._lang
try:
trans = pycsspeechtts.TTSTranslator(self._apikey, self._region)
data = trans.speak(
language=language,
gender=self._gender,
voiceType=self._type,
output=self._output,
rate=self._rate,
volume=self._volume,
pitch=self._pitch,
contour=self._contour,
text=message,
)
except HTTPException as ex:
_LOGGER.error("Error occurred for Microsoft TTS: %s", ex)
return (None, None)
return ("mp3", data)
|
from uuid import uuid4
from aiohttp import ClientConnectionError, ClientResponseError
from pysmartthings import InstalledAppStatus, OAuthToken
import pytest
from homeassistant.components import cloud, smartthings
from homeassistant.components.smartthings.const import (
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DOMAIN,
EVENT_BUTTON,
SIGNAL_SMARTTHINGS_UPDATE,
SUPPORTED_PLATFORMS,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_FORBIDDEN, HTTP_INTERNAL_SERVER_ERROR
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
async def test_migration_creates_new_flow(hass, smartthings_mock, config_entry):
"""Test migration deletes app and creates new flow."""
assert await async_setup_component(hass, "persistent_notification", {})
config_entry.version = 1
config_entry.add_to_hass(hass)
await smartthings.async_migrate_entry(hass, config_entry)
await hass.async_block_till_done()
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]["handler"] == "smartthings"
assert flows[0]["context"] == {"source": "import"}
async def test_unrecoverable_api_errors_create_new_flow(
hass, config_entry, smartthings_mock
):
"""
Test a new config flow is initiated when there are API errors.
401 (unauthorized): Occurs when the access token is no longer valid.
403 (forbidden/not found): Occurs when the app or installed app could
not be retrieved/found (likely deleted?)
"""
assert await async_setup_component(hass, "persistent_notification", {})
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=401
)
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
# Assert entry was removed and new flow created
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]["handler"] == "smartthings"
assert flows[0]["context"] == {"source": "import"}
hass.config_entries.flow.async_abort(flows[0]["flow_id"])
async def test_recoverable_api_errors_raise_not_ready(
hass, config_entry, smartthings_mock
):
"""Test config entry not ready raised for recoverable API errors."""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_api_errors_raise_not_ready(
hass, config_entry, app, installed_app, smartthings_mock
):
"""Test config entry not ready raised for scenes API errors."""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.scenes.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_connection_errors_raise_not_ready(hass, config_entry, smartthings_mock):
"""Test config entry not ready raised for connection errors."""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = ClientConnectionError()
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_base_url_no_longer_https_does_not_load(
hass, config_entry, app, smartthings_mock
):
"""Test base_url no longer valid creates a new flow."""
await async_process_ha_core_config(
hass,
{"external_url": "http://example.local:8123"},
)
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
async def test_unauthorized_installed_app_raises_not_ready(
hass, config_entry, app, installed_app, smartthings_mock
):
"""Test config entry not ready raised when the app isn't authorized."""
config_entry.add_to_hass(hass)
installed_app.installed_app_status = InstalledAppStatus.PENDING
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_unauthorized_loads_platforms(
hass,
config_entry,
app,
installed_app,
device,
smartthings_mock,
subscription_factory,
):
"""Test platforms still load when scenes are unauthorized."""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_FORBIDDEN
)
mock_token = Mock()
mock_token.access_token = str(uuid4())
mock_token.refresh_token = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [
subscription_factory(capability) for capability in device.capabilities
]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_platforms(
hass,
config_entry,
app,
installed_app,
device,
smartthings_mock,
subscription_factory,
scene,
):
"""Test config entry loads properly and proxies to platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token = str(uuid4())
mock_token.refresh_token = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [
subscription_factory(capability) for capability in device.capabilities
]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_unconnected_cloud(
hass,
config_entry,
app,
installed_app,
device,
smartthings_mock,
subscription_factory,
scene,
):
"""Test entry loads during startup when cloud isn't connected."""
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token = str(uuid4())
mock_token.refresh_token = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [
subscription_factory(capability) for capability in device.capabilities
]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_unload_entry(hass, config_entry):
"""Test entries are unloaded correctly."""
connect_disconnect = Mock()
smart_app = Mock()
smart_app.connect_event.return_value = connect_disconnect
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), smart_app, [], [])
broker.connect()
hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] = broker
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as forward_mock:
assert await smartthings.async_unload_entry(hass, config_entry)
assert connect_disconnect.call_count == 1
assert config_entry.entry_id not in hass.data[DOMAIN][DATA_BROKERS]
# Assert platforms unloaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_remove_entry(hass, config_entry, smartthings_mock):
"""Test that the installed app and app are removed."""
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_cloudhook(hass, config_entry, smartthings_mock):
"""Test that the installed app, app, and cloudhook are removed."""
hass.config.components.add("cloud")
# Arrange
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
# Act
with patch.object(
cloud, "async_is_logged_in", return_value=True
) as mock_async_is_logged_in, patch.object(
cloud, "async_delete_cloudhook"
) as mock_async_delete_cloudhook:
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert mock_async_is_logged_in.call_count == 1
assert mock_async_delete_cloudhook.call_count == 1
async def test_remove_entry_app_in_use(hass, config_entry, smartthings_mock):
"""Test app is not removed if in use by another config entry."""
# Arrange
config_entry.add_to_hass(hass)
data = config_entry.data.copy()
data[CONF_INSTALLED_APP_ID] = str(uuid4())
entry2 = MockConfigEntry(version=2, domain=DOMAIN, data=data)
entry2.add_to_hass(hass)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_already_deleted(hass, config_entry, smartthings_mock):
"""Test handling when the apps have already been removed."""
request_info = Mock(real_url="http://example.com")
# Arrange
smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_FORBIDDEN
)
smartthings_mock.delete_app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_FORBIDDEN
)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_installedapp_api_error(
hass, config_entry, smartthings_mock
):
"""Test raises exceptions removing the installed app."""
request_info = Mock(real_url="http://example.com")
# Arrange
smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_installedapp_unknown_error(
hass, config_entry, smartthings_mock
):
"""Test raises exceptions removing the installed app."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_app_api_error(hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
request_info = Mock(real_url="http://example.com")
smartthings_mock.delete_app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_INTERNAL_SERVER_ERROR
)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_app_unknown_error(hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
smartthings_mock.delete_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_broker_regenerates_token(hass, config_entry):
"""Test the device broker regenerates the refresh token."""
token = Mock(OAuthToken)
token.refresh_token = str(uuid4())
stored_action = None
def async_track_time_interval(hass, action, interval):
nonlocal stored_action
stored_action = action
with patch(
"homeassistant.components.smartthings.async_track_time_interval",
new=async_track_time_interval,
):
broker = smartthings.DeviceBroker(hass, config_entry, token, Mock(), [], [])
broker.connect()
assert stored_action
await stored_action(None) # pylint:disable=not-callable
assert token.refresh.call_count == 1
assert config_entry.data[CONF_REFRESH_TOKEN] == token.refresh_token
async def test_event_handler_dispatches_updated_devices(
hass, config_entry, device_factory, event_request_factory, event_factory
):
"""Test the event handler dispatches updated devices."""
devices = [
device_factory("Bedroom 1 Switch", ["switch"]),
device_factory("Bathroom 1", ["switch"]),
device_factory("Sensor", ["motionSensor"]),
device_factory("Lock", ["lock"]),
]
device_ids = [
devices[0].device_id,
devices[1].device_id,
devices[2].device_id,
devices[3].device_id,
]
event = event_factory(
devices[3].device_id,
capability="lock",
attribute="lock",
value="locked",
data={"codeId": "1"},
)
request = event_request_factory(device_ids=device_ids, events=[event])
config_entry.data = {
**config_entry.data,
CONF_INSTALLED_APP_ID: request.installed_app_id,
}
called = False
def signal(ids):
nonlocal called
called = True
assert device_ids == ids
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), devices, [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
for device in devices:
assert device.status.values["Updated"] == "Value"
assert devices[3].status.attributes["lock"].value == "locked"
assert devices[3].status.attributes["lock"].data == {"codeId": "1"}
async def test_event_handler_ignores_other_installed_app(
hass, config_entry, device_factory, event_request_factory
):
"""Test the event handler ignores events from other installed apps."""
device = device_factory("Bedroom 1 Switch", ["switch"])
request = event_request_factory([device.device_id])
called = False
def signal(ids):
nonlocal called
called = True
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert not called
async def test_event_handler_fires_button_events(
hass, config_entry, device_factory, event_factory, event_request_factory
):
"""Test the event handler fires button events."""
device = device_factory("Button 1", ["button"])
event = event_factory(
device.device_id, capability="button", attribute="button", value="pushed"
)
request = event_request_factory(events=[event])
config_entry.data = {
**config_entry.data,
CONF_INSTALLED_APP_ID: request.installed_app_id,
}
called = False
def handler(evt):
nonlocal called
called = True
assert evt.data == {
"component_id": "main",
"device_id": device.device_id,
"location_id": event.location_id,
"value": "pushed",
"name": device.label,
"data": None,
}
hass.bus.async_listen(EVENT_BUTTON, handler)
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
|
from homeassistant import config_entries, setup
from homeassistant.components.upb.const import DOMAIN
from tests.async_mock import MagicMock, PropertyMock, patch
def mocked_upb(sync_complete=True, config_ok=True):
"""Mock UPB lib."""
def _upb_lib_connect(callback):
callback()
upb_mock = MagicMock()
type(upb_mock).network_id = PropertyMock(return_value="42")
type(upb_mock).config_ok = PropertyMock(return_value=config_ok)
if sync_complete:
upb_mock.connect.side_effect = _upb_lib_connect
return patch(
"homeassistant.components.upb.config_flow.upb_lib.UpbPim", return_value=upb_mock
)
async def valid_tcp_flow(hass, sync_complete=True, config_ok=True):
"""Get a result dict that is standard for most tests."""
await setup.async_setup_component(hass, "persistent_notification", {})
with mocked_upb(sync_complete, config_ok), patch(
"homeassistant.components.upb.async_setup_entry", return_value=True
):
flow = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"],
{"protocol": "TCP", "address": "1.2.3.4", "file_path": "upb.upe"},
)
return result
async def test_full_upb_flow_with_serial_port(hass):
"""Test a full UPB config flow with serial port."""
await setup.async_setup_component(hass, "persistent_notification", {})
with mocked_upb(), patch(
"homeassistant.components.upb.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.upb.async_setup_entry", return_value=True
) as mock_setup_entry:
flow = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"],
{
"protocol": "Serial port",
"address": "/dev/ttyS0:115200",
"file_path": "upb.upe",
},
)
await hass.async_block_till_done()
assert flow["type"] == "form"
assert flow["errors"] == {}
assert result["type"] == "create_entry"
assert result["title"] == "UPB"
assert result["data"] == {
"host": "serial:///dev/ttyS0:115200",
"file_path": "upb.upe",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_with_tcp_upb(hass):
"""Test we can set up a TCP UPB."""
result = await valid_tcp_flow(hass)
assert result["type"] == "create_entry"
assert result["data"] == {"host": "tcp://1.2.3.4", "file_path": "upb.upe"}
await hass.async_block_till_done()
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
from asyncio import TimeoutError
with patch(
"homeassistant.components.upb.config_flow.async_timeout.timeout",
side_effect=TimeoutError,
):
result = await valid_tcp_flow(hass, sync_complete=False)
assert result["type"] == "form"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_missing_upb_file(hass):
"""Test we handle a missing UPB file."""
result = await valid_tcp_flow(hass, config_ok=False)
assert result["type"] == "form"
assert result["errors"] == {"base": "invalid_upb_file"}
async def test_form_user_with_already_configured(hass):
"""Test we handle an already configured UPB."""
_ = await valid_tcp_flow(hass)
result2 = await valid_tcp_flow(hass)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
async def test_form_import(hass):
"""Test we get the form with import source."""
await setup.async_setup_component(hass, "persistent_notification", {})
with mocked_upb(), patch(
"homeassistant.components.upb.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.upb.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "tcp://42.4.2.42", "file_path": "upb.upe"},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "UPB"
assert result["data"] == {"host": "tcp://42.4.2.42", "file_path": "upb.upe"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_junk_input(hass):
"""Test we handle junk input from the import source."""
await setup.async_setup_component(hass, "persistent_notification", {})
with mocked_upb():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"foo": "goo", "goo": "foo"},
)
assert result["type"] == "form"
assert result["errors"] == {"base": "unknown"}
await hass.async_block_till_done()
|
from api.soma_api import SomaApi
from requests import RequestException
from homeassistant import data_entry_flow
from homeassistant.components.soma import DOMAIN, config_flow
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_HOST = "123.45.67.89"
MOCK_PORT = 3000
async def test_form(hass):
"""Test user form showing."""
flow = config_flow.SomaFlowHandler()
flow.hass = hass
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_import_abort(hass):
"""Test configuration from YAML aborting with an existing entry."""
flow = config_flow.SomaFlowHandler()
flow.hass = hass
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await flow.async_step_import()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_setup"
async def test_import_create(hass):
"""Test configuration from YAML."""
flow = config_flow.SomaFlowHandler()
flow.hass = hass
with patch.object(SomaApi, "list_devices", return_value={"result": "success"}):
result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_error_status(hass):
"""Test connecting successfully but the API returning an error status."""
flow = config_flow.SomaFlowHandler()
flow.hass = hass
with patch.object(SomaApi, "list_devices", return_value={"result": "error"}):
result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "result_error"
async def test_key_error(hass):
"""Test the API returning an empty response."""
flow = config_flow.SomaFlowHandler()
flow.hass = hass
with patch.object(SomaApi, "list_devices", return_value={}):
result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "connection_error"
async def test_exception(hass):
"""Test if RequestException fires when no connection can be made."""
flow = config_flow.SomaFlowHandler()
flow.hass = hass
with patch.object(SomaApi, "list_devices", side_effect=RequestException()):
result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "connection_error"
async def test_full_flow(hass):
"""Check classic use case."""
hass.data[DOMAIN] = {}
flow = config_flow.SomaFlowHandler()
flow.hass = hass
with patch.object(SomaApi, "list_devices", return_value={"result": "success"}):
result = await flow.async_step_user({"host": MOCK_HOST, "port": MOCK_PORT})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
import os.path as op
from numpy.testing import assert_allclose
import shutil
from mne.datasets.testing import data_path, requires_testing_data
from mne.utils import run_tests_if_main, requires_h5py
from mne.io import read_raw_snirf, read_raw_nirx
from mne.io.tests.test_raw import _test_raw_reader
fname_snirf_15_2_short = op.join(data_path(download=False),
'SNIRF',
'snirf_1_3_nirx_15_2_recording_w_short.snirf')
fname_original = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_2_recording_w_short')
@requires_testing_data
@requires_h5py
def test_snirf_basic():
"""Test reading SNIRF files."""
raw = read_raw_snirf(fname_snirf_15_2_short, preload=True)
# Test data import
assert raw._data.shape == (26, 145)
assert raw.info['sfreq'] == 12.5
# Test channel naming
assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850",
"S1_D9 760", "S1_D9 850"]
assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"]
# Test frequency encoding
assert raw.info['chs'][0]['loc'][9] == 760
assert raw.info['chs'][1]['loc'][9] == 850
# Test source locations
assert_allclose([-8.6765 * 1e-2, 0.0049 * 1e-2, -2.6167 * 1e-2],
raw.info['chs'][0]['loc'][3:6], rtol=0.02)
assert_allclose([7.9579 * 1e-2, -2.7571 * 1e-2, -2.2631 * 1e-2],
raw.info['chs'][4]['loc'][3:6], rtol=0.02)
assert_allclose([-2.1387 * 1e-2, -8.8874 * 1e-2, 3.8393 * 1e-2],
raw.info['chs'][8]['loc'][3:6], rtol=0.02)
assert_allclose([1.8602 * 1e-2, 9.7164 * 1e-2, 1.7539 * 1e-2],
raw.info['chs'][12]['loc'][3:6], rtol=0.02)
assert_allclose([-0.1108 * 1e-2, 0.7066 * 1e-2, 8.9883 * 1e-2],
raw.info['chs'][16]['loc'][3:6], rtol=0.02)
# Test detector locations
assert_allclose([-8.0409 * 1e-2, -2.9677 * 1e-2, -2.5415 * 1e-2],
raw.info['chs'][0]['loc'][6:9], rtol=0.02)
assert_allclose([-8.7329 * 1e-2, 0.7577 * 1e-2, -2.7980 * 1e-2],
raw.info['chs'][3]['loc'][6:9], rtol=0.02)
assert_allclose([9.2027 * 1e-2, 0.0161 * 1e-2, -2.8909 * 1e-2],
raw.info['chs'][5]['loc'][6:9], rtol=0.02)
assert_allclose([7.7548 * 1e-2, -3.5901 * 1e-2, -2.3179 * 1e-2],
raw.info['chs'][7]['loc'][6:9], rtol=0.02)
assert 'fnirs_cw_amplitude' in raw
@requires_testing_data
@requires_h5py
def test_snirf_against_nirx():
"""Test against the NIRX file the SNIRF file was created from."""
raw = read_raw_snirf(fname_snirf_15_2_short, preload=True)
raw_orig = read_raw_nirx(fname_original, preload=True)
# Check annotations are the same
assert_allclose(raw.annotations.onset, raw_orig.annotations.onset)
assert_allclose([float(d) for d in raw.annotations.description],
[float(d) for d in raw_orig.annotations.description])
assert_allclose(raw.annotations.duration, raw_orig.annotations.duration)
# Check names are the same
assert raw.info['ch_names'] == raw_orig.info['ch_names']
# Check frequencies are the same
num_chans = len(raw.ch_names)
new_chs = raw.info['chs']
ori_chs = raw_orig.info['chs']
assert_allclose([new_chs[idx]['loc'][9] for idx in range(num_chans)],
[ori_chs[idx]['loc'][9] for idx in range(num_chans)])
# Check data is the same
assert_allclose(raw.get_data(), raw_orig.get_data())
@requires_h5py
@requires_testing_data
def test_snirf_nonstandard(tmpdir):
"""Test custom tags."""
from mne.externals.pymatreader.utils import _import_h5py
h5py = _import_h5py()
shutil.copy(fname_snirf_15_2_short, str(tmpdir) + "/mod.snirf")
fname = str(tmpdir) + "/mod.snirf"
# Manually mark up the file to match MNE-NIRS custom tags
with h5py.File(fname, "r+") as f:
f.create_dataset("nirs/metaDataTags/middleName",
data=['X'.encode('UTF-8')])
f.create_dataset("nirs/metaDataTags/lastName",
data=['Y'.encode('UTF-8')])
f.create_dataset("nirs/metaDataTags/sex",
data=['1'.encode('UTF-8')])
raw = read_raw_snirf(fname, preload=True)
assert raw.info["subject_info"]["middle_name"] == 'X'
assert raw.info["subject_info"]["last_name"] == 'Y'
assert raw.info["subject_info"]["sex"] == 1
with h5py.File(fname, "r+") as f:
del f['nirs/metaDataTags/sex']
f.create_dataset("nirs/metaDataTags/sex",
data=['2'.encode('UTF-8')])
raw = read_raw_snirf(fname, preload=True)
assert raw.info["subject_info"]["sex"] == 2
with h5py.File(fname, "r+") as f:
del f['nirs/metaDataTags/sex']
f.create_dataset("nirs/metaDataTags/sex",
data=['0'.encode('UTF-8')])
raw = read_raw_snirf(fname, preload=True)
assert raw.info["subject_info"]["sex"] == 0
with h5py.File(fname, "r+") as f:
f.create_dataset("nirs/metaDataTags/MNE_coordFrame", data=[1])
@requires_testing_data
@requires_h5py
def test_snirf_standard():
"""Test standard operations."""
_test_raw_reader(read_raw_snirf, fname=fname_snirf_15_2_short,
boundary_decimal=0) # low fs
run_tests_if_main()
|
from compressor.templatetags.compress import CompressorNode
from compressor.exceptions import UncompressableFileError
from compressor.base import Compressor
from compressor.conf import settings
from compressor.utils import get_class
from django.template.base import TextNode
def compress(context, data, name):
"""
    data is the string collected from the template (the list of js/css tags in this case),
    name is either 'js' or 'css' (the sekizai namespace).
    Essentially this passes the string through the {% compress 'js' %} template tag.
"""
# separate compressable from uncompressable files
parser = get_class(settings.COMPRESS_PARSER)(data)
compressor = Compressor()
compressable_elements, expanded_elements, deferred_elements = [], [], []
if name == 'js':
for elem in parser.js_elems():
attribs = parser.elem_attribs(elem)
try:
if 'src' in attribs:
compressor.get_basename(attribs['src'])
except UncompressableFileError:
if 'defer' in attribs:
deferred_elements.append(elem)
else:
expanded_elements.append(elem)
else:
compressable_elements.append(elem)
elif name == 'css':
for elem in parser.css_elems():
attribs = parser.elem_attribs(elem)
try:
if parser.elem_name(elem) == 'link' and attribs['rel'].lower() == 'stylesheet':
compressor.get_basename(attribs['href'])
except UncompressableFileError:
expanded_elements.append(elem)
else:
compressable_elements.append(elem)
# reconcatenate them
data = ''.join(parser.elem_str(e) for e in expanded_elements)
expanded_node = CompressorNode(nodelist=TextNode(data), kind=name, mode='file')
data = ''.join(parser.elem_str(e) for e in compressable_elements)
compressable_node = CompressorNode(nodelist=TextNode(data), kind=name, mode='file')
data = ''.join(parser.elem_str(e) for e in deferred_elements)
deferred_node = CompressorNode(nodelist=TextNode(data), kind=name, mode='file')
return '\n'.join([
expanded_node.get_original_content(context=context),
compressable_node.render(context=context),
deferred_node.get_original_content(context=context),
])
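# Example (sketch): this function is intended to be used as a sekizai
# postprocessor. Assuming this module were importable as
# "myproject.sekizai_compress" (a hypothetical path), a template would route
# the collected blocks through it roughly like this:
#
#   {% load sekizai_tags %}
#   {% render_block "js" postprocessor "myproject.sekizai_compress.compress" %}
#   {% render_block "css" postprocessor "myproject.sekizai_compress.compress" %}
#
# Per the return value above, uncompressable elements are emitted first as-is,
# followed by the compressed bundle, followed by any deferred <script> tags.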
|
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugins.compile.rest import _align_choice, _align_options_base
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
"""Plugin for soundclound directive."""
name = "rest_soundcloud"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
directives.register_directive('soundcloud', SoundCloud)
directives.register_directive('soundcloud_playlist', SoundCloudPlaylist)
return super().set_site(site)
CODE = """\
<div class="soundcloud-player{align}">
<iframe width="{width}" height="{height}"
scrolling="no" frameborder="no"
src="https://w.soundcloud.com/player/?url=http://api.soundcloud.com/{preslug}/{sid}">
</iframe>
</div>"""
class SoundCloud(Directive):
"""reST extension for inserting SoundCloud embedded music.
Usage:
.. soundcloud:: <sound id>
:height: 400
:width: 600
"""
has_content = True
required_arguments = 1
option_spec = {
'width': directives.positive_int,
'height': directives.positive_int,
"align": _align_choice
}
preslug = "tracks"
def run(self):
"""Run the soundcloud directive."""
self.check_content()
options = {
'sid': self.arguments[0],
'width': 600,
'height': 160,
'preslug': self.preslug,
}
options.update(self.options)
if self.options.get('align') in _align_options_base:
options['align'] = ' align-' + self.options['align']
else:
options['align'] = ''
return [nodes.raw('', CODE.format(**options), format='html')]
def check_content(self):
"""Emit a deprecation warning if there is content."""
if self.content: # pragma: no cover
raise self.warning("This directive does not accept content. The "
"'key=value' format for options is deprecated, "
"use ':key: value' instead")
class SoundCloudPlaylist(SoundCloud):
"""reST directive for SoundCloud playlists."""
preslug = "playlists"
|
from flask import Flask, request, redirect, Response, jsonify
from functools import wraps
from flasgger import Swagger
def requires_basic_auth(f):
"""Decorator to require HTTP Basic Auth for your endpoint."""
def check_auth(username, password):
return username == "guest" and password == "secret"
def authenticate():
return Response(
"Authentication required.", 401,
{"WWW-Authenticate": "Basic realm='Login Required'"},
)
@wraps(f)
def decorated(*args, **kwargs):
# NOTE: This example will require Basic Auth only when you run the
# app directly. For unit tests, we can't block it from getting the
        # Swagger specs so we just allow it to go through without auth.
# The following two lines of code wouldn't be needed in a normal
# production environment.
if __name__ != "__main__":
return f(*args, **kwargs)
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
app = Flask(__name__)
app.config["SWAGGER"] = {
"title": "Swagger Basic Auth App",
"uiversion": 2,
}
swag = Swagger(
template={
"swagger": "2.0",
"info": {
"title": "Swagger Basic Auth App",
"version": "1.0",
},
"consumes": [
"application/json",
],
"produces": [
"application/json",
],
},
)
# passing decorators in init_app
swag.init_app(app, decorators=[requires_basic_auth])
@app.route("/echo/<name>", methods=["GET", "POST"])
def echo(name):
"""
Echo back the name and any posted parameters.
---
tags:
- echo
parameters:
- in: path
name: name
type: string
required: true
- in: body
name: body
description: JSON parameters.
schema:
properties:
first_name:
type: string
description: First name.
example: Alice
last_name:
type: string
description: Last name.
example: Smith
dob:
type: string
format: date
description: Date of birth.
example: 1990-01-01
comment:
type: string
description: Something arbitrary.
example: Hello world
responses:
200:
description: OK.
"""
data = {
"url_name": name,
"json": request.json,
}
return jsonify(data)
@app.route("/")
def index():
return redirect("/apidocs")
if __name__ == "__main__":
app.run(debug=True)
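# Example (sketch): exercising the app started above. The credentials match
# check_auth() ("guest"/"secret"); the host/port assume the default Flask dev
# server and the default flasgger spec route, both of which may differ:
#
#   # The Swagger spec/UI views are wrapped by requires_basic_auth via init_app
#   curl -u guest:secret http://127.0.0.1:5000/apispec_1.json
#   # The echo endpoint itself is not wrapped, so no credentials are needed
#   curl -X POST http://127.0.0.1:5000/echo/alice \
#        -H "Content-Type: application/json" \
#        -d '{"first_name": "Alice", "last_name": "Smith"}'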
|
from spotipy import Spotify, SpotifyException
import voluptuous as vol
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.components.spotify import config_flow
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_CREDENTIALS, CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from homeassistant.helpers.config_entry_oauth2_flow import (
OAuth2Session,
async_get_config_entry_implementation,
)
from homeassistant.helpers.typing import ConfigType
from .const import (
DATA_SPOTIFY_CLIENT,
DATA_SPOTIFY_ME,
DATA_SPOTIFY_SESSION,
DOMAIN,
SPOTIFY_SCOPES,
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Inclusive(CONF_CLIENT_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, ATTR_CREDENTIALS): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Spotify integration."""
if DOMAIN not in config:
return True
if CONF_CLIENT_ID in config[DOMAIN]:
config_flow.SpotifyFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
"https://accounts.spotify.com/authorize",
"https://accounts.spotify.com/api/token",
),
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Spotify from a config entry."""
implementation = await async_get_config_entry_implementation(hass, entry)
session = OAuth2Session(hass, entry, implementation)
await session.async_ensure_token_valid()
spotify = Spotify(auth=session.token["access_token"])
try:
current_user = await hass.async_add_executor_job(spotify.me)
except SpotifyException as err:
raise ConfigEntryNotReady from err
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_SPOTIFY_CLIENT: spotify,
DATA_SPOTIFY_ME: current_user,
DATA_SPOTIFY_SESSION: session,
}
if not set(session.token["scope"].split(" ")).issuperset(SPOTIFY_SCOPES):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "reauth"},
data=entry.data,
)
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, MEDIA_PLAYER_DOMAIN)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Spotify config entry."""
# Unload entities for this entry/device.
await hass.config_entries.async_forward_entry_unload(entry, MEDIA_PLAYER_DOMAIN)
# Cleanup
del hass.data[DOMAIN][entry.entry_id]
if not hass.data[DOMAIN]:
del hass.data[DOMAIN]
return True
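# Example (sketch): the optional YAML configuration accepted by CONFIG_SCHEMA
# above; the credential values are placeholders. Providing both keys registers
# a LocalOAuth2Implementation so the config flow can authorize against Spotify:
#
#   spotify:
#     client_id: YOUR_CLIENT_ID
#     client_secret: YOUR_CLIENT_SECRET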
|
from datetime import timedelta
import logging
from amcrest import AmcrestError
from homeassistant.const import CONF_NAME, CONF_SENSORS, PERCENTAGE
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DATA_AMCREST, DEVICES, SENSOR_SCAN_INTERVAL_SECS, SERVICE_UPDATE
from .helpers import log_update_error, service_signal
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=SENSOR_SCAN_INTERVAL_SECS)
SENSOR_PTZ_PRESET = "ptz_preset"
SENSOR_SDCARD = "sdcard"
# Sensor types are defined like: Name, units, icon
SENSORS = {
SENSOR_PTZ_PRESET: ["PTZ Preset", None, "mdi:camera-iris"],
SENSOR_SDCARD: ["SD Used", PERCENTAGE, "mdi:sd"],
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a sensor for an Amcrest IP Camera."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
device = hass.data[DATA_AMCREST][DEVICES][name]
async_add_entities(
[
AmcrestSensor(name, device, sensor_type)
for sensor_type in discovery_info[CONF_SENSORS]
],
True,
)
class AmcrestSensor(Entity):
"""A sensor implementation for Amcrest IP camera."""
def __init__(self, name, device, sensor_type):
"""Initialize a sensor for Amcrest camera."""
self._name = f"{name} {SENSORS[sensor_type][0]}"
self._signal_name = name
self._api = device.api
self._sensor_type = sensor_type
self._state = None
self._attrs = {}
self._unit_of_measurement = SENSORS[sensor_type][1]
self._icon = SENSORS[sensor_type][2]
self._unsub_dispatcher = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit_of_measurement
@property
def available(self):
"""Return True if entity is available."""
return self._api.available
def update(self):
"""Get the latest data and updates the state."""
if not self.available:
return
_LOGGER.debug("Updating %s sensor", self._name)
try:
if self._sensor_type == SENSOR_PTZ_PRESET:
self._state = self._api.ptz_presets_count
elif self._sensor_type == SENSOR_SDCARD:
storage = self._api.storage_all
try:
self._attrs[
"Total"
] = f"{storage['total'][0]:.2f} {storage['total'][1]}"
except ValueError:
self._attrs[
"Total"
] = f"{storage['total'][0]} {storage['total'][1]}"
try:
self._attrs[
"Used"
] = f"{storage['used'][0]:.2f} {storage['used'][1]}"
except ValueError:
self._attrs["Used"] = f"{storage['used'][0]} {storage['used'][1]}"
try:
self._state = f"{storage['used_percent']:.2f}"
except ValueError:
self._state = storage["used_percent"]
except AmcrestError as error:
log_update_error(_LOGGER, "update", self.name, "sensor", error)
async def async_on_demand_update(self):
"""Update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Subscribe to update signal."""
self._unsub_dispatcher = async_dispatcher_connect(
self.hass,
service_signal(SERVICE_UPDATE, self._signal_name),
self.async_on_demand_update,
)
async def async_will_remove_from_hass(self):
"""Disconnect from update signal."""
self._unsub_dispatcher()
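# Example (sketch): this platform is set up through discovery from the main
# amcrest integration, with discovery_info assumed to look roughly like
#   {CONF_NAME: "Front Door", CONF_SENSORS: ["sdcard", "ptz_preset"]}
# which, per the SENSORS table above, creates a "Front Door SD Used" sensor
# reporting a percentage and a "Front Door PTZ Preset" sensor.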
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = None
def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
all_tensors: Boolean indicating whether to print all tensors.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
if all_tensors:
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print("tensor_name: ", key)
print(reader.get_tensor(key))
elif not tensor_name:
print(reader.debug_string().decode("utf-8"))
else:
print("tensor_name: ", tensor_name)
print(reader.get_tensor(tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def parse_numpy_printoption(kv_str):
"""Sets a single numpy printoption from a string of the form 'x=y'.
See documentation on numpy.set_printoptions() for details about what values
x and y can take. x can be any option listed there other than 'formatter'.
Args:
kv_str: A string of the form 'x=y', such as 'threshold=100000'
Raises:
argparse.ArgumentTypeError: If the string couldn't be used to set any
      numpy printoption.
"""
k_v_str = kv_str.split("=", 1)
if len(k_v_str) != 2 or not k_v_str[0]:
raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str)
k, v_str = k_v_str
printoptions = np.get_printoptions()
if k not in printoptions:
raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k)
v_type = type(printoptions[k])
if v_type is type(None):
raise argparse.ArgumentTypeError(
"Setting '%s' from the command line is not supported." % k)
try:
v = (v_type(v_str) if v_type is not bool
else flags.BooleanParser().Parse(v_str))
except ValueError as e:
    raise argparse.ArgumentTypeError(str(e))
np.set_printoptions(**{k: v})
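# Examples (sketch) for parse_numpy_printoption(), derived from the logic above:
#   parse_numpy_printoption("threshold=100000")  # calls np.set_printoptions(threshold=100000)
#   parse_numpy_printoption("precision=3")       # calls np.set_printoptions(precision=3)
#   parse_numpy_printoption("formatter=x")       # raises: default is None, so unsupported
#   parse_numpy_printoption("threshold")         # raises: not in the form k=v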
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=checkpoint_file_name "
"[--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name,
FLAGS.all_tensors)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--file_name", type=str, default="", help="Checkpoint filename. "
"Note, if using Checkpoint V2 format, file_name is the "
"shared prefix between all files in the checkpoint.")
parser.add_argument(
"--tensor_name",
type=str,
default="",
help="Name of the tensor to inspect")
parser.add_argument(
"--all_tensors",
nargs="?",
const=True,
type="bool",
default=False,
help="If True, print the values of all the tensors.")
parser.add_argument(
"--printoptions",
nargs="*",
type=parse_numpy_printoption,
help="Argument for numpy.set_printoptions(), in the form 'k=v'.")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
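# Example (sketch) command lines for this tool; the checkpoint path and tensor
# name below are placeholders:
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt
#       # no --tensor_name: prints the tensor names and shapes (debug string)
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt --tensor_name=conv1/weights
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt --all_tensors \
#       --printoptions threshold=10 precision=3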
|
import re
from typing import Dict, Union
import sentry_sdk
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
EVENT_HOMEASSISTANT_STARTED,
__version__ as current_version,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.loader import Integration, async_get_custom_components
from .const import (
CONF_DSN,
CONF_ENVIRONMENT,
CONF_EVENT_CUSTOM_COMPONENTS,
CONF_EVENT_HANDLED,
CONF_EVENT_THIRD_PARTY_PACKAGES,
CONF_LOGGING_EVENT_LEVEL,
CONF_LOGGING_LEVEL,
CONF_TRACING,
CONF_TRACING_SAMPLE_RATE,
DEFAULT_LOGGING_EVENT_LEVEL,
DEFAULT_LOGGING_LEVEL,
DEFAULT_TRACING_SAMPLE_RATE,
DOMAIN,
ENTITY_COMPONENTS,
)
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.117")
LOGGER_INFO_REGEX = re.compile(r"^(\w+)\.?(\w+)?\.?(\w+)?\.?(\w+)?(?:\..*)?$")
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the Sentry component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Sentry from a config entry."""
# Migrate environment from config entry data to config entry options
if (
CONF_ENVIRONMENT not in entry.options
and CONF_ENVIRONMENT in entry.data
and entry.data[CONF_ENVIRONMENT]
):
options = {**entry.options, CONF_ENVIRONMENT: entry.data[CONF_ENVIRONMENT]}
data = entry.data.copy()
data.pop(CONF_ENVIRONMENT)
hass.config_entries.async_update_entry(entry, data=data, options=options)
# https://docs.sentry.io/platforms/python/logging/
sentry_logging = LoggingIntegration(
level=entry.options.get(CONF_LOGGING_LEVEL, DEFAULT_LOGGING_LEVEL),
event_level=entry.options.get(
CONF_LOGGING_EVENT_LEVEL, DEFAULT_LOGGING_EVENT_LEVEL
),
)
# Additional/extra data collection
channel = get_channel(current_version)
huuid = await hass.helpers.instance_id.async_get()
system_info = await hass.helpers.system_info.async_get_system_info()
custom_components = await async_get_custom_components(hass)
tracing = {}
if entry.options.get(CONF_TRACING):
tracing = {
"traces_sample_rate": entry.options.get(
CONF_TRACING_SAMPLE_RATE, DEFAULT_TRACING_SAMPLE_RATE
),
}
sentry_sdk.init(
dsn=entry.data[CONF_DSN],
environment=entry.options.get(CONF_ENVIRONMENT),
integrations=[sentry_logging, AioHttpIntegration(), SqlalchemyIntegration()],
release=current_version,
before_send=lambda event, hint: process_before_send(
hass,
entry.options,
channel,
huuid,
system_info,
custom_components,
event,
hint,
),
**tracing,
)
async def update_system_info(now):
nonlocal system_info
system_info = await hass.helpers.system_info.async_get_system_info()
# Update system info every hour
hass.helpers.event.async_call_later(3600, update_system_info)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, update_system_info)
return True
def get_channel(version: str) -> str:
"""Find channel based on version number."""
if "dev0" in version:
return "dev"
if "dev" in version:
return "nightly"
if "b" in version:
return "beta"
return "stable"
def process_before_send(
hass: HomeAssistant,
options,
channel: str,
huuid: str,
system_info: Dict[str, Union[bool, str]],
custom_components: Dict[str, Integration],
event,
hint,
):
"""Process a Sentry event before sending it to Sentry."""
# Filter out handled events by default
if (
"tags" in event
and event.tags.get("handled", "no") == "yes"
and not options.get(CONF_EVENT_HANDLED)
):
return None
# Additional tags to add to the event
additional_tags = {
"channel": channel,
"installation_type": system_info["installation_type"],
"uuid": huuid,
}
    # Find out all integrations in use; filter out "auth" because it triggers
    # Sentry's security rules, which would hide all data.
integrations = [
integration
for integration in hass.config.components
if integration != "auth" and "." not in integration
]
# Add additional tags based on what caused the event.
platform = entity_platform.current_platform.get()
if platform is not None:
# This event happened in a platform
additional_tags["custom_component"] = "no"
additional_tags["integration"] = platform.platform_name
additional_tags["platform"] = platform.domain
elif "logger" in event:
# Logger event, try to get integration information from the logger name.
matches = LOGGER_INFO_REGEX.findall(event["logger"])
if matches:
group1, group2, group3, group4 = matches[0]
# Handle the "homeassistant." package differently
if group1 == "homeassistant" and group2 and group3:
if group2 == "components":
# This logger is from a component
additional_tags["custom_component"] = "no"
additional_tags["integration"] = group3
if group4 and group4 in ENTITY_COMPONENTS:
additional_tags["platform"] = group4
else:
# Not a component, could be helper, or something else.
additional_tags[group2] = group3
else:
# Not the "homeassistant" package, this third-party
if not options.get(CONF_EVENT_THIRD_PARTY_PACKAGES):
return None
additional_tags["package"] = group1
# If this event is caused by an integration, add a tag if this
# integration is custom or not.
if (
"integration" in additional_tags
and additional_tags["integration"] in custom_components
):
if not options.get(CONF_EVENT_CUSTOM_COMPONENTS):
return None
additional_tags["custom_component"] = "yes"
# Update event with the additional tags
event.setdefault("tags", {}).update(additional_tags)
# Set user context to the installation UUID
event.setdefault("user", {}).update({"id": huuid})
# Update event data with Home Assistant Context
event.setdefault("contexts", {}).update(
{
"Home Assistant": {
"channel": channel,
"custom_components": "\n".join(sorted(custom_components)),
"integrations": "\n".join(sorted(integrations)),
**system_info,
},
}
)
return event
|
import json
import posixpath
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import google_cloud_sdk
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'mlperf'
BENCHMARK_CONFIG = """
mlperf:
description: Runs MLPerf Benchmark.
vm_groups:
default:
os_type: ubuntu1604
disk_spec: *default_500_gb
vm_spec:
GCP:
machine_type: n1-highmem-96
zone: us-west1-b
boot_disk_size: 105
boot_disk_type: pd-ssd
min_cpu_platform: skylake
AWS:
machine_type: p3dn.24xlarge
zone: us-east-1a
boot_disk_size: 105
image: ami-0a4a0d42e3b855a2c
Azure:
machine_type: Standard_ND40s_v2
zone: eastus
boot_disk_size: 105
"""
flags.DEFINE_enum('mlperf_benchmark', 'resnet',
['resnet', 'transformer', 'mask', 'gnmt', 'ssd', 'minigo'],
'MLPerf benchmark test to run.')
flags.DEFINE_string(
'mlperf_gcs_resnet_checkpoint',
'gs://cloud-tpu-artifacts/resnet/resnet-nhwc-2018-02-07/model.ckpt-112603',
'A ResNet backbone trained on the ImageNet dataset.')
flags.DEFINE_string(
'mlperf_transformer_decode_dir', '', 'Transformer decode directory')
flags.DEFINE_string('wmt_data_dir',
'gs://pkb-sgpyc-us-west1/mlperf_v0.6_nv_transformer/',
'Directory where the wmt dataset is stored')
flags.DEFINE_string('coco_data_dir', 'gs://pkb-sgpyc-us-west1/coco2017/',
'Directory where the coco dataset is stored')
flags.DEFINE_string('gnmt_data_dir',
'gs://pkb-sgpyc-us-west1/mlperf_v0.6_nv_gnmt/',
'Directory where the nv v0.6 WMT dataset is stored')
flags.DEFINE_string('minigo_model_dir', None,
'Directory on GCS to copy minigo source data from. Files '
'will be copied from subdirectories of src_dir '
'corresponding to the board size.')
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
return config
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.imagenet_data_dir = FLAGS.imagenet_data_dir
benchmark_spec.benchmark = FLAGS.mlperf_benchmark
benchmark_spec.wmt_data_dir = FLAGS.wmt_data_dir
benchmark_spec.coco_data_dir = FLAGS.coco_data_dir
benchmark_spec.gnmt_data_dir = FLAGS.gnmt_data_dir
benchmark_spec.gcp_service_account = FLAGS.gcp_service_account
def _DownloadData(data_dir, data_path, vm):
"""Download remote benchmark data to local.
Args:
data_dir: remote benchmark location
data_path: local benchmark location
vm: vm to download the data
"""
vm.Install('google_cloud_sdk')
vm.RemoteCommand('if [ ! -d \"{data_path}\" ]; then '
' sudo mkdir -p {data_path} && '
' sudo chmod a+w {data_path} && '
' {gsutil_path} -m cp -r {data_dir}/* {data_path} ;'
'fi'.format(
data_dir=data_dir,
gsutil_path=google_cloud_sdk.GSUTIL_PATH,
data_path=data_path))
def Prepare(benchmark_spec, vm=None):
"""Install and set up MLPerf on the target vm.
Args:
benchmark_spec: The benchmark specification
vm: The VM to work on
Raises:
    errors.Config.InvalidValue: if both GPUs and TPUs appear in the config
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
if vm is None:
vm = benchmark_spec.vms[0]
if (bool(benchmark_spec.tpus) and nvidia_driver.CheckNvidiaGpuExists(vm)):
raise errors.Config.InvalidValue(
        'Invalid configuration. GPUs and TPUs cannot both be present in the config.'
)
vm.RemoteCommand(
'if [ ! -d "$HOME/training_results_v0.6" ]; then '
' git clone https://github.com/mlperf/training_results_v0.6.git ; '
'fi',
should_log=True)
vm.InstallPackages('python3-pip')
if benchmark_spec.tpus:
if vm == benchmark_spec.vms[0]:
storage_service = gcs.GoogleCloudStorageService()
benchmark_spec.storage_service = storage_service
bucket = 'pkb{}'.format(FLAGS.run_uri)
benchmark_spec.bucket = bucket
benchmark_spec.model_dir = 'gs://{}'.format(bucket)
location = benchmark_spec.tpu_groups['train'].GetZone()
storage_service.PrepareService(util.GetRegionFromZone(location))
storage_service.MakeBucket(bucket)
storage_service.ChmodBucket(benchmark_spec.gcp_service_account, 'W',
bucket)
      # For MLPerf v0.6, the benchmark code differs between hardware types.
if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):
run_path = (
'$HOME/training_results_v0.6/Google/benchmarks/{model}/tpu-{tpus}'
.format(
model=benchmark_spec.benchmark,
tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))
else:
raise ValueError(
'MLPerf configurations do not support the hardware in PKB. PKB may '
'need to be updated if this is a new TPU type.')
if 'mask' in benchmark_spec.benchmark:
model = 'mask_rcnn'
elif 'gnmt' in benchmark_spec.benchmark:
model = 'nmt'
else:
model = benchmark_spec.benchmark
code_path = (
'$HOME/training_results_v0.6/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'
.format(
model=benchmark_spec.benchmark,
tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))
vm.RemoteCommand('pip3 install --upgrade pyyaml==3.13 ')
vm.RemoteCommand('pip3 install cloud-tpu-profiler==1.12')
if ('mask' in benchmark_spec.benchmark or
'ssd' in benchmark_spec.benchmark):
# TODO(user): coco whl package for python 3.5
vm.RemoteCommand(
'cd /tmp && '
'wget https://storage.cloud.google.com/mlperf_artifcats/v0.6_training/coco-1.1-cp36-cp36m-linux_x86_64.whl'
)
run_script = posixpath.join(run_path, 'setup.sh')
vm_util.ReplaceText(vm, '--progress-bar off', ' ', run_script)
vm_util.ReplaceText(vm, 'pip ', 'pip3 ', run_script)
vm.RemoteCommand('chmod 755 {script} && {script}'.format(script=run_script))
if 'mask' not in benchmark_spec.benchmark:
vm.RemoteCommand(
'pip3 uninstall -y tf-estimator-nightly && '
'pip3 install tf-estimator-nightly==1.14.0.dev2019051801')
if 'resnet' in benchmark_spec.benchmark:
data_dir = benchmark_spec.imagenet_data_dir
elif 'transformer' in benchmark_spec.benchmark:
data_dir = benchmark_spec.wmt_data_dir
elif 'mask' in benchmark_spec.benchmark:
data_dir = benchmark_spec.coco_data_dir
elif 'gnmt' in benchmark_spec.benchmark:
data_dir = benchmark_spec.gnmt_data_dir
elif 'ssd' in benchmark_spec.benchmark:
data_dir = benchmark_spec.coco_data_dir
else:
raise ValueError('Unknown operation, cannot find {} in benchmark'.format(
benchmark_spec.benchmark))
run_script = posixpath.join(run_path, 'run_and_time.sh')
data_dir = data_dir.replace('/', r'\/')
checkpoint = FLAGS.mlperf_gcs_resnet_checkpoint.replace('/', r'\/')
decode_dir = FLAGS.mlperf_transformer_decode_dir.replace('/', r'\/')
tpu = benchmark_spec.tpu_groups['train'].GetName()
vm_util.ReplaceText(vm, '--model_dir=.*', r'--model_dir=gs:\/\/{} \\\\'
.format(bucket), run_script)
vm_util.ReplaceText(vm, '--data_dir=.*',
r'--data_dir={} \\\\'.format(data_dir), run_script)
vm_util.ReplaceText(vm, '--training_file_pattern=.*',
r'--training_file_pattern={}\/train-* \\\\'
.format(data_dir), run_script)
vm_util.ReplaceText(vm, '--validation_file_pattern=.*',
r'--validation_file_pattern={}\/val-* \\\\'
.format(data_dir), run_script)
vm_util.ReplaceText(vm, '--val_json_file=.*',
r'--val_json_file={}\/instances_val2017.json \\\\'
.format(data_dir), run_script)
vm_util.ReplaceText(vm, '--resnet_checkpoint=.*',
r'--resnet_checkpoint={} \\\\'.format(checkpoint),
run_script)
vm_util.ReplaceText(vm, '--decode_from_file=.*',
r'--decode_from_file={}\/wmt14-en-de.src \\\\'
.format(decode_dir), run_script)
vm_util.ReplaceText(vm, '--decode_reference=.*',
r'--decode_reference={}\/wmt14-en-de.ref \\\\'
.format(decode_dir), run_script)
vm_util.ReplaceText(vm, '--decode_to_file=.*',
r'--decode_to_file={}\/decode.transformer_mlperf_tpu.'
r'translate_ende_wmt32k_packed.2x2_log_1018_2 \\\\'
.format(bucket), run_script)
vm_util.ReplaceText(vm, '--tpu=.*', r'--tpu={} \\\\'.format(tpu),
run_script)
vm_util.ReplaceText(vm, '--output_dir=.*', r'--output_dir=gs:\/\/{} \\\\'
.format(bucket), run_script)
vm_util.ReplaceText(vm, '--cloud_tpu_name=.*',
r'--cloud_tpu_name={} \\\\'.format(tpu), run_script)
vm_util.ReplaceText(vm, '--out_dir=.*',
r'--out_dir=gs:\/\/{} \\\\'.format(bucket), run_script)
vm_util.ReplaceText(vm, '--tpu_name=.*', r'--tpu_name={} \\\\'.format(tpu),
run_script)
vm.RemoteCommand('chmod 755 {}'.format(run_script))
if 'gnmt' in benchmark_spec.benchmark:
run_script = posixpath.join(code_path, model, 'metric.py')
vm_util.ReplaceText(vm, ' sacrebleu -t', ' python3 -m sacrebleu -t',
run_script)
else:
benchmark_spec.model_dir = '/tmp'
has_gpu = nvidia_driver.CheckNvidiaGpuExists(vm)
if has_gpu:
vm.Install('cuda_toolkit')
vm.Install('nvidia_docker')
vm.RemoteCommand('if [ ! -d "/data" ]; then sudo ln -s /scratch /data; fi')
if 'resnet' in benchmark_spec.benchmark:
vm.RemoteCommand(
'cd training_results_v0.6/NVIDIA/benchmarks/resnet/implementations/mxnet &&'
' sudo docker build --pull --network=host . -t mlperf-nvidia:image_classification',
should_log=True)
_DownloadData(benchmark_spec.imagenet_data_dir,
posixpath.join('/data', 'imagenet'), vm)
if 'transformer' in benchmark_spec.benchmark:
vm.RemoteCommand(
'cd training_results_v0.6/NVIDIA/benchmarks/transformer/implementations/pytorch &&'
' sudo docker build --pull --network=host . -t mlperf-nvidia:translation',
should_log=True)
_DownloadData(benchmark_spec.wmt_data_dir, posixpath.join('/data', 'wmt'),
vm)
if 'minigo' in benchmark_spec.benchmark:
build_path = 'training_results_v0.6/NVIDIA/benchmarks/minigo/implementations/tensorflow'
run_script = posixpath.join(build_path, 'run_and_time.sh')
vm_util.ReplaceText(vm, 'get_data.py', 'get_data.py --src_dir={}'.format(
FLAGS.minigo_model_dir.replace('/', r'\/')), run_script)
vm.RemoteCommand('cd {} && sudo docker build --pull --network=host -t '
'mlperf-nvidia:minigo .'.format(build_path),
should_log=True)
if 'mask' in benchmark_spec.benchmark:
vm.RemoteCommand(
'cd training_results_v0.6/NVIDIA/benchmarks/maskrcnn/implementations/pytorch && '
'sudo docker build --pull --network=host -t mlperf-nvidia:object_detection . ',
should_log=True)
_DownloadData(benchmark_spec.coco_data_dir,
posixpath.join('/data', 'coco2017'), vm)
if 'gnmt' in benchmark_spec.benchmark:
vm.RemoteCommand(
'cd training_results_v0.6/NVIDIA/benchmarks/gnmt/implementations/pytorch && '
'sudo docker build --pull --network=host -t mlperf-nvidia:rnn_translator . ',
should_log=True)
_DownloadData(benchmark_spec.gnmt_data_dir,
posixpath.join('/data', 'gnmt'), vm)
if 'ssd' in benchmark_spec.benchmark:
vm.RemoteCommand(
'cd training_results_v0.6/NVIDIA/benchmarks/ssd/implementations/pytorch && '
'sudo docker build --pull --network=host -t mlperf-nvidia:single_stage_detector . ',
should_log=True)
_DownloadData(benchmark_spec.coco_data_dir,
posixpath.join('/data', 'coco2017'), vm)
def _CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = {
'use_tpu': bool(benchmark_spec.tpus),
'model_dir': benchmark_spec.model_dir,
'model': benchmark_spec.benchmark,
'version': 'v0.6.0',
}
if benchmark_spec.tpus:
metadata.update({
'train_tpu_num_shards':
benchmark_spec.tpu_groups['train'].GetNumShards(),
'train_tpu_accelerator_type':
benchmark_spec.tpu_groups['train'].GetAcceleratorType()
})
return metadata
def MakeSamplesFromOutput(metadata, output, use_tpu=False, model='resnet'):
"""Create samples containing metrics.
Args:
    metadata: dict containing all the metadata to report.
output: string, command output
use_tpu: bool, whether tpu is in use
model: string, model name
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mlperf_benchmark_test.py
Returns:
Samples containing training metrics.
"""
samples = []
results = regex_util.ExtractAllMatches(
r':::MLL (\d+\.\d+) eval_accuracy: {(.*)}', output)
start = None
for wall_time, result in results:
wall_time = float(wall_time)
if not start:
start = wall_time
metadata_copy = metadata.copy()
epoch = regex_util.ExtractExactlyOneMatch(r'"epoch_num": (\d+)', result)
if ('transformer' in model and (not use_tpu)):
value = float(regex_util.ExtractExactlyOneMatch(r'"value": "(\d+\.\d+)"',
result))
elif 'mask' in model:
mask_value, mask_metadata = regex_util.ExtractExactlyOneMatch(
r'^"value": (.*?), "metadata": (.*)$', result)
metadata_copy.update(json.loads(mask_value)['accuracy'])
metadata_copy.update(json.loads(mask_metadata))
value = float(json.loads(mask_value)['accuracy']['BBOX']) * 100
else:
value = float(regex_util.ExtractExactlyOneMatch(r'"value": (\d+\.\d+)',
result))
if 'ssd' in model or 'minigo' in model or 'resnet' in model:
value *= 100
metadata_copy['times'] = wall_time - start
metadata_copy['epoch'] = int(epoch)
samples.append(
sample.Sample('Eval Accuracy', value, '%', metadata_copy))
if 'resnet' in model:
results = re.findall(r'Speed: (\S+) samples/sec', output)
results.extend(re.findall(r'(\S+) examples/sec', output))
elif 'transformer' in model:
results = re.findall(r'wps=(\S+),', output)
elif 'gnmt' in model:
results = re.findall(r'Tok/s (\S+)', output)
elif 'ssd' in model:
results = re.findall(r'avg. samples / sec: (\S+)', output)
for speed in results:
samples.append(sample.Sample('speed', float(speed), 'samples/sec',
metadata))
if not use_tpu:
if 'minigo' in model:
times = regex_util.ExtractAllMatches(r'RESULT,.*,(\d+),.*,.*', output)
else:
times = regex_util.ExtractAllMatches(r'RESULT,.*,.*,(\d+),.*,.*', output)
samples.append(sample.Sample('Time', int(times[0]), 'seconds', metadata))
return samples
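# Example (sketch): an illustrative (simplified, not verbatim MLPerf) output
# line that the regexes in MakeSamplesFromOutput() above would pick up:
#   :::MLL 1558767599.999 eval_accuracy: {"value": 0.7532, "epoch_num": 61}
# For model='resnet' this yields an 'Eval Accuracy' sample of 75.32 %
# (value * 100) with metadata 'epoch' == 61, and a line such as
#   Speed: 1024.5 samples/sec
# additionally yields a 'speed' sample of 1024.5 samples/sec.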
def Run(benchmark_spec):
"""Run MLPerf on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if benchmark_spec.tpus:
    # For MLPerf v0.6, the benchmark code differs between hardware types.
if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or
benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):
run_path = (
'$HOME/training_results_v0.6/Google/benchmarks/{model}/tpu-{tpus}'
.format(
model=benchmark_spec.benchmark,
tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))
code_path = (
'$HOME/training_results_v0.6/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'
.format(
model=benchmark_spec.benchmark,
tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))
if 'mask' in benchmark_spec.benchmark:
model = 'mask_rcnn'
elif 'gnmt' in benchmark_spec.benchmark:
model = 'nmt'
else:
model = benchmark_spec.benchmark
mlperf_benchmark_cmd = ('cd {code_path} && '
'export PYTHONPATH=$(pwd):$(pwd)/{model} && '
'cd {model} && '
'{run_path}/run_and_time.sh'.format(
code_path=code_path,
model=model,
run_path=run_path))
if 'ssd' in benchmark_spec.benchmark:
mlperf_benchmark_cmd = (
'export '
'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'
' && {cmd}'.format(
checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,
cmd=mlperf_benchmark_cmd))
else:
raise ValueError(
'MLPerf configurations do not support the hardware in PKB. PKB may '
'need to be updated if this is a new TPU type.')
else:
benchmark_path = '$HOME/training_results_v0.6/NVIDIA/benchmarks'
common_env = 'DGXSYSTEM=DGX1 NEXP=1'
if 'resnet' in benchmark_spec.benchmark:
run_path = posixpath.join(benchmark_path, 'resnet/implementations/mxnet')
env = 'DATADIR=/data/imagenet LOGDIR=/tmp/resnet PULL=0'
elif 'transformer' in benchmark_spec.benchmark:
run_path = posixpath.join(benchmark_path,
'transformer/implementations/pytorch')
env = 'DATADIR=/data/wmt/utf8 LOGDIR=/tmp/transformer PULL=0'
elif 'minigo' in benchmark_spec.benchmark:
run_path = posixpath.join(benchmark_path,
'minigo/implementations/tensorflow')
env = 'LOGDIR=/tmp/minigo CONT=mlperf-nvidia:minigo'
elif 'mask' in benchmark_spec.benchmark:
run_path = posixpath.join(benchmark_path,
'maskrcnn/implementations/pytorch')
env = 'LOGDIR=/tmp/mask DATADIR=/data PULL=0'
elif 'gnmt' in benchmark_spec.benchmark:
run_path = posixpath.join(benchmark_path, 'gnmt/implementations/pytorch')
env = 'LOGDIR=/tmp/gnmt DATADIR=/data/gnmt PULL=0'
elif 'ssd' in benchmark_spec.benchmark:
run_path = posixpath.join(benchmark_path, 'ssd/implementations/pytorch')
env = 'LOGDIR=/tmp/ssd DATADIR=/data PULL=0'
run_script = posixpath.join(run_path, 'run.sub')
vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)
mlperf_benchmark_cmd = (
'cd {run_path} && chmod 755 run.sub && sudo {common_env} {env} '
'./run.sub'.format(run_path=run_path, common_env=common_env, env=env))
if nvidia_driver.CheckNvidiaGpuExists(vm):
mlperf_benchmark_cmd = '{env} {cmd}'.format(
env=tensorflow.GetEnvironmentVars(vm), cmd=mlperf_benchmark_cmd)
samples = []
metadata = _CreateMetadataDict(benchmark_spec)
stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd, should_log=True)
samples.extend(
MakeSamplesFromOutput(
metadata,
stdout,
use_tpu=bool(benchmark_spec.tpus),
model=benchmark_spec.benchmark))
return samples
def Cleanup(benchmark_spec):
"""Cleanup MLPerf on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if benchmark_spec.tpus:
benchmark_spec.storage_service.DeleteBucket(benchmark_spec.bucket)
|
from random import randrange
from time import sleep
from urllib.parse import parse_qs, urlparse
from httpobs.conf import (BROKER_URL,
SCANNER_ALLOW_KICKSTART,
SCANNER_ALLOW_KICKSTART_NUM_ABORTED,
SCANNER_BROKER_RECONNECTION_SLEEP_TIME,
SCANNER_CYCLE_SLEEP_TIME,
SCANNER_DATABASE_RECONNECTION_SLEEP_TIME,
SCANNER_MAINTENANCE_CYCLE_FREQUENCY,
SCANNER_MATERIALIZED_VIEW_REFRESH_FREQUENCY,
SCANNER_MAX_CPU_UTILIZATION,
SCANNER_MAX_LOAD)
from httpobs.database import (periodic_maintenance,
refresh_materialized_views,
update_scans_dequeue_scans)
from httpobs.scanner.tasks import scan
import datetime
import psutil
import redis
import subprocess
import sys
def main():
# Start each scanner at a random point in the range to spread out database maintenance
dequeue_loop_count = randrange(0, SCANNER_MAINTENANCE_CYCLE_FREQUENCY)
materialized_view_loop_count = randrange(0, SCANNER_MATERIALIZED_VIEW_REFRESH_FREQUENCY)
# Parse the BROKER_URL
broker_url = urlparse(BROKER_URL)
    if broker_url.scheme.lower() not in ('redis', 'redis+socket'):  # Currently the de-queuer only supports redis
print('Sorry, the scanner currently only supports redis.', file=sys.stderr)
sys.exit(1)
# Get the current CPU utilization and wait a second to begin the loop for the next reading
psutil.cpu_percent()
sleep(1)
while True:
try:
# TODO: Document this madness and magic numbers, make it configurable
# If max cpu is 90 and current CPU is 50, that gives us a headroom of 8 scans
headroom = int((SCANNER_MAX_CPU_UTILIZATION - psutil.cpu_percent()) / 5)
dequeue_quantity = min(headroom, SCANNER_MAX_LOAD)
if headroom <= 0:
# If the cycle sleep time is .5, sleep 2 seconds at a minimum, 10 seconds at a maximum
sleep_time = min(max(abs(headroom), SCANNER_CYCLE_SLEEP_TIME * 4), 10)
print('[{time}] WARNING: Load too high. Sleeping for {num} second(s).'.format(
time=str(datetime.datetime.now()).split('.')[0],
num=sleep_time),
file=sys.stderr)
sleep(sleep_time)
continue
except:
            # I've noticed that on laptops Docker has a tendency to kill the scanner when the laptop sleeps;
            # this is designed to catch that exception
sleep(1)
continue
# Every so many scans, let's opportunistically clear out any PENDING scans that are older than 1800 seconds
# Also update the grade_distribution table
# If it fails, we don't care. Of course, nobody reads the comments, so I should say that *I* don't care.
try:
if dequeue_loop_count % SCANNER_MAINTENANCE_CYCLE_FREQUENCY == 0:
print('[{time}] INFO: Performing periodic maintenance.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
dequeue_loop_count = 0
num = periodic_maintenance()
if num > 0:
print('[{time}] INFO: Cleared {num} broken scan(s).'.format(
time=str(datetime.datetime.now()).split('.')[0],
num=num),
file=sys.stderr)
# Forcibly restart if things are going real bad, sleep for a bit to avoid flagging
if num > SCANNER_ALLOW_KICKSTART_NUM_ABORTED and SCANNER_ALLOW_KICKSTART:
sleep(10)
try:
print('[{time}] ERROR: Celery appears to be hung. Attempting to kickstart the scanners.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
subprocess.call(['pkill', '-u', 'httpobs'])
except FileNotFoundError:
print('[{time}] ERROR: Tried to kickstart, but no pkill found.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
except:
print('[{time}] ERROR: Tried to kickstart, but failed for unknown reasons.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
except:
pass
finally:
dequeue_loop_count += 1
num = 0
# Every so often we need to refresh the materialized views that the statistics depend on
try:
if materialized_view_loop_count % SCANNER_MATERIALIZED_VIEW_REFRESH_FREQUENCY == 0:
print('[{time}] INFO: Refreshing materialized views.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
materialized_view_loop_count = 0
refresh_materialized_views()
print('[{time}] INFO: Materialized views refreshed.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
except:
pass
finally:
materialized_view_loop_count += 1
# Verify that the broker is still up; if it's down, let's sleep and try again later
try:
if broker_url.scheme.lower() == 'redis':
conn = redis.Connection(host=broker_url.hostname,
port=broker_url.port or 6379,
db=int(broker_url.path[1:] if len(broker_url.path) > 0 else 0),
password=broker_url.password)
else:
conn = redis.UnixDomainSocketConnection(path=broker_url.path,
db=int(parse_qs(broker_url.query).get(
'virtual_host', ['0'])
[0]))
conn.connect()
conn.can_read()
conn.disconnect()
del conn
except:
            print('[{time}] ERROR: Unable to connect to redis. Sleeping for {num} seconds.'.format(
time=str(datetime.datetime.now()).split('.')[0],
num=SCANNER_BROKER_RECONNECTION_SLEEP_TIME),
file=sys.stderr
)
sleep(SCANNER_BROKER_RECONNECTION_SLEEP_TIME)
continue
# Get a list of sites that are pending
try:
sites_to_scan = update_scans_dequeue_scans(dequeue_quantity)
except IOError:
print('[{time}] ERROR: Unable to retrieve lists of sites to scan. Sleeping for {num} seconds.'.format(
time=str(datetime.datetime.now()).split('.')[0],
num=SCANNER_DATABASE_RECONNECTION_SLEEP_TIME),
file=sys.stderr
)
sleep(SCANNER_DATABASE_RECONNECTION_SLEEP_TIME)
continue
try:
if sites_to_scan:
print('[{time}] INFO: Dequeuing {num} site(s): {sites}.'.format(
time=str(datetime.datetime.now()).split('.')[0],
num=len(sites_to_scan),
sites=', '.join([site[0] for site in sites_to_scan])),
file=sys.stderr
)
for site in sites_to_scan:
scan.delay(*site)
# Always sleep at least some amount of time so that CPU utilization measurements can track
sleep(SCANNER_CYCLE_SLEEP_TIME / 2)
else: # If the queue was empty, lets sleep a little bit
sleep(SCANNER_CYCLE_SLEEP_TIME)
except KeyboardInterrupt:
print('Exiting scanner backend')
sys.exit(1)
except: # this shouldn't trigger, but we don't want a scan breakage to kill the scanner
print('[{time}] ERROR: Unknown celery error.'.format(
time=str(datetime.datetime.now()).split('.')[0]),
file=sys.stderr)
if __name__ == '__main__':
main()
|
import radicale.rights.authenticated as authenticated
from radicale import pathutils
class Rights(authenticated.Rights):
def authorization(self, user, path):
if self._verify_user and not user:
return ""
sane_path = pathutils.strip_path(path)
if not sane_path:
return "R"
if self._verify_user:
owned = user == sane_path.split("/", maxsplit=1)[0]
else:
owned = True
if "/" not in sane_path:
return "RW" if owned else "R"
if sane_path.count("/") == 1:
return "rw" if owned else "r"
return ""
|
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import STATE_OFF
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from .const import HVAC_MODE_OFF, HVAC_MODES
@callback
def async_describe_on_off_states(
hass: HomeAssistantType, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states(
set(HVAC_MODES) - {HVAC_MODE_OFF},
STATE_OFF,
)
|
from django.db.models import PositiveIntegerField
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from shop.models import order
class OrderItem(order.BaseOrderItem):
"""Default materialized model for OrderItem"""
quantity = PositiveIntegerField(_("Ordered quantity"))
class Meta:
verbose_name = pgettext_lazy('order_models', "Ordered Item")
verbose_name_plural = pgettext_lazy('order_models', "Ordered Items")
|
import re
import diamond.collector
try:
import beanstalkc
except ImportError:
beanstalkc = None
class BeanstalkdCollector(diamond.collector.Collector):
SKIP_LIST = ['version', 'id', 'hostname']
COUNTERS_REGEX = re.compile(
r'^(cmd-.*|job-timeouts|total-jobs|total-connections)$')
def get_default_config_help(self):
config_help = super(BeanstalkdCollector,
self).get_default_config_help()
config_help.update({
'host': 'Hostname',
'port': 'Port',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(BeanstalkdCollector, self).get_default_config()
config.update({
'path': 'beanstalkd',
'host': 'localhost',
'port': 11300,
})
return config
def _get_stats(self):
stats = {}
try:
connection = beanstalkc.Connection(self.config['host'],
int(self.config['port']))
except beanstalkc.BeanstalkcException as e:
self.log.error("Couldn't connect to beanstalkd: %s", e)
return {}
stats['instance'] = connection.stats()
stats['tubes'] = []
for tube in connection.tubes():
tube_stats = connection.stats_tube(tube)
stats['tubes'].append(tube_stats)
return stats
def collect(self):
if beanstalkc is None:
self.log.error('Unable to import beanstalkc')
return {}
info = self._get_stats()
for stat, value in info['instance'].items():
if stat not in self.SKIP_LIST:
self.publish(stat, value,
metric_type=self.get_metric_type(stat))
for tube_stats in info['tubes']:
tube = tube_stats['name']
for stat, value in tube_stats.items():
if stat != 'name':
self.publish('tubes.%s.%s' % (tube, stat), value,
metric_type=self.get_metric_type(stat))
def get_metric_type(self, stat):
if self.COUNTERS_REGEX.match(stat):
return 'COUNTER'
return 'GAUGE'
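# Example (sketch) of a Diamond configuration stanza for this collector,
# assuming the usual diamond.conf [collectors] layout; keys mirror
# get_default_config() above and the values shown are placeholders:
#
#   [[BeanstalkdCollector]]
#   enabled = True
#   host = localhost
#   port = 11300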
|
import os
import time
from urllib.request import urlopen
# Internal imports
import vcr
def test_disk_saver_nowrite(tmpdir, httpbin):
"""
Ensure that when you close a cassette without changing it it doesn't
rewrite the file
"""
fname = str(tmpdir.join("synopsis.yaml"))
with vcr.use_cassette(fname) as cass:
urlopen(httpbin.url).read()
assert cass.play_count == 0
last_mod = os.path.getmtime(fname)
with vcr.use_cassette(fname) as cass:
urlopen(httpbin.url).read()
assert cass.play_count == 1
assert cass.dirty is False
last_mod2 = os.path.getmtime(fname)
assert last_mod == last_mod2
def test_disk_saver_write(tmpdir, httpbin):
"""
    Ensure that when you close a cassette after changing it, it does
rewrite the file
"""
fname = str(tmpdir.join("synopsis.yaml"))
with vcr.use_cassette(fname) as cass:
urlopen(httpbin.url).read()
assert cass.play_count == 0
last_mod = os.path.getmtime(fname)
# Make sure at least 1 second passes, otherwise sometimes
# the mtime doesn't change
time.sleep(1)
with vcr.use_cassette(fname, record_mode=vcr.mode.ANY) as cass:
urlopen(httpbin.url).read()
urlopen(httpbin.url + "/get").read()
assert cass.play_count == 1
assert cass.dirty
last_mod2 = os.path.getmtime(fname)
assert last_mod != last_mod2
|
import copy
import os
import os.path as op
import numpy as np
from ..constants import FIFF
from ..open import fiff_open, _fiff_get_fid, _get_next_fname
from ..meas_info import read_meas_info
from ..tree import dir_tree_find
from ..tag import read_tag, read_tag_info
from ..base import (BaseRaw, _RawShell, _check_raw_compatibility,
_check_maxshield)
from ..utils import _mult_cal_one
from ...annotations import Annotations, _read_annotations_fif
from ...event import AcqParserFIF
from ...utils import (check_fname, logger, verbose, warn, fill_doc, _file_like,
_on_missing)
@fill_doc
class Raw(BaseRaw):
"""Raw data in FIF format.
Parameters
----------
fname : str | file-like
The raw filename to load. For files that have automatically been split,
the split part will be automatically loaded. Filenames should end
with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif,
raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided,
preloading must be used.
.. versionchanged:: 0.18
Support for file-like objects.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(preload)s
%(on_split_missing)s
%(verbose)s
Attributes
----------
info : dict
:class:`Measurement info <mne.Info>`.
ch_names : list of string
List of channels' names.
n_times : int
Total number of time points in the raw file.
times : ndarray
Time vector in seconds. Starts from 0, independently of `first_samp`
value. Time interval between consecutive time samples is equal to the
inverse of the sampling frequency.
preload : bool
Indicates whether raw data are in memory.
%(verbose)s
"""
@verbose
def __init__(self, fname, allow_maxshield=False, preload=False,
on_split_missing='raise', verbose=None): # noqa: D102
raws = []
do_check_fname = not _file_like(fname)
next_fname = fname
while next_fname is not None:
raw, next_fname, buffer_size_sec = \
self._read_raw_file(next_fname, allow_maxshield,
preload, do_check_fname)
do_check_fname = False
raws.append(raw)
if next_fname is not None:
if not op.exists(next_fname):
msg = (
f'Split raw file detected but next file {next_fname} '
'does not exist. Ensure all files were transferred '
'properly and that split and original files were not '
'manually renamed on disk (split files should be '
'renamed by loading and re-saving with MNE-Python to '
'preserve proper filename linkage).')
_on_missing(on_split_missing, msg, name='on_split_missing')
break
if _file_like(fname):
# avoid serialization error when copying file-like
fname = None # noqa
_check_raw_compatibility(raws)
super(Raw, self).__init__(
copy.deepcopy(raws[0].info), False,
[r.first_samp for r in raws], [r.last_samp for r in raws],
[r.filename for r in raws], [r._raw_extras for r in raws],
raws[0].orig_format, None, buffer_size_sec=buffer_size_sec,
verbose=verbose)
# combine annotations
self.set_annotations(raws[0].annotations, emit_warning=False)
# Add annotations for in-data skips
for extra in self._raw_extras:
mask = [ent is None for ent in extra['ent']]
start = extra['bounds'][:-1][mask]
stop = extra['bounds'][1:][mask] - 1
duration = (stop - start + 1.) / self.info['sfreq']
annot = Annotations(onset=(start / self.info['sfreq']),
duration=duration,
description='BAD_ACQ_SKIP',
orig_time=self.info['meas_date'])
self._annotations += annot
if preload:
self._preload_data(preload)
else:
self.preload = False
# If using a file-like object, fix the filenames to be representative
# strings now instead of the file-like objects
self._filenames = [_get_fname_rep(fname) for fname in self._filenames]
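    # Example (sketch): instances of this class are normally created through
    # the public reader, e.g.
    #   raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
    # where the filename is a placeholder; file-like inputs require
    # preload=True, as enforced in _read_raw_file() below.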
@verbose
def _read_raw_file(self, fname, allow_maxshield, preload,
do_check_fname=True, verbose=None):
"""Read in header information from a raw file."""
logger.info('Opening raw data file %s...' % fname)
# Read in the whole file if preload is on and .fif.gz (saves time)
if not _file_like(fname):
if do_check_fname:
check_fname(fname, 'raw', (
'raw.fif', 'raw_sss.fif', 'raw_tsss.fif', 'raw.fif.gz',
'raw_sss.fif.gz', 'raw_tsss.fif.gz', '_meg.fif'))
# filename
fname = op.realpath(fname)
ext = os.path.splitext(fname)[1].lower()
whole_file = preload if '.gz' in ext else False
del ext
else:
# file-like
if not preload:
raise ValueError('preload must be used with file-like objects')
whole_file = True
fname_rep = _get_fname_rep(fname)
ff, tree, _ = fiff_open(fname, preload=whole_file)
with ff as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
annotations = _read_annotations_fif(fid, tree)
# Locate the data of interest
raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
if len(raw_node) == 0:
raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
if (len(raw_node) == 0):
raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA)
if (len(raw_node) == 0):
raise ValueError('No raw data in %s' % fname_rep)
_check_maxshield(allow_maxshield)
info['maxshield'] = True
del meas
if len(raw_node) == 1:
raw_node = raw_node[0]
# Process the directory
directory = raw_node['directory']
nent = raw_node['nent']
nchan = int(info['nchan'])
first = 0
first_samp = 0
first_skip = 0
# Get first sample tag if it is there
if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, directory[first].pos)
first_samp = int(tag.data)
first += 1
_check_entry(first, nent)
# Omit initial skip
if directory[first].kind == FIFF.FIFF_DATA_SKIP:
# This first skip can be applied only after we know the bufsize
tag = read_tag(fid, directory[first].pos)
first_skip = int(tag.data)
first += 1
_check_entry(first, nent)
raw = _RawShell()
raw.filename = fname
raw.first_samp = first_samp
raw.set_annotations(annotations)
# Go through the remaining tags in the directory
raw_extras = list()
nskip = 0
orig_format = None
for k in range(first, nent):
ent = directory[k]
                # There can be skips in the data (e.g., if the user unclicked
                # and re-clicked the button)
if ent.kind == FIFF.FIFF_DATA_SKIP:
tag = read_tag(fid, ent.pos)
nskip = int(tag.data)
elif ent.kind == FIFF.FIFF_DATA_BUFFER:
# Figure out the number of samples in this buffer
if ent.type == FIFF.FIFFT_DAU_PACK16:
nsamp = ent.size // (2 * nchan)
elif ent.type == FIFF.FIFFT_SHORT:
nsamp = ent.size // (2 * nchan)
elif ent.type == FIFF.FIFFT_FLOAT:
nsamp = ent.size // (4 * nchan)
elif ent.type == FIFF.FIFFT_DOUBLE:
nsamp = ent.size // (8 * nchan)
elif ent.type == FIFF.FIFFT_INT:
nsamp = ent.size // (4 * nchan)
elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
nsamp = ent.size // (8 * nchan)
elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
nsamp = ent.size // (16 * nchan)
else:
raise ValueError('Cannot handle data buffers of type '
'%d' % ent.type)
if orig_format is None:
if ent.type == FIFF.FIFFT_DAU_PACK16:
orig_format = 'short'
elif ent.type == FIFF.FIFFT_SHORT:
orig_format = 'short'
elif ent.type == FIFF.FIFFT_FLOAT:
orig_format = 'single'
elif ent.type == FIFF.FIFFT_DOUBLE:
orig_format = 'double'
elif ent.type == FIFF.FIFFT_INT:
orig_format = 'int'
elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
orig_format = 'single'
elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
orig_format = 'double'
# Do we have an initial skip pending?
if first_skip > 0:
first_samp += nsamp * first_skip
raw.first_samp = first_samp
first_skip = 0
# Do we have a skip pending?
if nskip > 0:
raw_extras.append(dict(
ent=None, first=first_samp, nsamp=nskip * nsamp,
last=first_samp + nskip * nsamp - 1))
first_samp += nskip * nsamp
nskip = 0
# Add a data buffer
raw_extras.append(dict(ent=ent, first=first_samp,
last=first_samp + nsamp - 1,
nsamp=nsamp))
first_samp += nsamp
next_fname = _get_next_fname(fid, fname_rep, tree)
# reformat raw_extras to be a dict of list/ndarray rather than
# list of dict (faster access)
raw_extras = {key: [r[key] for r in raw_extras]
for key in raw_extras[0]}
for key in raw_extras:
if key != 'ent': # dict or None
raw_extras[key] = np.array(raw_extras[key], int)
if not np.array_equal(raw_extras['last'][:-1],
raw_extras['first'][1:] - 1):
raise RuntimeError('FIF file appears to be broken')
bounds = np.cumsum(np.concatenate(
[raw_extras['first'][:1], raw_extras['nsamp']]))
raw_extras['bounds'] = bounds
assert len(raw_extras['bounds']) == len(raw_extras['ent']) + 1
# store the original buffer size
buffer_size_sec = np.median(raw_extras['nsamp']) / info['sfreq']
del raw_extras['first']
del raw_extras['last']
del raw_extras['nsamp']
raw.last_samp = first_samp - 1
raw.orig_format = orig_format
# Add the calibration factors
cals = np.zeros(info['nchan'])
for k in range(info['nchan']):
cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
raw._cals = cals
raw._raw_extras = raw_extras
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % (
raw.first_samp, raw.last_samp,
float(raw.first_samp) / info['sfreq'],
float(raw.last_samp) / info['sfreq']))
raw.info = info
raw.verbose = verbose
logger.info('Ready.')
return raw, next_fname, buffer_size_sec
@property
def _dtype(self):
"""Get the dtype to use to store data from disk."""
if self._dtype_ is not None:
return self._dtype_
dtype = None
for raw_extra, filename in zip(self._raw_extras, self._filenames):
for ent in raw_extra['ent']:
if ent is not None:
with _fiff_get_fid(filename) as fid:
fid.seek(ent.pos, 0)
tag = read_tag_info(fid)
if tag is not None:
if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT,
FIFF.FIFFT_COMPLEX_DOUBLE):
dtype = np.complex128
else:
dtype = np.float64
if dtype is not None:
break
if dtype is not None:
break
if dtype is None:
raise RuntimeError('bug in reading')
self._dtype_ = dtype
return dtype
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file."""
n_bad = 0
with _fiff_get_fid(self._filenames[fi]) as fid:
bounds = self._raw_extras[fi]['bounds']
ents = self._raw_extras[fi]['ent']
nchan = self._raw_extras[fi]['orig_nchan']
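            # Keep only the buffers whose sample span overlaps the requested
            # [start, stop) range; buffers outside it are skipped entirely.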
use = (stop > bounds[:-1]) & (start < bounds[1:])
offset = 0
for ei in np.where(use)[0]:
first = bounds[ei]
last = bounds[ei + 1]
nsamp = last - first
ent = ents[ei]
first_pick = max(start - first, 0)
last_pick = min(nsamp, stop - first)
picksamp = last_pick - first_pick
# only read data if it exists
if ent is not None:
one = read_tag(fid, ent.pos,
shape=(nsamp, nchan),
rlims=(first_pick, last_pick)).data
try:
one.shape = (picksamp, nchan)
except AttributeError: # one is None
n_bad += picksamp
else:
_mult_cal_one(data[:, offset:(offset + picksamp)],
one.T, idx, cals, mult)
offset += picksamp
if n_bad:
warn(f'FIF raw buffer could not be read, acquisition error '
f'likely: {n_bad} samples set to zero')
assert offset == stop - start
def fix_mag_coil_types(self):
"""Fix Elekta magnetometer coil types.
Returns
-------
raw : instance of Raw
The raw object. Operates in place.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of mne_fix_mag_coil_types is not mandatory.
"""
from ...channels import fix_mag_coil_types
fix_mag_coil_types(self.info)
return self
@property
def acqparser(self):
"""The AcqParserFIF for the measurement info.
See Also
--------
mne.AcqParserFIF
"""
if getattr(self, '_acqparser', None) is None:
self._acqparser = AcqParserFIF(self.info)
return self._acqparser
def _get_fname_rep(fname):
if not _file_like(fname):
return fname
else:
return 'File-like'
def _check_entry(first, nent):
"""Sanity check entries."""
if first >= nent:
raise IOError('Could not read data, perhaps this is a corrupt file')
@fill_doc
def read_raw_fif(fname, allow_maxshield=False, preload=False,
on_split_missing='raise', verbose=None):
"""Reader function for Raw FIF data.
Parameters
----------
fname : str | file-like
The raw filename to load. For files that have automatically been split,
the split part will be automatically loaded. Filenames should end
with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif,
raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided,
preloading must be used.
.. versionchanged:: 0.18
Support for file-like objects.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(preload)s
%(on_split_missing)s
%(verbose)s
Returns
-------
raw : instance of Raw
A Raw object containing FIF data.
Notes
-----
.. versionadded:: 0.9.0
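    Examples
    --------
    A minimal usage sketch (the file path below is hypothetical)::

        raw = read_raw_fif('sample_audvis_raw.fif', preload=True)
        data, times = raw[:, :1000]  # first 1000 samples of every channel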
"""
return Raw(fname=fname, allow_maxshield=allow_maxshield,
preload=preload, verbose=verbose,
on_split_missing=on_split_missing)
|
from homeassistant.components.cast import home_assistant_cast
from homeassistant.config import async_process_ha_core_config
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_mock_signal
async def test_service_show_view(hass, mock_zeroconf):
"""Test we don't set app id in prod."""
await async_process_ha_core_config(
hass,
{"external_url": "https://example.com"},
)
await home_assistant_cast.async_setup_ha_cast(hass, MockConfigEntry())
calls = async_mock_signal(hass, home_assistant_cast.SIGNAL_HASS_CAST_SHOW_VIEW)
await hass.services.async_call(
"cast",
"show_lovelace_view",
{"entity_id": "media_player.kitchen", "view_path": "mock_path"},
blocking=True,
)
assert len(calls) == 1
controller, entity_id, view_path, url_path = calls[0]
assert controller.hass_url == "https://example.com"
assert controller.client_id is None
# Verify user did not accidentally submit their dev app id
assert controller.supporting_app_id == "B12CE3CA"
assert entity_id == "media_player.kitchen"
assert view_path == "mock_path"
assert url_path is None
async def test_service_show_view_dashboard(hass, mock_zeroconf):
"""Test casting a specific dashboard."""
await async_process_ha_core_config(
hass,
{"external_url": "https://example.com"},
)
await home_assistant_cast.async_setup_ha_cast(hass, MockConfigEntry())
calls = async_mock_signal(hass, home_assistant_cast.SIGNAL_HASS_CAST_SHOW_VIEW)
await hass.services.async_call(
"cast",
"show_lovelace_view",
{
"entity_id": "media_player.kitchen",
"view_path": "mock_path",
"dashboard_path": "mock-dashboard",
},
blocking=True,
)
assert len(calls) == 1
_controller, entity_id, view_path, url_path = calls[0]
assert entity_id == "media_player.kitchen"
assert view_path == "mock_path"
assert url_path == "mock-dashboard"
async def test_use_cloud_url(hass, mock_zeroconf):
"""Test that we fall back to cloud url."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
hass.config.components.add("cloud")
await home_assistant_cast.async_setup_ha_cast(hass, MockConfigEntry())
calls = async_mock_signal(hass, home_assistant_cast.SIGNAL_HASS_CAST_SHOW_VIEW)
with patch(
"homeassistant.components.cloud.async_remote_ui_url",
return_value="https://something.nabu.casa",
):
await hass.services.async_call(
"cast",
"show_lovelace_view",
{"entity_id": "media_player.kitchen", "view_path": "mock_path"},
blocking=True,
)
assert len(calls) == 1
controller = calls[0][0]
assert controller.hass_url == "https://something.nabu.casa"
|
import warnings
import requests
import urllib3
from gtts import gTTS
from kalliope.core.TTS.TTSModule import TTSModule, MissingTTSParameter
import logging
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Googletts(TTSModule):
def __init__(self, **kwargs):
super(Googletts, self).__init__(**kwargs)
self._check_parameters()
def say(self, words):
"""
:param words: The sentence to say
"""
self.generate_and_play(words, self._generate_audio_file)
def _check_parameters(self):
"""
        Check that parameters are ok, raise MissingTTSParameter otherwise.
        :return: True if parameters are ok, raise an exception otherwise
        .. raises:: MissingTTSParameter
"""
if self.language == "default" or self.language is None:
            raise MissingTTSParameter("[GoogleTTS] Missing parameters, check documentation!")
return True
def _generate_audio_file(self):
"""
        Generic method used as a callback in TTSModule
        - must provide the audio file and write it to disk
.. raises:: FailToLoadSoundFile
"""
        # Since the gTTS lib disables SSL verification, silence the insecure request warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
tts = gTTS(text=self.words, lang=self.language)
        # We have the audio, write the sound file to disk
tts.save(self.file_path)
        # Re-enable the warnings to avoid affecting the whole kalliope process
warnings.resetwarnings()
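# A minimal usage sketch (hypothetical parameter values; in practice Kalliope
# instantiates the TTS module from the settings.yml configuration):
#
#     tts = Googletts(language="en", cache=False)
#     tts.say("Hello from Kalliope")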
|
import asyncio
from homeassistant import config_entries, setup
from homeassistant.components.griddy.const import DOMAIN
from tests.async_mock import MagicMock, patch
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.griddy.config_flow.AsyncGriddy.async_getnow",
return_value=MagicMock(),
), patch(
"homeassistant.components.griddy.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.griddy.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"loadzone": "LZ_HOUSTON"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Load Zone LZ_HOUSTON"
assert result2["data"] == {"loadzone": "LZ_HOUSTON"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.griddy.config_flow.AsyncGriddy.async_getnow",
side_effect=asyncio.TimeoutError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"loadzone": "LZ_NORTH"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
|
import copy
from homeassistant.components import ssdp
from homeassistant.components.songpal.const import CONF_ENDPOINT, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import (
CONF_DATA,
ENDPOINT,
FRIENDLY_NAME,
HOST,
MODEL,
_create_mocked_device,
_patch_config_flow_device,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
UDN = "uuid:1234"
SSDP_DATA = {
ssdp.ATTR_UPNP_UDN: UDN,
ssdp.ATTR_UPNP_FRIENDLY_NAME: FRIENDLY_NAME,
ssdp.ATTR_SSDP_LOCATION: f"http://{HOST}:52323/dmr.xml",
"X_ScalarWebAPI_DeviceInfo": {
"X_ScalarWebAPI_BaseURL": ENDPOINT,
"X_ScalarWebAPI_ServiceList": {
"X_ScalarWebAPI_ServiceType": ["guide", "system", "audio", "avContent"],
},
},
}
def _flow_next(hass, flow_id):
return next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == flow_id
)
def _patch_setup():
return patch(
"homeassistant.components.songpal.async_setup_entry",
return_value=True,
)
async def test_flow_ssdp(hass):
"""Test working ssdp flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=SSDP_DATA,
)
assert result["type"] == "form"
assert result["step_id"] == "init"
assert result["description_placeholders"] == {
CONF_NAME: FRIENDLY_NAME,
CONF_HOST: HOST,
}
flow = _flow_next(hass, result["flow_id"])
assert flow["context"]["unique_id"] == UDN
with _patch_setup():
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == FRIENDLY_NAME
assert result["data"] == CONF_DATA
async def test_flow_user(hass):
"""Test working user initialized flow."""
mocked_device = _create_mocked_device()
with _patch_config_flow_device(mocked_device), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] is None
_flow_next(hass, result["flow_id"])
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_ENDPOINT: ENDPOINT},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == MODEL
assert result["data"] == {
CONF_NAME: MODEL,
CONF_ENDPOINT: ENDPOINT,
}
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_called_once()
async def test_flow_import(hass):
"""Test working import flow."""
mocked_device = _create_mocked_device()
with _patch_config_flow_device(mocked_device), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == FRIENDLY_NAME
assert result["data"] == CONF_DATA
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
async def test_flow_import_without_name(hass):
"""Test import flow without optional name."""
mocked_device = _create_mocked_device()
with _patch_config_flow_device(mocked_device), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_ENDPOINT: ENDPOINT}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == MODEL
assert result["data"] == {CONF_NAME: MODEL, CONF_ENDPOINT: ENDPOINT}
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_called_once()
def _create_mock_config_entry(hass):
MockConfigEntry(
domain=DOMAIN,
unique_id="uuid:0000",
data=CONF_DATA,
).add_to_hass(hass)
async def test_ssdp_bravia(hass):
"""Test discovering a bravia TV."""
ssdp_data = copy.deepcopy(SSDP_DATA)
ssdp_data["X_ScalarWebAPI_DeviceInfo"]["X_ScalarWebAPI_ServiceList"][
"X_ScalarWebAPI_ServiceType"
].append("videoScreen")
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp_data,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "not_songpal_device"
async def test_ssdp_exist(hass):
    """Test discovering an already configured device."""
_create_mock_config_entry(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=SSDP_DATA,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_user_exist(hass):
"""Test user adding existed device."""
mocked_device = _create_mocked_device()
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_called_once()
async def test_import_exist(hass):
"""Test importing existed device."""
mocked_device = _create_mocked_device()
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
async def test_user_invalid(hass):
"""Test using adding invalid config."""
mocked_device = _create_mocked_device(True)
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
async def test_import_invalid(hass):
"""Test importing invalid config."""
mocked_device = _create_mocked_device(True)
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
|
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.base import ModelBase
from django.test import TestCase
from polymorphic.models import PolymorphicModel, PolymorphicModelBase
from shop import deferred
import copy
from types import new_class
def create_regular_class(name, fields={}, meta={}):
meta.setdefault('app_label', 'foo')
Meta = type(str('Meta'), (), meta)
return type(str(name), (models.Model,), dict(Meta=Meta, __module__=__name__, **fields))
def create_deferred_base_class(name, fields={}, meta={}, polymorphic=False):
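    # types.new_class() is used below so the deferred metaclass can be passed
    # explicitly when the abstract base model is created.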
metaclass = deferred.ForeignKeyBuilder
model_class = models.Model
if polymorphic:
metaclass = deferred.PolymorphicForeignKeyBuilder
model_class = PolymorphicModel
meta.setdefault('app_label', 'foo')
meta.setdefault('abstract', True)
Meta = type(str('Meta'), (), meta)
cls_dict = dict(Meta=Meta, __metaclass__=metaclass, __module__=__name__, **fields)
return new_class(name, (model_class,), {'metaclass': metaclass}, lambda attrs: attrs.update(cls_dict))
def create_deferred_class(name, base, fields={}, meta={}, mixins=()):
meta.setdefault('app_label', 'bar')
Meta = type(str('Meta'), (), meta)
return type(str(name), mixins + (base,), dict(Meta=Meta, __module__=__name__, **fields))
RegularUser = create_regular_class('RegularUser')
DeferredBaseUser = create_deferred_base_class('DeferredBaseUser')
DeferredUser = create_deferred_class('DeferredUser', DeferredBaseUser)
RegularCustomer = create_regular_class('RegularCustomer', {
'user': models.OneToOneField(RegularUser, on_delete=models.PROTECT),
'advertised_by': models.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL),
})
DeferredBaseCustomer = create_deferred_base_class('DeferredBaseCustomer', {
'user': deferred.OneToOneField(DeferredBaseUser, on_delete=models.PROTECT),
'advertised_by': deferred.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL),
})
DeferredCustomer = create_deferred_class('DeferredCustomer', DeferredBaseCustomer)
RegularProduct = create_regular_class('RegularProduct')
DeferredBaseProduct = create_deferred_base_class('DeferredBaseProduct')
DeferredProduct = create_deferred_class('DeferredProduct', DeferredBaseProduct)
# Order is important, it must be declared before DeferredOrder, so that fulfillment tests make sense
DeferredBaseOrderItemBeforeOrder = create_deferred_base_class('DeferredBaseOrderItemBeforeOrder', {
'order': deferred.ForeignKey('DeferredBaseOrder', on_delete=models.CASCADE),
'product': deferred.ForeignKey(DeferredBaseProduct, on_delete=models.PROTECT),
})
DeferredOrderItemBeforeOrder = create_deferred_class('DeferredOrderItemBeforeOrder', DeferredBaseOrderItemBeforeOrder)
RegularOrder = create_regular_class('RegularOrder', {
'customer': models.ForeignKey(RegularCustomer, on_delete=models.PROTECT),
'items_simple': models.ManyToManyField(RegularProduct),
'items_through_fulfill_by_order_item': models.ManyToManyField('RegularProductAfterOrder', through='RegularOrderItemAfterOrderAndProduct'),
})
DeferredBaseOrder = create_deferred_base_class('DeferredBaseOrder', {
'customer': deferred.ForeignKey(DeferredBaseCustomer, on_delete=models.PROTECT),
'items_simple': deferred.ManyToManyField(DeferredBaseProduct),
'items_simple_fulfill_by_product': deferred.ManyToManyField('DeferredBaseProductAfterOrder'),
'items_through_fulfill_by_order_item': deferred.ManyToManyField('DeferredBaseProductAfterOrder', through='DeferredBaseOrderItemAfterOrderAndProduct'),
'items_through_fulfill_by_order': deferred.ManyToManyField(DeferredBaseProduct, through=DeferredBaseOrderItemBeforeOrder),
'items_through_fulfill_by_product': deferred.ManyToManyField('DeferredBaseProductAfterOrder', through='DeferredBaseOrderItemBeforeProduct'),
})
DeferredOrder = create_deferred_class('DeferredOrder', DeferredBaseOrder)
# Order is important, it must be declared before DeferredProductAfterOrder, so that fulfillment tests make sense
DeferredBaseOrderItemBeforeProduct = create_deferred_base_class('DeferredBaseOrderItemBeforeProduct', {
'order': deferred.ForeignKey(DeferredBaseOrder, on_delete=models.CASCADE),
'product': deferred.ForeignKey('DeferredBaseProductAfterOrder', on_delete=models.PROTECT),
})
DeferredOrderItemBeforeProduct = create_deferred_class('DeferredOrderItemBeforeProduct', DeferredBaseOrderItemBeforeProduct)
# Order is important, it must be declared after DeferredOrder, so that fulfillment tests make sense
RegularProductAfterOrder = create_regular_class('RegularProductAfterOrder')
DeferredBaseProductAfterOrder = create_deferred_base_class('DeferredBaseProductAfterOrder')
DeferredProductAfterOrder = create_deferred_class('DeferredProductAfterOrder', DeferredBaseProductAfterOrder)
# Order is important, it must be declared after DeferredOrder and DeferredProductAfterOrder, so that fulfillment tests make sense
RegularOrderItemAfterOrderAndProduct = create_regular_class('RegularOrderItemAfterOrderAndProduct', {
'order': models.ForeignKey(RegularOrder, on_delete=models.CASCADE),
'product': models.ForeignKey(RegularProductAfterOrder, on_delete=models.PROTECT),
})
DeferredBaseOrderItemAfterOrderAndProduct = create_deferred_base_class('DeferredBaseOrderItemAfterOrderAndProduct', {
'order': deferred.ForeignKey(DeferredBaseOrder, on_delete=models.CASCADE),
'product': deferred.ForeignKey(DeferredBaseProductAfterOrder, on_delete=models.PROTECT),
})
DeferredOrderItemAfterOrderAndProduct = create_deferred_class('DeferredOrderItemAfterOrderAndProduct', DeferredBaseOrderItemAfterOrderAndProduct)
OrderPayment = create_deferred_base_class('OrderPayment', {
'order': deferred.ForeignKey(DeferredBaseOrder, on_delete=models.CASCADE),
}, {'abstract': False})
DeferredBaseOrderPaymentLog = create_deferred_base_class('DeferredBaseOrderPaymentLog', {
'order_payment': deferred.ForeignKey(OrderPayment, on_delete=models.CASCADE),
})
DeferredOrderPaymentLog = create_deferred_class('DeferredOrderPaymentLog', DeferredBaseOrderPaymentLog)
DeferredBasePolymorphicProduct = create_deferred_base_class('DeferredBasePolymorphicProduct', {
'owner': deferred.ForeignKey(DeferredBaseCustomer, on_delete=models.PROTECT),
}, polymorphic=True)
DeferredPolymorphicProduct = create_deferred_class('DeferredPolymorphicProduct', DeferredBasePolymorphicProduct)
class DeferredTestCase(TestCase):
def assert_same_model(self, to, model):
if isinstance(to, str):
self.assertEqual(to, model.__name__)
else:
self.assertIs(to, model)
def _test_foreign_key(self, from_class, to_class, field_attribute):
field = from_class._meta.get_field(field_attribute)
self.assertTrue(field.is_relation)
self.assertTrue(field.many_to_one)
self.assert_same_model(field.related_model, to_class)
def test_foreign_key_regular(self):
self._test_foreign_key(RegularOrder, RegularCustomer, 'customer')
def test_foreign_key_deferred(self):
self._test_foreign_key(DeferredOrder, DeferredCustomer, 'customer')
def _test_one_to_one_field(self, customer_class, user_class):
user_field = customer_class._meta.get_field('user')
self.assertTrue(user_field.is_relation)
self.assertTrue(user_field.one_to_one)
self.assert_same_model(user_field.related_model, user_class)
def test_one_to_one_field_regular(self):
self._test_one_to_one_field(RegularCustomer, RegularUser)
def test_one_to_one_field_deferred(self):
self._test_one_to_one_field(DeferredCustomer, DeferredUser)
def _test_many_to_may_field_simple(self, order_class, product_class, items_field_attribute):
items_field = order_class._meta.get_field(items_field_attribute)
self.assertTrue(items_field.is_relation)
self.assertTrue(items_field.many_to_many)
self.assert_same_model(items_field.related_model, product_class)
m2m_field_name = items_field.m2m_field_name()
m2m_field = items_field.remote_field.through._meta.get_field(m2m_field_name)
m2m_reverse_field_name = items_field.m2m_reverse_field_name()
m2m_reverse_field = items_field.remote_field.through._meta.get_field(m2m_reverse_field_name)
self.assert_same_model(m2m_field.related_model, order_class)
self.assert_same_model(m2m_reverse_field.related_model, product_class)
def test_many_to_many_field_simple_regular(self):
self._test_many_to_may_field_simple(
RegularOrder,
RegularProduct,
items_field_attribute='items_simple',
)
def test_many_to_many_field_simple_deferred(self):
self._test_many_to_may_field_simple(
DeferredOrder,
DeferredProduct,
items_field_attribute='items_simple',
)
def test_many_to_many_field_simple_deferred_by_product(self):
self._test_many_to_may_field_simple(
DeferredOrder,
DeferredProductAfterOrder,
items_field_attribute='items_simple_fulfill_by_product',
)
def _test_many_to_may_field_through(self, order_class, product_class, order_item_class, items_field_attribute):
items_field = order_class._meta.get_field(items_field_attribute)
self.assertTrue(items_field.is_relation)
self.assertTrue(items_field.many_to_many)
self.assert_same_model(items_field.related_model, product_class)
self.assert_same_model(items_field.remote_field.through, order_item_class)
m2m_field_name = items_field.m2m_field_name()
m2m_field = items_field.remote_field.through._meta.get_field(m2m_field_name)
m2m_reverse_field_name = items_field.m2m_reverse_field_name()
m2m_reverse_field = items_field.remote_field.through._meta.get_field(m2m_reverse_field_name)
self.assert_same_model(m2m_field.related_model, order_class)
self.assert_same_model(m2m_reverse_field.related_model, product_class)
def test_many_to_many_field_through_regular(self):
self._test_many_to_may_field_through(
RegularOrder,
RegularProductAfterOrder,
RegularOrderItemAfterOrderAndProduct,
items_field_attribute='items_through_fulfill_by_order_item',
)
def test_many_to_many_field_through_deferred(self):
self._test_many_to_may_field_through(
DeferredOrder,
DeferredProductAfterOrder,
DeferredOrderItemAfterOrderAndProduct,
items_field_attribute='items_through_fulfill_by_order_item',
)
def test_many_to_many_field_through_deferred_by_order(self):
self._test_many_to_may_field_through(
DeferredOrder,
DeferredProduct,
DeferredOrderItemBeforeOrder,
items_field_attribute='items_through_fulfill_by_order',
)
def test_many_to_many_field_through_deferred_by_product(self):
self._test_many_to_may_field_through(
DeferredOrder,
DeferredProductAfterOrder,
DeferredOrderItemBeforeProduct,
items_field_attribute='items_through_fulfill_by_product',
)
def _test_foreign_key_self(self, customer_class):
advertised_by_field = customer_class._meta.get_field('advertised_by')
self.assertTrue(advertised_by_field.is_relation)
self.assertTrue(advertised_by_field.many_to_one)
self.assert_same_model(advertised_by_field.related_model, customer_class)
def test_foreign_key_self_regular(self):
self._test_foreign_key_self(RegularCustomer)
def test_foreign_key_self_deferred(self):
self._test_foreign_key_self(DeferredCustomer)
def test_extend_deferred_model_allowed(self):
"""
Extending a deferred model is allowed,
but deferred relations will still reference the (first) deferred model.
"""
create_deferred_class('Customer', DeferredCustomer)
OrderBase = create_deferred_base_class('OrderBase', {
'customer': deferred.ForeignKey(DeferredBaseCustomer, on_delete=models.PROTECT),
})
Order = create_deferred_class('Order', OrderBase)
self._test_foreign_key(DeferredOrder, DeferredCustomer, 'customer')
self._test_foreign_key(Order, DeferredCustomer, 'customer')
def test_extend_deferred_base_model_allowed_only_once(self):
with self.assertRaisesRegex(ImproperlyConfigured, "Both Model classes 'Product' and 'DeferredProduct' inherited from abstract base class DeferredBaseProduct"):
create_deferred_class('Product', DeferredBaseProduct)
def test_non_abstract_deferred_base_model_allowed(self):
self._test_foreign_key(OrderPayment, DeferredOrder, 'order')
        self._test_foreign_key(DeferredOrderPaymentLog, OrderPayment, 'order_payment')
def test_extend_non_abstract_deferred_base_model_allowed(self):
"""
        Extending a non-abstract deferred model is allowed,
but deferred relations will still reference the (first) deferred model.
"""
create_deferred_class('OrderPaymentSubclass', OrderPayment)
BaseOrderPaymentLog = create_deferred_base_class('BaseOrderPaymentLog', {
'order_payment': deferred.ForeignKey(OrderPayment, on_delete=models.CASCADE),
})
OrderPaymentLog = create_deferred_class('OrderPaymentLog', BaseOrderPaymentLog)
self._test_foreign_key(DeferredOrderPaymentLog, OrderPayment, 'order_payment')
self._test_foreign_key(OrderPaymentLog, OrderPayment, 'order_payment')
def test_extend_non_abstract_deferred_base_model_always_allowed(self):
create_deferred_class('OrderPaymentSubclass1', OrderPayment)
create_deferred_class('OrderPaymentSubclass2', OrderPayment)
def test_polymorphic_base_model(self):
self.assertTrue(issubclass(DeferredPolymorphicProduct, PolymorphicModel))
self.assertTrue(isinstance(DeferredPolymorphicProduct, PolymorphicModelBase))
self._test_foreign_key(DeferredPolymorphicProduct, DeferredCustomer, 'owner')
def test_mixins_allowed(self):
SomeMixin = type(str('SomeMixin'), (object,), {})
BaseModel = create_regular_class('BaseModel', meta={'abstract': True})
MixinBaseProduct = create_deferred_base_class('MixinBaseProduct')
MixinProduct = create_deferred_class('MixinProduct', MixinBaseProduct, mixins=(SomeMixin, BaseModel))
self.assertTrue(issubclass(MixinProduct, SomeMixin))
self.assertTrue(issubclass(MixinProduct, BaseModel))
def test_check_for_pending_mappings(self):
deferred.ForeignKeyBuilder.check_for_pending_mappings()
PendingMappingBaseCustomer = create_deferred_base_class('PendingMappingBaseCustomer')
PendingMappingBaseOrder = create_deferred_base_class('PendingMappingBaseOrder', {
'customer': deferred.ForeignKey(PendingMappingBaseCustomer, on_delete=models.PROTECT),
})
deferred.ForeignKeyBuilder.check_for_pending_mappings()
create_deferred_class('PendingMappingOrder', PendingMappingBaseOrder)
with self.assertRaisesRegex(ImproperlyConfigured, "Deferred foreign key 'PendingMappingOrder.customer' has not been mapped"):
deferred.ForeignKeyBuilder.check_for_pending_mappings()
class MaterializedModelTestCase(TestCase):
def setUp(self):
self.OrderModel = deferred.MaterializedModel(DeferredBaseOrder)
def test_types(self):
self.assertTrue(isinstance(self.OrderModel, ModelBase))
self.assertTrue(issubclass(self.OrderModel, models.Model))
self.assertIs(type(self.OrderModel), deferred.MaterializedModel)
def test_call(self):
order = self.OrderModel()
self.assertTrue(isinstance(order, DeferredOrder))
def test_repr(self):
self.assertEqual(repr(self.OrderModel), "<MaterializedModel: <class 'test_deferred.DeferredBaseOrder'>>")
self.OrderModel._setup()
self.assertEqual(repr(self.OrderModel), "<MaterializedModel: <class 'test_deferred.DeferredOrder'>>")
def test_copy_uninitialized(self):
OrderModelDeepCopy = copy.copy(self.OrderModel)
self.assertIs(type(OrderModelDeepCopy), deferred.MaterializedModel)
# Ensure that base_model was copied
OrderModelDeepCopy._setup()
def test_copy_initialized(self):
self.OrderModel._setup()
OrderModelDeepCopy = copy.copy(self.OrderModel)
self.assertIs(OrderModelDeepCopy, DeferredOrder)
def test_deepcopy_uninitialized(self):
OrderModelDeepCopy = copy.deepcopy(self.OrderModel)
self.assertIs(type(OrderModelDeepCopy), deferred.MaterializedModel)
# Ensure that base_model was copied
OrderModelDeepCopy._setup()
def test_deepcopy_initialized(self):
self.OrderModel._setup()
OrderModelDeepCopy = copy.deepcopy(self.OrderModel)
self.assertIs(OrderModelDeepCopy, DeferredOrder)
def test_error_when_initializing_unmapped_model(self):
Unmapped = create_deferred_base_class('Unmapped')
UnmappedModel = deferred.MaterializedModel(Unmapped)
with self.assertRaisesRegex(ImproperlyConfigured, 'No class implements abstract base model: `Unmapped`.'):
UnmappedModel._setup()
|
from datetime import timedelta
import aiohttp
import pytest
from homeassistant.components import nws
from homeassistant.components.weather import (
ATTR_CONDITION_SUNNY,
ATTR_FORECAST,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
from tests.components.nws.const import (
EXPECTED_FORECAST_IMPERIAL,
EXPECTED_FORECAST_METRIC,
EXPECTED_OBSERVATION_IMPERIAL,
EXPECTED_OBSERVATION_METRIC,
NONE_FORECAST,
NONE_OBSERVATION,
NWS_CONFIG,
)
@pytest.mark.parametrize(
"units,result_observation,result_forecast",
[
(IMPERIAL_SYSTEM, EXPECTED_OBSERVATION_IMPERIAL, EXPECTED_FORECAST_IMPERIAL),
(METRIC_SYSTEM, EXPECTED_OBSERVATION_METRIC, EXPECTED_FORECAST_METRIC),
],
)
async def test_imperial_metric(
hass, units, result_observation, result_forecast, mock_simple_nws
):
"""Test with imperial and metric units."""
# enable the hourly entity
registry = await hass.helpers.entity_registry.async_get_registry()
registry.async_get_or_create(
WEATHER_DOMAIN,
nws.DOMAIN,
"35_-75_hourly",
suggested_object_id="abc_hourly",
disabled_by=None,
)
hass.config.units = units
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("weather.abc_hourly")
assert state
assert state.state == ATTR_CONDITION_SUNNY
data = state.attributes
for key, value in result_observation.items():
assert data.get(key) == value
forecast = data.get(ATTR_FORECAST)
for key, value in result_forecast.items():
assert forecast[0].get(key) == value
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == ATTR_CONDITION_SUNNY
data = state.attributes
for key, value in result_observation.items():
assert data.get(key) == value
forecast = data.get(ATTR_FORECAST)
for key, value in result_forecast.items():
assert forecast[0].get(key) == value
async def test_none_values(hass, mock_simple_nws):
"""Test with none values in observation and forecast dicts."""
instance = mock_simple_nws.return_value
instance.observation = NONE_OBSERVATION
instance.forecast = NONE_FORECAST
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("weather.abc_daynight")
assert state.state == STATE_UNKNOWN
data = state.attributes
for key in EXPECTED_OBSERVATION_IMPERIAL:
assert data.get(key) is None
forecast = data.get(ATTR_FORECAST)
for key in EXPECTED_FORECAST_IMPERIAL:
assert forecast[0].get(key) is None
async def test_none(hass, mock_simple_nws):
"""Test with None as observation and forecast."""
instance = mock_simple_nws.return_value
instance.observation = None
instance.forecast = None
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == STATE_UNKNOWN
data = state.attributes
for key in EXPECTED_OBSERVATION_IMPERIAL:
assert data.get(key) is None
forecast = data.get(ATTR_FORECAST)
assert forecast is None
async def test_error_station(hass, mock_simple_nws):
"""Test error in setting station."""
instance = mock_simple_nws.return_value
instance.set_station.side_effect = aiohttp.ClientError
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("weather.abc_hourly") is None
assert hass.states.get("weather.abc_daynight") is None
async def test_entity_refresh(hass, mock_simple_nws):
"""Test manual refresh."""
instance = mock_simple_nws.return_value
await async_setup_component(hass, "homeassistant", {})
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
instance.update_observation.assert_called_once()
instance.update_forecast.assert_called_once()
instance.update_forecast_hourly.assert_called_once()
await hass.services.async_call(
"homeassistant",
"update_entity",
{"entity_id": "weather.abc_daynight"},
blocking=True,
)
await hass.async_block_till_done()
assert instance.update_observation.call_count == 2
assert instance.update_forecast.call_count == 2
instance.update_forecast_hourly.assert_called_once()
async def test_error_observation(hass, mock_simple_nws):
"""Test error during update observation."""
utc_time = dt_util.utcnow()
with patch("homeassistant.components.nws.utcnow") as mock_utc, patch(
"homeassistant.components.nws.weather.utcnow"
) as mock_utc_weather:
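        # Helper: advance both patched clocks and fire a time-changed event so
        # the integration refreshes its data.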
def increment_time(time):
mock_utc.return_value += time
mock_utc_weather.return_value += time
async_fire_time_changed(hass, mock_utc.return_value)
mock_utc.return_value = utc_time
mock_utc_weather.return_value = utc_time
instance = mock_simple_nws.return_value
# first update fails
instance.update_observation.side_effect = aiohttp.ClientError
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
instance.update_observation.assert_called_once()
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == STATE_UNAVAILABLE
# second update happens faster and succeeds
instance.update_observation.side_effect = None
increment_time(timedelta(minutes=1))
await hass.async_block_till_done()
assert instance.update_observation.call_count == 2
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == ATTR_CONDITION_SUNNY
        # third update fails, but data is cached
instance.update_observation.side_effect = aiohttp.ClientError
increment_time(timedelta(minutes=10))
await hass.async_block_till_done()
assert instance.update_observation.call_count == 3
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == ATTR_CONDITION_SUNNY
# after 20 minutes data caching expires, data is no longer shown
increment_time(timedelta(minutes=10))
await hass.async_block_till_done()
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == STATE_UNAVAILABLE
async def test_error_forecast(hass, mock_simple_nws):
"""Test error during update forecast."""
instance = mock_simple_nws.return_value
instance.update_forecast.side_effect = aiohttp.ClientError
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
instance.update_forecast.assert_called_once()
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == STATE_UNAVAILABLE
instance.update_forecast.side_effect = None
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=1))
await hass.async_block_till_done()
assert instance.update_forecast.call_count == 2
state = hass.states.get("weather.abc_daynight")
assert state
assert state.state == ATTR_CONDITION_SUNNY
async def test_error_forecast_hourly(hass, mock_simple_nws):
"""Test error during update forecast hourly."""
instance = mock_simple_nws.return_value
instance.update_forecast_hourly.side_effect = aiohttp.ClientError
# enable the hourly entity
registry = await hass.helpers.entity_registry.async_get_registry()
registry.async_get_or_create(
WEATHER_DOMAIN,
nws.DOMAIN,
"35_-75_hourly",
suggested_object_id="abc_hourly",
disabled_by=None,
)
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("weather.abc_hourly")
assert state
assert state.state == STATE_UNAVAILABLE
instance.update_forecast_hourly.assert_called_once()
instance.update_forecast_hourly.side_effect = None
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=1))
await hass.async_block_till_done()
assert instance.update_forecast_hourly.call_count == 2
state = hass.states.get("weather.abc_hourly")
assert state
assert state.state == ATTR_CONDITION_SUNNY
async def test_forecast_hourly_disable_enable(hass, mock_simple_nws):
"""Test error during update forecast hourly."""
entry = MockConfigEntry(
domain=nws.DOMAIN,
data=NWS_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get_or_create(
WEATHER_DOMAIN,
nws.DOMAIN,
"35_-75_hourly",
)
assert entry.disabled is True
# Test enabling entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
|
from dataclasses import dataclass
from typing import List, Tuple
# pylint: disable=no-name-in-module
from pydantic.error_wrappers import ValidationError
from xbox.webapi.api.client import XboxLiveClient
from xbox.webapi.api.provider.catalog.models import FieldsTemplate, Image
from xbox.webapi.api.provider.gameclips.models import GameclipsResponse
from xbox.webapi.api.provider.screenshots.models import ScreenshotResponse
from xbox.webapi.api.provider.smartglass.models import InstalledPackage
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_GAME,
MEDIA_CLASS_IMAGE,
MEDIA_CLASS_VIDEO,
)
from homeassistant.components.media_source.const import MEDIA_MIME_TYPES
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from .browse_media import _find_media_image
from .const import DOMAIN
MIME_TYPE_MAP = {
"gameclips": "video/mp4",
"screenshots": "image/png",
}
MEDIA_CLASS_MAP = {
"gameclips": MEDIA_CLASS_VIDEO,
"screenshots": MEDIA_CLASS_IMAGE,
}
async def async_get_media_source(hass: HomeAssistantType):
"""Set up Xbox media source."""
entry = hass.config_entries.async_entries(DOMAIN)[0]
client = hass.data[DOMAIN][entry.entry_id]["client"]
return XboxSource(hass, client)
@callback
def async_parse_identifier(
item: MediaSourceItem,
) -> Tuple[str, str, str]:
"""Parse identifier."""
identifier = item.identifier or ""
start = ["", "", ""]
items = identifier.lstrip("/").split("~~", 2)
return tuple(items + start[len(items) :])
@dataclass
class XboxMediaItem:
"""Represents gameclip/screenshot media."""
caption: str
thumbnail: str
uri: str
media_class: str
class XboxSource(MediaSource):
"""Provide Xbox screenshots and gameclips as media sources."""
name: str = "Xbox Game Media"
def __init__(self, hass: HomeAssistantType, client: XboxLiveClient):
"""Initialize Xbox source."""
super().__init__(DOMAIN)
self.hass: HomeAssistantType = hass
self.client: XboxLiveClient = client
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Resolve media to a url."""
_, category, url = async_parse_identifier(item)
_, kind = category.split("#", 1)
return PlayMedia(url, MIME_TYPE_MAP[kind])
async def async_browse_media(
self, item: MediaSourceItem, media_types: Tuple[str] = MEDIA_MIME_TYPES
) -> BrowseMediaSource:
"""Return media."""
title, category, _ = async_parse_identifier(item)
if not title:
return await self._build_game_library()
if not category:
return _build_categories(title)
return await self._build_media_items(title, category)
async def _build_game_library(self):
"""Display installed games across all consoles."""
apps = await self.client.smartglass.get_installed_apps()
games = {
game.one_store_product_id: game
for game in apps.result
if game.is_game and game.title_id
}
app_details = await self.client.catalog.get_products(
games.keys(),
FieldsTemplate.BROWSE,
)
images = {
prod.product_id: prod.localized_properties[0].images
for prod in app_details.products
}
return BrowseMediaSource(
domain=DOMAIN,
identifier="",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="",
title="Xbox Game Media",
can_play=False,
can_expand=True,
children=[_build_game_item(game, images) for game in games.values()],
children_media_class=MEDIA_CLASS_GAME,
)
async def _build_media_items(self, title, category):
"""Fetch requested gameclip/screenshot media."""
title_id, _, thumbnail = title.split("#", 2)
owner, kind = category.split("#", 1)
items: List[XboxMediaItem] = []
try:
if kind == "gameclips":
if owner == "my":
response: GameclipsResponse = (
await self.client.gameclips.get_recent_clips_by_xuid(
self.client.xuid, title_id
)
)
elif owner == "community":
response: GameclipsResponse = await self.client.gameclips.get_recent_community_clips_by_title_id(
title_id
)
else:
return None
items = [
XboxMediaItem(
item.user_caption
or dt_util.as_local(
dt_util.parse_datetime(item.date_recorded)
).strftime("%b. %d, %Y %I:%M %p"),
item.thumbnails[0].uri,
item.game_clip_uris[0].uri,
MEDIA_CLASS_VIDEO,
)
for item in response.game_clips
]
elif kind == "screenshots":
if owner == "my":
response: ScreenshotResponse = (
await self.client.screenshots.get_recent_screenshots_by_xuid(
self.client.xuid, title_id
)
)
elif owner == "community":
response: ScreenshotResponse = await self.client.screenshots.get_recent_community_screenshots_by_title_id(
title_id
)
else:
return None
items = [
XboxMediaItem(
item.user_caption
or dt_util.as_local(item.date_taken).strftime(
"%b. %d, %Y %I:%M%p"
),
item.thumbnails[0].uri,
item.screenshot_uris[0].uri,
MEDIA_CLASS_IMAGE,
)
for item in response.screenshots
]
except ValidationError:
# Unexpected API response
pass
return BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}~~{category}",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="",
title=f"{owner.title()} {kind.title()}",
can_play=False,
can_expand=True,
children=[_build_media_item(title, category, item) for item in items],
children_media_class=MEDIA_CLASS_MAP[kind],
thumbnail=thumbnail,
)
def _build_game_item(item: InstalledPackage, images: List[Image]):
"""Build individual game."""
thumbnail = ""
image = _find_media_image(images.get(item.one_store_product_id, []))
if image is not None:
thumbnail = image.uri
        if thumbnail.startswith("/"):
thumbnail = f"https:{thumbnail}"
return BrowseMediaSource(
domain=DOMAIN,
identifier=f"{item.title_id}#{item.name}#{thumbnail}",
media_class=MEDIA_CLASS_GAME,
media_content_type="",
title=item.name,
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_DIRECTORY,
thumbnail=thumbnail,
)
def _build_categories(title):
"""Build base categories for Xbox media."""
_, name, thumbnail = title.split("#", 2)
base = BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}",
media_class=MEDIA_CLASS_GAME,
media_content_type="",
title=name,
can_play=False,
can_expand=True,
children=[],
children_media_class=MEDIA_CLASS_DIRECTORY,
thumbnail=thumbnail,
)
owners = ["my", "community"]
kinds = ["gameclips", "screenshots"]
for owner in owners:
for kind in kinds:
base.children.append(
BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}~~{owner}#{kind}",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="",
title=f"{owner.title()} {kind.title()}",
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_MAP[kind],
)
)
return base
def _build_media_item(title: str, category: str, item: XboxMediaItem):
"""Build individual media item."""
_, kind = category.split("#", 1)
return BrowseMediaSource(
domain=DOMAIN,
identifier=f"{title}~~{category}~~{item.uri}",
media_class=item.media_class,
media_content_type=MIME_TYPE_MAP[kind],
title=item.caption,
can_play=True,
can_expand=False,
thumbnail=item.thumbnail,
)
|
import json
from accuweather import ApiError, InvalidApiKeyError, RequestsExceededError
from homeassistant import data_entry_flow
from homeassistant.components.accuweather.const import CONF_FORECAST, DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
VALID_CONFIG = {
CONF_NAME: "abcd",
CONF_API_KEY: "32-character-string-1234567890qw",
CONF_LATITUDE: 55.55,
CONF_LONGITUDE: 122.12,
}
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_api_key_too_short(hass):
"""Test that errors are shown when API key is too short."""
    # The API key length check is done by the library without polling the
    # AccuWeather server, so we don't need to patch the library method.
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_NAME: "abcd",
CONF_API_KEY: "foo",
CONF_LATITUDE: 55.55,
CONF_LONGITUDE: 122.12,
},
)
assert result["errors"] == {CONF_API_KEY: "invalid_api_key"}
async def test_invalid_api_key(hass):
"""Test that errors are shown when API key is invalid."""
with patch(
"accuweather.AccuWeather._async_get_data",
side_effect=InvalidApiKeyError("Invalid API key"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["errors"] == {CONF_API_KEY: "invalid_api_key"}
async def test_api_error(hass):
"""Test API error."""
with patch(
"accuweather.AccuWeather._async_get_data",
side_effect=ApiError("Invalid response from AccuWeather API"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["errors"] == {"base": "cannot_connect"}
async def test_requests_exceeded_error(hass):
"""Test requests exceeded error."""
with patch(
"accuweather.AccuWeather._async_get_data",
side_effect=RequestsExceededError(
"The allowed number of requests has been exceeded"
),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["errors"] == {CONF_API_KEY: "requests_exceeded"}
async def test_integration_already_exists(hass):
"""Test we only allow a single config flow."""
with patch(
"accuweather.AccuWeather._async_get_data",
return_value=json.loads(load_fixture("accuweather/location_data.json")),
):
MockConfigEntry(
domain=DOMAIN,
unique_id="123456",
data=VALID_CONFIG,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_create_entry(hass):
"""Test that the user step works."""
with patch(
"accuweather.AccuWeather._async_get_data",
return_value=json.loads(load_fixture("accuweather/location_data.json")),
), patch(
"homeassistant.components.accuweather.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "abcd"
assert result["data"][CONF_NAME] == "abcd"
assert result["data"][CONF_LATITUDE] == 55.55
assert result["data"][CONF_LONGITUDE] == 122.12
assert result["data"][CONF_API_KEY] == "32-character-string-1234567890qw"
async def test_options_flow(hass):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="123456",
data=VALID_CONFIG,
)
config_entry.add_to_hass(hass)
with patch(
"accuweather.AccuWeather._async_get_data",
return_value=json.loads(load_fixture("accuweather/location_data.json")),
), patch(
"accuweather.AccuWeather.async_get_current_conditions",
return_value=json.loads(
load_fixture("accuweather/current_conditions_data.json")
),
), patch(
"accuweather.AccuWeather.async_get_forecast"
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_FORECAST: True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {CONF_FORECAST: True}
await hass.async_block_till_done()
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
|
from django.forms import fields, widgets
from django.template import engines, TemplateDoesNotExist
from django.template.loader import select_template, get_template
from django.utils.translation import gettext_lazy as _
from django.utils.html import mark_safe
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.plugin_base import TransparentWrapper
from shop.cascade.extensions import ShopExtendableMixin, LeftRightExtensionMixin
from shop.cascade.plugin_base import ShopPluginBase
from shop.conf import app_settings
from shop.models.cart import CartModel
from shop.serializers.cart import CartSerializer
class ShopCartPluginForm(EntangledModelFormMixin):
CHOICES = [
('editable', _("Editable Cart")),
('static', _("Static Cart")),
('summary', _("Cart Summary")),
('watch', _("Watch List")),
]
render_type = fields.ChoiceField(
choices=CHOICES,
widget=widgets.RadioSelect,
label=_("Render as"),
initial='editable',
help_text=_("Shall the cart be editable or a static summary?"),
)
class Meta:
entangled_fields = {'glossary': ['render_type']}
class ShopCartPlugin(LeftRightExtensionMixin, TransparentWrapper, ShopPluginBase):
name = _("Shopping Cart")
require_parent = True
parent_classes = ['BootstrapColumnPlugin']
cache = False
allow_children = True
form = ShopCartPluginForm
model_mixins = (ShopExtendableMixin,)
@classmethod
def get_identifier(cls, instance):
render_type = instance.glossary.get('render_type')
return mark_safe(dict(cls.form.CHOICES).get(render_type, ''))
def get_render_template(self, context, instance, placeholder):
render_template = instance.glossary.get('render_template')
if render_template:
return get_template(render_template)
render_type = instance.glossary.get('render_type')
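        # Resolve the template from the chosen render type: first look under the
        # project's app label, then under shop's defaults, and finally fall back
        # to the editable cart template.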
try:
return select_template([
'{}/cart/{}.html'.format(app_settings.APP_LABEL, render_type),
'shop/cart/{}.html'.format(render_type),
])
except TemplateDoesNotExist:
return get_template('shop/cart/editable.html')
def render(self, context, instance, placeholder):
try:
cart = CartModel.objects.get_from_request(context['request'])
context['is_cart_filled'] = cart.items.exists()
render_type = instance.glossary['render_type']
if render_type == 'static':
                # update context for static cart with items to be rendered as HTML
cart_serializer = CartSerializer(cart, context=context, label='cart', with_items=True)
context['cart'] = cart_serializer.data
elif render_type == 'summary':
                # update context for cart summary to be rendered as HTML
cart_serializer = CartSerializer(cart, context=context, label='cart')
context['cart'] = cart_serializer.data
except (KeyError, CartModel.DoesNotExist):
pass
return self.super(ShopCartPlugin, self).render(context, instance, placeholder)
plugin_pool.register_plugin(ShopCartPlugin)
|
import os.path as op
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne.channels import (read_dig_fif, make_dig_montage,
make_standard_montage)
p_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit', 'tests', 'data')
elp = op.join(p_dir, 'test_elp.txt')
hsp = op.join(p_dir, 'test_hsp.txt')
hpi = op.join(p_dir, 'test_mrk.sqd')
point_names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
io_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
fif_fname = op.join(io_dir, 'test_raw.fif')
def test_plot_montage():
"""Test plotting montages."""
m = make_standard_montage('easycap-M1')
m.plot()
plt.close('all')
m.plot(kind='3d')
plt.close('all')
m.plot(kind='3d', show_names=True)
plt.close('all')
m.plot(kind='topomap')
plt.close('all')
m.plot(kind='topomap', show_names=True)
plt.close('all')
N_HSP, N_HPI = 2, 1
montage = make_dig_montage(nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3],
hsp=np.full((N_HSP, 3), 4),
hpi=np.full((N_HPI, 3), 4),
coord_frame='head')
assert '0 channels' in repr(montage)
with pytest.raises(RuntimeError, match='No valid channel positions'):
montage.plot()
d = read_dig_fif(fname=fif_fname)
assert '61 channels' in repr(d)
# XXX this is broken; dm.point_names is used. Sometimes we say this should
# Just contain the HPI coils, other times that it's all channels (e.g.,
# EEG channels). But there is redundancy with this and dm.dig_ch_pos.
# This should be addressed in the pending big refactoring.
# d.plot()
# plt.close('all')
@pytest.mark.parametrize('name, n', [
('standard_1005', 342), ('standard_postfixed', 85),
('standard_primed', 85), ('standard_1020', 93)
])
def test_plot_defect_montage(name, n):
"""Test plotting defect montages (i.e. with duplicate labels)."""
# montage name and number of unique labels
m = make_standard_montage(name)
n -= 3 # new montage does not have fiducials
fig = m.plot()
collection = fig.axes[0].collections[0]
assert collection._edgecolors.shape[0] == n
assert collection._facecolors.shape[0] == n
assert collection._offsets.shape[0] == n
def test_plot_digmontage():
"""Test plot DigMontage."""
montage = make_dig_montage(
ch_pos=dict(zip(list('abc'), np.eye(3))),
coord_frame='head'
)
montage.plot()
plt.close('all')
|
from abc import ABC, abstractmethod
from collections import Counter
from functools import reduce
from re import split
from sys import version_info
import pandas as pd
from flashtext import KeywordProcessor
from scattertext.ScatterChart import check_topic_model_string_format
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromTopicModelBase(ABC):
def __init__(self, topic_model):
self._topic_model = topic_model
self._lexicon_df = self._get_lexicon_df_from_topic_model(topic_model)
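    # The lexicon frame maps each term to its topic: e.g. a model like
    # {'sports': ['goal'], 'food': ['pizza']} yields a frame indexed by
    # 'goal' and 'pizza' with a single 'cat' column naming the topic.
    # Topics of unequal length produce extra NaN-term rows, which are
    # dropped later by the dropna() call in _analyze().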
def _get_lexicon_df_from_topic_model(self, topic_model):
return (pd.DataFrame(pd.Series(topic_model)
.apply(pd.Series)
.reset_index())
.melt(id_vars=['index'])
[['index', 'value']]
.rename(columns={'index': 'cat', 'value': 'term'})
.set_index('term'))
def _analyze(self, doc):
text_df = (pd.DataFrame(pd.Series(self._get_terms_from_doc(doc)))
.join(self._lexicon_df)
.dropna()
.groupby('cat')
.sum())
return text_df
def get_doc_metadata(self, doc, prefix=''):
feature_counter = Counter()
if version_info[0] >= 3:
doc = str(doc)
for category, score in self._analyze(doc).to_dict()[0].items():
feature_counter[prefix + category] = int(score)
return feature_counter
@abstractmethod
def _get_terms_from_doc(self, doc):
pass
class FeatsFromTopicModel(FeatsFromTopicModelBase, FeatsFromSpacyDoc):
def __init__(self,
topic_model,
use_lemmas=False,
entity_types_to_censor=set(),
entity_types_to_use=None,
tag_types_to_censor=set(),
strip_final_period=False,
keyword_processor_args={'case_sensitive': False}):
self._keyword_processor = KeywordProcessor(**keyword_processor_args)
self._topic_model = topic_model.copy()
if keyword_processor_args.get('case_sensitive', None) is False:
for k, v in self._topic_model.items():
self._topic_model[k] = [e.lower() for e in v]
for keyphrase in reduce(lambda x, y: set(x) | set(y), self._topic_model.values()):
self._keyword_processor.add_keyword(keyphrase)
FeatsFromSpacyDoc.__init__(self, use_lemmas, entity_types_to_censor,
tag_types_to_censor, strip_final_period)
FeatsFromTopicModelBase.__init__(self, topic_model)
def get_top_model_term_lists(self):
return self._topic_model
def _get_terms_from_doc(self, doc):
return Counter(self._keyword_processor.extract_keywords(str(doc)))
def get_feats(self, doc):
return Counter(self._get_terms_from_doc(str(doc)))
"""
class FeatsFromTopicModel(FeatsFromSpacyDoc, FeatsFromTopicModelBase):
def __init__(self,
topic_model,
use_lemmas=False,
entity_types_to_censor=set(),
tag_types_to_censor=set(),
strip_final_period=False,
**kwargs):
'''
Parameters
----------
topic_model : dict
{topicmodelname: [term1, term2, ....], ...}
Other parameters from FeatsFromSpacyDoc.__init__
'''
check_topic_model_string_format(topic_model)
self._topic_model = topic_model
self._lexicon_df = self._get_lexicon_df_from_topic_model(topic_model)
super(FeatsFromTopicModel, self).__init__(use_lemmas,
entity_types_to_censor,
tag_types_to_censor,
strip_final_period)
def _get_terms_from_doc(self, doc):
return Counter(t for t in split(r"(\W)", doc.lower()) if t.strip())
def has_metadata_term_list(self):
return True
def get_top_model_term_lists(self):
return self._topic_model
"""
|
import os
import tempfile
from . import helpers, pprint
def parser(subparsers, repo):
"""Adds the diff parser to the given subparsers object."""
desc = 'show changes to files'
diff_parser = subparsers.add_parser(
'diff', help=desc, description=(
desc.capitalize() + '. ' +
'By default all tracked modified files are diffed. To customize the '
        'set of files to diff use the only, exclude, and include flags'), aliases=['df'])
helpers.oei_flags(diff_parser, repo)
diff_parser.set_defaults(func=main)
def main(args, repo):
files = helpers.oei_fs(args, repo)
if not files:
pprint.warn('No files to diff')
success = True
curr_b = repo.current_branch
with tempfile.NamedTemporaryFile(mode='w', delete=False) as tf:
total_additions = 0
total_deletions = 0
patches = []
for fp in files:
try:
patch = curr_b.diff_file(fp)
except KeyError:
pprint.err('Can\'t diff non-existent file {0}'.format(fp))
success = False
continue
if patch.delta.is_binary:
pprint.warn('Not showing diffs for binary file {0}'.format(fp))
continue
additions = patch.line_stats[1]
deletions = patch.line_stats[2]
total_additions += additions
total_deletions += deletions
if (not additions) and (not deletions):
pprint.warn('No diffs to output for {0}'.format(fp))
continue
patches.append(patch)
if patches:
pprint.diff_totals(total_additions, total_deletions, stream=tf.write)
for patch in patches:
pprint.diff(patch, stream=tf.write)
if os.path.getsize(tf.name) > 0:
helpers.page(tf.name, repo)
os.remove(tf.name)
return success
|
import os
import sys
import tempfile
from glob import glob
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
"from smart_open import smart_open\n",
def _notebook_run(path):
"""Execute a notebook via nbconvert and collect output.
:returns (parsed nb object, execution errors)
"""
kernel_name = 'python%d' % sys.version_info[0]
this_file_directory = os.path.dirname(__file__)
errors = []
with tempfile.NamedTemporaryFile(suffix=".ipynb", mode='wt') as fout:
with smart_open(path, 'rb') as f:
nb = nbformat.read(f, as_version=4)
nb.metadata.get('kernelspec', {})['name'] = kernel_name
ep = ExecutePreprocessor(kernel_name=kernel_name, timeout=10)
try:
ep.preprocess(nb, {'metadata': {'path': this_file_directory}})
except CellExecutionError as e:
if "SKIP" in e.traceback:
print(str(e.traceback).split("\n")[-2])
else:
raise e
except RuntimeError as e:
print(e)
finally:
nbformat.write(nb, fout)
return nb, errors
def test_notebooks():
for notebook in glob("*.ipynb"):
if " " in notebook:
continue
print("Testing {}".format(notebook))
nb, errors = _notebook_run(notebook)
assert errors == []
|
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.http import HomeAssistantView
from homeassistant.config import async_check_ha_config_file
from homeassistant.const import CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC
from homeassistant.helpers import config_validation as cv
from homeassistant.util import location
async def async_setup(hass):
"""Set up the Hassbian config."""
hass.http.register_view(CheckConfigView)
websocket_api.async_register_command(hass, websocket_update_config)
websocket_api.async_register_command(hass, websocket_detect_config)
return True
class CheckConfigView(HomeAssistantView):
"""Hassbian packages endpoint."""
url = "/api/config/core/check_config"
name = "api:config:core:check_config"
async def post(self, request):
"""Validate configuration and return results."""
errors = await async_check_ha_config_file(request.app["hass"])
state = "invalid" if errors else "valid"
return self.json({"result": state, "errors": errors})
@websocket_api.require_admin
@websocket_api.async_response
@websocket_api.websocket_command(
{
"type": "config/core/update",
vol.Optional("latitude"): cv.latitude,
vol.Optional("longitude"): cv.longitude,
vol.Optional("elevation"): int,
vol.Optional("unit_system"): cv.unit_system,
vol.Optional("location_name"): str,
vol.Optional("time_zone"): cv.time_zone,
vol.Optional("external_url"): vol.Any(cv.url, None),
vol.Optional("internal_url"): vol.Any(cv.url, None),
}
)
async def websocket_update_config(hass, connection, msg):
"""Handle update core config command."""
data = dict(msg)
data.pop("id")
data.pop("type")
try:
await hass.config.async_update(**data)
connection.send_result(msg["id"])
except ValueError as err:
connection.send_error(msg["id"], "invalid_info", str(err))
@websocket_api.require_admin
@websocket_api.async_response
@websocket_api.websocket_command({"type": "config/core/detect"})
async def websocket_detect_config(hass, connection, msg):
"""Detect core config."""
session = hass.helpers.aiohttp_client.async_get_clientsession()
location_info = await location.async_detect_location_info(session)
info = {}
if location_info is None:
connection.send_result(msg["id"], info)
return
if location_info.use_metric:
info["unit_system"] = CONF_UNIT_SYSTEM_METRIC
else:
info["unit_system"] = CONF_UNIT_SYSTEM_IMPERIAL
if location_info.latitude:
info["latitude"] = location_info.latitude
if location_info.longitude:
info["longitude"] = location_info.longitude
if location_info.time_zone:
info["time_zone"] = location_info.time_zone
connection.send_result(msg["id"], info)
|
import os
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import patch
from diamond.collector import Collector
from userscripts import UserScriptsCollector
##########################################################################
def run_only_if_kitchen_is_available(func):
import subprocess
pred = lambda: subprocess is not None
return run_only(func, pred)
class TestUserScriptsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UserScriptsCollector', {
'interval': 10,
'scripts_path': os.path.dirname(__file__) + '/fixtures/',
})
self.collector = UserScriptsCollector(config, None)
def test_import(self):
self.assertTrue(UserScriptsCollector)
@run_only_if_kitchen_is_available
@patch.object(Collector, 'publish')
def test_should_work_with_example(self, publish_mock):
self.collector.collect()
metrics = {
'example.1': 42,
'example.2': 24,
'example.3': 12.1212,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics)
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_kitchen_is_available
@patch.object(Collector, 'publish')
def test_should_skip_over_unrunnable_files(self, publish_mock):
self.collector.collect()
# Just make sure publish got called >0 times, if this test fails it'll
# be due to raising an exception. Meh.
assert publish_mock.call_args_list
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from homeassistant.components.met.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_LOADED, ENTRY_STATE_NOT_LOADED
from . import init_integration
async def test_unload_entry(hass):
"""Test successful unload of entry."""
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
|
from collections import defaultdict
from lxml.etree import HTMLParser
IGNORE = {"body", "html"}
class MarkupExtractor:
def __init__(self):
self.found_tags = set()
self.found_attributes = defaultdict(set)
def start(self, tag, attrs):
if tag in IGNORE:
return
self.found_tags.add(tag)
self.found_attributes[tag].update(attrs.keys())
def extract_bleach(text):
"""Exctract tags from text in a form suitable for bleach."""
extractor = MarkupExtractor()
parser = HTMLParser(collect_ids=False, target=extractor)
parser.feed(text)
return {"tags": extractor.found_tags, "attributes": extractor.found_attributes}
|
import logging
import unittest
import os
import gensim.downloader as api
import shutil
import numpy as np
@unittest.skipIf(
os.environ.get("SKIP_NETWORK_TESTS", False) == "1",
"Skip network-related tests (probably SSL problems on this CI/OS)"
)
class TestApi(unittest.TestCase):
def test_base_dir_creation(self):
if os.path.isdir(api.BASE_DIR):
shutil.rmtree(api.BASE_DIR)
api._create_base_dir()
self.assertTrue(os.path.isdir(api.BASE_DIR))
os.rmdir(api.BASE_DIR)
def test_load_dataset(self):
dataset_path = os.path.join(api.BASE_DIR, "__testing_matrix-synopsis", "__testing_matrix-synopsis.gz")
if os.path.isdir(api.BASE_DIR):
shutil.rmtree(api.BASE_DIR)
self.assertEqual(api.load("__testing_matrix-synopsis", return_path=True), dataset_path)
shutil.rmtree(api.BASE_DIR)
self.assertEqual(len(list(api.load("__testing_matrix-synopsis"))), 1)
shutil.rmtree(api.BASE_DIR)
def test_load_model(self):
if os.path.isdir(api.BASE_DIR):
shutil.rmtree(api.BASE_DIR)
vector_dead = np.array([
0.17403787, -0.10167074, -0.00950371, -0.10367849, -0.14034484,
-0.08751217, 0.10030612, 0.07677923, -0.32563496, 0.01929072,
0.20521086, -0.1617067, 0.00475458, 0.21956187, -0.08783089,
-0.05937332, 0.26528183, -0.06771874, -0.12369668, 0.12020949,
0.28731, 0.36735833, 0.28051138, -0.10407482, 0.2496888,
-0.19372769, -0.28719661, 0.11989869, -0.00393865, -0.2431484,
0.02725661, -0.20421691, 0.0328669, -0.26947051, -0.08068217,
-0.10245913, 0.1170633, 0.16583319, 0.1183883, -0.11217165,
0.1261425, -0.0319365, -0.15787181, 0.03753783, 0.14748634,
0.00414471, -0.02296237, 0.18336892, -0.23840059, 0.17924534
])
dataset_path = os.path.join(
api.BASE_DIR, "__testing_word2vec-matrix-synopsis", "__testing_word2vec-matrix-synopsis.gz"
)
model = api.load("__testing_word2vec-matrix-synopsis")
vector_dead_calc = model.wv["dead"]
self.assertTrue(np.allclose(vector_dead, vector_dead_calc))
shutil.rmtree(api.BASE_DIR)
self.assertEqual(api.load("__testing_word2vec-matrix-synopsis", return_path=True), dataset_path)
shutil.rmtree(api.BASE_DIR)
def test_multipart_load(self):
dataset_path = os.path.join(
api.BASE_DIR, '__testing_multipart-matrix-synopsis', '__testing_multipart-matrix-synopsis.gz'
)
if os.path.isdir(api.BASE_DIR):
shutil.rmtree(api.BASE_DIR)
self.assertEqual(dataset_path, api.load("__testing_multipart-matrix-synopsis", return_path=True))
shutil.rmtree(api.BASE_DIR)
dataset = api.load("__testing_multipart-matrix-synopsis")
self.assertEqual(len(list(dataset)), 1)
def test_info(self):
data = api.info("text8")
self.assertEqual(data["parts"], 1)
self.assertEqual(data["file_name"], 'text8.gz')
data = api.info()
self.assertEqual(sorted(data.keys()), sorted(['models', 'corpora']))
self.assertTrue(len(data['models']))
self.assertTrue(len(data['corpora']))
name_only_data = api.info(name_only=True)
self.assertEqual(len(name_only_data.keys()), 2)
self.assertTrue({'models', 'corpora'} == set(name_only_data))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
|
from kalliope.core.Cortex import Cortex
from kalliope.core.Utils import Utils
import logging
logging.basicConfig()
logger = logging.getLogger("kalliope")
class NeuronParameterLoader(object):
@classmethod
def get_parameters(cls, synapse_order, user_order):
"""
Class method to get all params coming from a string order. Returns a dict of key/value.
"""
params = dict()
if Utils.is_containing_bracket(synapse_order):
params = cls._associate_order_params_to_values(user_order, synapse_order)
logger.debug("[NeuronParameterLoader.get_parameters]Parameters for order: %s" % params)
# we place the dict of parameters load from order into a cache in Cortex so the user can save it later
Cortex.add_parameters_from_order(params)
return params
@classmethod
def _associate_order_params_to_values(cls, order, order_to_check):
"""
Associate the variables from the order to the incoming user order
:param order_to_check: the order to check incoming from the brain
:type order_to_check: str
:param order: the order from user
:type order: str
:return: the dict corresponding to the key / value of the params
"""
logger.debug("[NeuronParameterLoader._associate_order_params_to_values] user order: %s, "
"order from synapse: %s" % (order, order_to_check))
list_word_in_order = Utils.remove_spaces_in_brackets(order_to_check).split()
        # words said by the user; leading words that do not match the synapse order are trimmed in the loop below
truncate_list_word_said = order.split()
# make dict var:value
dict_var = dict()
for idx, ow in enumerate(list_word_in_order):
if not Utils.is_containing_bracket(ow):
while truncate_list_word_said and ow.lower() != truncate_list_word_said[0].lower():
truncate_list_word_said = truncate_list_word_said[1:]
else:
# remove bracket and grab the next value / stop value
var_name = ow.replace("{{", "").replace("}}", "")
stop_value = Utils.get_next_value_list(list_word_in_order[idx:])
if stop_value is None:
dict_var[var_name] = " ".join(truncate_list_word_said)
break
for word_said in truncate_list_word_said:
if word_said.lower() == stop_value.lower(): # Do not consider the case
break
if var_name in dict_var:
dict_var[var_name] += " " + word_said
truncate_list_word_said = truncate_list_word_said[1:]
else:
dict_var[var_name] = word_said
truncate_list_word_said = truncate_list_word_said[1:]
return dict_var
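# Illustrative usage sketch: extract a bracketed variable from a spoken order.
# The sample orders and the expected mapping below are assumptions for
# demonstration, not a verified test result.
if __name__ == "__main__":
    example_params = NeuronParameterLoader.get_parameters(
        synapse_order="this is the {{ sentence }}",
        user_order="this is the value")
    # Expected roughly: {'sentence': 'value'}
    print(example_params)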
|
from flexx import flx
# Raw data obtained from
# http://cdn.knmi.nl/knmi/map/page/klimatologie/gegevens/maandgegevens/mndgeg_290_tg.txt
raw_data = """
Deze gegevens mogen vrij worden gebruikt mits de volgende bronvermelding wordt gegeven:
KONINKLIJK NEDERLANDS METEOROLOGISCH INSTITUUT (KNMI)
These data can be used freely provided that the following source is acknowledged:
ROYAL NETHERLANDS METEOROLOGICAL INSTITUTE
MAAND- en JAARGEMIDDELDE TEMPERATUREN (0.1 graden Celsius)
MONTHLY AND YEARLY MEAN TEMPERATURES (0.1 degrees Celsius)
STN = stationsnummer / WMO-number = 06... (235=De Kooy,240=Schiphol,260=De Bilt,270=Leeuwarden,280=Eelde,
290=Twenthe,310=Vlissingen,344=Rotterdam,370=Eindhoven,380=Maastricht)
STN,YYYY, JAN, FEB, MAR, APR, MAY, JUN, JUL, AUG, SEP, OCT, NOV, DEC, YEAR
290,1951, 35, 35, 34, 74, 120, 154, 169, 171, 155, 85, 82, 44, 97
290,1952, 19, 20, 43, 107, 129, 149, 172, 173, 111, 80, 23, 10, 86
290,1953, 11, 15, 47, 91, 133, 162, 170, 167, 140, 115, 71, 48, 98
290,1954, -11, -10, 57, 65, 128, 156, 142, 160, 138, 116, 65, 51, 88
290,1955, -1, -5, 16, 82, 100, 146, 179, 181, 146, 92, 58, 42, 86
290,1956, 17, -71, 50, 50, 127, 130, 166, 140, 146, 94, 46, 49, 79
290,1957, 32, 49, 80, 83, 102, 166, 176, 156, 124, 107, 60, 24, 97
290,1958, 16, 32, 15, 62, 129, 148, 168, 177, 157, 109, 54, 44, 93
290,1959, 12, 7, 72, 103, 131, 164, 192, 180, 156, 111, 52, 39, 102
290,1960, 21, 25, 55, 88, 130, 161, 153, 155, 133, 107, 73, 26, 94
290,1961, 14, 61, 71, 104, 106, 157, 152, 158, 172, 114, 45, 8, 97
290,1962, 31, 21, 14, 80, 99, 135, 145, 152, 127, 104, 39, -14, 78
290,1963, -63, -38, 45, 91, 112, 158, 165, 153, 137, 91, 81, -15, 76
290,1964, -1, 28, 23, 91, 143, 156, 169, 161, 142, 78, 58, 20, 89
290,1965, 24, 11, 37, 76, 117, 150, 147, 152, 131, 98, 19, 38, 83
290,1966, -3, 37, 46, 87, 135, 171, 155, 158, 134, 111, 38, 36, 92
290,1967, 32, 48, 64, 70, 128, 146, 181, 163, 141, 116, 47, 25, 97
290,1968, 7, 13, 59, 95, 106, 155, 161, 168, 142, 111, 50, -7, 88
290,1969, 37, -7, 13, 77, 129, 145, 175, 168, 137, 115, 60, -25, 85
290,1970, -4, 4, 21, 56, 125, 170, 154, 165, 136, 99, 72, 18, 85
290,1971, 22, 32, 23, 78, 138, 138, 170, 171, 125, 97, 46, 50, 91
290,1972, -3, 34, 61, 71, 116, 137, 172, 151, 108, 82, 53, 32, 85
290,1973, 23, 23, 49, 54, 119, 160, 171, 176, 145, 83, 49, 23, 90
290,1974, 51, 42, 55, 85, 112, 143, 147, 164, 129, 64, 62, 66, 93
290,1975, 62, 27, 43, 69, 108, 147, 176, 195, 148, 77, 45, 23, 93
290,1976, 26, 19, 22, 71, 129, 173, 188, 170, 134, 107, 61, 7, 92
290,1977, 22, 45, 68, 60, 117, 143, 161, 158, 126, 114, 61, 42, 93
290,1978, 27, 2, 61, 67, 121, 145, 150, 147, 136, 110, 66, 13, 87
290,1979, -39, -22, 41, 73, 116, 150, 150, 151, 125, 96, 45, 46, 78
290,1980, -8, 38, 38, 74, 115, 145, 153, 166, 146, 87, 42, 29, 85
290,1981, 16, 9, 81, 87, 138, 147, 163, 164, 148, 85, 60, -10, 91
290,1982, 3, 23, 50, 71, 122, 161, 186, 169, 160, 112, 76, 31, 97
290,1983, 57, 0, 56, 89, 111, 159, 191, 174, 134, 94, 54, 27, 96
290,1984, 23, 14, 33, 71, 100, 129, 150, 168, 124, 109, 73, 35, 86
290,1985, -46, -14, 35, 81, 132, 131, 165, 150, 132, 97, 17, 52, 78
290,1986, 16, -46, 45, 68, 137, 160, 162, 151, 105, 109, 77, 41, 85
290,1987, -40, 17, 13, 103, 99, 137, 163, 155, 145, 104, 61, 38, 83
290,1988, 58, 42, 43, 81, 144, 145, 160, 166, 139, 104, 53, 55, 99
290,1989, 41, 48, 77, 65, 138, 159, 178, 171, 154, 122, 50, 44, 104
290,1990, 50, 73, 79, 84, 138, 151, 161, 182, 122, 119, 54, 35, 104
290,1991, 29, -9, 83, 81, 97, 126, 188, 173, 147, 94, 50, 32, 91
290,1992, 22, 47, 64, 83, 154, 170, 182, 180, 143, 73, 74, 33, 102
290,1993, 42, 12, 51, 112, 142, 152, 157, 146, 125, 86, 15, 44, 90
290,1994, 44, 6, 70, 81, 123, 151, 213, 174, 133, 86, 87, 48, 101
290,1995, 27, 60, 46, 89, 124, 145, 200, 187, 136, 124, 55, -16, 98
290,1996, -16, -10, 23, 89, 106, 151, 161, 175, 115, 99, 52, -6, 78
290,1997, -17, 58, 72, 70, 124, 157, 173, 201, 134, 89, 57, 41, 97
290,1998, 43, 57, 67, 90, 144, 155, 159, 160, 144, 90, 29, 35, 98
290,1999, 45, 26, 66, 97, 135, 149, 188, 172, 177, 101, 59, 41, 105
290,2000, 37, 52, 63, 102, 145, 160, 152, 169, 150, 111, 77, 46, 105
290,2001, 22, 37, 42, 79, 140, 147, 183, 183, 128, 140, 61, 21, 99
290,2002, 39, 65, 64, 89, 135, 167, 174, 186, 139, 86, 71, 16, 103
290,2003, 16, 7, 68, 93, 134, 179, 185, 193, 139, 66, 78, 33, 99
290,2004, 27, 40, 53, 103, 116, 152, 165, 188, 149, 110, 54, 27, 99
290,2005, 43, 14, 58, 102, 125, 162, 176, 157, 152, 129, 61, 33, 101
290,2006, 4, 18, 32, 85, 143, 168, 222, 159, 177, 136, 88, 62, 108
290,2007, 61, 54, 74, 125, 139, 174, 169, 168, 133, 93, 61, 31, 107
290,2008, 59, 47, 53, 83, 150, 164, 180, 173, 131, 97, 62, 18, 101
290,2009, 1, 26, 55, 125, 137, 153, 178, 182, 144, 93, 94, 17, 100
290,2010, -17, 9, 58, 93, 100, 164, 204, 164, 126, 95, 52, -27, 85
290,2011, 27, 36, 52, 124, 138, 162, 158, 168, 152, 106, 60, 55, 103
290,2012, 36, 1, 77, 82, 145, 145, 169, 184, 134, 96, 64, 41, 98
290,2013, 15, 9, 12, 82, 116, 154, 188, 179, 138, 118, 59, 55, 94
290,2014, 48, 64, 81, 118, 127, 157, 195, 157, 153, 132, 80, 41, 113
290,2015, 34, 26, 57, 87, 120, 153, 184, 185, 132, 94, 91, 92, 105
290,2016, 34, 39, 49, 82, 144, 171, 181, 175, 171, 89, 48, 41, 102
290,2017, 2, 44, 85, 79, 149, 179, 180, 172, 135, 126, 63, 42, 105
"""
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'total']
def parse_data(raw_data):
years, data = [], [[] for i in range(13)]
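    # Data rows look like '290,1951,  35,  35, ...': station number, year,
    # twelve monthly means and the yearly mean, in tenths of a degree Celsius.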
for line in raw_data.splitlines():
if line.startswith('290'):
parts = [int(i.strip()) for i in line.split(',')]
years.append(parts[1])
for i in range(13):
data[i].append(parts[i+2]/10.0)
return years, data
years, data = parse_data(raw_data)
class Twente(flx.Widget):
def init(self):
with flx.HFix():
flx.Widget(flex=1)
with flx.VBox(flex=0, minsize=200):
with flx.GroupWidget(title='Plot options'):
flx.Label(text='Month')
self.month = flx.ComboBox(options=months, selected_index=12, style='width: 100%')
self.smoothing_label = flx.Label(text='Smoothing')
self.smoothing = flx.Slider(max=20, step=2, text='{value} samples')
flx.Widget(flex=3)
with flx.VBox(flex=4):
self.plot = flx.PlotWidget(flex=1,
xdata=years, yrange=(-5, 20),
title='Average monthly temperature',
xlabel='year', ylabel=u'temperature (°C)')
flx.Widget(flex=1)
@flx.reaction
def _update_plot(self):
smoothing = self.smoothing.value
yy1 = data[self.month.selected_index]
yy2 = []
sm2 = int(smoothing / 2)
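        # Centered moving average: each point becomes the mean of up to
        # `smoothing + 1` neighbouring samples, clipped at the series edges.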
for i in range(len(yy1)):
val = 0
n = 0
for j in range(max(0, i-sm2), min(len(yy1), i+sm2+1)):
val += yy1[j]
n += 1
if n == 0:
yy2.append(yy1[i])
else:
yy2.append(val / n)
self.plot.set_data(self.plot.xdata, yy2)
if __name__ == '__main__':
    a = flx.App(Twente, title='Temperature 1951 - 2014', style='background:#eaeaea;')
m = a.launch('app', size=(900, 400))
flx.run()
|
from datetime import timedelta
import logging
from math import cos, pi, radians, sin
import random
from typing import Optional
from homeassistant.components.geo_location import GeolocationEvent
from homeassistant.const import LENGTH_KILOMETERS
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
AVG_KM_PER_DEGREE = 111.0
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=1)
MAX_RADIUS_IN_KM = 50
NUMBER_OF_DEMO_DEVICES = 5
EVENT_NAMES = [
"Bushfire",
"Hazard Reduction",
"Grass Fire",
"Burn off",
"Structure Fire",
"Fire Alarm",
"Thunderstorm",
"Tornado",
"Cyclone",
"Waterspout",
"Dust Storm",
"Blizzard",
"Ice Storm",
"Earthquake",
"Tsunami",
]
SOURCE = "demo"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Demo geolocations."""
DemoManager(hass, add_entities)
class DemoManager:
"""Device manager for demo geolocation events."""
def __init__(self, hass, add_entities):
"""Initialise the demo geolocation event manager."""
self._hass = hass
self._add_entities = add_entities
self._managed_devices = []
self._update(count=NUMBER_OF_DEMO_DEVICES)
self._init_regular_updates()
def _generate_random_event(self):
"""Generate a random event in vicinity of this HA instance."""
home_latitude = self._hass.config.latitude
home_longitude = self._hass.config.longitude
# Approx. 111km per degree (north-south).
radius_in_degrees = random.random() * MAX_RADIUS_IN_KM / AVG_KM_PER_DEGREE
radius_in_km = radius_in_degrees * AVG_KM_PER_DEGREE
angle = random.random() * 2 * pi
# Compute coordinates based on radius and angle. Adjust longitude value
# based on HA's latitude.
latitude = home_latitude + radius_in_degrees * sin(angle)
longitude = home_longitude + radius_in_degrees * cos(angle) / cos(
radians(home_latitude)
)
event_name = random.choice(EVENT_NAMES)
return DemoGeolocationEvent(
event_name, radius_in_km, latitude, longitude, LENGTH_KILOMETERS
)
def _init_regular_updates(self):
"""Schedule regular updates based on configured time interval."""
track_time_interval(
self._hass, lambda now: self._update(), DEFAULT_UPDATE_INTERVAL
)
def _update(self, count=1):
"""Remove events and add new random events."""
# Remove devices.
for _ in range(1, count + 1):
if self._managed_devices:
device = random.choice(self._managed_devices)
if device:
_LOGGER.debug("Removing %s", device)
self._managed_devices.remove(device)
self._hass.add_job(device.async_remove())
# Generate new devices from events.
new_devices = []
for _ in range(1, count + 1):
new_device = self._generate_random_event()
_LOGGER.debug("Adding %s", new_device)
new_devices.append(new_device)
self._managed_devices.append(new_device)
self._add_entities(new_devices)
class DemoGeolocationEvent(GeolocationEvent):
"""This represents a demo geolocation event."""
def __init__(self, name, distance, latitude, longitude, unit_of_measurement):
"""Initialize entity with data provided."""
self._name = name
self._distance = distance
self._latitude = latitude
self._longitude = longitude
self._unit_of_measurement = unit_of_measurement
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> Optional[str]:
"""Return the name of the event."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo geolocation event."""
return False
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import threading
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine, linux_virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.openstack import os_disk
from perfkitbenchmarker.providers.openstack import os_network
from perfkitbenchmarker.providers.openstack import utils as os_utils
from six.moves import range
NONE = 'None'
VALIDATION_ERROR_MESSAGE = '{0} {1} could not be found.'
FLAGS = flags.FLAGS
class OpenStackVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an OpenStack Virtual Machine"""
CLOUD = providers.OPENSTACK
DEFAULT_IMAGE = None
_lock = threading.Lock() # _lock guards the following:
command_works = False
validated_resources_set = set()
uploaded_keypair_set = set()
deleted_keypair_set = set()
created_server_group_dict = {}
deleted_server_group_set = set()
floating_network_id = None
def __init__(self, vm_spec):
"""Initialize an OpenStack virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(OpenStackVirtualMachine, self).__init__(vm_spec)
self.key_name = 'perfkit_key_%s' % FLAGS.run_uri
self.user_name = FLAGS.openstack_image_username
self.image = self.image or self.DEFAULT_IMAGE
# FIXME(meteorfox): Remove --openstack_public_network and
    # --openstack_private_network once deprecation time has expired
self.network_name = (FLAGS.openstack_network or
FLAGS.openstack_private_network)
self.floating_ip_pool_name = (FLAGS.openstack_floating_ip_pool or
FLAGS.openstack_public_network)
self.id = None
self.boot_volume_id = None
self.server_group_id = None
self.floating_ip = None
self.firewall = None
self.public_network = None
self.subnet_id = None
self.post_provisioning_script = FLAGS.openstack_post_provisioning_script
@property
def group_id(self):
"""Returns the security group ID of this VM."""
return 'perfkit_sc_group'
def _CreateDependencies(self):
"""Validate and Create dependencies prior creating the VM."""
self._CheckPrerequisites()
self.firewall = os_network.OpenStackFirewall.GetFirewall()
self.public_network = os_network.OpenStackFloatingIPPool(
OpenStackVirtualMachine.floating_network_id)
self._UploadSSHPublicKey()
source_range = self._GetInternalNetworkCIDR()
self.firewall.AllowPort(self, os_network.MIN_PORT, os_network.MAX_PORT,
source_range)
self.firewall.AllowICMP(self) # Allowing ICMP traffic (i.e. ping)
self.AllowRemoteAccessPorts()
def _Create(self):
"""Creates an OpenStack VM instance and waits until it is ACTIVE."""
if FLAGS.openstack_boot_from_volume:
vol_name = '%s_volume' % self.name
disk_resp = os_disk.CreateBootVolume(self, vol_name, self.image)
self.boot_volume_id = disk_resp['id']
os_disk.WaitForVolumeCreation(self, self.boot_volume_id)
self._CreateInstance()
@vm_util.Retry(max_retries=4, poll_interval=2)
def _PostCreate(self):
self._SetIPAddresses()
def _Delete(self):
if self.id is None:
return
self._DeleteInstance()
if self.floating_ip:
self.public_network.release(self, self.floating_ip)
if self.server_group_id:
self._DeleteServerGroup()
if self.boot_volume_id:
os_disk.DeleteVolume(self, self.boot_volume_id)
self.boot_volume_id = None
def _DeleteDependencies(self):
"""Delete dependencies that were needed for the VM after the VM has been
deleted."""
self._DeleteSSHPublicKey()
def _Exists(self):
if self.id is None:
return False
show_cmd = os_utils.OpenStackCLICommand(self, 'server', 'show', self.id)
stdout, _, _ = show_cmd.Issue(suppress_warning=True)
try:
resp = json.loads(stdout)
return resp
except ValueError:
return False
def _CheckCanaryCommand(self):
if OpenStackVirtualMachine.command_works: # fast path
return
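    # Double-checked locking: re-test the flag while holding the lock so only
    # one thread actually runs the canary command.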
with self._lock:
if OpenStackVirtualMachine.command_works:
return
logging.info('Testing OpenStack CLI command is installed and working')
cmd = os_utils.OpenStackCLICommand(self, 'image', 'list')
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Config.InvalidValue(
'OpenStack CLI test command failed. Please make sure the OpenStack '
'CLI client is installed and properly configured')
OpenStackVirtualMachine.command_works = True
def _CheckPrerequisites(self):
"""Checks prerequisites are met otherwise aborts execution."""
self._CheckCanaryCommand()
if self.zone in self.validated_resources_set:
return # No need to check again
with self._lock:
if self.zone in self.validated_resources_set:
return
logging.info('Validating prerequisites.')
self._CheckImage()
self._CheckFlavor()
self._CheckNetworks()
self.validated_resources_set.add(self.zone)
logging.info('Prerequisites validated.')
def _CheckImage(self):
"""Tries to get image, if found continues execution otherwise aborts."""
cmd = os_utils.OpenStackCLICommand(self, 'image', 'show', self.image)
err_msg = VALIDATION_ERROR_MESSAGE.format('Image', self.image)
self._IssueCommandCheck(cmd, err_msg)
def _CheckFlavor(self):
"""Tries to get flavor, if found continues execution otherwise aborts."""
cmd = os_utils.OpenStackCLICommand(self, 'flavor', 'show',
self.machine_type)
err_msg = VALIDATION_ERROR_MESSAGE.format('Machine type', self.machine_type)
self._IssueCommandCheck(cmd, err_msg)
def _CheckNetworks(self):
"""Tries to get network, if found continues execution otherwise aborts."""
if not self.network_name:
if self.floating_ip_pool_name:
msg = ('Cannot associate floating-ip address from pool %s without '
'an internally routable network. Make sure '
'--openstack_network flag is set.')
else:
msg = ('Cannot build instance without a network. Make sure to set '
'either just --openstack_network or both '
'--openstack_network and --openstack_floating_ip_pool flags.')
raise errors.Error(msg)
self._CheckNetworkExists(self.network_name)
if self.floating_ip_pool_name:
floating_network_dict = self._CheckFloatingIPNetworkExists(
self.floating_ip_pool_name)
OpenStackVirtualMachine.floating_network_id = floating_network_dict['id']
def _CheckFloatingIPNetworkExists(self, floating_network_name_or_id):
network = self._CheckNetworkExists(floating_network_name_or_id)
if network['router:external'] not in ('External', True):
raise errors.Config.InvalidValue('Network "%s" is not External'
% self.floating_ip_pool_name)
return network
def _CheckNetworkExists(self, network_name_or_id):
cmd = os_utils.OpenStackCLICommand(self, 'network', 'show',
network_name_or_id)
err_msg = VALIDATION_ERROR_MESSAGE.format('Network', network_name_or_id)
stdout = self._IssueCommandCheck(cmd, err_msg)
network = json.loads(stdout)
return network
def _IssueCommandCheck(self, cmd, err_msg=None):
"""Issues command and, if stderr is non-empty, raises an error message
Args:
cmd: The command to be issued.
err_msg: string. Error message if command fails.
"""
if err_msg is None:
err_msg = ""
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Config.InvalidValue(err_msg)
return stdout
def _UploadSSHPublicKey(self):
"""Uploads SSH public key to the VM's region."""
with self._lock:
if self.zone in self.uploaded_keypair_set:
return
cmd = os_utils.OpenStackCLICommand(self, 'keypair', 'create',
self.key_name)
cmd.flags['public-key'] = self.ssh_public_key
cmd.IssueRetryable()
self.uploaded_keypair_set.add(self.zone)
if self.zone in self.deleted_keypair_set:
self.deleted_keypair_set.remove(self.zone)
def _DeleteSSHPublicKey(self):
"""Deletes SSH public key used for the VM."""
with self._lock:
if self.zone in self.deleted_keypair_set:
return
cmd = os_utils.OpenStackCLICommand(self, 'keypair', 'delete',
self.key_name)
del cmd.flags['format'] # keypair delete does not support json output
cmd.Issue()
self.deleted_keypair_set.add(self.zone)
if self.zone in self.uploaded_keypair_set:
self.uploaded_keypair_set.remove(self.zone)
def _CreateInstance(self):
"""Execute command for creating an OpenStack VM instance."""
create_cmd = self._GetCreateCommand()
stdout, stderr, _ = create_cmd.Issue()
if stderr:
raise errors.Error(stderr)
resp = json.loads(stdout)
self.id = resp['id']
def _GetCreateCommand(self):
cmd = os_utils.OpenStackCLICommand(self, 'server', 'create', self.name)
cmd.flags['flavor'] = self.machine_type
cmd.flags['security-group'] = self.group_id
cmd.flags['key-name'] = self.key_name
cmd.flags['availability-zone'] = self.zone
cmd.flags['nic'] = 'net-id=%s' % self.network_name
cmd.flags['wait'] = True
if FLAGS.openstack_config_drive:
cmd.flags['config-drive'] = 'True'
hints = self._GetSchedulerHints()
if hints:
cmd.flags['hint'] = hints
if FLAGS.openstack_boot_from_volume:
cmd.flags['volume'] = self.boot_volume_id
else:
cmd.flags['image'] = self.image
if self.post_provisioning_script:
cmd.flags['user-data'] = self.post_provisioning_script
return cmd
def _GetSchedulerHints(self):
if FLAGS.openstack_scheduler_policy == NONE:
return None
with self._lock:
group_name = 'perfkit_server_group_%s' % FLAGS.run_uri
hint_temp = 'group=%s'
if self.zone in self.created_server_group_dict:
hint = hint_temp % self.created_server_group_dict[self.zone]['id']
return hint
server_group = self._CreateServerGroup(group_name)
self.server_group_id = server_group['id']
self.created_server_group_dict[self.zone] = server_group
if self.zone in self.deleted_server_group_set:
self.deleted_server_group_set.remove(self.zone)
return hint_temp % server_group['id']
def _CreateServerGroup(self, group_name):
cmd = os_utils.OpenStackCLICommand(self, 'server group', 'create',
group_name)
cmd.flags['policy'] = FLAGS.openstack_scheduler_policy
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Error(stderr)
server_group = json.loads(stdout)
return server_group
def _DeleteServerGroup(self):
with self._lock:
if self.zone in self.deleted_server_group_set:
return
cmd = os_utils.OpenStackCLICommand(self, 'server group', 'delete',
self.server_group_id)
del cmd.flags['format'] # delete does not support json output
cmd.Issue()
self.deleted_server_group_set.add(self.zone)
if self.zone in self.created_server_group_dict:
del self.created_server_group_dict[self.zone]
def _DeleteInstance(self):
cmd = os_utils.OpenStackCLICommand(self, 'server', 'delete', self.id)
del cmd.flags['format'] # delete does not support json output
cmd.flags['wait'] = True
cmd.Issue(suppress_warning=True)
def _SetIPAddresses(self):
show_cmd = os_utils.OpenStackCLICommand(self, 'server', 'show', self.name)
stdout, _, _ = show_cmd.Issue()
server_dict = json.loads(stdout)
self.ip_address = self._GetNetworkIPAddress(server_dict, self.network_name)
self.internal_ip = self.ip_address
if self.floating_ip_pool_name:
self.floating_ip = self._AllocateFloatingIP()
self.internal_ip = self.ip_address
self.ip_address = self.floating_ip.floating_ip_address
def _GetNetworkIPAddress(self, server_dict, network_name):
addresses = server_dict['addresses'].split(',')
for address in addresses:
if network_name in address:
_, ip = address.split('=')
return ip
def _GetInternalNetworkCIDR(self):
"""Returns IP addresses source range of internal network."""
net_cmd = os_utils.OpenStackCLICommand(self, 'network', 'show',
self.network_name)
net_stdout, _, _ = net_cmd.Issue()
network = json.loads(net_stdout)
if isinstance(network['subnets'], list):
self.subnet_id = network['subnets'][0]
else:
self.subnet_id = network['subnets']
subnet_cmd = os_utils.OpenStackCLICommand(self, 'subnet', 'show',
self.subnet_id)
stdout, _, _ = subnet_cmd.Issue()
subnet_dict = json.loads(stdout)
return subnet_dict['cidr']
def _AllocateFloatingIP(self):
floating_ip = self.public_network.associate(self)
logging.info('floating-ip associated: {}'.format(
floating_ip.floating_ip_address))
return floating_ip
def CreateScratchDisk(self, disk_spec):
disks_names = ('%s_data_%d_%d'
% (self.name, len(self.scratch_disks), i)
for i in range(disk_spec.num_striped_disks))
disks = [os_disk.OpenStackDisk(disk_spec, name, self.zone)
for name in disks_names]
self._CreateScratchDiskFromDisks(disk_spec, disks)
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the VM.
Returns:
dict mapping string property key to value.
"""
result = super(OpenStackVirtualMachine, self).GetResourceMetadata()
if self.post_provisioning_script:
result['post_provisioning_script'] = self.post_provisioning_script
return result
class Rhel7BasedOpenStackVirtualMachine(OpenStackVirtualMachine,
linux_virtual_machine.Rhel7Mixin):
DEFAULT_IMAGE = 'rhel-7.2'
class CentOs7BasedOpenStackVirtualMachine(OpenStackVirtualMachine,
linux_virtual_machine.CentOs7Mixin):
DEFAULT_IMAGE = 'centos7'
class ClearBasedOpenStackVirtualMachine(OpenStackVirtualMachine,
linux_virtual_machine.ClearMixin):
DEFAULT_IMAGE = 'upstream-clear'
|
import types
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
# Shamelessly stolen from https://github.com/kennethreitz/requests/blob/master/requests/structures.py
class CaseInsensitiveDict(MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.abc.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
def partition_dict(predicate, dictionary):
true_dict = {}
false_dict = {}
for key, value in dictionary.items():
this_dict = true_dict if predicate(key, value) else false_dict
this_dict[key] = value
return true_dict, false_dict
def compose(*functions):
def composed(incoming):
res = incoming
for function in reversed(functions):
if function:
res = function(res)
return res
return composed
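# Note on the helpers above: compose(f, g, h)(x) applies the functions
# right-to-left, i.e. it returns f(g(h(x))), skipping falsy entries, while
# partition_dict splits a mapping into (matching, non-matching) dicts
# according to predicate(key, value).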
def read_body(request):
if hasattr(request.body, "read"):
return request.body.read()
return request.body
def auto_decorate(decorator, predicate=lambda name, value: isinstance(value, types.FunctionType)):
def maybe_decorate(attribute, value):
if predicate(attribute, value):
value = decorator(value)
return value
class DecorateAll(type):
def __setattr__(cls, attribute, value):
return super().__setattr__(attribute, maybe_decorate(attribute, value))
def __new__(cls, name, bases, attributes_dict):
new_attributes_dict = {
attribute: maybe_decorate(attribute, value) for attribute, value in attributes_dict.items()
}
return super().__new__(cls, name, bases, new_attributes_dict)
return DecorateAll
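# Illustrative usage sketch: auto_decorate builds a metaclass that wraps every
# plain function attribute of a class with the given decorator. The tracing
# decorator and class below are assumptions for demonstration only.
if __name__ == "__main__":
    def tracing(func):
        def wrapper(*args, **kwargs):
            print("calling", func.__name__)
            return func(*args, **kwargs)
        return wrapper
    class Traced(metaclass=auto_decorate(tracing)):
        def greet(self):
            return "hello"
    print(Traced().greet())  # prints "calling greet", then "hello"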
|
from datetime import timedelta
import logging
import pybbox
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_MONITORED_VARIABLES,
CONF_NAME,
DATA_RATE_MEGABITS_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Bouygues Telecom"
DEFAULT_NAME = "Bbox"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# Sensor types are defined like so: Name, unit, icon
SENSOR_TYPES = {
"down_max_bandwidth": [
"Maximum Download Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:download",
],
"up_max_bandwidth": [
"Maximum Upload Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:upload",
],
"current_down_bandwidth": [
"Currently Used Download Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:download",
],
"current_up_bandwidth": [
"Currently Used Upload Bandwidth",
DATA_RATE_MEGABITS_PER_SECOND,
"mdi:upload",
],
"uptime": ["Uptime", None, "mdi:clock"],
"number_of_reboots": ["Number of reboot", None, "mdi:restart"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_VARIABLES): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Bbox sensor."""
# Create a data fetcher to support all of the configured sensors. Then make
# the first call to init the data.
try:
bbox_data = BboxData()
bbox_data.update()
except requests.exceptions.HTTPError as error:
_LOGGER.error(error)
return False
name = config[CONF_NAME]
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
if variable == "uptime":
sensors.append(BboxUptimeSensor(bbox_data, variable, name))
else:
sensors.append(BboxSensor(bbox_data, variable, name))
add_entities(sensors, True)
class BboxUptimeSensor(Entity):
"""Bbox uptime sensor."""
def __init__(self, bbox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.bbox_data = bbox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
def update(self):
"""Get the latest data from Bbox and update the state."""
self.bbox_data.update()
uptime = utcnow() - timedelta(
seconds=self.bbox_data.router_infos["device"]["uptime"]
)
self._state = uptime.replace(microsecond=0).isoformat()
class BboxSensor(Entity):
"""Implementation of a Bbox sensor."""
def __init__(self, bbox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.bbox_data = bbox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest data from Bbox and update the state."""
self.bbox_data.update()
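        # Bandwidth figures from the box appear to be reported in kbit/s; divide
        # by 1000 to match the Mbit/s unit declared in SENSOR_TYPES.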
if self.type == "down_max_bandwidth":
self._state = round(self.bbox_data.data["rx"]["maxBandwidth"] / 1000, 2)
elif self.type == "up_max_bandwidth":
self._state = round(self.bbox_data.data["tx"]["maxBandwidth"] / 1000, 2)
elif self.type == "current_down_bandwidth":
self._state = round(self.bbox_data.data["rx"]["bandwidth"] / 1000, 2)
elif self.type == "current_up_bandwidth":
self._state = round(self.bbox_data.data["tx"]["bandwidth"] / 1000, 2)
elif self.type == "number_of_reboots":
self._state = self.bbox_data.router_infos["device"]["numberofboots"]
class BboxData:
"""Get data from the Bbox."""
def __init__(self):
"""Initialize the data object."""
self.data = None
self.router_infos = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the Bbox."""
try:
box = pybbox.Bbox()
self.data = box.get_ip_stats()
self.router_infos = box.get_bbox_info()
except requests.exceptions.HTTPError as error:
_LOGGER.error(error)
self.data = None
self.router_infos = None
return False
|
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.chainer_experimental.datasets.sliceable.sliceable_dataset \
import _is_iterable
class TransformDataset(GetterDataset):
"""A sliceable version of :class:`chainer.datasets.TransformDataset`.
Note that it requires :obj:`keys` to determine the names of returned
values.
Here is an example.
    >>> def transform(in_data):
>>> img, bbox, label = in_data
>>> ...
>>> return new_img, new_label
>>>
    >>> dataset = TransformDataset(dataset, ('img', 'label'), transform)
>>> dataset.keys # ('img', 'label')
Args:
dataset: The underlying dataset.
This dataset should have :meth:`__len__` and :meth:`__getitem__`.
keys (string or tuple of strings): The name(s) of data
that the transform function returns.
If this parameter is omitted, :meth:`__init__` fetches a sample
from the underlying dataset to determine the number of data.
transform (callable): A function that is called to transform values
returned by the underlying dataset's :meth:`__getitem__`.
"""
def __init__(self, dataset, keys, transform=None):
if transform is None:
keys, transform = None, keys
super(TransformDataset, self).__init__()
self._dataset = dataset
self._transform = transform
if keys is None:
sample = self._get(0)
if isinstance(sample, tuple):
keys = (None,) * len(sample)
else:
keys = None
self.add_getter(keys, self._get)
if not _is_iterable(keys):
self.keys = 0
def __len__(self):
return len(self._dataset)
def _get(self, index):
return self._transform(self._dataset[index])
|
from contextlib import suppress
import numpy as np
import pytest
from xarray import Variable
from xarray.coding import strings
from xarray.core import indexing
from . import (
IndexerMaker,
assert_array_equal,
assert_identical,
raises_regex,
requires_dask,
)
with suppress(ImportError):
import dask.array as da
def test_vlen_dtype():
dtype = strings.create_vlen_dtype(str)
assert dtype.metadata["element_type"] == str
assert strings.is_unicode_dtype(dtype)
assert not strings.is_bytes_dtype(dtype)
assert strings.check_vlen_dtype(dtype) is str
dtype = strings.create_vlen_dtype(bytes)
assert dtype.metadata["element_type"] == bytes
assert not strings.is_unicode_dtype(dtype)
assert strings.is_bytes_dtype(dtype)
assert strings.check_vlen_dtype(dtype) is bytes
assert strings.check_vlen_dtype(np.dtype(object)) is None
def test_EncodedStringCoder_decode():
coder = strings.EncodedStringCoder()
raw_data = np.array([b"abc", "ß∂µ∆".encode()])
raw = Variable(("x",), raw_data, {"_Encoding": "utf-8"})
actual = coder.decode(raw)
expected = Variable(("x",), np.array(["abc", "ß∂µ∆"], dtype=object))
assert_identical(actual, expected)
assert_identical(coder.decode(actual[0]), expected[0])
@requires_dask
def test_EncodedStringCoder_decode_dask():
coder = strings.EncodedStringCoder()
raw_data = np.array([b"abc", "ß∂µ∆".encode()])
raw = Variable(("x",), raw_data, {"_Encoding": "utf-8"}).chunk()
actual = coder.decode(raw)
assert isinstance(actual.data, da.Array)
expected = Variable(("x",), np.array(["abc", "ß∂µ∆"], dtype=object))
assert_identical(actual, expected)
actual_indexed = coder.decode(actual[0])
assert isinstance(actual_indexed.data, da.Array)
assert_identical(actual_indexed, expected[0])
def test_EncodedStringCoder_encode():
dtype = strings.create_vlen_dtype(str)
raw_data = np.array(["abc", "ß∂µ∆"], dtype=dtype)
expected_data = np.array([r.encode("utf-8") for r in raw_data], dtype=object)
coder = strings.EncodedStringCoder(allows_unicode=True)
raw = Variable(("x",), raw_data, encoding={"dtype": "S1"})
actual = coder.encode(raw)
expected = Variable(("x",), expected_data, attrs={"_Encoding": "utf-8"})
assert_identical(actual, expected)
raw = Variable(("x",), raw_data)
assert_identical(coder.encode(raw), raw)
coder = strings.EncodedStringCoder(allows_unicode=False)
assert_identical(coder.encode(raw), expected)
@pytest.mark.parametrize(
"original",
[
Variable(("x",), [b"ab", b"cdef"]),
Variable((), b"ab"),
Variable(("x",), [b"a", b"b"]),
Variable((), b"a"),
],
)
def test_CharacterArrayCoder_roundtrip(original):
coder = strings.CharacterArrayCoder()
roundtripped = coder.decode(coder.encode(original))
assert_identical(original, roundtripped)
@pytest.mark.parametrize(
"data",
[
np.array([b"a", b"bc"]),
np.array([b"a", b"bc"], dtype=strings.create_vlen_dtype(bytes)),
],
)
def test_CharacterArrayCoder_encode(data):
coder = strings.CharacterArrayCoder()
raw = Variable(("x",), data)
actual = coder.encode(raw)
expected = Variable(("x", "string2"), np.array([[b"a", b""], [b"b", b"c"]]))
assert_identical(actual, expected)
@pytest.mark.parametrize(
["original", "expected_char_dim_name"],
[
(Variable(("x",), [b"ab", b"cdef"]), "string4"),
(Variable(("x",), [b"ab", b"cdef"], encoding={"char_dim_name": "foo"}), "foo"),
],
)
def test_CharacterArrayCoder_char_dim_name(original, expected_char_dim_name):
coder = strings.CharacterArrayCoder()
encoded = coder.encode(original)
roundtripped = coder.decode(encoded)
assert encoded.dims[-1] == expected_char_dim_name
assert roundtripped.encoding["char_dim_name"] == expected_char_dim_name
assert roundtripped.dims[-1] == original.dims[-1]
def test_StackedBytesArray():
array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S")
actual = strings.StackedBytesArray(array)
expected = np.array([b"abc", b"def"], dtype="S")
assert actual.dtype == expected.dtype
assert actual.shape == expected.shape
assert actual.size == expected.size
assert actual.ndim == expected.ndim
assert len(actual) == len(expected)
assert_array_equal(expected, actual)
B = IndexerMaker(indexing.BasicIndexer)
assert_array_equal(expected[:1], actual[B[:1]])
with pytest.raises(IndexError):
actual[B[:, :2]]
def test_StackedBytesArray_scalar():
array = np.array([b"a", b"b", b"c"], dtype="S")
actual = strings.StackedBytesArray(array)
expected = np.array(b"abc")
assert actual.dtype == expected.dtype
assert actual.shape == expected.shape
assert actual.size == expected.size
assert actual.ndim == expected.ndim
with pytest.raises(TypeError):
len(actual)
np.testing.assert_array_equal(expected, actual)
B = IndexerMaker(indexing.BasicIndexer)
with pytest.raises(IndexError):
actual[B[:2]]
def test_StackedBytesArray_vectorized_indexing():
array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S")
stacked = strings.StackedBytesArray(array)
expected = np.array([[b"abc", b"def"], [b"def", b"abc"]])
V = IndexerMaker(indexing.VectorizedIndexer)
indexer = V[np.array([[0, 1], [1, 0]])]
actual = stacked[indexer]
assert_array_equal(actual, expected)
def test_char_to_bytes():
array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]])
expected = np.array([b"abc", b"def"])
actual = strings.char_to_bytes(array)
assert_array_equal(actual, expected)
expected = np.array([b"ad", b"be", b"cf"])
actual = strings.char_to_bytes(array.T) # non-contiguous
assert_array_equal(actual, expected)
def test_char_to_bytes_ndim_zero():
expected = np.array(b"a")
actual = strings.char_to_bytes(expected)
assert_array_equal(actual, expected)
def test_char_to_bytes_size_zero():
array = np.zeros((3, 0), dtype="S1")
expected = np.array([b"", b"", b""])
actual = strings.char_to_bytes(array)
assert_array_equal(actual, expected)
@requires_dask
def test_char_to_bytes_dask():
numpy_array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]])
array = da.from_array(numpy_array, ((2,), (3,)))
expected = np.array([b"abc", b"def"])
actual = strings.char_to_bytes(array)
assert isinstance(actual, da.Array)
assert actual.chunks == ((2,),)
assert actual.dtype == "S3"
assert_array_equal(np.array(actual), expected)
with raises_regex(ValueError, "stacked dask character array"):
strings.char_to_bytes(array.rechunk(1))
def test_bytes_to_char():
array = np.array([[b"ab", b"cd"], [b"ef", b"gh"]])
expected = np.array([[[b"a", b"b"], [b"c", b"d"]], [[b"e", b"f"], [b"g", b"h"]]])
actual = strings.bytes_to_char(array)
assert_array_equal(actual, expected)
expected = np.array([[[b"a", b"b"], [b"e", b"f"]], [[b"c", b"d"], [b"g", b"h"]]])
actual = strings.bytes_to_char(array.T) # non-contiguous
assert_array_equal(actual, expected)
@requires_dask
def test_bytes_to_char_dask():
numpy_array = np.array([b"ab", b"cd"])
array = da.from_array(numpy_array, ((1, 1),))
expected = np.array([[b"a", b"b"], [b"c", b"d"]])
actual = strings.bytes_to_char(array)
assert isinstance(actual, da.Array)
    assert actual.chunks == ((1, 1), (2,))
assert actual.dtype == "S1"
assert_array_equal(np.array(actual), expected)
|
from coverage.backunittest import TestCase
from coverage.backward import iitems, binary_bytes, bytes_to_ints
class BackwardTest(TestCase):
"""Tests of things from backward.py."""
def test_iitems(self):
d = {'a': 1, 'b': 2, 'c': 3}
items = [('a', 1), ('b', 2), ('c', 3)]
self.assertCountEqual(list(iitems(d)), items)
def test_binary_bytes(self):
byte_values = [0, 255, 17, 23, 42, 57]
bb = binary_bytes(byte_values)
self.assertEqual(len(bb), len(byte_values))
self.assertEqual(byte_values, list(bytes_to_ints(bb)))
|
from itertools import combinations
import numpy as np
import pandas as pd
import networkx as nx
from tqdm import tqdm
from joblib import Parallel, delayed
from sklearn.metrics import (
mutual_info_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
)
from pgmpy.base import DAG
from pgmpy.estimators import StructureEstimator
from pgmpy.global_vars import SHOW_PROGRESS
class TreeSearch(StructureEstimator):
def __init__(self, data, root_node, n_jobs=-1, **kwargs):
"""
        Search class for learning tree-related graph structures. The supported
        algorithms are Chow-Liu and Tree-Augmented Naive Bayes (TAN).
        Chow-Liu constructs the maximum-weight spanning tree using mutual information
        scores as edge weights.
        TAN extends the Naive Bayes classifier with a tree structure over the
        independent variables to account for interactions between them.
Parameters
----------
data: pandas.DataFrame object
dataframe object where each column represents one variable.
root_node: str, int, or any hashable python object.
The root node of the tree structure.
n_jobs: int (default: -1)
Number of jobs to run in parallel. `-1` means use all processors.
References
----------
[1] Chow, C. K.; Liu, C.N. (1968), "Approximating discrete probability
distributions with dependence trees", IEEE Transactions on Information
Theory, IT-14 (3): 462–467
[2] Friedman N, Geiger D and Goldszmidt M (1997). Bayesian network classifiers.
Machine Learning 29: 131–163
"""
if root_node not in data.columns:
raise ValueError(f"Root node: {root_node} not found in data columns.")
self.data = data
self.root_node = root_node
self.n_jobs = n_jobs
super(TreeSearch, self).__init__(data, **kwargs)
def estimate(
self,
estimator_type="chow-liu",
class_node=None,
edge_weights_fn="mutual_info",
show_progress=True,
):
"""
        Estimate the `DAG` structure that best fits the given data set, without
        parametrization.
Parameters
----------
estimator_type: str (chow-liu | tan)
The algorithm to use for estimating the DAG.
class_node: string, int or any hashable python object. (optional)
Required if estimator_type = 'tan'.
edge_weights_fn: str or function (default: mutual info)
Method to use for computing edge weights. By default Mutual Info Score is
used.
show_progress: boolean
If True, shows a progress bar for the running algorithm.
Returns
-------
model: `pgmpy.base.DAG` instance
The estimated model structure.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> import networkx as nx
>>> import matplotlib.pyplot as plt
>>> from pgmpy.estimators import TreeSearch
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> est = TreeSearch(values, root_node='B')
>>> model = est.estimate(estimator_type='chow-liu')
>>> nx.draw_circular(model, with_labels=True, arrowsize=20, arrowstyle='fancy',
... alpha=0.3)
>>> plt.show()
>>> est = TreeSearch(values, root_node='B')
>>> model = est.estimate(estimator_type='tan', class_node='A')
>>> nx.draw_circular(model, with_labels=True, arrowsize=20, arrowstyle='fancy',
... alpha=0.3)
>>> plt.show()
"""
# Step 1. Argument checks
if estimator_type not in {"chow-liu", "tan"}:
raise ValueError(
f"Invalid estimator_type. Expected either chow-liu or tan. Got: {self.return_type}"
)
# Step 2: If estimator_type = "chow-liu", estimate the DAG and return.
if estimator_type == "chow-liu":
return TreeSearch.chow_liu(
self.data, self.root_node, edge_weights_fn, self.n_jobs, show_progress
)
# Step 3: If estimator_type = "tan":
elif estimator_type == "tan":
# Step 3.1: Checks for class_node and root_node != class_node
if class_node is None:
raise ValueError("class_node must be specified for estimator_type=tiu")
elif class_node not in self.data.columns:
raise ValueError(f"Class node: {class_node} not found in data columns")
elif self.root_node == class_node:
raise ValueError(
f"Root node: {self.root_node} and class node: {class_node} are identical"
)
# Step 3.2:: Construct chow-liu on {data.columns - class_node}
df_features = self.data.loc[:, self.data.columns != class_node]
D = TreeSearch.chow_liu(
df_features, self.root_node, edge_weights_fn, self.n_jobs, show_progress
)
# Step 3.3: Add edges from class_node to all other nodes.
D.add_edges_from([(class_node, node) for node in df_features.columns])
return D
@staticmethod
def chow_liu(
data, root_node, edge_weights_fn="mutual_info", n_jobs=-1, show_progress=True
):
"""
Chow-Liu algorithm for estimating tree structure from given data. Refer to
pgmpy.estimators.TreeSearch for more details.
Parameters
----------
data: pandas.DataFrame object
dataframe object where each column represents one variable.
root_node: str, int, or any hashable python object.
The root node of the tree structure.
n_jobs: int (default: -1)
Number of jobs to run in parallel. `-1` means use all processors.
edge_weights_fn: str or function (default: mutual_info)
Method to use for computing edge weights. Options are:
1. 'mutual_info': Mutual Information Score.
2. 'adjusted_mutual_info': Adjusted Mutual Information Score.
3. 'normalized_mutual_info': Normalized Mutual Information Score.
4. function(array[n_samples,], array[n_samples,]): Custom function.
show_progress: boolean
If True, shows a progress bar for the running algorithm.
Returns
-------
model: `pgmpy.base.DAG` instance
The estimated model structure.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.estimators import TreeSearch
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> est = TreeSearch(values, root_node='B')
>>> model = est.estimate(estimator_type='chow-liu')
"""
# Step 0: Check for edge weight computation method
        if edge_weights_fn == "mutual_info":
            edge_weights_fn = mutual_info_score
        elif edge_weights_fn == "adjusted_mutual_info":
            edge_weights_fn = adjusted_mutual_info_score
        elif edge_weights_fn == "normalized_mutual_info":
            edge_weights_fn = normalized_mutual_info_score
        elif not callable(edge_weights_fn):
            raise ValueError(
                f"edge_weights_fn should either be 'mutual_info', 'adjusted_mutual_info', "
                f"'normalized_mutual_info' or a function of form fun(array, array). Got: {edge_weights_fn}"
            )
# Step 1: Compute edge weights for a fully connected graph.
n_vars = len(data.columns)
if show_progress and SHOW_PROGRESS:
pbar = tqdm(
combinations(data.columns, 2), total=(n_vars * (n_vars - 1) / 2)
)
pbar.set_description("Building tree")
else:
pbar = combinations(data.columns, 2)
vals = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(edge_weights_fn)(data.loc[:, u], data.loc[:, v]) for u, v in pbar
)
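        # `vals` holds the pairwise weights in the order produced by combinations();
        # only the upper triangle of the weight matrix is filled below, which is enough
        # because nx.from_pandas_adjacency only creates (undirected) edges for the
        # non-zero entries.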
weights = np.zeros((n_vars, n_vars))
weights[np.triu_indices(n_vars, k=1)] = vals
# Step 2: Compute the maximum spanning tree using the weights.
T = nx.maximum_spanning_tree(
nx.from_pandas_adjacency(
pd.DataFrame(weights, index=data.columns, columns=data.columns),
create_using=nx.Graph,
)
)
# Step 3: Create DAG by directing edges away from root node and return
D = nx.bfs_tree(T, root_node)
return DAG(D)
|
import logging
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .bridge import CannotConnect, DeviceHelper
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Gree Climate component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Gree Climate from a config entry."""
devices = []
    # First we'll grab as many devices as we can find on the network;
    # even statically configured devices need to be bound this way.
_LOGGER.debug("Scanning network for Gree devices")
for device_info in await DeviceHelper.find_devices():
try:
device = await DeviceHelper.try_bind_device(device_info)
except CannotConnect:
_LOGGER.error("Unable to bind to gree device: %s", device_info)
continue
_LOGGER.debug(
"Adding Gree device at %s:%i (%s)",
device.device_info.ip,
device.device_info.port,
device.device_info.name,
)
devices.append(device)
hass.data[DOMAIN]["devices"] = devices
hass.data[DOMAIN]["pending"] = devices
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, CLIMATE_DOMAIN)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_forward_entry_unload(
entry, CLIMATE_DOMAIN
)
if unload_ok:
hass.data[DOMAIN].pop("devices", None)
hass.data[DOMAIN].pop("pending", None)
return unload_ok
|
import mock
from paasta_tools import remote_git
def test_make_determine_wants_func():
refs = {b"refs/heads/foo": b"abcde", b"refs/tags/blah": b"12345"}
# nothing changed, so nothing should change
determine_wants = remote_git._make_determine_wants_func(lambda x: x)
assert determine_wants(refs) == refs
# don't delete anything.
determine_wants = remote_git._make_determine_wants_func(lambda x: {})
assert determine_wants(refs) == refs
# don't modify anything existing.
determine_wants = remote_git._make_determine_wants_func(
lambda x: {k: v[::-1] for k, v in x.items()}
)
assert determine_wants(refs) == refs
# only allow new things
determine_wants = remote_git._make_determine_wants_func(lambda x: {"foo": "bar"})
actual = determine_wants(refs)
expected = dict(refs)
expected.update({b"foo": b"bar"})
assert actual == expected
def test_non_ascii_tags():
"""git tags can be UTF-8 encoded"""
with mock.patch(
"dulwich.client.get_transport_and_path",
autospec=True,
return_value=(
mock.Mock(
**{"fetch_pack.return_value": {"☃".encode("UTF-8"): b"deadbeef"}}
),
"path",
),
):
with mock.patch("time.sleep", autospec=True):
ret = remote_git.list_remote_refs("git-url")
assert ret == {"☃": "deadbeef"}
def test_make_force_push_mutate_refs_func_overwrites_shas():
targets = ["refs/heads/targeta", "refs/tags/targetb"]
input_refs = {
b"refs/heads/foo": b"12345",
b"refs/heads/targeta": b"12345",
b"refs/tags/targetb": b"12345",
b"refs/heads/ignored": b"12345",
b"refs/tags/blah": b"12345",
}
expected = {
b"refs/heads/foo": b"12345",
b"refs/heads/targeta": b"newsha",
b"refs/tags/targetb": b"newsha",
b"refs/heads/ignored": b"12345",
b"refs/tags/blah": b"12345",
}
mutate_refs_func = remote_git.make_force_push_mutate_refs_func(
targets=targets, sha="newsha"
)
actual = mutate_refs_func(input_refs)
assert actual == expected
assert all([isinstance(k, bytes) for k in actual])
@mock.patch("dulwich.client", autospec=True)
@mock.patch("paasta_tools.remote_git._make_determine_wants_func", autospec=True)
def test_create_remote_refs_is_safe_by_default(
mock_make_determine_wants_func, mock_dulwich_client
):
git_url = "fake_git_url"
fake_git_client = mock.Mock()
mock_dulwich_client.get_transport_and_path.return_value = (
fake_git_client,
"fake_path",
)
remote_git.create_remote_refs(
git_url=git_url, ref_mutator=mock.sentinel.ref_mutator
)
fake_git_client.send_pack.assert_called_once_with(
"fake_path", mock_make_determine_wants_func.return_value, mock.ANY
)
@mock.patch("dulwich.client", autospec=True)
def test_create_remote_refs_allows_force_and_uses_the_provided_mutator(
mock_dulwich_client,
):
git_url = "fake_git_url"
fake_git_client = mock.Mock()
mock_dulwich_client.get_transport_and_path.return_value = (
fake_git_client,
"fake_path",
)
remote_git.create_remote_refs(
git_url=git_url, ref_mutator=mock.sentinel.ref_mutator, force=True
)
fake_git_client.send_pack.assert_called_once_with(
"fake_path", mock.sentinel.ref_mutator, mock.ANY
)
@mock.patch("paasta_tools.remote_git._run", autospec=True)
def test_get_authors_fails_with_bad_url(mock_run):
expected = 1, mock.ANY
assert expected == remote_git.get_authors("bad", "a", "b")
@mock.patch("paasta_tools.remote_git._run", autospec=True)
def test_get_authors_fails_with_unknown(mock_run):
url = "[email protected]:something.git"
expected = 1, mock.ANY
assert expected == remote_git.get_authors(url, "a", "b")
@mock.patch("paasta_tools.remote_git._run", autospec=True)
def test_get_authors_works_with_good_url(mock_run):
mock_run.return_value = (0, "it worked")
expected = mock_run.return_value
assert expected == remote_git.get_authors(
"[email protected]:yelp-main", "a", "b"
)
mock_run.assert_called_once_with(
command="ssh [email protected] authors-of-changeset yelp-main a b",
timeout=5.0,
)
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from math import sin, cos, radians
import numpy.random as random
import numpy as np
from numpy import array
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import IMMEstimator, KalmanFilter
from filterpy.common import Q_discrete_white_noise, Saver
DO_PLOT = False
class NoisySensor(object):
def __init__(self, noise_factor=1):
self.noise_factor = noise_factor
def sense(self, pos):
return (pos[0] + randn()*self.noise_factor,
pos[1] + randn()*self.noise_factor)
def angle_between(x, y):
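    # Shortest signed angular difference in degrees, e.g. angle_between(350, 10) == 20
    # and angle_between(10, 350) == -20.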
return min(y-x, y-x+360, y-x-360, key=abs)
class ManeuveringTarget(object):
def __init__(self, x0, y0, v0, heading):
self.x = x0
self.y = y0
self.vel = v0
self.hdg = heading
self.cmd_vel = v0
self.cmd_hdg = heading
self.vel_step = 0
self.hdg_step = 0
self.vel_delta = 0
self.hdg_delta = 0
def update(self):
vx = self.vel * cos(radians(90-self.hdg))
vy = self.vel * sin(radians(90-self.hdg))
self.x += vx
self.y += vy
if self.hdg_step > 0:
self.hdg_step -= 1
self.hdg += self.hdg_delta
if self.vel_step > 0:
self.vel_step -= 1
self.vel += self.vel_delta
return (self.x, self.y)
def set_commanded_heading(self, hdg_degrees, steps):
self.cmd_hdg = hdg_degrees
self.hdg_delta = angle_between(self.cmd_hdg,
self.hdg) / steps
if abs(self.hdg_delta) > 0:
self.hdg_step = steps
else:
self.hdg_step = 0
def set_commanded_speed(self, speed, steps):
self.cmd_vel = speed
self.vel_delta = (self.cmd_vel - self.vel) / steps
if abs(self.vel_delta) > 0:
self.vel_step = steps
else:
self.vel_step = 0
def make_cv_filter(dt, noise_factor):
cvfilter = KalmanFilter(dim_x = 2, dim_z=1)
cvfilter.x = array([0., 0.])
cvfilter.P *= 3
cvfilter.R *= noise_factor**2
cvfilter.F = array([[1, dt],
[0, 1]], dtype=float)
cvfilter.H = array([[1, 0]], dtype=float)
cvfilter.Q = Q_discrete_white_noise(dim=2, dt=dt, var=0.02)
return cvfilter
def make_ca_filter(dt, noise_factor):
cafilter = KalmanFilter(dim_x=3, dim_z=1)
cafilter.x = array([0., 0., 0.])
cafilter.P *= 3
cafilter.R *= noise_factor**2
cafilter.Q = Q_discrete_white_noise(dim=3, dt=dt, var=0.02)
cafilter.F = array([[1, dt, 0.5*dt*dt],
[0, 1, dt],
[0, 0, 1]], dtype=float)
cafilter.H = array([[1, 0, 0]], dtype=float)
return cafilter
def generate_data(steady_count, noise_factor):
t = ManeuveringTarget(x0=0, y0=0, v0=0.3, heading=0)
xs = []
ys = []
for i in range(30):
x, y = t.update()
xs.append(x)
ys.append(y)
t.set_commanded_heading(310, 25)
t.set_commanded_speed(1, 15)
for i in range(steady_count):
x, y = t.update()
xs.append(x)
ys.append(y)
ns = NoisySensor(noise_factor=noise_factor)
pos = array(list(zip(xs, ys)))
zs = array([ns.sense(p) for p in pos])
return pos, zs
def test_imm():
""" This test is drawn from Crassidis [1], example 4.6.
** References**
[1] Crassidis. "Optimal Estimation of Dynamic Systems", CRC Press,
Second edition.
"""
r = 100.
dt = 1.
phi_sim = np.array(
[[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]])
gam = np.array([[dt**2/2, 0],
[dt, 0],
[0, dt**2/2],
[0, dt]])
x = np.array([[2000, 0, 10000, -15.]]).T
simxs = []
N = 600
for i in range(N):
x = np.dot(phi_sim, x)
if i >= 400:
x += np.dot(gam, np.array([[.075, .075]]).T)
simxs.append(x)
simxs = np.array(simxs)
zs = np.zeros((N, 2))
for i in range(len(zs)):
zs[i, 0] = simxs[i, 0] + randn()*r
zs[i, 1] = simxs[i, 2] + randn()*r
'''
try:
# data to test against crassidis' IMM matlab code
zs_tmp = np.genfromtxt('c:/users/rlabbe/dropbox/Crassidis/mycode/xx.csv', delimiter=',')[:-1]
zs = zs_tmp
except:
pass
'''
ca = KalmanFilter(6, 2)
cano = KalmanFilter(6, 2)
dt2 = (dt**2)/2
ca.F = np.array(
[[1, dt, dt2, 0, 0, 0],
[0, 1, dt, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, dt, dt2],
[0, 0, 0, 0, 1, dt],
[0, 0, 0, 0, 0, 1]])
cano.F = ca.F.copy()
ca.x = np.array([[2000., 0, 0, 10000, -15, 0]]).T
cano.x = ca.x.copy()
ca.P *= 1.e-12
cano.P *= 1.e-12
ca.R *= r**2
cano.R *= r**2
cano.Q *= 0
q = np.array([[.05, .125, 1./6],
[.125, 1/3, .5],
[1./6, .5, 1.]])*1.e-3
ca.Q[0:3, 0:3] = q
ca.Q[3:6, 3:6] = q
ca.H = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
cano.H = ca.H.copy()
filters = [ca, cano]
trans = np.array([[0.97, 0.03],
[0.03, 0.97]])
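    # `ca` is a constant-acceleration model with process noise, `cano` the same model
    # with zero process noise; (0.5, 0.5) are the initial mode probabilities and
    # `trans` is the Markov mode-transition matrix (97% chance of staying in the
    # current mode at each step).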
bank = IMMEstimator(filters, (0.5, 0.5), trans)
# ensure __repr__ doesn't have problems
str(bank)
s = Saver(bank)
ca_s = Saver(ca)
cano_s = Saver(cano)
for i, z in enumerate(zs):
z = np.array([z]).T
bank.update(z)
bank.predict()
s.save()
ca_s.save()
cano_s.save()
if DO_PLOT:
s.to_array()
ca_s.to_array()
cano_s.to_array()
plt.figure()
plt.subplot(121)
plt.plot(s.x[:, 0], s.x[:, 3], 'k')
#plt.plot(cvxs[:, 0], caxs[:, 3])
#plt.plot(simxs[:, 0], simxs[:, 2], 'g')
plt.scatter(zs[:, 0], zs[:, 1], marker='+', alpha=0.2)
plt.subplot(122)
plt.plot(s.mu[:, 0])
plt.plot(s.mu[:, 1])
plt.ylim(0, 1)
plt.title('probability ratio p(cv)/p(ca)')
'''plt.figure()
plt.plot(cvxs, label='CV')
plt.plot(caxs, label='CA')
plt.plot(xs[:, 0], label='GT')
plt.legend()
plt.figure()
plt.plot(xs)
plt.plot(xs[:, 0])'''
def test_misshapen():
"""Ensure we get a ValueError if the filter banks are not designed
properly
"""
ca = KalmanFilter(3, 1)
cv = KalmanFilter(2, 1)
trans = np.array([[0.97, 0.03],
[0.03, 0.97]])
try:
IMMEstimator([ca, cv], (0.5, 0.5), trans)
assert "IMM should raise ValueError on filter banks with filters of different sizes"
except ValueError:
pass
try:
IMMEstimator([], (0.5, 0.5), trans)
assert "Should raise ValueError on empty bank"
except ValueError:
pass
if __name__ == '__main__':
#test_misshapen()
DO_PLOT = True
test_imm()
|
import pytest
from homeassistant.const import (
LENGTH_FEET,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
)
import homeassistant.util.distance as distance_util
INVALID_SYMBOL = "bob"
VALID_SYMBOL = LENGTH_KILOMETERS
def test_convert_same_unit():
"""Test conversion from any unit to same unit."""
assert distance_util.convert(5, LENGTH_KILOMETERS, LENGTH_KILOMETERS) == 5
assert distance_util.convert(2, LENGTH_METERS, LENGTH_METERS) == 2
assert distance_util.convert(10, LENGTH_MILES, LENGTH_MILES) == 10
assert distance_util.convert(9, LENGTH_FEET, LENGTH_FEET) == 9
def test_convert_invalid_unit():
"""Test exception is thrown for invalid units."""
with pytest.raises(ValueError):
distance_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)
with pytest.raises(ValueError):
distance_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)
def test_convert_nonnumeric_value():
"""Test exception is thrown for nonnumeric type."""
with pytest.raises(TypeError):
distance_util.convert("a", LENGTH_KILOMETERS, LENGTH_METERS)
def test_convert_from_miles():
"""Test conversion from miles to other units."""
miles = 5
assert distance_util.convert(miles, LENGTH_MILES, LENGTH_KILOMETERS) == 8.04672
assert distance_util.convert(miles, LENGTH_MILES, LENGTH_METERS) == 8046.72
assert distance_util.convert(miles, LENGTH_MILES, LENGTH_FEET) == 26400.0008448
def test_convert_from_feet():
"""Test conversion from feet to other units."""
feet = 5000
assert distance_util.convert(feet, LENGTH_FEET, LENGTH_KILOMETERS) == 1.524
assert distance_util.convert(feet, LENGTH_FEET, LENGTH_METERS) == 1524
assert distance_util.convert(feet, LENGTH_FEET, LENGTH_MILES) == 0.9469694040000001
def test_convert_from_kilometers():
"""Test conversion from kilometers to other units."""
km = 5
assert distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_FEET) == 16404.2
assert distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_METERS) == 5000
assert distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_MILES) == 3.106855
def test_convert_from_meters():
"""Test conversion from meters to other units."""
m = 5000
assert distance_util.convert(m, LENGTH_METERS, LENGTH_FEET) == 16404.2
assert distance_util.convert(m, LENGTH_METERS, LENGTH_KILOMETERS) == 5
assert distance_util.convert(m, LENGTH_METERS, LENGTH_MILES) == 3.106855
|
import logging
from typing import Any, Awaitable, Callable, List, Optional, Set, cast
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ALIAS,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
CONF_ID,
CONF_MODE,
CONF_PLATFORM,
CONF_VARIABLES,
CONF_ZONE,
EVENT_HOMEASSISTANT_STARTED,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import (
Context,
CoreState,
HomeAssistant,
callback,
split_entity_id,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition, extract_domain_configs, template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.script import (
ATTR_CUR,
ATTR_MAX,
ATTR_MODE,
CONF_MAX,
CONF_MAX_EXCEEDED,
SCRIPT_MODE_SINGLE,
Script,
make_script_schema,
)
from homeassistant.helpers.script_variables import ScriptVariables
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.trigger import async_initialize_triggers
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util.dt import parse_datetime
# mypy: allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs, no-warn-return-any
DOMAIN = "automation"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
GROUP_NAME_ALL_AUTOMATIONS = "all automations"
CONF_DESCRIPTION = "description"
CONF_HIDE_ENTITY = "hide_entity"
CONF_CONDITION = "condition"
CONF_ACTION = "action"
CONF_TRIGGER = "trigger"
CONF_CONDITION_TYPE = "condition_type"
CONF_INITIAL_STATE = "initial_state"
CONF_SKIP_CONDITION = "skip_condition"
CONF_STOP_ACTIONS = "stop_actions"
CONDITION_USE_TRIGGER_VALUES = "use_trigger_values"
CONDITION_TYPE_AND = "and"
CONDITION_TYPE_NOT = "not"
CONDITION_TYPE_OR = "or"
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_INITIAL_STATE = True
DEFAULT_STOP_ACTIONS = True
EVENT_AUTOMATION_RELOADED = "automation_reloaded"
EVENT_AUTOMATION_TRIGGERED = "automation_triggered"
ATTR_LAST_TRIGGERED = "last_triggered"
ATTR_SOURCE = "source"
ATTR_VARIABLES = "variables"
SERVICE_TRIGGER = "trigger"
_LOGGER = logging.getLogger(__name__)
AutomationActionType = Callable[[HomeAssistant, TemplateVarsType], Awaitable[None]]
_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HIDE_ENTITY, invalidation_version="0.110"),
make_script_schema(
{
# str on purpose
CONF_ID: str,
CONF_ALIAS: cv.string,
vol.Optional(CONF_DESCRIPTION): cv.string,
vol.Optional(CONF_INITIAL_STATE): cv.boolean,
vol.Optional(CONF_HIDE_ENTITY): cv.boolean,
vol.Required(CONF_TRIGGER): cv.TRIGGER_SCHEMA,
vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
vol.Optional(CONF_VARIABLES): cv.SCRIPT_VARIABLES_SCHEMA,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
},
SCRIPT_MODE_SINGLE,
),
)
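# For reference, a minimal configuration accepted by this schema looks roughly like
# the following (illustrative only; the entity ids and service are made-up examples):
#
#   automation:
#     - alias: "Turn on hallway light"
#       trigger:
#         - platform: state
#           entity_id: binary_sensor.hall_motion
#           to: "on"
#       action:
#         - service: light.turn_on
#           entity_id: light.hallway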
@bind_hass
def is_on(hass, entity_id):
"""
Return true if specified automation entity_id is on.
Async friendly.
"""
return hass.states.is_state(entity_id, STATE_ON)
@callback
def automations_with_entity(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all automations that reference the entity."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
automation_entity.entity_id
for automation_entity in component.entities
if entity_id in automation_entity.referenced_entities
]
@callback
def entities_in_automation(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all entities in a scene."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
automation_entity = component.get_entity(entity_id)
if automation_entity is None:
return []
return list(automation_entity.referenced_entities)
@callback
def automations_with_device(hass: HomeAssistant, device_id: str) -> List[str]:
"""Return all automations that reference the device."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
automation_entity.entity_id
for automation_entity in component.entities
if device_id in automation_entity.referenced_devices
]
@callback
def devices_in_automation(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all devices in a scene."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
automation_entity = component.get_entity(entity_id)
if automation_entity is None:
return []
return list(automation_entity.referenced_devices)
async def async_setup(hass, config):
"""Set up the automation."""
hass.data[DOMAIN] = component = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def trigger_service_handler(entity, service_call):
"""Handle automation triggers."""
await entity.async_trigger(
service_call.data[ATTR_VARIABLES],
skip_condition=service_call.data[CONF_SKIP_CONDITION],
context=service_call.context,
)
component.async_register_entity_service(
SERVICE_TRIGGER,
{
vol.Optional(ATTR_VARIABLES, default={}): dict,
vol.Optional(CONF_SKIP_CONDITION, default=True): bool,
},
trigger_service_handler,
)
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(
SERVICE_TURN_OFF,
{vol.Optional(CONF_STOP_ACTIONS, default=DEFAULT_STOP_ACTIONS): cv.boolean},
"async_turn_off",
)
async def reload_service_handler(service_call):
"""Remove all automations and load new ones from config."""
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
hass.bus.async_fire(EVENT_AUTOMATION_RELOADED, context=service_call.context)
async_register_admin_service(
hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
return True
class AutomationEntity(ToggleEntity, RestoreEntity):
"""Entity to show status of entity."""
def __init__(
self,
automation_id,
name,
trigger_config,
cond_func,
action_script,
initial_state,
variables,
):
"""Initialize an automation entity."""
self._id = automation_id
self._name = name
self._trigger_config = trigger_config
self._async_detach_triggers = None
self._cond_func = cond_func
self.action_script = action_script
self.action_script.change_listener = self.async_write_ha_state
self._initial_state = initial_state
self._is_enabled = False
self._referenced_entities: Optional[Set[str]] = None
self._referenced_devices: Optional[Set[str]] = None
self._logger = _LOGGER
self._variables: ScriptVariables = variables
@property
def name(self):
"""Name of the automation."""
return self._name
@property
def unique_id(self):
"""Return unique ID."""
return self._id
@property
def should_poll(self):
"""No polling needed for automation entities."""
return False
@property
def state_attributes(self):
"""Return the entity state attributes."""
attrs = {
ATTR_LAST_TRIGGERED: self.action_script.last_triggered,
ATTR_MODE: self.action_script.script_mode,
ATTR_CUR: self.action_script.runs,
}
if self.action_script.supports_max:
attrs[ATTR_MAX] = self.action_script.max_runs
return attrs
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._async_detach_triggers is not None or self._is_enabled
@property
def referenced_devices(self):
"""Return a set of referenced devices."""
if self._referenced_devices is not None:
return self._referenced_devices
referenced = self.action_script.referenced_devices
if self._cond_func is not None:
for conf in self._cond_func.config:
referenced |= condition.async_extract_devices(conf)
for conf in self._trigger_config:
device = _trigger_extract_device(conf)
if device is not None:
referenced.add(device)
self._referenced_devices = referenced
return referenced
@property
def referenced_entities(self):
"""Return a set of referenced entities."""
if self._referenced_entities is not None:
return self._referenced_entities
referenced = self.action_script.referenced_entities
if self._cond_func is not None:
for conf in self._cond_func.config:
referenced |= condition.async_extract_entities(conf)
for conf in self._trigger_config:
for entity_id in _trigger_extract_entities(conf):
referenced.add(entity_id)
self._referenced_entities = referenced
return referenced
async def async_added_to_hass(self) -> None:
"""Startup with initial state or previous state."""
await super().async_added_to_hass()
self._logger = logging.getLogger(
f"{__name__}.{split_entity_id(self.entity_id)[1]}"
)
self.action_script.update_logger(self._logger)
state = await self.async_get_last_state()
if state:
enable_automation = state.state == STATE_ON
last_triggered = state.attributes.get("last_triggered")
if last_triggered is not None:
self.action_script.last_triggered = parse_datetime(last_triggered)
self._logger.debug(
"Loaded automation %s with state %s from state "
" storage last state %s",
self.entity_id,
enable_automation,
state,
)
else:
enable_automation = DEFAULT_INITIAL_STATE
self._logger.debug(
"Automation %s not in state storage, state %s from default is used",
self.entity_id,
enable_automation,
)
if self._initial_state is not None:
enable_automation = self._initial_state
self._logger.debug(
"Automation %s initial state %s overridden from "
"config initial_state",
self.entity_id,
enable_automation,
)
if enable_automation:
await self.async_enable()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on and update the state."""
await self.async_enable()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
if CONF_STOP_ACTIONS in kwargs:
await self.async_disable(kwargs[CONF_STOP_ACTIONS])
else:
await self.async_disable()
async def async_trigger(self, run_variables, context=None, skip_condition=False):
"""Trigger automation.
This method is a coroutine.
"""
if self._variables:
try:
variables = self._variables.async_render(self.hass, run_variables)
except template.TemplateError as err:
self._logger.error("Error rendering variables: %s", err)
return
else:
variables = run_variables
if (
not skip_condition
and self._cond_func is not None
and not self._cond_func(variables)
):
return
# Create a new context referring to the old context.
parent_id = None if context is None else context.id
trigger_context = Context(parent_id=parent_id)
self.async_set_context(trigger_context)
event_data = {
ATTR_NAME: self._name,
ATTR_ENTITY_ID: self.entity_id,
}
if "trigger" in variables and "description" in variables["trigger"]:
event_data[ATTR_SOURCE] = variables["trigger"]["description"]
@callback
def started_action():
self.hass.bus.async_fire(
EVENT_AUTOMATION_TRIGGERED, event_data, context=trigger_context
)
try:
await self.action_script.async_run(
variables, trigger_context, started_action
)
except Exception: # pylint: disable=broad-except
self._logger.exception("While executing automation %s", self.entity_id)
async def async_will_remove_from_hass(self):
"""Remove listeners when removing automation from Home Assistant."""
await super().async_will_remove_from_hass()
await self.async_disable()
async def async_enable(self):
"""Enable this automation entity.
This method is a coroutine.
"""
if self._is_enabled:
return
self._is_enabled = True
        # Home Assistant is already running (not in its initial startup phase),
        # so attach the triggers right away.
if self.hass.state != CoreState.not_running:
self._async_detach_triggers = await self._async_attach_triggers(False)
self.async_write_ha_state()
return
async def async_enable_automation(event):
"""Start automation on startup."""
# Don't do anything if no longer enabled or already attached
if not self._is_enabled or self._async_detach_triggers is not None:
return
self._async_detach_triggers = await self._async_attach_triggers(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, async_enable_automation
)
self.async_write_ha_state()
async def async_disable(self, stop_actions=DEFAULT_STOP_ACTIONS):
"""Disable the automation entity."""
if not self._is_enabled and not self.action_script.runs:
return
self._is_enabled = False
if self._async_detach_triggers is not None:
self._async_detach_triggers()
self._async_detach_triggers = None
if stop_actions:
await self.action_script.async_stop()
self.async_write_ha_state()
async def _async_attach_triggers(
self, home_assistant_start: bool
) -> Optional[Callable[[], None]]:
"""Set up the triggers."""
def log_cb(level, msg):
self._logger.log(level, "%s %s", msg, self._name)
return await async_initialize_triggers(
cast(HomeAssistant, self.hass),
self._trigger_config,
self.async_trigger,
DOMAIN,
self._name,
log_cb,
home_assistant_start,
)
@property
def device_state_attributes(self):
"""Return automation attributes."""
if self._id is None:
return None
return {CONF_ID: self._id}
async def _async_process_config(hass, config, component):
"""Process config and add automations.
This method is a coroutine.
"""
entities = []
for config_key in extract_domain_configs(config, DOMAIN):
conf = config[config_key]
for list_no, config_block in enumerate(conf):
automation_id = config_block.get(CONF_ID)
name = config_block.get(CONF_ALIAS) or f"{config_key} {list_no}"
initial_state = config_block.get(CONF_INITIAL_STATE)
action_script = Script(
hass,
config_block[CONF_ACTION],
name,
DOMAIN,
running_description="automation actions",
script_mode=config_block[CONF_MODE],
max_runs=config_block[CONF_MAX],
max_exceeded=config_block[CONF_MAX_EXCEEDED],
logger=_LOGGER,
# We don't pass variables here
# Automation will already render them to use them in the condition
# and so will pass them on to the script.
)
if CONF_CONDITION in config_block:
cond_func = await _async_process_if(hass, config, config_block)
if cond_func is None:
continue
else:
cond_func = None
entity = AutomationEntity(
automation_id,
name,
config_block[CONF_TRIGGER],
cond_func,
action_script,
initial_state,
config_block.get(CONF_VARIABLES),
)
entities.append(entity)
if entities:
await component.async_add_entities(entities)
async def _async_process_if(hass, config, p_config):
"""Process if checks."""
if_configs = p_config[CONF_CONDITION]
checks = []
for if_config in if_configs:
try:
checks.append(await condition.async_from_config(hass, if_config, False))
except HomeAssistantError as ex:
_LOGGER.warning("Invalid condition: %s", ex)
return None
def if_action(variables=None):
"""AND all conditions."""
return all(check(hass, variables) for check in checks)
if_action.config = if_configs
return if_action
@callback
def _trigger_extract_device(trigger_conf: dict) -> Optional[str]:
"""Extract devices from a trigger config."""
if trigger_conf[CONF_PLATFORM] != "device":
return None
return trigger_conf[CONF_DEVICE_ID]
@callback
def _trigger_extract_entities(trigger_conf: dict) -> List[str]:
"""Extract entities from a trigger config."""
if trigger_conf[CONF_PLATFORM] in ("state", "numeric_state"):
return trigger_conf[CONF_ENTITY_ID]
if trigger_conf[CONF_PLATFORM] == "zone":
return trigger_conf[CONF_ENTITY_ID] + [trigger_conf[CONF_ZONE]]
if trigger_conf[CONF_PLATFORM] == "geo_location":
return [trigger_conf[CONF_ZONE]]
if trigger_conf[CONF_PLATFORM] == "sun":
return ["sun.sun"]
return []
|
import json
import logging
import socket
from threading import Thread
import paho
import paho.mqtt.client as mqtt
from kalliope.core.SynapseLauncher import SynapseLauncher
logging.basicConfig()
logger = logging.getLogger("kalliope")
class MqttClient(Thread):
def __init__(self, broker=None, brain=None):
"""
        Class used to instantiate the MQTT client.
        Runs in its own Thread so that the call from the parent class is non-blocking.
:param broker: broker object
:type broker: Broker
"""
super(MqttClient, self).__init__()
self.broker = broker
self.brain = brain
self.client = mqtt.Client(client_id=self.broker.client_id, protocol=self._get_protocol(self.broker.protocol))
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_subscribe = self.on_subscribe
if self.broker.username is not None and self.broker.password is not None:
logger.debug("[MqttClient] Username and password are set")
self.client.username_pw_set(self.broker.username, self.broker.password)
if self.broker.ca_cert is not None and self.broker.certfile is not None and self.broker.keyfile is not None:
logger.debug("[MqttClient] Active TLS with client certificate authentication")
self.client.tls_set(ca_certs=self.broker.ca_cert,
certfile=self.broker.certfile,
keyfile=self.broker.keyfile)
self.client.tls_insecure_set(self.broker.tls_insecure)
elif self.broker.ca_cert is not None:
logger.debug("[MqttClient] Active TLS with server CA certificate only")
self.client.tls_set(ca_certs=self.broker.ca_cert)
self.client.tls_insecure_set(self.broker.tls_insecure)
def run(self):
logger.debug("[MqttClient] Try to connect to broker: %s, port: %s, "
"keepalive: %s, protocol: %s" % (self.broker.broker_ip,
self.broker.port,
self.broker.keepalive,
self.broker.protocol))
try:
self.client.connect(self.broker.broker_ip, self.broker.port, self.broker.keepalive)
self.client.loop_forever()
except socket.error:
logger.debug("[MqttClient] Unable to connect to broker %s" % self.broker.broker_ip)
def on_connect(self, client, userdata, flags, rc):
"""
The callback for when the client receives a CONNACK response from the server.
"""
logger.debug("[MqttClient] Broker %s connection result code %s" % (self.broker.broker_ip, str(rc)))
if rc == 0: # success connection
logger.debug("[MqttClient] Successfully connected to broker %s" % self.broker.broker_ip)
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
for topic in self.broker.topics:
logger.debug("[MqttClient] Trying to subscribe to topic %s" % topic.name)
client.subscribe(topic.name)
else:
logger.debug("[MqttClient] Broker %s connection failled. Disconnect" % self.broker.broker_ip)
self.client.disconnect()
def on_message(self, client, userdata, msg):
"""
The callback for when a PUBLISH message is received from the server
"""
logger.debug("[MqttClient] " + msg.topic + ": " + str(msg.payload))
self.call_concerned_synapses(msg.topic, msg.payload)
def on_subscribe(self, mqttc, obj, mid, granted_qos):
"""
The callback for when the client successfully subscribe to a topic on the server
"""
logger.debug("[MqttClient] Successfully subscribed to topic")
def call_concerned_synapses(self, topic_name, message):
"""
Call synapse launcher class for each synapse concerned by the subscribed topic
convert the message to json if needed before.
The synapse is loaded with a parameter called "mqtt_subscriber_message" that can be used in neurons
:param topic_name: name of the topic that received a message from the broker
:param message: string message received from the broker
"""
# find concerned topic by name
target_topic = next(topic for topic in self.broker.topics if topic.name == topic_name)
# convert payload to a dict if necessary
if target_topic.is_json:
message = json.loads(message)
logger.debug("[MqttClient] Payload message converted to JSON dict: %s" % message)
else:
logger.debug("[MqttClient] Payload message is plain text: %s" % message)
# run each synapse
for synapse in target_topic.synapses:
logger.debug("[MqttClient] start synapse name %s" % synapse.name)
overriding_parameter_dict = dict()
overriding_parameter_dict["mqtt_subscriber_message"] = message
SynapseLauncher.start_synapse_by_list_name([synapse.name],
brain=self.brain,
overriding_parameter_dict=overriding_parameter_dict)
@staticmethod
def _get_protocol(protocol):
"""
return the right protocol version number from the lib depending on the string protocol
"""
if protocol == "MQTTv31":
return paho.mqtt.client.MQTTv31
return paho.mqtt.client.MQTTv311
|
import logging
import sys
import threading
import time
from kalliope.core import NeuronModule
from kalliope.core.NeuronModule import MissingParameterException, InvalidParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class TimerThread(threading.Thread):
def __init__(self, time_to_wait_seconds, callback):
"""
A Thread that will call the given callback method after waiting time_to_wait_seconds
:param time_to_wait_seconds: number of second to wait before call the callback method
:param callback: callback method
"""
threading.Thread.__init__(self)
self.time_to_wait_seconds = time_to_wait_seconds
self.callback = callback
def run(self):
# wait the amount of seconds
logger.debug("[Neurotimer] wait %s seconds" % self.time_to_wait_seconds)
time.sleep(self.time_to_wait_seconds)
# then run the callback method
self.callback()
class Neurotimer(NeuronModule):
def __init__(self, **kwargs):
super(Neurotimer, self).__init__(**kwargs)
# get parameters
self.seconds = kwargs.get('seconds', None)
self.minutes = kwargs.get('minutes', None)
self.hours = kwargs.get('hours', None)
self.synapse = kwargs.get('synapse', None)
self.forwarded_parameter = kwargs.get('forwarded_parameters', None)
# do some check
if self._is_parameters_ok():
# make the sum of all time parameter in seconds
retarding_time_seconds = self._get_retarding_time_seconds()
# now wait before running the target synapse
ds = TimerThread(time_to_wait_seconds=retarding_time_seconds, callback=self.callback_run_synapse)
# ds.daemon = True
ds.start()
def _is_parameters_ok(self):
"""
Check given neuron parameters are valid
:return: True if the neuron has been well configured
"""
# at least one time parameter must be set
if self.seconds is None and self.minutes is None and self.hours is None:
raise MissingParameterException("Neurotimer must have at least one time "
"parameter: seconds, minutes, hours")
self.seconds = self.get_integer_time_parameter(self.seconds)
self.minutes = self.get_integer_time_parameter(self.minutes)
self.hours = self.get_integer_time_parameter(self.hours)
if self.synapse is None:
raise MissingParameterException("Neurotimer must have a synapse name parameter")
return True
@staticmethod
def get_integer_time_parameter(time_parameter):
"""
Check if a given time parameter is a valid integer:
- must be > 0
        - if not an integer, it must be convertible to an integer
:param time_parameter: string or integer
:return: integer
"""
if time_parameter is not None:
if not isinstance(time_parameter, int):
# try to convert into integer
try:
time_parameter = int(time_parameter)
except ValueError:
raise InvalidParameterException("[Neurotimer] %s is not a valid integer" % time_parameter)
# check if positive
if time_parameter < 0:
raise InvalidParameterException("[Neurotimer] %s must be > 0" % time_parameter)
return time_parameter
def _get_retarding_time_seconds(self):
"""
Return the sum of given time parameters
seconds + minutes + hours
:return: integer, number of total seconds
"""
returned_time = 0
if self.seconds is not None:
returned_time += self.seconds
if self.minutes is not None:
returned_time += self.minutes * 60
if self.hours is not None:
returned_time += self.hours * 3600
logger.debug("[Neurotimer] get_retarding_time_seconds: %s" % returned_time)
return returned_time
def callback_run_synapse(self):
"""
Callback method which will be started by the timer thread once the time is over
:return:
"""
logger.debug("[Neurotimer] waiting time is over, start the synapse %s" % self.synapse)
self.run_synapse_by_name(synapse_name=self.synapse,
high_priority=False,
overriding_parameter_dict=self.forwarded_parameter)
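# Minimal illustration of the TimerThread helper above (not part of the Kalliope
# neuron API); the 2-second delay and the print callback are made-up example values.
if __name__ == "__main__":
    demo_timer = TimerThread(time_to_wait_seconds=2, callback=lambda: print("timer elapsed"))
    demo_timer.start()
    demo_timer.join()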
|
import unittest
import numpy as np
from chainer.testing import attr
from chainer import Variable
from chainercv.links import SEResNet101
from chainercv.links import SEResNet152
from chainercv.links import SEResNet50
from chainercv.utils import testing
@testing.parameterize(*(
testing.product_dict(
[
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'res5',
'shapes': (1, 2048, 7, 7), 'n_class': None},
{'pick': ['res2', 'conv1'],
'shapes': ((1, 256, 56, 56), (1, 64, 112, 112)), 'n_class': None},
],
[
{'model_class': SEResNet50},
{'model_class': SEResNet101},
{'model_class': SEResNet152},
],
)
))
class TestSEResNetCall(unittest.TestCase):
def setUp(self):
self.link = self.model_class(
n_class=self.n_class, pretrained_model=None)
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
features = self.link(x)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
@testing.parameterize(*testing.product({
'model': [SEResNet50, SEResNet101, SEResNet152],
'n_class': [None, 500, 1000],
'pretrained_model': ['imagenet'],
    'mean': [None, np.random.uniform(size=(3, 1, 1)).astype(np.float32)],
}))
class TestSEResNetPretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_class': self.n_class,
'pretrained_model': self.pretrained_model,
'mean': self.mean,
}
if self.pretrained_model == 'imagenet':
valid = self.n_class in {None, 1000}
if valid:
self.model(**kwargs)
else:
with self.assertRaises(ValueError):
self.model(**kwargs)
testing.run_module(__name__, __file__)
|
import logging
from typing import Optional
from typing import Sequence
from paasta_tools import kubernetes_tools
from paasta_tools import monitoring_tools
from paasta_tools.check_services_replication_tools import main
from paasta_tools.kubernetes_tools import filter_pods_by_service_instance
from paasta_tools.kubernetes_tools import is_pod_ready
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.kubernetes_tools import V1Pod
from paasta_tools.long_running_service_tools import get_proxy_port_for_instance
from paasta_tools.smartstack_tools import KubeSmartstackEnvoyReplicationChecker
log = logging.getLogger(__name__)
DEFAULT_ALERT_AFTER = "10m"
def check_healthy_kubernetes_tasks_for_service_instance(
instance_config: KubernetesDeploymentConfig,
expected_count: int,
all_pods: Sequence[V1Pod],
) -> None:
si_pods = filter_pods_by_service_instance(
pod_list=all_pods,
service=instance_config.service,
instance=instance_config.instance,
)
num_healthy_tasks = len([pod for pod in si_pods if is_pod_ready(pod)])
log.info(
f"Checking {instance_config.service}.{instance_config.instance} in kubernetes as it is not in smartstack"
)
monitoring_tools.send_replication_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
)
def check_kubernetes_pod_replication(
instance_config: KubernetesDeploymentConfig,
all_tasks_or_pods: Sequence[V1Pod],
replication_checker: KubeSmartstackEnvoyReplicationChecker,
) -> Optional[bool]:
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or k8s)
:param instance_config: an instance of KubernetesDeploymentConfig
:param replication_checker: an instance of KubeSmartstackEnvoyReplicationChecker
"""
default_alert_after = DEFAULT_ALERT_AFTER
expected_count = instance_config.get_instances()
log.info(
"Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)
)
proxy_port = get_proxy_port_for_instance(instance_config)
registrations = instance_config.get_registrations()
# If this instance does not autoscale and only has 1 instance, set alert after to 20m.
# Otherwise, set it to 10 min.
if (
not instance_config.is_autoscaling_enabled()
and instance_config.get_instances() == 1
):
default_alert_after = "20m"
if "monitoring" not in instance_config.config_dict:
instance_config.config_dict["monitoring"] = {}
instance_config.config_dict["monitoring"][
"alert_after"
] = instance_config.config_dict["monitoring"].get(
"alert_after", default_alert_after
)
# if the primary registration does not match the service_instance name then
# the best we can do is check k8s for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
is_well_replicated = monitoring_tools.check_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
replication_checker=replication_checker,
)
return is_well_replicated
else:
check_healthy_kubernetes_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_pods=all_tasks_or_pods,
)
return None
if __name__ == "__main__":
main(
kubernetes_tools.KubernetesDeploymentConfig,
check_kubernetes_pod_replication,
namespace="paasta",
)
|
from abc import abstractmethod
from asyncio import TimeoutError as AsyncIOTimeoutError
import logging
from typing import Any, Dict, Optional
from aiohttp import ClientError
from homeassistant.const import ATTR_NAME
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .utils import BondDevice, BondHub
_LOGGER = logging.getLogger(__name__)
class BondEntity(Entity):
"""Generic Bond entity encapsulating common features of any Bond controlled device."""
def __init__(
self, hub: BondHub, device: BondDevice, sub_device: Optional[str] = None
):
"""Initialize entity with API and device info."""
self._hub = hub
self._device = device
self._sub_device = sub_device
self._available = True
@property
def unique_id(self) -> Optional[str]:
"""Get unique ID for the entity."""
hub_id = self._hub.bond_id
device_id = self._device.device_id
sub_device_id: str = f"_{self._sub_device}" if self._sub_device else ""
return f"{hub_id}_{device_id}{sub_device_id}"
@property
def name(self) -> Optional[str]:
"""Get entity name."""
return self._device.name
@property
def device_info(self) -> Optional[Dict[str, Any]]:
"""Get a an HA device representing this Bond controlled device."""
return {
ATTR_NAME: self.name,
"identifiers": {(DOMAIN, self._device.device_id)},
"via_device": (DOMAIN, self._hub.bond_id),
}
@property
def assumed_state(self) -> bool:
"""Let HA know this entity relies on an assumed state tracked by Bond."""
return self._hub.is_bridge and not self._device.trust_state
@property
def available(self) -> bool:
"""Report availability of this entity based on last API call results."""
return self._available
async def async_update(self):
"""Fetch assumed state of the cover from the hub using API."""
try:
state: dict = await self._hub.bond.device_state(self._device.device_id)
except (ClientError, AsyncIOTimeoutError, OSError) as error:
if self._available:
_LOGGER.warning(
"Entity %s has become unavailable", self.entity_id, exc_info=error
)
self._available = False
else:
_LOGGER.debug("Device state for %s is:\n%s", self.entity_id, state)
if not self._available:
_LOGGER.info("Entity %s has come back", self.entity_id)
self._available = True
self._apply_state(state)
@abstractmethod
def _apply_state(self, state: dict):
raise NotImplementedError
|
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import (
ALLOWED_WATERING_TIME,
CONF_WATERING_TIME,
DATA_HYDRAWISE,
DEFAULT_WATERING_TIME,
SWITCHES,
HydrawiseEntity,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=SWITCHES): vol.All(
cv.ensure_list, [vol.In(SWITCHES)]
),
vol.Optional(CONF_WATERING_TIME, default=DEFAULT_WATERING_TIME): vol.All(
vol.In(ALLOWED_WATERING_TIME)
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Hydrawise device."""
hydrawise = hass.data[DATA_HYDRAWISE].data
default_watering_timer = config.get(CONF_WATERING_TIME)
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
# Create a switch for each zone
for zone in hydrawise.relays:
sensors.append(HydrawiseSwitch(default_watering_timer, zone, sensor_type))
add_entities(sensors, True)
class HydrawiseSwitch(HydrawiseEntity, SwitchEntity):
"""A switch implementation for Hydrawise device."""
def __init__(self, default_watering_timer, *args):
"""Initialize a switch for Hydrawise device."""
super().__init__(*args)
self._default_watering_timer = default_watering_timer
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
relay_data = self.data["relay"] - 1
if self._sensor_type == "manual_watering":
self.hass.data[DATA_HYDRAWISE].data.run_zone(
self._default_watering_timer, relay_data
)
elif self._sensor_type == "auto_watering":
self.hass.data[DATA_HYDRAWISE].data.suspend_zone(0, relay_data)
def turn_off(self, **kwargs):
"""Turn the device off."""
relay_data = self.data["relay"] - 1
if self._sensor_type == "manual_watering":
self.hass.data[DATA_HYDRAWISE].data.run_zone(0, relay_data)
elif self._sensor_type == "auto_watering":
self.hass.data[DATA_HYDRAWISE].data.suspend_zone(365, relay_data)
def update(self):
"""Update device state."""
relay_data = self.data["relay"] - 1
mydata = self.hass.data[DATA_HYDRAWISE].data
_LOGGER.debug("Updating Hydrawise switch: %s", self._name)
if self._sensor_type == "manual_watering":
self._state = mydata.relays[relay_data]["timestr"] == "Now"
elif self._sensor_type == "auto_watering":
self._state = (mydata.relays[relay_data]["timestr"] != "") and (
mydata.relays[relay_data]["timestr"] != "Now"
)
|
import arrow
from freezegun import freeze_time
def test_convert_validity_years(session):
from lemur.common.missing import convert_validity_years
with freeze_time("2016-01-01"):
data = convert_validity_years(dict(validity_years=2))
assert data["validity_start"] == arrow.utcnow().isoformat()
assert data["validity_end"] == arrow.utcnow().shift(years=+2).isoformat()
with freeze_time("2015-01-10"):
data = convert_validity_years(dict(validity_years=1))
assert (
data["validity_end"]
== arrow.utcnow().shift(years=+1, days=-2).isoformat()
)
|
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
VOLT,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (
ACTIVE_NAME,
ACTIVE_TYPE,
ATTRIBUTION,
CONSUMPTION_ID,
CONSUMPTION_NAME,
DOMAIN,
ICON,
MDI_ICONS,
PRODUCTION_ID,
PRODUCTION_NAME,
SENSE_DATA,
SENSE_DEVICE_UPDATE,
SENSE_DEVICES_DATA,
SENSE_DISCOVERED_DEVICES_DATA,
SENSE_TRENDS_COORDINATOR,
)
class SensorConfig:
"""Data structure holding sensor configuration."""
def __init__(self, name, sensor_type):
"""Sensor name and type to pass to API."""
self.name = name
self.sensor_type = sensor_type
# Sensor types/ranges
ACTIVE_SENSOR_TYPE = SensorConfig(ACTIVE_NAME, ACTIVE_TYPE)
# Trend sensor types/ranges
TRENDS_SENSOR_TYPES = {
"daily": SensorConfig("Daily", "DAY"),
"weekly": SensorConfig("Weekly", "WEEK"),
"monthly": SensorConfig("Monthly", "MONTH"),
"yearly": SensorConfig("Yearly", "YEAR"),
}
# Production/consumption variants
SENSOR_VARIANTS = [PRODUCTION_ID, CONSUMPTION_ID]
def sense_to_mdi(sense_icon):
"""Convert sense icon to mdi icon."""
return "mdi:{}".format(MDI_ICONS.get(sense_icon, "power-plug"))
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Sense sensor."""
data = hass.data[DOMAIN][config_entry.entry_id][SENSE_DATA]
sense_devices_data = hass.data[DOMAIN][config_entry.entry_id][SENSE_DEVICES_DATA]
trends_coordinator = hass.data[DOMAIN][config_entry.entry_id][
SENSE_TRENDS_COORDINATOR
]
# Request only in case it takes longer than 60s
await trends_coordinator.async_request_refresh()
sense_monitor_id = data.sense_monitor_id
sense_devices = hass.data[DOMAIN][config_entry.entry_id][
SENSE_DISCOVERED_DEVICES_DATA
]
devices = [
SenseEnergyDevice(sense_devices_data, device, sense_monitor_id)
for device in sense_devices
if device["tags"]["DeviceListAllowed"] == "true"
]
for var in SENSOR_VARIANTS:
name = ACTIVE_SENSOR_TYPE.name
sensor_type = ACTIVE_SENSOR_TYPE.sensor_type
is_production = var == PRODUCTION_ID
unique_id = f"{sense_monitor_id}-active-{var}"
devices.append(
SenseActiveSensor(
data, name, sensor_type, is_production, sense_monitor_id, var, unique_id
)
)
for i in range(len(data.active_voltage)):
devices.append(SenseVoltageSensor(data, i, sense_monitor_id))
for type_id in TRENDS_SENSOR_TYPES:
typ = TRENDS_SENSOR_TYPES[type_id]
for var in SENSOR_VARIANTS:
name = typ.name
sensor_type = typ.sensor_type
is_production = var == PRODUCTION_ID
unique_id = f"{sense_monitor_id}-{type_id}-{var}"
devices.append(
SenseTrendsSensor(
data,
name,
sensor_type,
is_production,
trends_coordinator,
unique_id,
)
)
async_add_entities(devices)
class SenseActiveSensor(Entity):
"""Implementation of a Sense energy sensor."""
def __init__(
self,
data,
name,
sensor_type,
is_production,
sense_monitor_id,
sensor_id,
unique_id,
):
"""Initialize the Sense sensor."""
name_type = PRODUCTION_NAME if is_production else CONSUMPTION_NAME
self._name = f"{name} {name_type}"
self._unique_id = unique_id
self._available = False
self._data = data
self._sense_monitor_id = sense_monitor_id
self._sensor_type = sensor_type
self._is_production = is_production
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return the availability of the sensor."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return POWER_WATT
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def should_poll(self):
"""Return the device should not poll for updates."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}",
self._async_update_from_data,
)
)
@callback
def _async_update_from_data(self):
"""Update the sensor from the data. Must not do I/O."""
new_state = round(
self._data.active_solar_power
if self._is_production
else self._data.active_power
)
if self._available and self._state == new_state:
return
self._state = new_state
self._available = True
self.async_write_ha_state()
class SenseVoltageSensor(Entity):
"""Implementation of a Sense energy voltage sensor."""
def __init__(
self,
data,
index,
sense_monitor_id,
):
"""Initialize the Sense sensor."""
line_num = index + 1
self._name = f"L{line_num} Voltage"
self._unique_id = f"{sense_monitor_id}-L{line_num}"
self._available = False
self._data = data
self._sense_monitor_id = sense_monitor_id
self._voltage_index = index
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return the availability of the sensor."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return VOLT
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def should_poll(self):
"""Return the device should not poll for updates."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}",
self._async_update_from_data,
)
)
@callback
def _async_update_from_data(self):
"""Update the sensor from the data. Must not do I/O."""
new_state = round(self._data.active_voltage[self._voltage_index], 1)
if self._available and self._state == new_state:
return
self._available = True
self._state = new_state
self.async_write_ha_state()
class SenseTrendsSensor(Entity):
"""Implementation of a Sense energy sensor."""
def __init__(
self,
data,
name,
sensor_type,
is_production,
trends_coordinator,
unique_id,
):
"""Initialize the Sense sensor."""
name_type = PRODUCTION_NAME if is_production else CONSUMPTION_NAME
self._name = f"{name} {name_type}"
self._unique_id = unique_id
self._available = False
self._data = data
self._sensor_type = sensor_type
self._coordinator = trends_coordinator
self._is_production = is_production
self._state = None
self._unit_of_measurement = ENERGY_KILO_WATT_HOUR
self._had_any_update = False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return round(self._data.get_trend(self._sensor_type, self._is_production), 1)
@property
def available(self):
"""Return if entity is available."""
return self._had_any_update and self._coordinator.last_update_success
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def should_poll(self):
"""No need to poll. Coordinator notifies entity of updates."""
return False
@callback
def _async_update(self):
"""Track if we had an update so we do not report zero data."""
self._had_any_update = True
self.async_write_ha_state()
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self._coordinator.async_request_refresh()
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(self._coordinator.async_add_listener(self._async_update))
class SenseEnergyDevice(Entity):
"""Implementation of a Sense energy device."""
def __init__(self, sense_devices_data, device, sense_monitor_id):
"""Initialize the Sense binary sensor."""
self._name = f"{device['name']} {CONSUMPTION_NAME}"
self._id = device["id"]
self._available = False
self._sense_monitor_id = sense_monitor_id
self._unique_id = f"{sense_monitor_id}-{self._id}-{CONSUMPTION_ID}"
self._icon = sense_to_mdi(device["icon"])
self._sense_devices_data = sense_devices_data
self._state = None
@property
def state(self):
"""Return the wattage of the sensor."""
return self._state
@property
def available(self):
"""Return the availability of the sensor."""
return self._available
@property
def name(self):
"""Return the name of the power sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique id of the power sensor."""
return self._unique_id
@property
def icon(self):
"""Return the icon of the power sensor."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return POWER_WATT
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def device_class(self):
"""Return the device class of the power sensor."""
return DEVICE_CLASS_POWER
@property
def should_poll(self):
"""Return the device should not poll for updates."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}",
self._async_update_from_data,
)
)
@callback
def _async_update_from_data(self):
"""Get the latest data, update state. Must not do I/O."""
device_data = self._sense_devices_data.get_device_by_id(self._id)
if not device_data or "w" not in device_data:
new_state = 0
else:
new_state = int(device_data["w"])
if self._available and self._state == new_state:
return
self._state = new_state
self._available = True
self.async_write_ha_state()
|
from functools import reduce
from django.conf import settings
from weblate.machinery.base import MachineTranslation, MissingConfiguration
LANGUAGE_MAP = {
"ca": "cat",
"cy": "cym",
"eo": "epo",
"gl": "glg",
"bs": "hbs_BS",
"es": "spa",
"en": "eng",
"en_US": "eng",
"en_UK": "eng",
"nl": "nld",
"de": "deu",
"fr": "fra",
"sl": "slv",
"sr": "hbs",
"nb_NO": "nob",
"nn": "nno",
"se": "sme",
"oc": "oci",
"pt": "por",
"co": "cos",
"fi": "fin",
"ia": "ina",
"ro": "ron",
"cs": "ces",
"sk": "slk",
"ru": "rus",
"av": "ava",
"is": "isl",
"pl": "pol",
"kk": "kaz",
"tt": "tat",
"be": "bel",
"uk": "ukr",
"gn": "grn",
"mt": "mlt",
"it": "ita",
"zh_Hant": "zho",
"br": "bre",
"qu": "qve",
"an": "arg",
"mr": "mar",
"af": "afr",
"fa": "pes",
"el": "ell",
"lv": "lvs",
"as": "asm",
"hi": "hin",
"te": "tel",
"hy": "hye",
"th": "tha",
"mk": "mkd",
"la": "lat",
"ga": "gle",
"sw": "swa",
"hu": "hun",
"ml": "mal",
}
class ApertiumAPYTranslation(MachineTranslation):
"""Apertium machine translation support."""
name = "Apertium APy"
max_score = 90
def __init__(self):
"""Check configuration."""
super().__init__()
self.url = self.get_server_url()
@staticmethod
def get_server_url():
"""Return URL of a server."""
if settings.MT_APERTIUM_APY is None:
raise MissingConfiguration("Not configured Apertium APy URL")
return settings.MT_APERTIUM_APY.rstrip("/")
@property
def all_langs(self):
"""Return all language codes known to service."""
return reduce(lambda acc, x: acc.union(x), self.supported_languages, set())
def map_language_code(self, code):
"""Convert language to service specific code."""
code = super().map_language_code(code).replace("-", "_")
# Force download of supported languages
if code not in self.all_langs and code in LANGUAGE_MAP:
return LANGUAGE_MAP[code]
return code
def download_languages(self):
"""Download list of supported languages from a service."""
data = self.request_status("get", f"{self.url}/listPairs")
return [
(item["sourceLanguage"], item["targetLanguage"])
for item in data["responseData"]
]
def is_supported(self, source, language):
"""Check whether given language combination is supported."""
return (source, language) in self.supported_languages
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Download list of possible translations from Apertium."""
args = {
"langpair": f"{source}|{language}",
"q": text,
"markUnknown": "no",
}
response = self.request_status("get", f"{self.url}/translate", params=args)
yield {
"text": response["responseData"]["translatedText"],
"quality": self.max_score,
"service": self.name,
"source": text,
}
|
from copy import deepcopy
import pytest
from homeassistant.components.deconz.const import DOMAIN as DECONZ_DOMAIN
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.components.fan import (
ATTR_SPEED,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_SPEED,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
FANS = {
"1": {
"etag": "432f3de28965052961a99e3c5494daf4",
"hascolor": False,
"manufacturername": "King Of Fans, Inc.",
"modelid": "HDC52EastwindFan",
"name": "Ceiling fan",
"state": {
"alert": "none",
"bri": 254,
"on": False,
"reachable": True,
"speed": 4,
},
"swversion": "0000000F",
"type": "Fan",
"uniqueid": "00:22:a3:00:00:27:8b:81-01",
}
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, FAN_DOMAIN, {"fan": {"platform": DECONZ_DOMAIN}}
)
is True
)
assert DECONZ_DOMAIN not in hass.data
async def test_no_fans(hass):
"""Test that no fan entities are created."""
await setup_deconz_integration(hass)
assert len(hass.states.async_all()) == 0
async def test_fans(hass):
"""Test that all supported fan entities are created."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["lights"] = deepcopy(FANS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2 # Light and fan
assert hass.states.get("fan.ceiling_fan")
# Test states
assert hass.states.get("fan.ceiling_fan").state == STATE_ON
assert hass.states.get("fan.ceiling_fan").attributes["speed"] == SPEED_HIGH
state_changed_event = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"speed": 0},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("fan.ceiling_fan").state == STATE_OFF
assert hass.states.get("fan.ceiling_fan").attributes["speed"] == SPEED_OFF
# Test service calls
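# In this fixture the raw deCONZ speed values map to HA speeds as
# 0 -> off, 1 -> low, 2 -> medium and 4 -> high; the asserts below rely on it.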
ceiling_fan_device = gateway.api.lights["1"]
# Service turn on fan
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "fan.ceiling_fan"},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"speed": 4})
# Service turn off fan
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "fan.ceiling_fan"},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"speed": 0})
# Service set fan speed to low
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: "fan.ceiling_fan", ATTR_SPEED: SPEED_LOW},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"speed": 1})
# Service set fan speed to medium
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: "fan.ceiling_fan", ATTR_SPEED: SPEED_MEDIUM},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"speed": 2})
# Service set fan speed to high
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: "fan.ceiling_fan", ATTR_SPEED: SPEED_HIGH},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"speed": 4})
# Service set fan speed to off
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: "fan.ceiling_fan", ATTR_SPEED: SPEED_OFF},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"speed": 0})
# Service set fan speed to unsupported value
with patch.object(
ceiling_fan_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: "fan.ceiling_fan", ATTR_SPEED: "bad value"},
blocking=True,
)
await hass.async_block_till_done()
# Events with an unsupported speed get converted to default speed "medium"
state_changed_event = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"speed": 3},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("fan.ceiling_fan").state == STATE_ON
assert hass.states.get("fan.ceiling_fan").attributes["speed"] == SPEED_MEDIUM
await hass.config_entries.async_unload(config_entry.entry_id)
assert len(hass.states.async_all()) == 0
|
import os.path as op
import shutil
import pytest
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
import numpy as np
import scipy.io as sio
from mne.datasets import testing
from mne.io import read_raw_gdf
from mne.io.tests.test_raw import _test_raw_reader
from mne.utils import run_tests_if_main
from mne import pick_types, find_events, events_from_annotations
data_path = testing.data_path(download=False)
gdf1_path = op.join(data_path, 'GDF', 'test_gdf_1.25')
gdf2_path = op.join(data_path, 'GDF', 'test_gdf_2.20')
gdf_1ch_path = op.join(data_path, 'GDF', 'test_1ch.gdf')
@testing.requires_testing_data
def test_gdf_data():
"""Test reading raw GDF 1.x files."""
raw = read_raw_gdf(gdf1_path + '.gdf', eog=None, misc=None, preload=True)
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
data, _ = raw[picks]
# Test Status is added as event
EXPECTED_EVS_ONSETS = raw._raw_extras[0]['events'][1]
EXPECTED_EVS_ID = {
'{}'.format(evs): i for i, evs in enumerate(
[32769, 32770, 33024, 33025, 33026, 33027, 33028, 33029, 33040,
33041, 33042, 33043, 33044, 33045, 33285, 33286], 1)
}
evs, evs_id = events_from_annotations(raw)
assert_array_equal(evs[:, 0], EXPECTED_EVS_ONSETS)
assert evs_id == EXPECTED_EVS_ID
# this .npy was generated using the official biosig python package
raw_biosig = np.load(gdf1_path + '_biosig.npy')
raw_biosig = raw_biosig * 1e-6 # data are stored in microvolts
data_biosig = raw_biosig[picks]
# Assert data are almost equal
assert_array_almost_equal(data, data_biosig, 8)
# Test for events
assert len(raw.annotations.duration) == 963
# gh-5604
assert raw.info['meas_date'] is None
@testing.requires_testing_data
def test_gdf2_birthday(tmpdir):
"""Test reading raw GDF 2.x files."""
new_fname = str(tmpdir.join('temp.gdf'))
shutil.copyfile(gdf2_path + '.gdf', new_fname)
d = int(3.1e15) # chosen by trial and error to give a reasonable age
with open(new_fname, 'r+b') as fid:
fid.seek(176, 0)
assert np.fromfile(fid, np.uint64, 1)[0] == 0
fid.seek(176, 0)
fid.write(np.array([d], np.uint64).tobytes())
fid.seek(176, 0)
assert np.fromfile(fid, np.uint64, 1)[0] == d
raw = read_raw_gdf(new_fname, eog=None, misc=None, preload=True)
assert raw._raw_extras[0]['subject_info']['age'] == 44
# XXX this is a bug, it should be populated...
assert raw.info['subject_info'] is None
@testing.requires_testing_data
def test_gdf2_data():
"""Test reading raw GDF 2.x files."""
raw = read_raw_gdf(gdf2_path + '.gdf', eog=None, misc=None, preload=True)
assert raw._raw_extras[0]['subject_info']['age'] is None
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
data, _ = raw[picks]
# This .mat was generated using the official biosig matlab package
mat = sio.loadmat(gdf2_path + '_biosig.mat')
data_biosig = mat['dat'] * 1e-6 # data are stored in microvolts
data_biosig = data_biosig[picks]
# Assert data are almost equal
assert_array_almost_equal(data, data_biosig, 8)
# Find events
events = find_events(raw, verbose=1)
events[:, 2] >>= 8 # last 8 bits are system events in biosemi files
assert_equal(events.shape[0], 2) # 2 events in file
assert_array_equal(events[:, 2], [20, 28])
# gh-5604
assert raw.info['meas_date'] is None
_test_raw_reader(read_raw_gdf, input_fname=gdf2_path + '.gdf',
eog=None, misc=None,
test_scaling=False, # XXX this should be True
)
@testing.requires_testing_data
def test_one_channel_gdf():
"""Test a one-channel GDF file."""
with pytest.warns(RuntimeWarning, match='different highpass'):
ecg = read_raw_gdf(gdf_1ch_path, preload=True)
assert ecg['ECG'][0].shape == (1, 4500)
assert 150.0 == ecg.info['sfreq']
@testing.requires_testing_data
def test_gdf_exclude_channels():
"""Test reading GDF data with excluded channels."""
raw = read_raw_gdf(gdf1_path + '.gdf', exclude=('FP1', 'O1'))
assert 'FP1' not in raw.ch_names
assert 'O1' not in raw.ch_names
raw = read_raw_gdf(gdf2_path + '.gdf', exclude=('Fp1', 'O1'))
assert 'Fp1' not in raw.ch_names
assert 'O1' not in raw.ch_names
run_tests_if_main()
|
import asyncio
import pathlib
from collections import namedtuple
from typing import Any, NamedTuple
from pathlib import Path
import pytest
from pytest_mock import MockFixture
from redbot.pytest.downloader import *
from redbot.cogs.downloader.repo_manager import Installable
from redbot.cogs.downloader.repo_manager import Candidate, ProcessFormatter, RepoManager, Repo
from redbot.cogs.downloader.errors import (
AmbiguousRevision,
ExistingGitRepo,
GitException,
UnknownRevision,
)
class FakeCompletedProcess(NamedTuple):
returncode: int
stdout: bytes = b""
stderr: bytes = b""
def _mock_run(
mocker: MockFixture, repo: Repo, returncode: int, stdout: bytes = b"", stderr: bytes = b""
):
return mocker.patch.object(
repo, "_run", autospec=True, return_value=FakeCompletedProcess(returncode, stdout, stderr)
)
def _mock_setup_repo(mocker: MockFixture, repo: Repo, commit: str):
def update_commit(*args, **kwargs):
repo.commit = commit
return mocker.DEFAULT
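# Returning mocker.DEFAULT makes the patched _setup_repo fall back to its
# configured return_value (None here) after the side effect has recorded
# the commit on the repo.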
return mocker.patch.object(
repo, "_setup_repo", autospec=True, side_effect=update_commit, return_value=None
)
def test_existing_git_repo(tmp_path):
repo_folder = tmp_path / "repos" / "squid" / ".git"
repo_folder.mkdir(parents=True, exist_ok=True)
r = Repo(
url="https://github.com/tekulvw/Squid-Plugins",
name="squid",
branch="rewrite_cogs",
commit="6acb5decbb717932e5dc0cda7fca0eff452c47dd",
folder_path=repo_folder.parent,
)
exists, git_path = r._existing_git_repo()
assert exists is True
assert git_path == repo_folder
ancestor_rev = "c950fc05a540dd76b944719c2a3302da2e2f3090"
descendant_rev = "fb99eb7d2d5bed514efc98fe6686b368f8425745"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"maybe_ancestor_rev,descendant_rev,returncode,expected",
[(ancestor_rev, descendant_rev, 0, True), (descendant_rev, ancestor_rev, 1, False)],
)
async def test_is_ancestor(mocker, repo, maybe_ancestor_rev, descendant_rev, returncode, expected):
m = _mock_run(mocker, repo, returncode)
ret = await repo.is_ancestor(maybe_ancestor_rev, descendant_rev)
m.assert_called_once_with(
ProcessFormatter().format(
repo.GIT_IS_ANCESTOR,
path=repo.folder_path,
maybe_ancestor_rev=maybe_ancestor_rev,
descendant_rev=descendant_rev,
),
valid_exit_codes=(0, 1),
debug_only=True,
)
assert ret is expected
@pytest.mark.asyncio
async def test_is_ancestor_object_raise(mocker, repo):
m = _mock_run(mocker, repo, 128, b"", b"fatal: Not a valid object name invalid1")
with pytest.raises(UnknownRevision):
await repo.is_ancestor("invalid1", "invalid2")
m.assert_called_once_with(
ProcessFormatter().format(
repo.GIT_IS_ANCESTOR,
path=repo.folder_path,
maybe_ancestor_rev="invalid1",
descendant_rev="invalid2",
),
valid_exit_codes=(0, 1),
debug_only=True,
)
@pytest.mark.asyncio
async def test_is_ancestor_commit_raise(mocker, repo):
m = _mock_run(
mocker,
repo,
128,
b"",
b"fatal: Not a valid commit name 0123456789abcde0123456789abcde0123456789",
)
with pytest.raises(UnknownRevision):
await repo.is_ancestor(
"0123456789abcde0123456789abcde0123456789", "c950fc05a540dd76b944719c2a3302da2e2f3090"
)
m.assert_called_once_with(
ProcessFormatter().format(
repo.GIT_IS_ANCESTOR,
path=repo.folder_path,
maybe_ancestor_rev="0123456789abcde0123456789abcde0123456789",
descendant_rev="c950fc05a540dd76b944719c2a3302da2e2f3090",
),
valid_exit_codes=(0, 1),
debug_only=True,
)
@pytest.mark.asyncio
async def test_get_file_update_statuses(mocker, repo):
old_rev = "c950fc05a540dd76b944719c2a3302da2e2f3090"
new_rev = "fb99eb7d2d5bed514efc98fe6686b368f8425745"
m = _mock_run(
mocker,
repo,
0,
b"A\x00added_file.txt\x00\t"
b"M\x00mycog/__init__.py\x00\t"
b"D\x00sample_file1.txt\x00\t"
b"D\x00sample_file2.txt\x00\t"
b"A\x00sample_file3.txt",
)
ret = await repo._get_file_update_statuses(old_rev, new_rev)
m.assert_called_once_with(
ProcessFormatter().format(
repo.GIT_DIFF_FILE_STATUS, path=repo.folder_path, old_rev=old_rev, new_rev=new_rev
)
)
assert ret == {
"added_file.txt": "A",
"mycog/__init__.py": "M",
"sample_file1.txt": "D",
"sample_file2.txt": "D",
"sample_file3.txt": "A",
}
@pytest.mark.asyncio
async def test_is_module_modified(mocker, repo):
old_rev = "c950fc05a540dd76b944719c2a3302da2e2f3090"
new_rev = "fb99eb7d2d5bed514efc98fe6686b368f8425745"
FakeInstallable = namedtuple("Installable", "name commit")
module = FakeInstallable("mycog", new_rev)
m = mocker.patch.object(
repo,
"_get_file_update_statuses",
autospec=True,
return_value={
"added_file.txt": "A",
"mycog/__init__.py": "M",
"sample_file1.txt": "D",
"sample_file2.txt": "D",
"sample_file3.txt": "A",
},
)
ret = await repo._is_module_modified(module, old_rev)
m.assert_called_once_with(old_rev, new_rev)
assert ret is True
@pytest.mark.asyncio
async def test_get_full_sha1_success(mocker, repo):
commit = "c950fc05a540dd76b944719c2a3302da2e2f3090"
m = _mock_run(mocker, repo, 0, commit.encode())
ret = await repo.get_full_sha1(commit)
m.assert_called_once_with(
ProcessFormatter().format(repo.GIT_GET_FULL_SHA1, path=repo.folder_path, rev=commit)
)
assert ret == commit
@pytest.mark.asyncio
async def test_get_full_sha1_notfound(mocker, repo):
m = _mock_run(mocker, repo, 128, b"", b"fatal: Needed a single revision")
with pytest.raises(UnknownRevision):
await repo.get_full_sha1("invalid")
m.assert_called_once_with(
ProcessFormatter().format(repo.GIT_GET_FULL_SHA1, path=repo.folder_path, rev="invalid")
)
@pytest.mark.asyncio
async def test_get_full_sha1_ambiguous(mocker, repo):
m = _mock_run(
mocker,
repo,
128,
b"",
b"error: short SHA1 c6f0 is ambiguous\n"
b"hint: The candidates are:\n"
b"hint: c6f028f tag ambiguous_tag_66387\n"
b"hint: c6f0e5e commit 2019-10-24 - Commit ambiguous with tag.\n"
b"fatal: Needed a single revision",
)
with pytest.raises(AmbiguousRevision) as exc_info:
await repo.get_full_sha1("c6f0")
m.assert_called_once_with(
ProcessFormatter().format(repo.GIT_GET_FULL_SHA1, path=repo.folder_path, rev="c6f0")
)
assert exc_info.value.candidates == [
Candidate("c6f028f", "tag", "ambiguous_tag_66387"),
Candidate("c6f0e5e", "commit", "2019-10-24 - Commit ambiguous with tag."),
]
def test_update_available_modules(repo):
module = repo.folder_path / "mycog" / "__init__.py"
submodule = module.parent / "submodule" / "__init__.py"
module.parent.mkdir(parents=True)
module.touch()
submodule.parent.mkdir()
submodule.touch()
ret = repo._update_available_modules()
assert (
ret
== repo.available_modules
== (Installable(location=module.parent, repo=repo, commit=repo.commit),)
)
@pytest.mark.asyncio
async def test_checkout(mocker, repo):
commit = "c950fc05a540dd76b944719c2a3302da2e2f3090"
m = _mock_run(mocker, repo, 0)
_mock_setup_repo(mocker, repo, commit)
git_path = repo.folder_path / ".git"
git_path.mkdir()
await repo._checkout(commit)
assert repo.commit == commit
m.assert_called_once_with(
ProcessFormatter().format(repo.GIT_CHECKOUT, path=repo.folder_path, rev=commit)
)
@pytest.mark.asyncio
async def test_checkout_ctx_manager(mocker, repo):
commit = "c950fc05a540dd76b944719c2a3302da2e2f3090"
m = mocker.patch.object(repo, "_checkout", autospec=True, return_value=None)
old_commit = repo.commit
async with repo.checkout(commit):
m.assert_called_with(commit, force_checkout=False)
m.return_value = None
m.assert_called_with(old_commit, force_checkout=False)
@pytest.mark.asyncio
async def test_checkout_await(mocker, repo):
commit = "c950fc05a540dd76b944719c2a3302da2e2f3090"
m = mocker.patch.object(repo, "_checkout", autospec=True, return_value=None)
await repo.checkout(commit)
m.assert_called_once_with(commit, force_checkout=False)
@pytest.mark.asyncio
async def test_clone_with_branch(mocker, repo):
branch = repo.branch = "dont_add_commits"
commit = "a0ccc2390883c85a361f5a90c72e1b07958939fa"
repo.commit = ""
m = _mock_run(mocker, repo, 0)
_mock_setup_repo(mocker, repo, commit)
await repo.clone()
assert repo.commit == commit
m.assert_called_once_with(
ProcessFormatter().format(
repo.GIT_CLONE, branch=branch, url=repo.url, folder=repo.folder_path
)
)
@pytest.mark.asyncio
async def test_clone_without_branch(mocker, repo):
branch = "dont_add_commits"
commit = "a0ccc2390883c85a361f5a90c72e1b07958939fa"
repo.branch = None
repo.commit = ""
m = _mock_run(mocker, repo, 0)
_mock_setup_repo(mocker, repo, commit)
mocker.patch.object(repo, "current_branch", autospec=True, return_value=branch)
await repo.clone()
assert repo.commit == commit
m.assert_called_once_with(
ProcessFormatter().format(repo.GIT_CLONE_NO_BRANCH, url=repo.url, folder=repo.folder_path)
)
@pytest.mark.asyncio
async def test_update(mocker, repo):
old_commit = repo.commit
new_commit = "a0ccc2390883c85a361f5a90c72e1b07958939fa"
m = _mock_run(mocker, repo, 0)
_mock_setup_repo(mocker, repo, new_commit)
mocker.patch.object(repo, "latest_commit", autospec=True, return_value=old_commit)
mocker.patch.object(repo, "hard_reset", autospec=True, return_value=None)
ret = await repo.update()
assert ret == (old_commit, new_commit)
m.assert_called_once_with(ProcessFormatter().format(repo.GIT_PULL, path=repo.folder_path))
# old tests
@pytest.mark.asyncio
async def test_add_repo(monkeypatch, repo_manager):
monkeypatch.setattr("redbot.cogs.downloader.repo_manager.Repo._run", fake_run_noprint)
monkeypatch.setattr(
"redbot.cogs.downloader.repo_manager.Repo.current_commit", fake_current_commit
)
squid = await repo_manager.add_repo(
url="https://github.com/tekulvw/Squid-Plugins", name="squid", branch="rewrite_cogs"
)
assert squid.available_modules == ()
@pytest.mark.asyncio
async def test_lib_install_requirements(monkeypatch, library_installable, repo, tmpdir):
monkeypatch.setattr("redbot.cogs.downloader.repo_manager.Repo._run", fake_run_noprint)
monkeypatch.setattr(
"redbot.cogs.downloader.repo_manager.Repo.available_libraries", (library_installable,)
)
lib_path = Path(str(tmpdir)) / "cog_data_path" / "lib"
sharedlib_path = lib_path / "cog_shared"
sharedlib_path.mkdir(parents=True, exist_ok=True)
installed, failed = await repo.install_libraries(
target_dir=sharedlib_path, req_target_dir=lib_path
)
assert len(installed) == 1
assert len(failed) == 0
@pytest.mark.asyncio
async def test_remove_repo(monkeypatch, repo_manager):
monkeypatch.setattr("redbot.cogs.downloader.repo_manager.Repo._run", fake_run_noprint)
monkeypatch.setattr(
"redbot.cogs.downloader.repo_manager.Repo.current_commit", fake_current_commit
)
await repo_manager.add_repo(
url="https://github.com/tekulvw/Squid-Plugins", name="squid", branch="rewrite_cogs"
)
assert repo_manager.get_repo("squid") is not None
await repo_manager.delete_repo("squid")
assert repo_manager.get_repo("squid") is None
@pytest.mark.asyncio
async def test_existing_repo(mocker, repo_manager):
repo_manager.does_repo_exist = mocker.MagicMock(return_value=True)
with pytest.raises(ExistingGitRepo):
await repo_manager.add_repo("http://test.com", "test")
repo_manager.does_repo_exist.assert_called_once_with("test")
def test_tree_url_parse(repo_manager):
cases = [
{
"input": ("https://github.com/Tobotimus/Tobo-Cogs", None),
"expected": ("https://github.com/Tobotimus/Tobo-Cogs", None),
},
{
"input": ("https://github.com/Tobotimus/Tobo-Cogs", "V3"),
"expected": ("https://github.com/Tobotimus/Tobo-Cogs", "V3"),
},
{
"input": ("https://github.com/Tobotimus/Tobo-Cogs/tree/V3", None),
"expected": ("https://github.com/Tobotimus/Tobo-Cogs", "V3"),
},
{
"input": ("https://github.com/Tobotimus/Tobo-Cogs/tree/V3", "V4"),
"expected": ("https://github.com/Tobotimus/Tobo-Cogs", "V4"),
},
]
for test_case in cases:
assert test_case["expected"] == repo_manager._parse_url(*test_case["input"])
def test_tree_url_non_github(repo_manager):
cases = [
{
"input": ("https://gitlab.com/Tobotimus/Tobo-Cogs", None),
"expected": ("https://gitlab.com/Tobotimus/Tobo-Cogs", None),
},
{
"input": ("https://my.usgs.gov/bitbucket/scm/Tobotimus/Tobo-Cogs", "V3"),
"expected": ("https://my.usgs.gov/bitbucket/scm/Tobotimus/Tobo-Cogs", "V3"),
},
]
for test_case in cases:
assert test_case["expected"] == repo_manager._parse_url(*test_case["input"])
|
from abc import abstractmethod
import asyncio
from datetime import timedelta
import logging
from pymfy.api.devices.category import Category
import voluptuous as vol
from homeassistant.components.somfy import config_flow
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import callback
from homeassistant.helpers import (
config_entry_oauth2_flow,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import api
from .const import API, CONF_OPTIMISTIC, COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=1)
SCAN_INTERVAL_ALL_ASSUMED_STATE = timedelta(minutes=60)
SOMFY_AUTH_CALLBACK_PATH = "/auth/somfy/callback"
SOMFY_AUTH_START = "/auth/somfy"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Inclusive(CONF_CLIENT_ID, "oauth"): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, "oauth"): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SOMFY_COMPONENTS = ["cover", "switch"]
async def async_setup(hass, config):
"""Set up the Somfy component."""
hass.data[DOMAIN] = {}
domain_config = config.get(DOMAIN, {})
hass.data[DOMAIN][CONF_OPTIMISTIC] = domain_config.get(CONF_OPTIMISTIC, False)
if CONF_CLIENT_ID in domain_config:
config_flow.SomfyFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
"https://accounts.somfy.com/oauth/oauth/v2/auth",
"https://accounts.somfy.com/oauth/oauth/v2/token",
),
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Set up Somfy from a config entry."""
# Backwards compat
if "auth_implementation" not in entry.data:
hass.config_entries.async_update_entry(
entry, data={**entry.data, "auth_implementation": DOMAIN}
)
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
data = hass.data[DOMAIN]
data[API] = api.ConfigEntrySomfyApi(hass, entry, implementation)
async def _update_all_devices():
"""Update all the devices."""
devices = await hass.async_add_executor_job(data[API].get_devices)
return {dev.id: dev for dev in devices}
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="somfy device update",
update_method=_update_all_devices,
update_interval=SCAN_INTERVAL,
)
data[COORDINATOR] = coordinator
await coordinator.async_refresh()
if all(not bool(device.states) for device in coordinator.data.values()):
_LOGGER.debug(
"All devices have assumed state. Update interval has been reduced to: %s",
SCAN_INTERVAL_ALL_ASSUMED_STATE,
)
coordinator.update_interval = SCAN_INTERVAL_ALL_ASSUMED_STATE
device_registry = await dr.async_get_registry(hass)
hubs = [
device
for device in coordinator.data.values()
if Category.HUB.value in device.categories
]
for hub in hubs:
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, hub.id)},
manufacturer="Somfy",
name=hub.name,
model=hub.type,
)
for component in SOMFY_COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
hass.data[DOMAIN].pop(API, None)
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in SOMFY_COMPONENTS
]
)
return True
class SomfyEntity(CoordinatorEntity, Entity):
"""Representation of a generic Somfy device."""
def __init__(self, coordinator, device_id, somfy_api):
"""Initialize the Somfy device."""
super().__init__(coordinator)
self._id = device_id
self.api = somfy_api
@property
def device(self):
"""Return data for the device id."""
return self.coordinator.data[self._id]
@property
def unique_id(self):
"""Return the unique id base on the id returned by Somfy."""
return self._id
@property
def name(self):
"""Return the name of the device."""
return self.device.name
@property
def device_info(self):
"""Return device specific attributes.
Implemented by platform classes.
"""
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"model": self.device.type,
"via_hub": (DOMAIN, self.device.parent_id),
# For the moment, Somfy only returns their own device.
"manufacturer": "Somfy",
}
def has_capability(self, capability):
"""Test if device has a capability."""
capabilities = self.device.capabilities
return bool([c for c in capabilities if c.name == capability])
@property
def assumed_state(self):
"""Return if the device has an assumed state."""
return not bool(self.device.states)
@callback
def _handle_coordinator_update(self):
"""Process an update from the coordinator."""
self._create_device()
super()._handle_coordinator_update()
@abstractmethod
def _create_device(self):
"""Update the device with the latest data."""
|
import atexit
import dis
import sys
from coverage import env
# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
YIELD_VALUE = dis.opmap['YIELD_VALUE']
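# On Python 2, co_code is a str and indexing it yields one-character strings,
# so the opcode is converted with chr() below to keep the comparison in
# _trace() type-consistent; on Python 3 indexing bytes already yields an int.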
if env.PY2:
YIELD_VALUE = chr(YIELD_VALUE)
class PyTracer(object):
"""Python implementation of the raw data tracer."""
# Because of poor implementations of trace-function-manipulating tools,
# the Python trace function must be kept very simple. In particular, there
# must be only one function ever set as the trace function, both through
# sys.settrace, and as the return value from the trace function. Put
# another way, the trace function must always return itself. It cannot
# swap in other functions, or return None to avoid tracing a particular
# frame.
#
# The trace manipulator that introduced this restriction is DecoratorTools,
# which sets a trace function, and then later restores the pre-existing one
# by calling sys.settrace with a function it found in the current frame.
#
# Systems that use DecoratorTools (or similar trace manipulations) must use
# PyTracer to get accurate results. The command-line --timid argument is
# used to force the use of this tracer.
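# As an illustration only (not part of coverage.py), a trace function that
# honors this restriction has roughly this shape:
#
#   def tracer(frame, event, arg):
#       ...record frame.f_code.co_filename and frame.f_lineno...
#       return tracer        # always hand back the same callable
#
#   sys.settrace(tracer)
#
# PyTracer._trace below follows that pattern, returning None only when it is
# deliberately shutting itself down.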
def __init__(self):
# Attributes set from the collector:
self.data = None
self.trace_arcs = False
self.should_trace = None
self.should_trace_cache = None
self.should_start_context = None
self.warn = None
# The threading module to use, if any.
self.threading = None
self.cur_file_dict = None
self.last_line = 0 # int, but uninitialized.
self.cur_file_name = None
self.context = None
self.started_context = False
self.data_stack = []
self.last_exc_back = None
self.last_exc_firstlineno = 0
self.thread = None
self.stopped = False
self._activity = False
self.in_atexit = False
# On exit, self.in_atexit = True
atexit.register(setattr, self, 'in_atexit', True)
def __repr__(self):
return "<PyTracer at {}: {} lines in {} files>".format(
id(self),
sum(len(v) for v in self.data.values()),
len(self.data),
)
def log(self, marker, *args):
"""For hard-core logging of what this tracer is doing."""
with open("/tmp/debug_trace.txt", "a") as f:
f.write("{} {:x}.{:x}[{}] {:x} {}\n".format(
marker,
id(self),
self.thread.ident,
len(self.data_stack),
self.threading.currentThread().ident,
" ".join(map(str, args))
))
def _trace(self, frame, event, arg_unused):
"""The trace function passed to sys.settrace."""
#self.log(":", frame.f_code.co_filename, frame.f_lineno, event)
if (self.stopped and sys.gettrace() == self._trace): # pylint: disable=comparison-with-callable
# The PyTrace.stop() method has been called, possibly by another
# thread, let's deactivate ourselves now.
#self.log("X", frame.f_code.co_filename, frame.f_lineno)
sys.settrace(None)
return None
if self.last_exc_back:
if frame == self.last_exc_back:
# Someone forgot a return event.
if self.trace_arcs and self.cur_file_dict:
pair = (self.last_line, -self.last_exc_firstlineno)
self.cur_file_dict[pair] = None
self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
self.data_stack.pop()
)
self.last_exc_back = None
if event == 'call':
# Should we start a new context?
if self.should_start_context and self.context is None:
context_maybe = self.should_start_context(frame) # pylint: disable=not-callable
if context_maybe is not None:
self.context = context_maybe
self.started_context = True
self.switch_context(self.context)
else:
self.started_context = False
else:
self.started_context = False
# Entering a new frame. Decide if we should trace
# in this file.
self._activity = True
self.data_stack.append(
(
self.cur_file_dict,
self.cur_file_name,
self.last_line,
self.started_context,
)
)
filename = frame.f_code.co_filename
self.cur_file_name = filename
disp = self.should_trace_cache.get(filename)
if disp is None:
disp = self.should_trace(filename, frame) # pylint: disable=not-callable
self.should_trace_cache[filename] = disp # pylint: disable=unsupported-assignment-operation
self.cur_file_dict = None
if disp.trace:
tracename = disp.source_filename
if tracename not in self.data: # pylint: disable=unsupported-membership-test
self.data[tracename] = {} # pylint: disable=unsupported-assignment-operation
self.cur_file_dict = self.data[tracename] # pylint: disable=unsubscriptable-object
# The call event is really a "start frame" event, and happens for
# function calls and re-entering generators. The f_lasti field is
# -1 for calls, and a real offset for generators. Use <0 as the
# line number for calls, and the real line number for generators.
if getattr(frame, 'f_lasti', -1) < 0:
self.last_line = -frame.f_code.co_firstlineno
else:
self.last_line = frame.f_lineno
elif event == 'line':
# Record an executed line.
if self.cur_file_dict is not None:
lineno = frame.f_lineno
#if frame.f_code.co_filename != self.cur_file_name:
# self.log("*", frame.f_code.co_filename, self.cur_file_name, lineno)
if self.trace_arcs:
self.cur_file_dict[(self.last_line, lineno)] = None
else:
self.cur_file_dict[lineno] = None
self.last_line = lineno
elif event == 'return':
if self.trace_arcs and self.cur_file_dict:
# Record an arc leaving the function, but beware that a
# "return" event might just mean yielding from a generator.
# Jython seems to have an empty co_code, so just assume return.
code = frame.f_code.co_code
if (not code) or code[frame.f_lasti] != YIELD_VALUE:
first = frame.f_code.co_firstlineno
self.cur_file_dict[(self.last_line, -first)] = None
# Leaving this function, pop the filename stack.
self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
self.data_stack.pop()
)
# Leaving a context?
if self.started_context:
self.context = None
self.switch_context(None)
elif event == 'exception':
self.last_exc_back = frame.f_back
self.last_exc_firstlineno = frame.f_code.co_firstlineno
return self._trace
def start(self):
"""Start this Tracer.
Return a Python function suitable for use with sys.settrace().
"""
self.stopped = False
if self.threading:
if self.thread is None:
self.thread = self.threading.currentThread()
else:
if self.thread.ident != self.threading.currentThread().ident:
# Re-starting from a different thread!? Don't set the trace
# function, but we are marked as running again, so maybe it
# will be ok?
#self.log("~", "starting on different threads")
return self._trace
sys.settrace(self._trace)
return self._trace
def stop(self):
"""Stop this Tracer."""
# Get the active tracer callback before setting the stop flag to be
# able to detect if the tracer was changed prior to stopping it.
tf = sys.gettrace()
# Set the stop flag. The actual call to sys.settrace(None) will happen
# in the self._trace callback itself to make sure to call it from the
# right thread.
self.stopped = True
if self.threading and self.thread.ident != self.threading.currentThread().ident:
# Called on a different thread than started us: we can't unhook
# ourselves, but we've set the flag that we should stop, so we
# won't do any more tracing.
#self.log("~", "stopping on different threads")
return
if self.warn:
# PyPy clears the trace function before running atexit functions,
# so don't warn if we are in atexit on PyPy and the trace function
# has changed to None.
dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None)
if (not dont_warn) and tf != self._trace: # pylint: disable=comparison-with-callable
self.warn( # pylint: disable=not-callable
"Trace function changed, measurement is likely wrong: %r" % (tf,),
slug="trace-changed",
)
def activity(self):
"""Has there been any activity?"""
return self._activity
def reset_activity(self):
"""Reset the activity() flag."""
self._activity = False
def get_stats(self):
"""Return a dictionary of statistics, or None."""
return None
|
import distutils.spawn
import os
import shutil
import pexpect
import pytest
import sh
from molecule import logger
from molecule import util
from ..conftest import change_dir_to
LOG = logger.get_logger(__name__)
IS_TRAVIS = os.getenv('TRAVIS') and os.getenv('CI')
def _env_vars_exposed(env_vars, env=os.environ):
"""Check if environment variables are exposed."""
return all(var in env for var in env_vars)
@pytest.fixture
def with_scenario(request, scenario_to_test, driver_name, scenario_name,
skip_test):
scenario_directory = os.path.join(
os.path.dirname(util.abs_path(__file__)), os.path.pardir, 'scenarios',
scenario_to_test)
with change_dir_to(scenario_directory):
yield
if scenario_name:
msg = 'CLEANUP: Destroying instances for all scenario(s)'
LOG.out(msg)
options = {
'driver_name': driver_name,
'all': True,
}
cmd = sh.molecule.bake('destroy', **options)
pytest.helpers.run_command(cmd)
@pytest.fixture
def skip_test(request, driver_name):
msg_tmpl = ("Ignoring '{}' tests for now" if driver_name == 'delegated'
else "Skipped '{}' not supported")
support_checks_map = {
'azure': supports_azure,
'digitalocean': supports_digitalocean,
'docker': supports_docker,
'ec2': supports_ec2,
'gce': supports_gce,
'linode': supports_linode,
'lxc': supports_lxc,
'lxd': supports_lxd,
'openstack': supports_openstack,
'vagrant': supports_vagrant_virtualbox,
'delegated': demands_delegated,
}
try:
check_func = support_checks_map[driver_name]
if not check_func():
pytest.skip(msg_tmpl.format(driver_name))
except KeyError:
pass
@pytest.helpers.register
def idempotence(scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('idempotence', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def init_role(temp_dir, driver_name):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
cmd = sh.molecule.bake('init', 'role', {
'driver-name': driver_name,
'role-name': 'test-init'
})
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
options = {
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def init_scenario(temp_dir, driver_name):
# Create role
role_directory = os.path.join(temp_dir.strpath, 'test-init')
cmd = sh.molecule.bake('init', 'role', {
'driver-name': driver_name,
'role-name': 'test-init'
})
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
# Create scenario
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
options = {
'scenario_name': 'test-scenario',
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def metadata_lint_update(role_directory):
# By default, ansible-lint will fail on newly-created roles because the
# fields in this file have not been changed from their defaults. This is
# good because molecule should create this file using the defaults, and
# users should receive feedback to change these defaults. However, this
# blocks the testing of 'molecule init' itself, so ansible-lint should
# be configured to ignore these metadata lint errors.
ansible_lint_src = os.path.join(
os.path.dirname(util.abs_path(__file__)), '.ansible-lint')
shutil.copy(ansible_lint_src, role_directory)
# Explicitly lint here to catch any unexpected lint errors before
# continuing functional testing. Ansible lint is run at the root
# of the role directory and pointed at the role directory to ensure
# the customized ansible-lint config is used.
with change_dir_to(role_directory):
cmd = sh.ansible_lint.bake('.')
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def list(x):
cmd = sh.molecule.bake('list')
out = pytest.helpers.run_command(cmd, log=False)
out = out.stdout.decode('utf-8')
out = util.strip_ansi_color(out)
for l in x.splitlines():
assert l in out
@pytest.helpers.register
def list_with_format_plain(x):
cmd = sh.molecule.bake('list', {'format': 'plain'})
out = pytest.helpers.run_command(cmd, log=False)
out = out.stdout.decode('utf-8')
out = util.strip_ansi_color(out)
for l in x.splitlines():
assert l in out
@pytest.helpers.register
def login(login_args, scenario_name='default'):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('destroy', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
for instance, regexp in login_args:
if len(login_args) > 1:
child_cmd = 'molecule login --host {} --scenario-name {}'.format(
instance, scenario_name)
else:
child_cmd = 'molecule login --scenario-name {}'.format(
scenario_name)
child = pexpect.spawn(child_cmd)
child.expect(regexp)
# If the test returns and doesn't hang it succeeded.
child.sendline('exit')
@pytest.helpers.register
def test(driver_name, scenario_name='default'):
options = {
'scenario_name': scenario_name,
'all': True,
}
if driver_name == 'delegated':
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.helpers.register
def verify(scenario_name='default'):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
def get_docker_executable():
return distutils.spawn.find_executable('docker')
def get_lxc_executable():
return distutils.spawn.find_executable('lxc-start')
def get_lxd_executable():
return distutils.spawn.find_executable('lxd')
def get_vagrant_executable():
return distutils.spawn.find_executable('vagrant')
def get_virtualbox_executable():
return distutils.spawn.find_executable('VBoxManage')
@pytest.helpers.register
def supports_docker():
return get_docker_executable()
@pytest.helpers.register
def supports_linode():
from ansible.modules.cloud.linode.linode import HAS_LINODE
env_vars = ('LINODE_API_KEY', )
return _env_vars_exposed(env_vars) and HAS_LINODE
@pytest.helpers.register
def supports_lxc():
# noqa: E501 # FIXME: Travis CI
# noqa: E501 # This fixes most of the errors:
# noqa: E501 # $ mkdir -p ~/.config/lxc
# noqa: E501 # $ echo "lxc.id_map = u 0 100000 65536" > ~/.config/lxc/default.conf
# noqa: E501 # $ echo "lxc.id_map = g 0 100000 65536" >> ~/.config/lxc/default.conf
# noqa: E501 # $ echo "lxc.network.type = veth" >> ~/.config/lxc/default.conf
# noqa: E501 # $ echo "lxc.network.link = lxcbr0" >> ~/.config/lxc/default.conf
# noqa: E501 # $ echo "$USER veth lxcbr0 2" | sudo tee -a /etc/lxc/lxc-usernet
# noqa: E501 # travis veth lxcbr0 2
# noqa: E501 # But there's still one left:
# noqa: E501 # $ cat ~/lxc-instance.log
# noqa: E501 # lxc-create 1542112494.884 INFO lxc_utils - utils.c:get_rundir:229 - XDG_RUNTIME_DIR isn't set in the environment.
# noqa: E501 # lxc-create 1542112494.884 WARN lxc_log - log.c:lxc_log_init:331 - lxc_log_init called with log already initialized
# noqa: E501 # lxc-create 1542112494.884 INFO lxc_confile - confile.c:config_idmap:1385 - read uid map: type u nsid 0 hostid 100000 range 65536
# noqa: E501 # lxc-create 1542112494.884 INFO lxc_confile - confile.c:config_idmap:1385 - read uid map: type g nsid 0 hostid 100000 range 65536
# noqa: E501 # lxc-create 1542112494.887 ERROR lxc_container - lxccontainer.c:do_create_container_dir:767 - Failed to chown container dir
# noqa: E501 # lxc-create 1542112494.887 ERROR lxc_create_ui - lxc_create.c:main:274 - Error creating container instance
return not IS_TRAVIS and get_lxc_executable()
@pytest.helpers.register
def supports_lxd():
# FIXME: Travis CI
return not IS_TRAVIS and get_lxd_executable()
@pytest.helpers.register
def supports_vagrant_virtualbox():
return (get_vagrant_executable() or get_virtualbox_executable())
@pytest.helpers.register
def demands_delegated():
return pytest.config.getoption('--delegated')
@pytest.helpers.register
def supports_azure():
from ansible.module_utils.azure_rm_common import HAS_AZURE
env_vars = (
'AZURE_SUBSCRIPTION_ID',
'AZURE_CLIENT_ID',
'AZURE_SECRET',
'AZURE_TENANT',
)
return _env_vars_exposed(env_vars) and HAS_AZURE
@pytest.helpers.register
def supports_digitalocean():
from ansible.modules.cloud.digital_ocean.digital_ocean import HAS_DOPY
env_vars = ('DO_API_KEY', )
return _env_vars_exposed(env_vars) and HAS_DOPY
@pytest.helpers.register
def supports_ec2():
from ansible.module_utils.ec2 import HAS_BOTO3
env_vars = (
'AWS_ACCESS_KEY',
'AWS_SECRET_ACCESS_KEY',
)
return _env_vars_exposed(env_vars) and HAS_BOTO3
@pytest.helpers.register
def supports_gce():
from ansible.module_utils.gcp import HAS_GOOGLE_AUTH
env_vars = (
'GCE_SERVICE_ACCOUNT_EMAIL',
'GCE_CREDENTIALS_FILE',
'GCE_PROJECT_ID',
)
return _env_vars_exposed(env_vars) and HAS_GOOGLE_AUTH
@pytest.helpers.register
def supports_openstack():
pytest.importorskip('shade') # Ansible provides no import
env_vars = (
'OS_AUTH_URL',
'OS_PASSWORD',
'OS_REGION_NAME',
'OS_USERNAME',
'OS_TENANT_NAME',
)
return _env_vars_exposed(env_vars)
@pytest.helpers.register
def has_inspec():
return distutils.spawn.find_executable('inspec')
@pytest.helpers.register
def has_rubocop():
return distutils.spawn.find_executable('rubocop')
needs_inspec = pytest.mark.skipif(
not has_inspec(),
reason='Needs inspec to be pre-installed and available in $PATH')
needs_rubocop = pytest.mark.skipif(
not has_rubocop(),
reason='Needs rubocop to be pre-installed and available in $PATH')
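# Illustrative sketch (not part of the original conftest): a test module could
# gate an InSpec-based check on the marker defined above; the test body and the
# scenario name here are hypothetical.
#
#   @needs_inspec
#   def test_verify_with_inspec():
#       cmd = sh.molecule.bake('verify', scenario_name='inspec')
#       pytest.helpers.run_command(cmd)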
|
import asyncio
import unittest
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from tests.common import get_test_home_assistant
class AsyncMediaPlayer(mp.MediaPlayerEntity):
"""Async media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.SUPPORT_VOLUME_SET
| mp.const.SUPPORT_PLAY
| mp.const.SUPPORT_PAUSE
| mp.const.SUPPORT_TURN_OFF
| mp.const.SUPPORT_TURN_ON
)
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
async def async_media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
async def async_media_pause(self):
"""Send pause command."""
self._state = STATE_PAUSED
async def async_turn_on(self):
"""Turn the media player on."""
self._state = STATE_ON
async def async_turn_off(self):
"""Turn the media player off."""
self._state = STATE_OFF
class SyncMediaPlayer(mp.MediaPlayerEntity):
"""Sync media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.SUPPORT_VOLUME_SET
| mp.const.SUPPORT_VOLUME_STEP
| mp.const.SUPPORT_PLAY
| mp.const.SUPPORT_PAUSE
| mp.const.SUPPORT_TURN_OFF
| mp.const.SUPPORT_TURN_ON
)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
def volume_up(self):
"""Turn volume up for media player."""
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + 0.2))
def volume_down(self):
"""Turn volume down for media player."""
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - 0.2))
def media_play_pause(self):
"""Play or pause the media player."""
if self._state == STATE_PLAYING:
self._state = STATE_PAUSED
else:
self._state = STATE_PLAYING
def toggle(self):
"""Toggle the power on the media player."""
if self._state in [STATE_OFF, STATE_IDLE]:
self._state = STATE_ON
else:
self._state = STATE_OFF
async def async_media_play_pause(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
await super().async_media_play_pause()
async def async_toggle(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
await super().async_toggle()
class TestAsyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = AsyncMediaPlayer(self.hass)
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
asyncio.run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop
).result()
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop
).result()
assert self.player.volume_level == 0.6
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
asyncio.run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop
).result()
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop
).result()
assert self.player.volume_level == 0.4
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PLAYING
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_ON
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_OFF
class TestSyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = SyncMediaPlayer(self.hass)
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop
).result()
assert self.player.volume_level == 0.7
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop
).result()
assert self.player.volume_level == 0.3
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PLAYING
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_ON
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_OFF
|
from datetime import timedelta
import json
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, intent
DOMAIN = "snips"
CONF_INTENTS = "intents"
CONF_ACTION = "action"
CONF_FEEDBACK = "feedback_sounds"
CONF_PROBABILITY = "probability_threshold"
CONF_SITE_IDS = "site_ids"
SERVICE_SAY = "say"
SERVICE_SAY_ACTION = "say_action"
SERVICE_FEEDBACK_ON = "feedback_on"
SERVICE_FEEDBACK_OFF = "feedback_off"
INTENT_TOPIC = "hermes/intent/#"
FEEDBACK_ON_TOPIC = "hermes/feedback/sound/toggleOn"
FEEDBACK_OFF_TOPIC = "hermes/feedback/sound/toggleOff"
ATTR_TEXT = "text"
ATTR_SITE_ID = "site_id"
ATTR_CUSTOM_DATA = "custom_data"
ATTR_CAN_BE_ENQUEUED = "can_be_enqueued"
ATTR_INTENT_FILTER = "intent_filter"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_FEEDBACK): cv.boolean,
vol.Optional(CONF_PROBABILITY, default=0): vol.Coerce(float),
vol.Optional(CONF_SITE_IDS, default=["default"]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
INTENT_SCHEMA = vol.Schema(
{
vol.Required("input"): str,
vol.Required("intent"): {vol.Required("intentName"): str},
vol.Optional("slots"): [
{
vol.Required("slotName"): str,
vol.Required("value"): {
vol.Required("kind"): str,
vol.Optional("value"): cv.match_all,
vol.Optional("rawValue"): cv.match_all,
},
}
],
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_SAY = vol.Schema(
{
vol.Required(ATTR_TEXT): str,
vol.Optional(ATTR_SITE_ID, default="default"): str,
vol.Optional(ATTR_CUSTOM_DATA, default=""): str,
}
)
SERVICE_SCHEMA_SAY_ACTION = vol.Schema(
{
vol.Required(ATTR_TEXT): str,
vol.Optional(ATTR_SITE_ID, default="default"): str,
vol.Optional(ATTR_CUSTOM_DATA, default=""): str,
vol.Optional(ATTR_CAN_BE_ENQUEUED, default=True): cv.boolean,
vol.Optional(ATTR_INTENT_FILTER): vol.All(cv.ensure_list),
}
)
SERVICE_SCHEMA_FEEDBACK = vol.Schema(
{vol.Optional(ATTR_SITE_ID, default="default"): str}
)
async def async_setup(hass, config):
"""Activate Snips component."""
@callback
def async_set_feedback(site_ids, state):
"""Set Feedback sound state."""
site_ids = site_ids if site_ids else config[DOMAIN].get(CONF_SITE_IDS)
topic = FEEDBACK_ON_TOPIC if state else FEEDBACK_OFF_TOPIC
for site_id in site_ids:
payload = json.dumps({"siteId": site_id})
hass.components.mqtt.async_publish(
FEEDBACK_ON_TOPIC, "", qos=0, retain=False
)
hass.components.mqtt.async_publish(
topic, payload, qos=int(state), retain=state
)
if CONF_FEEDBACK in config[DOMAIN]:
async_set_feedback(None, config[DOMAIN][CONF_FEEDBACK])
async def message_received(msg):
"""Handle new messages on MQTT."""
_LOGGER.debug("New intent: %s", msg.payload)
try:
request = json.loads(msg.payload)
        except (TypeError, ValueError):
_LOGGER.error("Received invalid JSON: %s", msg.payload)
return
if request["intent"]["confidenceScore"] < config[DOMAIN].get(CONF_PROBABILITY):
_LOGGER.warning(
"Intent below probaility threshold %s < %s",
request["intent"]["confidenceScore"],
config[DOMAIN].get(CONF_PROBABILITY),
)
return
try:
request = INTENT_SCHEMA(request)
except vol.Invalid as err:
_LOGGER.error("Intent has invalid schema: %s. %s", err, request)
return
if request["intent"]["intentName"].startswith("user_"):
intent_type = request["intent"]["intentName"].split("__")[-1]
else:
intent_type = request["intent"]["intentName"].split(":")[-1]
slots = {}
for slot in request.get("slots", []):
slots[slot["slotName"]] = {"value": resolve_slot_values(slot)}
slots["{}_raw".format(slot["slotName"])] = {"value": slot["rawValue"]}
slots["site_id"] = {"value": request.get("siteId")}
slots["session_id"] = {"value": request.get("sessionId")}
slots["confidenceScore"] = {"value": request["intent"]["confidenceScore"]}
try:
intent_response = await intent.async_handle(
hass, DOMAIN, intent_type, slots, request["input"]
)
notification = {"sessionId": request.get("sessionId", "default")}
if "plain" in intent_response.speech:
notification["text"] = intent_response.speech["plain"]["speech"]
_LOGGER.debug("send_response %s", json.dumps(notification))
mqtt.async_publish(
hass, "hermes/dialogueManager/endSession", json.dumps(notification)
)
except intent.UnknownIntent:
_LOGGER.warning(
"Received unknown intent %s", request["intent"]["intentName"]
)
except intent.IntentError:
_LOGGER.exception("Error while handling intent: %s", intent_type)
await hass.components.mqtt.async_subscribe(INTENT_TOPIC, message_received)
async def snips_say(call):
"""Send a Snips notification message."""
notification = {
"siteId": call.data.get(ATTR_SITE_ID, "default"),
"customData": call.data.get(ATTR_CUSTOM_DATA, ""),
"init": {"type": "notification", "text": call.data.get(ATTR_TEXT)},
}
mqtt.async_publish(
hass, "hermes/dialogueManager/startSession", json.dumps(notification)
)
return
async def snips_say_action(call):
"""Send a Snips action message."""
notification = {
"siteId": call.data.get(ATTR_SITE_ID, "default"),
"customData": call.data.get(ATTR_CUSTOM_DATA, ""),
"init": {
"type": "action",
"text": call.data.get(ATTR_TEXT),
"canBeEnqueued": call.data.get(ATTR_CAN_BE_ENQUEUED, True),
"intentFilter": call.data.get(ATTR_INTENT_FILTER, []),
},
}
mqtt.async_publish(
hass, "hermes/dialogueManager/startSession", json.dumps(notification)
)
return
async def feedback_on(call):
"""Turn feedback sounds on."""
async_set_feedback(call.data.get(ATTR_SITE_ID), True)
async def feedback_off(call):
"""Turn feedback sounds off."""
async_set_feedback(call.data.get(ATTR_SITE_ID), False)
hass.services.async_register(
DOMAIN, SERVICE_SAY, snips_say, schema=SERVICE_SCHEMA_SAY
)
hass.services.async_register(
DOMAIN, SERVICE_SAY_ACTION, snips_say_action, schema=SERVICE_SCHEMA_SAY_ACTION
)
hass.services.async_register(
DOMAIN, SERVICE_FEEDBACK_ON, feedback_on, schema=SERVICE_SCHEMA_FEEDBACK
)
hass.services.async_register(
DOMAIN, SERVICE_FEEDBACK_OFF, feedback_off, schema=SERVICE_SCHEMA_FEEDBACK
)
return True
def resolve_slot_values(slot):
"""Convert snips builtin types to usable values."""
if "value" in slot["value"]:
value = slot["value"]["value"]
else:
value = slot["rawValue"]
if slot.get("entity") == "snips/duration":
delta = timedelta(
weeks=slot["value"]["weeks"],
days=slot["value"]["days"],
hours=slot["value"]["hours"],
minutes=slot["value"]["minutes"],
seconds=slot["value"]["seconds"],
)
value = delta.seconds
return value
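# Illustrative sketch (not part of the component): how resolve_slot_values
# treats a builtin "snips/duration" slot. The payload below is a hypothetical
# Hermes-style slot; timedelta(minutes=5).seconds evaluates to 300.
#
#   duration_slot = {
#       "slotName": "timer_duration",
#       "entity": "snips/duration",
#       "rawValue": "five minutes",
#       "value": {
#           "kind": "Duration",
#           "value": 300,
#           "weeks": 0, "days": 0, "hours": 0, "minutes": 5, "seconds": 0,
#       },
#   }
#   resolve_slot_values(duration_slot)  # -> 300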
|
import getpass
import os
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
try:
from vault_tools.client.jsonsecret import get_plaintext
from vault_tools.paasta_secret import get_vault_client
from vault_tools.gpg import TempGpgKeyring
from vault_tools.paasta_secret import encrypt_secret
from vault_tools.cert_tools import do_cert_renew
import hvac
except ImportError:
def get_plaintext(*args: Any, **kwargs: Any) -> bytes:
return b"No plain text available without vault_tools"
def get_vault_client(*args: Any, **kwargs: Any) -> None:
return None
TempGpgKeyring = None
def encrypt_secret(*args: Any, **kwargs: Any) -> None:
return None
def do_cert_renew(*args: Any, **kwargs: Any) -> None:
return None
from paasta_tools.secret_providers import BaseSecretProvider
from paasta_tools.secret_tools import get_secret_name_from_ref
class SecretProvider(BaseSecretProvider):
def __init__(
self,
soa_dir: Optional[str],
service_name: Optional[str],
cluster_names: List[str],
vault_cluster_config: Dict[str, str] = {},
vault_auth_method: str = "ldap",
vault_token_file: str = "/root/.vault-token",
vault_num_uses: int = 1,
**kwargs: Any,
) -> None:
super().__init__(soa_dir, service_name, cluster_names)
self.vault_cluster_config = vault_cluster_config
self.vault_auth_method = vault_auth_method
self.vault_token_file = vault_token_file
self.ecosystems = self.get_vault_ecosystems_for_clusters()
self.clients: Mapping[str, hvac.Client] = {}
if vault_auth_method == "ldap":
username = getpass.getuser()
password = getpass.getpass(
"Please enter your LDAP password to auth with Vault\n"
)
else:
username = None
password = None
for ecosystem in self.ecosystems:
self.clients[ecosystem] = get_vault_client(
ecosystem=ecosystem,
num_uses=vault_num_uses,
vault_auth_method=self.vault_auth_method,
vault_token_file=self.vault_token_file,
username=username,
password=password,
)
def decrypt_environment(
self, environment: Dict[str, str], **kwargs: Any
) -> Dict[str, str]:
client = self.clients[self.ecosystems[0]]
secret_environment = {}
for k, v in environment.items():
secret_name = get_secret_name_from_ref(v)
secret_path = os.path.join(self.secret_dir, f"{secret_name}.json")
secret = get_plaintext(
client=client,
env=self.ecosystems[0],
path=secret_path,
cache_enabled=False,
cache_dir=None,
cache_key=None,
context=self.service_name,
rescue_failures=False,
).decode("utf-8")
secret_environment[k] = secret
return secret_environment
def get_vault_ecosystems_for_clusters(self) -> List[str]:
try:
return list(
{
self.vault_cluster_config[cluster_name]
for cluster_name in self.cluster_names
}
)
except KeyError as e:
print(
"Cannot find a vault cluster for the %s paasta cluster. A mapping must exist "
"in /etc/paasta so we contact the correct vault cluster to get/set secrets"
% e
)
raise
def write_secret(
self,
action: str,
secret_name: str,
plaintext: bytes,
cross_environment_motivation: Optional[str] = None,
) -> None:
with TempGpgKeyring(overwrite=True):
for ecosystem in self.ecosystems:
client = self.clients[ecosystem]
encrypt_secret(
client=client,
action=action,
ecosystem=ecosystem,
secret_name=secret_name,
soa_dir=self.soa_dir,
plaintext=plaintext,
service_name=self.service_name,
transit_key=self.encryption_key,
cross_environment_motivation=cross_environment_motivation,
)
def decrypt_secret(self, secret_name: str) -> str:
client = self.clients[self.ecosystems[0]]
secret_path = os.path.join(self.secret_dir, f"{secret_name}.json")
return get_plaintext(
client=client,
path=secret_path,
env=self.ecosystems[0],
cache_enabled=False,
cache_key=None,
cache_dir=None,
context=self.service_name,
rescue_failures=False,
).decode("utf-8")
def decrypt_secret_raw(self, secret_name: str) -> bytes:
client = self.clients[self.ecosystems[0]]
secret_path = os.path.join(self.secret_dir, f"{secret_name}.json")
return get_plaintext(
client=client,
path=secret_path,
env=self.ecosystems[0],
cache_enabled=False,
cache_key=None,
cache_dir=None,
context=self.service_name,
rescue_failures=False,
)
def get_secret_signature_from_data(self, data: Mapping[str, Any]) -> Optional[str]:
ecosystem = self.ecosystems[0]
if data["environments"].get(ecosystem):
return data["environments"][ecosystem]["signature"]
else:
return None
def renew_issue_cert(self, pki_backend: str, ttl: str) -> None:
client = self.clients[self.ecosystems[0]]
user = getpass.getuser()
pki_dir = os.path.expanduser("~/.paasta/pki")
do_cert_renew(
client=client,
pki_backend=pki_backend,
role=user,
cn=f"{user}.{self.ecosystems[0]}.paasta.yelp",
cert_path=f"{pki_dir}/{self.ecosystems[0]}.crt",
key_path=f"{pki_dir}/{self.ecosystems[0]}.key",
ca_path=f"{pki_dir}/{self.ecosystems[0]}_ca.crt",
cert_owner=user,
cert_group="users",
cert_mode="0600",
ttl=ttl,
)
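# Illustrative sketch (the service name, cluster name and vault mapping below
# are hypothetical): constructing the provider above and decrypting one secret.
# With the default "ldap" auth method this prompts for an LDAP password.
#
#   provider = SecretProvider(
#       soa_dir="/nail/etc/services",
#       service_name="example_service",
#       cluster_names=["example-cluster"],
#       vault_cluster_config={"example-cluster": "example-ecosystem"},
#   )
#   plaintext = provider.decrypt_secret("MY_SECRET")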
|
import pytest
from homeassistant import config_entries
from homeassistant.components.kodi.config_flow import (
CannotConnectError,
InvalidAuthError,
)
from homeassistant.components.kodi.const import DEFAULT_TIMEOUT, DOMAIN
from .util import (
TEST_CREDENTIALS,
TEST_DISCOVERY,
TEST_HOST,
TEST_IMPORT,
TEST_WS_PORT,
UUID,
MockConnection,
MockWSConnection,
get_kodi_connection,
)
from tests.async_mock import AsyncMock, PropertyMock, patch
from tests.common import MockConfigEntry
@pytest.fixture
async def user_flow(hass):
"""Return a user-initiated flow after filling in host info."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
return result["flow_id"]
async def test_user_flow(hass, user_flow):
"""Test a successful user initiated flow."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
"password": None,
"username": None,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_valid_auth(hass, user_flow):
"""Test we handle valid auth."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
**TEST_CREDENTIALS,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_valid_ws_port(hass, user_flow):
"""Test we handle valid websocket port."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
"password": None,
"username": None,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_empty_ws_port(hass, user_flow):
"""Test we handle an empty websocket port input."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"ws_port": 0}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_HOST["host"]
assert result["data"] == {
**TEST_HOST,
"ws_port": None,
"password": None,
"username": None,
"name": None,
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass, user_flow):
"""Test we handle invalid auth."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {"base": "invalid_auth"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {"base": "unknown"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_CREDENTIALS
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
async def test_form_cannot_connect_http(hass, user_flow):
"""Test we handle cannot connect over HTTP error."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_exception_http(hass, user_flow):
"""Test we handle generic exception over HTTP."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "unknown"}
async def test_form_cannot_connect_ws(hass, user_flow):
"""Test we handle cannot connect over WebSocket error."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection, "connected", new_callable=PropertyMock(return_value=False)
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_exception_ws(hass, user_flow):
"""Test we handle generic exception over WebSocket."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(user_flow, TEST_HOST)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection, "connect", AsyncMock(side_effect=Exception)
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_WS_PORT
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {"base": "unknown"}
async def test_discovery(hass):
"""Test discovery flow works."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "discovery_confirm"
with patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
flow_id=result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "hostname"
assert result["data"] == {
**TEST_HOST,
**TEST_WS_PORT,
"password": None,
"username": None,
"name": "hostname",
"timeout": DEFAULT_TIMEOUT,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_cannot_connect_http(hass):
"""Test discovery aborts if cannot connect."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_discovery_cannot_connect_ws(hass):
"""Test discovery aborts if cannot connect to websocket."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch.object(
MockWSConnection,
"connect",
AsyncMock(side_effect=CannotConnectError),
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
new=get_kodi_connection,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "ws_port"
assert result["errors"] == {}
async def test_discovery_exception_http(hass, user_flow):
"""Test we handle generic exception during discovery validation."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_discovery_invalid_auth(hass):
"""Test we handle invalid auth during discovery."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "credentials"
assert result["errors"] == {}
async def test_discovery_duplicate_data(hass):
"""Test discovery aborts if same mDNS packet arrives."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_discovery_updates_unique_id(hass):
"""Test a duplicate discovery id aborts and updates existing entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=UUID,
data={"host": "dummy", "port": 11, "namename": "dummy.local."},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "zeroconf"}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "1.1.1.1"
assert entry.data["port"] == 8080
assert entry.data["name"] == "hostname"
async def test_form_import(hass):
"""Test we get the form with import source."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
return_value=True,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
), patch(
"homeassistant.components.kodi.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.kodi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TEST_IMPORT["name"]
assert result["data"] == TEST_IMPORT
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import_invalid_auth(hass):
"""Test we handle invalid auth on import."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=InvalidAuthError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_auth"
async def test_form_import_cannot_connect(hass):
"""Test we handle cannot connect on import."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=CannotConnectError,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_form_import_exception(hass):
"""Test we handle unknown exception on import."""
with patch(
"homeassistant.components.kodi.config_flow.Kodi.ping",
side_effect=Exception,
), patch(
"homeassistant.components.kodi.config_flow.get_kodi_connection",
return_value=MockConnection(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=TEST_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
|
from homeassistant.components.group import (
ATTR_ADD_ENTITIES,
ATTR_ENTITIES,
ATTR_OBJECT_ID,
DOMAIN,
SERVICE_REMOVE,
SERVICE_SET,
)
from homeassistant.const import ATTR_ICON, ATTR_NAME, SERVICE_RELOAD
from homeassistant.core import callback
from homeassistant.loader import bind_hass
@bind_hass
def reload(hass):
"""Reload the automation from config."""
hass.add_job(async_reload, hass)
@callback
@bind_hass
def async_reload(hass):
"""Reload the automation from config."""
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_RELOAD))
@bind_hass
def set_group(
hass,
object_id,
name=None,
entity_ids=None,
icon=None,
add=None,
):
"""Create/Update a group."""
hass.add_job(
async_set_group,
hass,
object_id,
name,
entity_ids,
icon,
add,
)
@callback
@bind_hass
def async_set_group(
hass,
object_id,
name=None,
entity_ids=None,
icon=None,
add=None,
):
"""Create/Update a group."""
data = {
key: value
for key, value in [
(ATTR_OBJECT_ID, object_id),
(ATTR_NAME, name),
(ATTR_ENTITIES, entity_ids),
(ATTR_ICON, icon),
(ATTR_ADD_ENTITIES, add),
]
if value is not None
}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_SET, data))
@callback
@bind_hass
def async_remove(hass, object_id):
"""Remove a user group."""
data = {ATTR_OBJECT_ID: object_id}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_REMOVE, data))
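# Illustrative sketch (entity ids are hypothetical): creating and then removing
# a user group via the helpers above, called from within the event loop.
#
#   async_set_group(
#       hass,
#       "living_room",
#       name="Living Room",
#       entity_ids=["light.lamp", "switch.fan"],
#       icon="mdi:sofa",
#   )
#   async_remove(hass, "living_room")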
|
import random
from datetime import datetime as dt
from datetime import timedelta
from pandas import DataFrame, Index, MultiIndex
from pandas.util.testing import assert_frame_equal
from arctic.chunkstore.utils import read_apply
def create_test_data(size=5, index=True, multiindex=True, random_data=True, random_ids=True, date_offset=0, use_hours=False, cols=1):
data = {}
for i in range(cols):
if random_data:
data['data' + str(i)] = [random.random() * random.randint(-100, 100) for _ in range(size)]
else:
data['data' + str(i)] = range(size)
dates = [dt(2016, 1, 1) + timedelta(days=0 if use_hours else n+date_offset,
hours=n+date_offset if use_hours else 0) for n in range(size)]
if index:
if multiindex:
index_col_names = ['date', 'id']
idx = [(date, random.randint(1, size)) for date in dates] if random_ids else [(date, 1) for date in dates]
index = MultiIndex.from_tuples(idx, names=index_col_names) if idx else MultiIndex([[]]*2, [[]]*2, names=index_col_names)
return DataFrame(data=data, index=index)
return DataFrame(data=data, index=Index(data=dates, name='date'))
data.update({'date': dates})
return DataFrame(data=data)
def test_read_apply(chunkstore_lib):
df = create_test_data(index=False, size=20)
chunkstore_lib.write('test', df, chunk_size='M')
def func(df):
df['data0'] += 1.0
return df
for data in read_apply(chunkstore_lib, 'test', func):
assert_frame_equal(data, func(df))
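# Illustrative sketch of the helper above: a small deterministic frame with a
# plain DatetimeIndex instead of the default (date, id) MultiIndex.
#
#   df = create_test_data(size=3, multiindex=False, random_data=False, cols=2)
#   # -> columns ['data0', 'data1'], Index named 'date' starting 2016-01-01
#   #    and stepping one day per row.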
|
import os
import click
import cookiecutter
import cookiecutter.main
from molecule import logger
from molecule import util
from molecule.command.init import base
LOG = logger.get_logger(__name__)
class Template(base.Base):
"""
.. program:: molecule init template --url https://example.com/user/cookiecutter-repo
.. option:: molecule init template --url https://example.com/user/cookiecutter-repo
Initialize a new role from a Cookiecutter URL.
""" # noqa
def __init__(self, command_args):
self._command_args = command_args
def execute(self):
"""
        Execute the actions necessary to perform a `molecule init template`
        and return None.
:return: None
"""
role_name = self._command_args['role_name']
url = self._command_args['url']
no_input = self._command_args['no_input']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
cookiecutter.main.cookiecutter(
url,
extra_context=self._command_args,
no_input=no_input,
)
role_directory = os.path.join(os.getcwd(), role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg)
@click.command()
@click.pass_context
@click.option(
'--url',
required=True,
help='URL to the Cookiecutter templates repository.')
@click.option(
'--no-input/--input',
default=False,
help=('Do not prompt for parameters and only use cookiecutter.json for '
'content. (false)'))
@click.option(
'--role-name',
'-r',
default='role_name',
help='Name of the role to create.')
def template(ctx, url, no_input, role_name): # pragma: no cover
""" Initialize a new role from a Cookiecutter URL. """
command_args = {
'role_name': role_name,
'subcommand': __name__,
'url': url,
'no_input': no_input,
}
t = Template(command_args)
t.execute()
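# Illustrative sketch: invoking the click command above from the shell
# (the URL and role name are placeholders).
#
#   molecule init template \
#       --url https://example.com/user/cookiecutter-repo \
#       --role-name my_new_role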
|
import numpy as np
import pytest
from tensornetwork import Node
from tensornetwork.contractors import auto
from tensornetwork.contractors.opt_einsum_paths import path_contractors
from tensornetwork.ncon_interface import ncon
@pytest.fixture(
name="path_algorithm", params=["optimal", "branch", "greedy", "auto"])
def path_algorithm_fixture(request):
return getattr(path_contractors, request.param)
def test_sanity_check(backend, path_algorithm):
a = Node(np.eye(2), backend=backend)
b = Node(np.ones((2, 7, 11)), backend=backend)
c = Node(np.ones((7, 11, 13, 2)), backend=backend)
d = Node(np.eye(13), backend=backend)
# pylint: disable=pointless-statement
a[0] ^ b[0]
b[1] ^ c[0]
b[2] ^ c[1]
c[2] ^ d[1]
c[3] ^ a[1]
nodes = [a, b, c, d]
final_node = path_algorithm(nodes)
assert final_node.shape == (13,)
def test_trace_edge(backend, path_algorithm):
a = Node(np.ones((2, 2, 2, 2, 2)), backend=backend)
b = Node(np.ones((2, 2, 2)), backend=backend)
c = Node(np.ones((2, 2, 2)), backend=backend)
# pylint: disable=pointless-statement
a[0] ^ a[1]
a[2] ^ b[0]
a[3] ^ c[0]
b[1] ^ c[1]
b[2] ^ c[2]
nodes = [a, b, c]
node = path_algorithm(nodes)
np.testing.assert_allclose(node.tensor, np.ones(2) * 32.0)
def test_single_node(backend, path_algorithm):
a = Node(np.ones((2, 2, 2)), backend=backend)
# pylint: disable=pointless-statement
a[0] ^ a[1]
nodes = [a]
node = path_algorithm(nodes)
np.testing.assert_allclose(node.tensor, np.ones(2) * 2.0)
def test_custom_sanity_check(backend):
a = Node(np.ones(2), backend=backend)
b = Node(np.ones((2, 5)), backend=backend)
# pylint: disable=pointless-statement
a[0] ^ b[0]
nodes = [a, b]
class PathOptimizer:
def __call__(self, inputs, output, size_dict, memory_limit=None):
return [(0, 1)]
optimizer = PathOptimizer()
final_node = path_contractors.custom(nodes, optimizer)
np.testing.assert_allclose(final_node.tensor, np.ones(5) * 2.0)
def test_subgraph_contraction(backend, path_algorithm):
a_tensor = np.arange(4).reshape((2, 2))
b_tensor = np.arange(4).reshape((2, 2)) + 10
c_tensor = np.arange(4).reshape((2, 2)) + 20
a = Node(a_tensor, backend=backend)
b = Node(b_tensor, backend=backend)
c = Node(c_tensor, backend=backend)
a[0] ^ b[1]
c[1] ^ b[0]
remaining_edges = [c[0], a[1]]
result = path_algorithm({a, b}, [b[0], a[1]])
np.testing.assert_allclose(result.tensor, b_tensor @ a_tensor)
final = (c @ result).reorder_edges(remaining_edges)
np.testing.assert_allclose(final.tensor, c_tensor @ b_tensor @ a_tensor)
def test_multiple_partial_contractions(backend, path_algorithm):
a_tensor = np.arange(4).reshape((2, 2))
b_tensor = np.arange(4).reshape((2, 2)) + 10
c_tensor = np.arange(4).reshape((2, 2)) + 20
d_tensor = np.arange(4).reshape((2, 2)) + 30
a = Node(a_tensor, backend=backend)
b = Node(b_tensor, backend=backend)
c = Node(c_tensor, backend=backend)
d = Node(d_tensor, backend=backend)
a[1] ^ b[0]
b[1] ^ c[0]
c[1] ^ d[0]
d[1] ^ a[0]
ab = path_algorithm({a, b}, [a[0], b[1]])
np.testing.assert_allclose(ab.tensor, a_tensor @ b_tensor)
cd = path_algorithm({c, d}, [c[0], d[1]])
np.testing.assert_allclose(cd.tensor, c_tensor @ d_tensor)
result = path_algorithm({ab, cd})
np.testing.assert_allclose(
result.tensor, np.trace(a_tensor @ b_tensor @ c_tensor @ d_tensor))
def test_single_node_reorder(backend, path_algorithm):
a = Node(np.arange(4).reshape((2, 2)), backend=backend)
expected_edge_order = [a[1], a[0]]
result = path_algorithm({a}, expected_edge_order)
assert result.edges == expected_edge_order
np.testing.assert_allclose(result.tensor, np.arange(4).reshape((2, 2)).T)
def test_ignore_edge_order(backend, path_algorithm):
a = Node(np.ones((1, 1, 1)), backend=backend)
b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)
a[0] ^ b[0]
a[1] ^ b[1]
a[2] ^ b[2]
e0 = b[3]
e1 = b[4]
final_node = path_algorithm({a, b}, ignore_edge_order=True)
assert set(final_node.edges) == {e0, e1}
def test_ignore_edge_order_with_order(backend, path_algorithm):
a = Node(np.ones((1, 1, 1)), backend=backend)
b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)
a[0] ^ b[0]
a[1] ^ b[1]
a[2] ^ b[2]
e0 = b[3]
e1 = b[4]
final_node = path_algorithm({a, b}, [e1, e0], ignore_edge_order=True)
assert set(final_node.edges) == {e0, e1}
def test_disconnected_network(backend, path_algorithm):
a = Node(np.eye(2), backend=backend)
b = Node(np.eye(2), backend=backend)
c = Node(np.eye(2), backend=backend)
d = Node(np.eye(2), backend=backend)
e = Node(np.eye(2), backend=backend)
f = Node(np.eye(2), backend=backend)
g = Node(np.eye(2), backend=backend)
a[1] ^ b[0]
c[0] ^ d[1]
e[0] ^ f[0]
g[0] ^ f[1]
final_edges = [a[0], b[1], c[1], d[0], e[1], g[1]]
result = path_algorithm({a, b, c, d, e, f, g}, final_edges)
assert result.edges == final_edges
def test_passes_ignore_edge_order_from_auto(backend):
a = Node(np.eye(2), backend=backend)
b = Node(np.eye(2), backend=backend)
c = Node(np.eye(2), backend=backend)
d = Node(np.eye(2), backend=backend)
e = Node(np.eye(2), backend=backend)
# pylint: disable=pointless-statement
a[1] ^ b[0]
c[0] ^ d[1]
c[1] ^ e[0]
nodes = [a, b, c, d, e]
try:
auto(nodes, ignore_edge_order=True)
except ValueError:
pytest.fail("auto should pass ignore_edge_order when n >= 5 && n < 7")
def test_path_solver_optimal(backend):
np.random.seed(10)
D, d, M = 100, 4, 10
mps = Node(np.random.rand(D, d, D), backend=backend)
mpsc = Node(np.random.rand(D, d, D), backend=backend)
L = Node(np.random.rand(M, D, D), backend=backend)
mpo = Node(np.random.rand(M, M, d, d), backend=backend)
L[0] ^ mpo[0]
L[1] ^ mps[0]
L[2] ^ mpsc[0]
mps[1] ^ mpo[2]
mpsc[1] ^ mpo[3]
nodes = [mps, mpsc, mpo, L]
path = path_contractors.path_solver(algorithm="optimal", nodes=nodes)
assert path == [(1, 3), (1, 2), (0, 1)]
@pytest.mark.parametrize(
    "algorithm", ["optimal", "branch", "greedy", "auto"])
def test_contract_path(backend, algorithm):
np.random.seed(10)
D, d, M = 100, 4, 10
mps = Node(np.random.rand(D, d, D), backend=backend)
mpsc = Node(np.random.rand(D, d, D), backend=backend)
L = Node(np.random.rand(M, D, D), backend=backend)
mpo = Node(np.random.rand(M, M, d, d), backend=backend)
L[0] ^ mpo[0]
L[1] ^ mps[0]
L[2] ^ mpsc[0]
mps[1] ^ mpo[3]
mpsc[1] ^ mpo[2]
nodes = [mps, mpsc, mpo, L]
path = path_contractors.path_solver(algorithm=algorithm, nodes=nodes)
order = [mpo[1], mps[2], mpsc[2]]
res = path_contractors.contract_path(
path=path, nodes=nodes, output_edge_order=order)
exp = ncon([mps.tensor, mpsc.tensor, L.tensor, mpo.tensor],
[[1, 2, -2], [5, 4, -3], [3, 1, 5], [3, -1, 4, 2]],
backend=backend)
np.testing.assert_allclose(res.tensor, exp)
def test_contract_path_raises(backend):
np.random.seed(10)
D, d, M = 100, 4, 10
mps = Node(np.random.rand(D, d, D), backend=backend)
mpsc = Node(np.random.rand(D, d, D), backend=backend)
L = Node(np.random.rand(M, D, D), backend=backend)
mpo = Node(np.random.rand(M, M, d, d), backend=backend)
L[0] ^ mpo[0]
L[1] ^ mps[0]
L[2] ^ mpsc[0]
mps[1] ^ mpo[3]
mpsc[1] ^ mpo[2]
nodes = [mps, mpsc, mpo, L]
with pytest.raises(ValueError, match="algorithm"):
_ = path_contractors.path_solver(algorithm="no-algorithm", nodes=nodes)
|
from perfkitbenchmarker import linux_packages
MKL_DIR = '%s/MKL' % linux_packages.INSTALL_DIR
MKL_TAG = 'l_mkl_2018.2.199'
MKL_TGZ = 'l_mkl_2018.2.199.tgz'
MKL_VERSION = '2018.2.199'
# TODO(user): InstallPreprovisionedBenchmarkData currently assumes that
# BENCHMARK_NAME is associated with a benchmark. Once it is expanded to include
# packages, we can associate the preprovisioned data for MKL with this package.
BENCHMARK_NAME = 'hpcc'
def _Install(vm):
"""Installs the MKL package on the VM."""
vm.RemoteCommand('cd {0} && mkdir MKL'.format(linux_packages.INSTALL_DIR))
vm.InstallPreprovisionedBenchmarkData(
BENCHMARK_NAME, [MKL_TGZ], MKL_DIR)
vm.RemoteCommand('cd {0} && tar zxvf {1}'.format(MKL_DIR, MKL_TGZ))
vm.RemoteCommand(('cd {0}/{1} && '
'sed -i "s/decline/accept/g" silent.cfg && '
'sudo ./install.sh --silent ./silent.cfg').format(
MKL_DIR, MKL_TAG))
vm.RemoteCommand('sudo chmod +w /etc/bash.bashrc && '
'sudo chmod 777 /etc/bash.bashrc && '
'echo "source /opt/intel/mkl/bin/mklvars.sh intel64" '
'>>/etc/bash.bashrc && '
'echo "export PATH=/opt/intel/bin:$PATH" '
'>>/etc/bash.bashrc && '
'echo "export LD_LIBRARY_PATH=/opt/intel/lib/intel64:'
'/opt/intel/mkl/lib/intel64:$LD_LIBRARY_PATH" '
'>>/etc/bash.bashrc && '
'echo "source /opt/intel/compilers_and_libraries/linux/bin/'
'compilervars.sh -arch intel64 -platform linux" '
'>>/etc/bash.bashrc')
_CompileInterfaces(vm)
vm.RemoteCommand(
'sudo ln -s /opt/intel/compilers_and_libraries_2018.2.199/linux/compiler/'
'lib/intel64/libiomp5.so /lib/libiomp5.so')
def _CompileInterfaces(vm):
"""Compiles the MKL FFT interfaces.
Args:
vm: Virtual Machine to compile on.
"""
mpi_lib = 'openmpi'
make_options = ('PRECISION=MKL_DOUBLE '
'interface=ilp64 '
f'mpi={mpi_lib} '
'compiler=gnu')
for interface in ('fftw2xc', 'fftw2xf', 'fftw3xc', 'fftw3xf'):
cmd = (f'cd /opt/intel/mkl/interfaces/{interface} && '
f'sudo make libintel64 {make_options}')
vm.RemoteCommand(cmd)
def YumInstall(vm):
"""Installs the MKL package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the MKL package on the VM."""
_Install(vm)
|
import asyncio
import functools
import getpass
import json
import logging
import os
import pip
import pkg_resources
import platform
import shutil
import signal
import sys
from argparse import Namespace
from copy import deepcopy
from pathlib import Path
from typing import NoReturn
import discord
# Set the event loop policies here so any subsequent `new_event_loop()`
# calls, in particular those as a result of the following imports,
# return the correct loop object.
from redbot import _update_event_loop_policy, __version__
_update_event_loop_policy()
import redbot.logging
from redbot.core.bot import Red, ExitCodes
from redbot.core.cli import interactive_config, confirm, parse_cli_flags
from redbot.setup import get_data_dir, get_name, save_config
from redbot.core import data_manager, drivers
from redbot.core._sharedlibdeprecation import SharedLibImportWarner
log = logging.getLogger("red.main")
#
# Red - Discord Bot v3
#
# Made by Twentysix, improved by many
#
def _get_instance_names():
with data_manager.config_file.open(encoding="utf-8") as fs:
data = json.load(fs)
return sorted(data.keys())
def list_instances():
if not data_manager.config_file.exists():
print(
"No instances have been configured! Configure one "
"using `redbot-setup` before trying to run the bot!"
)
sys.exit(1)
else:
text = "Configured Instances:\n\n"
for instance_name in _get_instance_names():
text += "{}\n".format(instance_name)
print(text)
sys.exit(0)
def debug_info():
"""Shows debug information useful for debugging."""
if sys.platform == "linux":
import distro # pylint: disable=import-error
IS_WINDOWS = os.name == "nt"
IS_MAC = sys.platform == "darwin"
IS_LINUX = sys.platform == "linux"
pyver = sys.version
pipver = pip.__version__
redver = __version__
dpy_version = discord.__version__
if IS_WINDOWS:
os_info = platform.uname()
osver = "{} {} (version {})".format(os_info.system, os_info.release, os_info.version)
elif IS_MAC:
os_info = platform.mac_ver()
osver = "Mac OSX {} {}".format(os_info[0], os_info[2])
else:
os_info = distro.linux_distribution()
osver = "{} {}".format(os_info[0], os_info[1]).strip()
user_who_ran = getpass.getuser()
info = (
"Debug Info for Red\n\n"
+ "Red version: {}\n".format(redver)
+ "Python version: {}\n".format(pyver)
+ "Python executable: {}\n".format(sys.executable)
+ "Discord.py version: {}\n".format(dpy_version)
+ "Pip version: {}\n".format(pipver)
+ "OS version: {}\n".format(osver)
+ "System arch: {}\n".format(platform.machine())
+ "User: {}\n".format(user_who_ran)
+ "Metadata file: {}\n".format(data_manager.config_file)
)
print(info)
sys.exit(0)
async def edit_instance(red, cli_flags):
no_prompt = cli_flags.no_prompt
token = cli_flags.token
owner = cli_flags.owner
prefix = cli_flags.prefix
old_name = cli_flags.instance_name
new_name = cli_flags.edit_instance_name
data_path = cli_flags.edit_data_path
copy_data = cli_flags.copy_data
confirm_overwrite = cli_flags.overwrite_existing_instance
if data_path is None and copy_data:
print("--copy-data can't be used without --edit-data-path argument")
sys.exit(1)
if new_name is None and confirm_overwrite:
print("--overwrite-existing-instance can't be used without --edit-instance-name argument")
sys.exit(1)
if (
no_prompt
and all(to_change is None for to_change in (token, owner, new_name, data_path))
and not prefix
):
print(
"No arguments to edit were provided."
" Available arguments (check help for more information):"
" --edit-instance-name, --edit-data-path, --copy-data, --owner, --token, --prefix"
)
sys.exit(1)
await _edit_token(red, token, no_prompt)
await _edit_prefix(red, prefix, no_prompt)
await _edit_owner(red, owner, no_prompt)
data = deepcopy(data_manager.basic_config)
name = _edit_instance_name(old_name, new_name, confirm_overwrite, no_prompt)
_edit_data_path(data, name, data_path, copy_data, no_prompt)
save_config(name, data)
if old_name != name:
save_config(old_name, {}, remove=True)
async def _edit_token(red, token, no_prompt):
if token:
        if len(token) < 50:
print(
"The provided token doesn't look a valid Discord bot token."
" Instance's token will remain unchanged.\n"
)
return
await red._config.token.set(token)
elif not no_prompt and confirm("Would you like to change instance's token?", default=False):
await interactive_config(red, False, True, print_header=False)
print("Token updated.\n")
async def _edit_prefix(red, prefix, no_prompt):
if prefix:
prefixes = sorted(prefix, reverse=True)
await red._config.prefix.set(prefixes)
elif not no_prompt and confirm("Would you like to change instance's prefixes?", default=False):
print(
"Enter the prefixes, separated by a space (please note "
"that prefixes containing a space will need to be added with [p]set prefix)"
)
while True:
prefixes = input("> ").strip().split()
if not prefixes:
print("You need to pass at least one prefix!")
continue
prefixes = sorted(prefixes, reverse=True)
await red._config.prefix.set(prefixes)
print("Prefixes updated.\n")
break
async def _edit_owner(red, owner, no_prompt):
if owner:
if not (15 <= len(str(owner)) <= 21):
print(
"The provided owner id doesn't look like a valid Discord user id."
" Instance's owner will remain unchanged."
)
return
await red._config.owner.set(owner)
elif not no_prompt and confirm("Would you like to change instance's owner?", default=False):
print(
"Remember:\n"
"ONLY the person who is hosting Red should be owner."
" This has SERIOUS security implications."
" The owner can access any data that is present on the host system.\n"
)
if confirm("Are you sure you want to change instance's owner?", default=False):
print("Please enter a Discord user id for new owner:")
while True:
owner_id = input("> ").strip()
if not (15 <= len(owner_id) <= 21 and owner_id.isdecimal()):
print("That doesn't look like a valid Discord user id.")
continue
owner_id = int(owner_id)
await red._config.owner.set(owner_id)
print("Owner updated.")
break
else:
print("Instance's owner will remain unchanged.")
print()
def _edit_instance_name(old_name, new_name, confirm_overwrite, no_prompt):
if new_name:
name = new_name
if name in _get_instance_names() and not confirm_overwrite:
name = old_name
print(
"An instance with this name already exists.\n"
"If you want to remove the existing instance and replace it with this one,"
" run this command with --overwrite-existing-instance flag."
)
elif not no_prompt and confirm("Would you like to change the instance name?", default=False):
name = get_name()
if name in _get_instance_names():
print(
"WARNING: An instance already exists with this name. "
"Continuing will overwrite the existing instance config."
)
if not confirm(
"Are you absolutely certain you want to continue with this instance name?",
default=False,
):
print("Instance name will remain unchanged.")
name = old_name
else:
print("Instance name updated.")
else:
print("Instance name updated.")
print()
else:
name = old_name
return name
def _edit_data_path(data, instance_name, data_path, copy_data, no_prompt):
# This modifies the passed dict.
if data_path:
new_path = Path(data_path)
        try:
            exists = new_path.exists()
        except OSError:
            print(
                "We were unable to check your chosen directory."
                " Provided path may contain an invalid character."
                " Data location will remain unchanged."
            )
            return
        if not exists:
            try:
                new_path.mkdir(parents=True, exist_ok=True)
            except OSError:
                print(
                    "We were unable to create your chosen directory."
                    " Data location will remain unchanged."
                )
                return
data["DATA_PATH"] = data_path
if copy_data and not _copy_data(data):
print("Can't copy data to non-empty location. Data location will remain unchanged.")
data["DATA_PATH"] = data_manager.basic_config["DATA_PATH"]
elif not no_prompt and confirm("Would you like to change the data location?", default=False):
data["DATA_PATH"] = get_data_dir(instance_name)
if confirm("Do you want to copy the data from old location?", default=True):
if not _copy_data(data):
print("Can't copy the data to non-empty location.")
if not confirm("Do you still want to use the new data location?"):
data["DATA_PATH"] = data_manager.basic_config["DATA_PATH"]
print("Data location will remain unchanged.")
return
print("Old data has been copied over to the new location.")
print("Data location updated.")
def _copy_data(data):
if Path(data["DATA_PATH"]).exists():
if any(os.scandir(data["DATA_PATH"])):
return False
else:
# this is needed because copytree doesn't work when destination folder exists
# Python 3.8 has `dirs_exist_ok` option for that
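            # On Python 3.8+ this workaround could be replaced with a single call
            # (sketch only; not used here to keep pre-3.8 compatibility):
            #     shutil.copytree(src_path, dst_path, dirs_exist_ok=True)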
os.rmdir(data["DATA_PATH"])
shutil.copytree(data_manager.basic_config["DATA_PATH"], data["DATA_PATH"])
return True
def handle_edit(cli_flags: Namespace):
"""
    This one exists so that editing an instance doesn't log everything as if it were a full run of the bot.
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
data_manager.load_basic_configuration(cli_flags.instance_name)
red = Red(cli_flags=cli_flags, description="Red V3", dm_help=None)
try:
driver_cls = drivers.get_driver_class()
loop.run_until_complete(driver_cls.initialize(**data_manager.storage_details()))
loop.run_until_complete(edit_instance(red, cli_flags))
loop.run_until_complete(driver_cls.teardown())
except (KeyboardInterrupt, EOFError):
print("Aborted!")
finally:
loop.run_until_complete(asyncio.sleep(1))
asyncio.set_event_loop(None)
loop.stop()
loop.close()
sys.exit(0)
async def run_bot(red: Red, cli_flags: Namespace) -> None:
"""
This runs the bot.
Any shutdown which is a result of not being able to log in needs to raise
a SystemExit exception.
If the bot starts normally, the bot should be left to handle the exit case.
    It will raise SystemExit in a task, which will reach the event loop,
    interrupt run_forever, and trigger our cleanup process; no additional
    handling is needed in this function.
"""
driver_cls = drivers.get_driver_class()
await driver_cls.initialize(**data_manager.storage_details())
redbot.logging.init_logging(
level=cli_flags.logging_level,
location=data_manager.core_data_path() / "logs",
force_rich_logging=cli_flags.rich_logging,
)
log.debug("====Basic Config====")
log.debug("Data Path: %s", data_manager._base_data_path())
log.debug("Storage Type: %s", data_manager.storage_type())
# lib folder has to be in sys.path before trying to load any 3rd-party cog (GH-3061)
# We might want to change handling of requirements in Downloader at later date
LIB_PATH = data_manager.cog_data_path(raw_name="Downloader") / "lib"
LIB_PATH.mkdir(parents=True, exist_ok=True)
if str(LIB_PATH) not in sys.path:
sys.path.append(str(LIB_PATH))
# "It's important to note that the global `working_set` object is initialized from
# `sys.path` when `pkg_resources` is first imported, but is only updated if you do
# all future `sys.path` manipulation via `pkg_resources` APIs. If you manually modify
# `sys.path`, you must invoke the appropriate methods on the `working_set` instance
# to keep it in sync."
# Source: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#workingset-objects
pkg_resources.working_set.add_entry(str(LIB_PATH))
sys.meta_path.insert(0, SharedLibImportWarner())
if cli_flags.token:
token = cli_flags.token
else:
token = os.environ.get("RED_TOKEN", None)
if not token:
token = await red._config.token()
prefix = cli_flags.prefix or await red._config.prefix()
if not (token and prefix):
if cli_flags.no_prompt is False:
new_token = await interactive_config(
red, token_set=bool(token), prefix_set=bool(prefix)
)
if new_token:
token = new_token
else:
log.critical("Token and prefix must be set in order to login.")
sys.exit(1)
if cli_flags.dry_run:
await red.http.close()
sys.exit(0)
try:
await red.start(token, bot=True, cli_flags=cli_flags)
except discord.LoginFailure:
log.critical("This token doesn't seem to be valid.")
db_token = await red._config.token()
if db_token and not cli_flags.no_prompt:
if confirm("\nDo you want to reset the token?"):
await red._config.token.set("")
print("Token has been reset.")
sys.exit(0)
sys.exit(1)
except discord.PrivilegedIntentsRequired:
print(
"Red requires all Privileged Intents to be enabled.\n"
"You can find out how to enable Privileged Intents with this guide:\n"
"https://docs.discord.red/en/stable/bot_application_guide.html#enabling-privileged-intents"
)
sys.exit(1)
return None
def handle_early_exit_flags(cli_flags: Namespace):
if cli_flags.list_instances:
list_instances()
elif cli_flags.version:
print("Red V3")
print("Current Version: {}".format(__version__))
sys.exit(0)
elif cli_flags.debuginfo:
debug_info()
elif not cli_flags.instance_name and (not cli_flags.no_instance or cli_flags.edit):
print("Error: No instance name was provided!")
sys.exit(1)
async def shutdown_handler(red, signal_type=None, exit_code=None):
if signal_type:
log.info("%s received. Quitting...", signal_type)
# Do not collapse the below line into other logic
    # We need to re-enter this function
# after it interrupts the event loop.
sys.exit(ExitCodes.SHUTDOWN)
elif exit_code is None:
log.info("Shutting down from unhandled exception")
red._shutdown_mode = ExitCodes.CRITICAL
if exit_code is not None:
red._shutdown_mode = exit_code
try:
await red.logout()
finally:
# Then cancels all outstanding tasks other than ourselves
pending = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
[task.cancel() for task in pending]
await asyncio.gather(*pending, return_exceptions=True)
def global_exception_handler(red, loop, context):
"""
Logs unhandled exceptions in other tasks
"""
msg = context.get("exception", context["message"])
# These will get handled later when it *also* kills loop.run_forever
if not isinstance(msg, (KeyboardInterrupt, SystemExit)):
if isinstance(msg, Exception):
log.critical("Caught unhandled exception in task:\n", exc_info=msg)
else:
log.critical("Caught unhandled exception in task: %s", msg)
def red_exception_handler(red, red_task: asyncio.Future):
"""
This is set as a done callback for Red
must be used with functools.partial
If the main bot.run dies for some reason,
we don't want to swallow the exception and hang.
"""
try:
red_task.result()
except (SystemExit, KeyboardInterrupt, asyncio.CancelledError):
pass # Handled by the global_exception_handler, or cancellation
except Exception as exc:
log.critical("The main bot task didn't handle an exception and has crashed", exc_info=exc)
log.warning("Attempting to die as gracefully as possible...")
red.loop.create_task(shutdown_handler(red))
def main():
red = None # Error handling for users misusing the bot
cli_flags = parse_cli_flags(sys.argv[1:])
handle_early_exit_flags(cli_flags)
if cli_flags.edit:
handle_edit(cli_flags)
return
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if cli_flags.no_instance:
print(
"\033[1m"
"Warning: The data will be placed in a temporary folder and removed on next system "
"reboot."
"\033[0m"
)
cli_flags.instance_name = "temporary_red"
data_manager.create_temp_config()
data_manager.load_basic_configuration(cli_flags.instance_name)
red = Red(cli_flags=cli_flags, description="Red V3", dm_help=None)
if os.name != "nt":
# None of this works on windows.
# At least it's not a redundant handler...
signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
for s in signals:
loop.add_signal_handler(
s, lambda s=s: asyncio.create_task(shutdown_handler(red, s))
)
exc_handler = functools.partial(global_exception_handler, red)
loop.set_exception_handler(exc_handler)
# We actually can't (just) use asyncio.run here
# We probably could if we didn't support windows, but we might run into
# a scenario where this isn't true if anyone works on RPC more in the future
fut = loop.create_task(run_bot(red, cli_flags))
r_exc_handler = functools.partial(red_exception_handler, red)
fut.add_done_callback(r_exc_handler)
loop.run_forever()
except KeyboardInterrupt:
# We still have to catch this here too. (*joy*)
log.warning("Please do not use Ctrl+C to Shutdown Red! (attempting to die gracefully...)")
log.error("Received KeyboardInterrupt, treating as interrupt")
if red is not None:
loop.run_until_complete(shutdown_handler(red, signal.SIGINT))
except SystemExit as exc:
        # We also have to catch this one here. Basically any exception which normally
        # kills the Python interpreter (BaseException subclasses minus asyncio.CancelledError)
        # needs to be handled here before the loop closes.
log.info("Shutting down with exit code: %s", exc.code)
if red is not None:
loop.run_until_complete(shutdown_handler(red, None, exc.code))
except Exception as exc: # Non standard case.
log.exception("Unexpected exception (%s): ", type(exc), exc_info=exc)
if red is not None:
loop.run_until_complete(shutdown_handler(red, None, ExitCodes.CRITICAL))
finally:
# Allows transports to close properly, and prevent new ones from being opened.
# Transports may still not be closed correctly on windows, see below
loop.run_until_complete(loop.shutdown_asyncgens())
# *we* aren't cleaning up more here, but it prevents
# a runtime error at the event loop on windows
# with resources which require longer to clean up.
# With other event loops, a failure to cleanup prior to here
# results in a resource warning instead
log.info("Please wait, cleaning up a bit more")
loop.run_until_complete(asyncio.sleep(2))
asyncio.set_event_loop(None)
loop.stop()
loop.close()
exit_code = red._shutdown_mode if red is not None else 1
sys.exit(exit_code)
if __name__ == "__main__":
main()
|
from homeassistant.components.alexa import smart_home
from . import DEFAULT_CONFIG, get_new_request
from tests.async_mock import patch
async def test_unsupported_domain(hass):
"""Discovery ignores entities of unknown domains."""
request = get_new_request("Alexa.Discovery", "Discover")
hass.states.async_set("woz.boop", "on", {"friendly_name": "Boop Woz"})
msg = await smart_home.async_handle_message(hass, DEFAULT_CONFIG, request)
assert "event" in msg
msg = msg["event"]
assert not msg["payload"]["endpoints"]
async def test_serialize_discovery_recovers(hass, caplog):
"""Test we handle an interface raising unexpectedly during serialize discovery."""
request = get_new_request("Alexa.Discovery", "Discover")
hass.states.async_set("switch.bla", "on", {"friendly_name": "Boop Woz"})
with patch(
"homeassistant.components.alexa.capabilities.AlexaPowerController.serialize_discovery",
side_effect=TypeError,
):
msg = await smart_home.async_handle_message(hass, DEFAULT_CONFIG, request)
assert "event" in msg
msg = msg["event"]
interfaces = {
ifc["interface"] for ifc in msg["payload"]["endpoints"][0]["capabilities"]
}
assert "Alexa.PowerController" not in interfaces
assert (
f"Error serializing Alexa.PowerController discovery for {hass.states.get('switch.bla')}"
in caplog.text
)
|
import csv
import json
from io import StringIO
from os import PathLike
from pathlib import Path
from typing import Union
from box.exceptions import BoxError
yaml_available = True
toml_available = True
msgpack_available = True
try:
import ruamel.yaml as yaml
except ImportError:
try:
import yaml # type: ignore
except ImportError:
yaml = None # type: ignore
yaml_available = False
try:
import toml
except ImportError:
toml = None # type: ignore
toml_available = False
try:
import msgpack # type: ignore
except ImportError:
msgpack = None # type: ignore
msgpack_available = False
BOX_PARAMETERS = (
"default_box",
"default_box_attr",
"conversion_box",
"frozen_box",
"camel_killer_box",
"box_safe_prefix",
"box_duplicates",
"ordered_box",
"default_box_none_transform",
"box_dots",
"modify_tuples_box",
"box_intact_types",
"box_recast",
)
def _exists(filename: Union[str, PathLike], create: bool = False) -> Path:
path = Path(filename)
if create:
try:
path.touch(exist_ok=True)
except OSError as err:
raise BoxError(f"Could not create file {filename} - {err}")
else:
return path
if not path.exists():
raise BoxError(f'File "{filename}" does not exist')
if not path.is_file():
raise BoxError(f"{filename} is not a file")
return path
def _to_json(
obj, filename: Union[str, PathLike] = None, encoding: str = "utf-8", errors: str = "strict", **json_kwargs
):
if filename:
_exists(filename, create=True)
with open(filename, "w", encoding=encoding, errors=errors) as f:
json.dump(obj, f, ensure_ascii=False, **json_kwargs)
else:
return json.dumps(obj, ensure_ascii=False, **json_kwargs)
def _from_json(
json_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
multiline: bool = False,
**kwargs,
):
if filename:
with open(filename, "r", encoding=encoding, errors=errors) as f:
if multiline:
data = [
json.loads(line.strip(), **kwargs)
for line in f
if line.strip() and not line.strip().startswith("#")
]
else:
data = json.load(f, **kwargs)
elif json_string:
data = json.loads(json_string, **kwargs)
else:
raise BoxError("from_json requires a string or filename")
return data
def _to_yaml(
obj,
filename: Union[str, PathLike] = None,
default_flow_style: bool = False,
encoding: str = "utf-8",
errors: str = "strict",
**yaml_kwargs,
):
if filename:
_exists(filename, create=True)
with open(filename, "w", encoding=encoding, errors=errors) as f:
yaml.dump(obj, stream=f, default_flow_style=default_flow_style, **yaml_kwargs)
else:
return yaml.dump(obj, default_flow_style=default_flow_style, **yaml_kwargs)
def _from_yaml(
yaml_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
if "Loader" not in kwargs:
kwargs["Loader"] = yaml.SafeLoader
if filename:
_exists(filename)
with open(filename, "r", encoding=encoding, errors=errors) as f:
data = yaml.load(f, **kwargs)
elif yaml_string:
data = yaml.load(yaml_string, **kwargs)
else:
raise BoxError("from_yaml requires a string or filename")
return data
def _to_toml(obj, filename: Union[str, PathLike] = None, encoding: str = "utf-8", errors: str = "strict"):
if filename:
_exists(filename, create=True)
with open(filename, "w", encoding=encoding, errors=errors) as f:
toml.dump(obj, f)
else:
return toml.dumps(obj)
def _from_toml(
toml_string: str = None, filename: Union[str, PathLike] = None, encoding: str = "utf-8", errors: str = "strict"
):
if filename:
_exists(filename)
with open(filename, "r", encoding=encoding, errors=errors) as f:
data = toml.load(f)
elif toml_string:
data = toml.loads(toml_string)
else:
raise BoxError("from_toml requires a string or filename")
return data
def _to_msgpack(obj, filename: Union[str, PathLike] = None, **kwargs):
if filename:
_exists(filename, create=True)
with open(filename, "wb") as f:
msgpack.pack(obj, f, **kwargs)
else:
return msgpack.packb(obj, **kwargs)
def _from_msgpack(msgpack_bytes: bytes = None, filename: Union[str, PathLike] = None, **kwargs):
if filename:
_exists(filename)
with open(filename, "rb") as f:
data = msgpack.unpack(f, **kwargs)
elif msgpack_bytes:
data = msgpack.unpackb(msgpack_bytes, **kwargs)
else:
raise BoxError("from_msgpack requires a string or filename")
return data
def _to_csv(box_list, filename: Union[str, PathLike] = None, encoding: str = "utf-8", errors: str = "strict", **kwargs):
csv_column_names = list(box_list[0].keys())
for row in box_list:
if list(row.keys()) != csv_column_names:
raise BoxError("BoxList must contain the same dictionary structure for every item to convert to csv")
if filename:
_exists(filename, create=True)
out_data = open(filename, "w", encoding=encoding, errors=errors, newline="")
else:
out_data = StringIO("")
writer = csv.DictWriter(out_data, fieldnames=csv_column_names, **kwargs)
writer.writeheader()
for data in box_list:
writer.writerow(data)
if not filename:
return out_data.getvalue() # type: ignore
out_data.close()
def _from_csv(
csv_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
if csv_string:
with StringIO(csv_string) as cs:
reader = csv.DictReader(cs)
return [row for row in reader]
_exists(filename) # type: ignore
with open(filename, "r", encoding=encoding, errors=errors, newline="") as f: # type: ignore
reader = csv.DictReader(f, **kwargs)
return [row for row in reader]
|
import numpy as np
from scattertext.termscoring.CohensDCalculator import CohensDCalculator
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class CohensD(CorpusBasedTermScorer, CohensDCalculator):
'''
Cohen's d scores
term_scorer = (CohensD(corpus).set_categories('Positive', ['Negative'], ['Plot']))
html = st.produce_frequency_explorer(
corpus,
category='Positive',
not_categories=['Negative'],
neutral_categories=['Plot'],
term_scorer=term_scorer,
metadata=rdf['movie_name'],
grey_threshold=0,
show_neutral=True
)
file_name = 'rotten_fresh_fre.html'
open(file_name, 'wb').write(html.encode('utf-8'))
IFrame(src=file_name, width=1300, height=700)
'''
def _set_scorer_args(self, **kwargs):
pass
def get_scores(self, *args):
return self.get_score_df()['cohens_d']
def get_score_df(self, correction_method=None):
'''
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
# From https://people.kth.se/~lang/Effect_size.pdf
# Shinichi Nakagawa1 and Innes C. Cuthill. Effect size, confidence interval and statistical
# significance: a practical guide for biologists. 2007. In Biological Reviews 82.
#
# Modification: when calculating variance, an empty document is added to each set
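        # For reference, for two groups Cohen's d is
        #     d = (mean_1 - mean_2) / s_pooled, with
        #     s_pooled = sqrt(((n_1 - 1)*s_1**2 + (n_2 - 1)*s_2**2) / (n_1 + n_2 - 2))
        # get_cohens_d_df applies this per term to the category/non-category matrices.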
X = self._get_X().astype(np.float64)
X_doc_len_norm = X / X.sum(axis=1)
X_doc_len_norm[np.isnan(X_doc_len_norm)] = 0
cat_X, ncat_X = self._get_cat_and_ncat(X_doc_len_norm)
orig_cat_X, orig_ncat_X = self._get_cat_and_ncat(X)
score_df = (self
.get_cohens_d_df(cat_X, ncat_X, orig_cat_X, orig_ncat_X, correction_method)
.set_index(np.array(self._get_index())))
return score_df
def get_name(self):
return "Cohen's d"
class HedgesR(CohensD):
def get_scores(self, *args):
return self.get_score_df()['hedges_r']
def get_name(self):
return "Hedge's r"
|
import logging
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .api import HomeConnectDevice
from .const import DOMAIN, SIGNAL_UPDATE_ENTITIES
_LOGGER = logging.getLogger(__name__)
class HomeConnectEntity(Entity):
"""Generic Home Connect entity (base class)."""
def __init__(self, device: HomeConnectDevice, desc: str) -> None:
"""Initialize the entity."""
self.device = device
self.desc = desc
self._name = f"{self.device.appliance.name} {desc}"
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ENTITIES, self._update_callback
)
)
@callback
def _update_callback(self, ha_id):
"""Update data."""
if ha_id == self.device.appliance.haId:
self.async_entity_update()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the node (used for Entity_ID)."""
return self._name
@property
def unique_id(self):
"""Return the unique id base on the id returned by Home Connect and the entity name."""
return f"{self.device.appliance.haId}-{self.desc}"
@property
def device_info(self):
"""Return info about the device."""
return {
"identifiers": {(DOMAIN, self.device.appliance.haId)},
"name": self.device.appliance.name,
"manufacturer": self.device.appliance.brand,
"model": self.device.appliance.vib,
}
@callback
def async_entity_update(self):
"""Update the entity."""
_LOGGER.debug("Entity update triggered on %s", self)
self.async_schedule_update_ha_state(True)
|
import copy
import unittest
from absl import flags
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.aws import redshift
from tests import pkb_common_test_case
CLUSTER_PARAMETER_GROUP = 'fake_redshift_cluster_parameter_group'
CLUSTER_SUBNET_GROUP = 'fake_redshift_cluster_subnet_group'
PKB_CLUSTER = 'pkb-cluster'
PKB_CLUSTER_DATABASE = 'pkb-database'
REDSHIFT_NODE_TYPE = 'dc2.large'
USERNAME = 'pkb-username'
PASSWORD = 'pkb-password'
TEST_RUN_URI = 'fakeru'
AWS_ZONE_US_EAST_1A = 'us-east-1a'
BASE_REDSHIFT_SPEC = {
'cluster_identifier': PKB_CLUSTER,
'db': PKB_CLUSTER_DATABASE,
'user': USERNAME,
'password': PASSWORD,
'node_type': REDSHIFT_NODE_TYPE,
}
FLAGS = flags.FLAGS
class FakeRedshiftClusterSubnetGroup(object):
def __init__(self):
self.name = CLUSTER_SUBNET_GROUP
class FakeRedshiftClusterParameterGroup(object):
def __init__(self):
self.name = CLUSTER_PARAMETER_GROUP
class RedshiftTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(RedshiftTestCase, self).setUp()
FLAGS.run_uri = TEST_RUN_URI
FLAGS.zones = [AWS_ZONE_US_EAST_1A]
def testInvalidClusterCreationError(self):
kwargs = copy.copy(BASE_REDSHIFT_SPEC)
kwargs['node_count'] = None
with self.assertRaises(errors.Config.InvalidValue):
benchmark_config_spec._EdwServiceSpec('NAME', **kwargs)
def testSingleNodeClusterCreation(self):
kwargs = copy.copy(BASE_REDSHIFT_SPEC)
kwargs['node_count'] = 1
spec = benchmark_config_spec._EdwServiceSpec('NAME', **kwargs)
redshift_local = redshift.Redshift(spec)
self.assertIsNone(redshift_local.snapshot)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=('out_', 'err_', 0)) as mock_issue:
redshift_local.Initialize(redshift_local.cluster_identifier,
redshift_local.node_type,
redshift_local.node_count, redshift_local.user,
redshift_local.password,
FakeRedshiftClusterParameterGroup(),
FakeRedshiftClusterSubnetGroup())
mock_issue.assert_called_once()
mock_issue.assert_called_with([
'aws', '--output', 'json', '--region', 'us-east-1', 'redshift',
'create-cluster', '--cluster-identifier', PKB_CLUSTER,
'--cluster-type', 'single-node', '--node-type', REDSHIFT_NODE_TYPE,
'--master-username', USERNAME, '--master-user-password', PASSWORD,
'--cluster-parameter-group-name',
'fake_redshift_cluster_parameter_group',
'--cluster-subnet-group-name', 'fake_redshift_cluster_subnet_group',
'--publicly-accessible', '--automated-snapshot-retention-period=0'
], raise_on_failure=False)
def testMultiNodeClusterCreation(self):
kwargs = copy.copy(BASE_REDSHIFT_SPEC)
kwargs['node_count'] = 2
spec = benchmark_config_spec._EdwServiceSpec('NAME', **kwargs)
redshift_local = redshift.Redshift(spec)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=('out_', 'err_', 0)) as mock_issue:
redshift_local.Initialize(redshift_local.cluster_identifier,
redshift_local.node_type,
redshift_local.node_count, redshift_local.user,
redshift_local.password,
FakeRedshiftClusterParameterGroup(),
FakeRedshiftClusterSubnetGroup())
mock_issue.assert_called_once()
mock_issue.assert_called_with([
'aws', '--output', 'json', '--region', 'us-east-1', 'redshift',
'create-cluster', '--cluster-identifier', PKB_CLUSTER,
'--number-of-nodes', '2', '--node-type', REDSHIFT_NODE_TYPE,
'--master-username', USERNAME, '--master-user-password', PASSWORD,
'--cluster-parameter-group-name',
'fake_redshift_cluster_parameter_group',
'--cluster-subnet-group-name', 'fake_redshift_cluster_subnet_group',
'--publicly-accessible', '--automated-snapshot-retention-period=0'
], raise_on_failure=False)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
import itertools
import operator
import os
from plumbum.lib import six
from abc import abstractmethod, abstractproperty
import warnings
from functools import reduce
class FSUser(int):
"""A special object that represents a file-system user. It derives from ``int``, so it behaves
    just like a number (``uid``/``gid``), but also has a ``.name`` attribute that holds the
string-name of the user, if given (otherwise ``None``)
"""
def __new__(cls, val, name=None):
self = int.__new__(cls, val)
self.name = name
return self
class Path(str, six.ABC):
"""An abstraction over file system paths. This class is abstract, and the two implementations
are :class:`LocalPath <plumbum.machines.local.LocalPath>` and
:class:`RemotePath <plumbum.path.remote.RemotePath>`.
"""
CASE_SENSITIVE = True
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, str(self))
def __div__(self, other):
"""Joins two paths"""
return self.join(other)
__truediv__ = __div__
def __getitem__(self, key):
if type(key) == str or isinstance(key, Path):
return self / key
return str(self)[key]
def __floordiv__(self, expr):
"""Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
return self.glob(expr)
def __iter__(self):
"""Iterate over the files in this directory"""
return iter(self.list())
def __eq__(self, other):
if isinstance(other, Path):
return self._get_info() == other._get_info()
elif isinstance(other, str):
if self.CASE_SENSITIVE:
return str(self) == other
else:
return str(self).lower() == other.lower()
else:
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __hash__(self):
if self.CASE_SENSITIVE:
return hash(str(self))
else:
return hash(str(self).lower())
def __nonzero__(self):
return bool(str(self))
__bool__ = __nonzero__
def __fspath__(self):
"""Added for Python 3.6 support"""
return str(self)
def __contains__(self, item):
"""Paths should support checking to see if an file or folder is in them."""
try:
return (self / item.name).exists()
except AttributeError:
return (self / item).exists()
@abstractmethod
def _form(self, *parts):
pass
def up(self, count=1):
"""Go up in ``count`` directories (the default is 1)"""
return self.join("../" * count)
def walk(self, filter=lambda p: True,
dir_filter=lambda p: True): # @ReservedAssignment
"""traverse all (recursive) sub-elements under this directory, that match the given filter.
By default, the filter accepts everything; you can provide a custom filter function that
takes a path as an argument and returns a boolean
:param filter: the filter (predicate function) for matching results. Only paths matching
this predicate are returned. Defaults to everything.
:param dir_filter: the filter (predicate function) for matching directories. Only directories
matching this predicate are recursed into. Defaults to everything.
"""
for p in self.list():
if filter(p):
yield p
if p.is_dir() and dir_filter(p):
for p2 in p.walk(filter, dir_filter):
yield p2
@abstractproperty
def name(self):
"""The basename component of this path"""
@property
def basename(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .name instead", DeprecationWarning)
return self.name
@abstractproperty
def stem(self):
"""The name without an extension, or the last component of the path"""
@abstractproperty
def dirname(self):
"""The dirname component of this path"""
@abstractproperty
def root(self):
"""The root of the file tree (`/` on Unix)"""
@abstractproperty
def drive(self):
"""The drive letter (on Windows)"""
@abstractproperty
def suffix(self):
"""The suffix of this file"""
@abstractproperty
def suffixes(self):
"""This is a list of all suffixes"""
@abstractproperty
def uid(self):
"""The user that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
object which behaves like an ``int`` (as expected from ``uid``), but it also has a ``.name``
attribute that holds the string-name of the user"""
@abstractproperty
def gid(self):
"""The group that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
object which behaves like an ``int`` (as expected from ``gid``), but it also has a ``.name``
attribute that holds the string-name of the group"""
@abstractmethod
def as_uri(self, scheme=None):
"""Returns a universal resource identifier. Use ``scheme`` to force a scheme."""
@abstractmethod
def _get_info(self):
pass
@abstractmethod
def join(self, *parts):
"""Joins this path with any number of paths"""
@abstractmethod
def list(self):
"""Returns the files in this directory"""
@abstractmethod
def iterdir(self):
"""Returns an iterator over the directory. Might be slightly faster on Python 3.5 than .list()"""
@abstractmethod
def is_dir(self):
"""Returns ``True`` if this path is a directory, ``False`` otherwise"""
def isdir(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .is_dir() instead", DeprecationWarning)
return self.is_dir()
@abstractmethod
def is_file(self):
"""Returns ``True`` if this path is a regular file, ``False`` otherwise"""
def isfile(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .is_file() instead", DeprecationWarning)
return self.is_file()
def islink(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use is_symlink instead", DeprecationWarning)
return self.is_symlink()
@abstractmethod
def is_symlink(self):
"""Returns ``True`` if this path is a symbolic link, ``False`` otherwise"""
@abstractmethod
def exists(self):
"""Returns ``True`` if this path exists, ``False`` otherwise"""
@abstractmethod
def stat(self):
"""Returns the os.stats for a file"""
pass
@abstractmethod
def with_name(self, name):
"""Returns a path with the name replaced"""
@abstractmethod
def with_suffix(self, suffix, depth=1):
"""Returns a path with the suffix replaced. Up to last ``depth`` suffixes will be
replaced. None will replace all suffixes. If there are less than ``depth`` suffixes,
this will replace all suffixes. ``.tar.gz`` is an example where ``depth=2`` or
``depth=None`` is useful"""
def preferred_suffix(self, suffix):
"""Adds a suffix if one does not currently exist (otherwise, no change). Useful
for loading files with a default suffix"""
if len(self.suffixes) > 0:
return self
else:
return self.with_suffix(suffix)
@abstractmethod
def glob(self, pattern):
"""Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
@abstractmethod
def delete(self):
"""Deletes this path (recursively, if a directory)"""
@abstractmethod
def move(self, dst):
"""Moves this path to a different location"""
def rename(self, newname):
"""Renames this path to the ``new name`` (only the basename is changed)"""
return self.move(self.up() / newname)
@abstractmethod
def copy(self, dst, override=None):
"""Copies this path (recursively, if a directory) to the destination path "dst".
Raises TypeError if dst exists and override is False.
Will overwrite if override is True.
Will silently fail to copy if override is None (the default)."""
@abstractmethod
def mkdir(self, mode=0o777, parents=True, exist_ok=True):
"""
Creates a directory at this path.
:param mode: **Currently only implemented for local paths!** Numeric mode to use for directory
creation, which may be ignored on some systems. The current implementation
reproduces the behavior of ``os.mkdir`` (i.e., the current umask is first masked
out), but this may change for remote paths. As with ``os.mkdir``, it is recommended
to call :func:`chmod` explicitly if you need to be sure.
:param parents: If this is true (the default), the directory's parents will also be created if
necessary.
:param exist_ok: If this is true (the default), no exception will be raised if the directory
already exists (otherwise ``OSError``).
Note that the defaults for ``parents`` and ``exist_ok`` are the opposite of what they are in
Python's own ``pathlib`` - this is to maintain backwards-compatibility with Plumbum's behaviour
from before they were implemented.
"""
@abstractmethod
def open(self, mode="r"):
"""opens this path as a file"""
@abstractmethod
def read(self, encoding=None):
"""returns the contents of this file as a ``str``. By default the data is read
as text, but you can specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
@abstractmethod
def write(self, data, encoding=None):
"""writes the given data to this file. By default the data is written as-is
(either text or binary), but you can specify the encoding, e.g., ``'latin1'``
or ``'utf8'``"""
@abstractmethod
def touch(self):
"""Update the access time. Creates an empty file if none exists."""
@abstractmethod
def chown(self, owner=None, group=None, recursive=None):
"""Change ownership of this path.
:param owner: The owner to set (either ``uid`` or ``username``), optional
:param group: The group to set (either ``gid`` or ``groupname``), optional
:param recursive: whether to change ownership of all contained files and subdirectories.
Only meaningful when ``self`` is a directory. If ``None``, the value
will default to ``True`` if ``self`` is a directory, ``False`` otherwise.
"""
@abstractmethod
def chmod(self, mode):
"""Change the mode of path to the numeric mode.
:param mode: file mode as for os.chmod
"""
@staticmethod
def _access_mode_to_flags(mode,
flags={
"f": os.F_OK,
"w": os.W_OK,
"r": os.R_OK,
"x": os.X_OK
}):
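        # e.g. _access_mode_to_flags("rw") -> os.R_OK | os.W_OK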
if isinstance(mode, str):
mode = reduce(operator.or_, [flags[m] for m in mode.lower()], 0)
return mode
@abstractmethod
def access(self, mode=0):
"""Test file existence or permission bits
:param mode: a bitwise-or of access bits, or a string-representation thereof:
``'f'``, ``'x'``, ``'r'``, ``'w'`` for ``os.F_OK``, ``os.X_OK``,
``os.R_OK``, ``os.W_OK``
"""
@abstractmethod
def link(self, dst):
"""Creates a hard link from ``self`` to ``dst``
:param dst: the destination path
"""
@abstractmethod
def symlink(self, dst):
"""Creates a symbolic link from ``self`` to ``dst``
:param dst: the destination path
"""
@abstractmethod
def unlink(self):
"""Deletes a symbolic link"""
def split(self, *dummy_args, **dummy_kargs):
"""Splits the path on directory separators, yielding a list of directories, e.g,
``"/var/log/messages"`` will yield ``['var', 'log', 'messages']``.
"""
parts = []
path = self
while path != path.dirname:
parts.append(path.name)
path = path.dirname
return parts[::-1]
@property
def parts(self):
"""Splits the directory into parts, including the base directroy, returns a tuple"""
return tuple([self.drive + self.root] + self.split())
def relative_to(self, source):
"""Computes the "relative path" require to get from ``source`` to ``self``. They satisfy the invariant
``source_path + (target_path - source_path) == target_path``. For example::
/var/log/messages - /var/log/messages = []
/var/log/messages - /var = [log, messages]
/var/log/messages - / = [var, log, messages]
/var/log/messages - /var/tmp = [.., log, messages]
/var/log/messages - /opt = [.., var, log, messages]
/var/log/messages - /opt/lib = [.., .., var, log, messages]
"""
if isinstance(source, str):
source = self._form(source)
parts = self.split()
baseparts = source.split()
ancestors = len(
list(
itertools.takewhile(lambda p: p[0] == p[1],
zip(parts, baseparts))))
return RelativePath([".."] * (len(baseparts) - ancestors) +
parts[ancestors:])
def __sub__(self, other):
"""Same as ``self.relative_to(other)``"""
return self.relative_to(other)
def _glob(self, pattern, fn):
"""Applies a glob string or list/tuple/iterable to the current path, using ``fn``"""
if isinstance(pattern, str):
return fn(pattern)
else:
results = []
for single_pattern in pattern:
results.extend(fn(single_pattern))
return sorted(list(set(results)))
def resolve(self, strict=False):
"""Added to allow pathlib like syntax. Does nothing since
Plumbum paths are always absolute. Does not (currently) resolve
symlinks."""
# TODO: Resolve symlinks here
return self
@property
def parents(self):
"""Pathlib like sequence of ancestors"""
join = lambda x, y: self._form(x) / y
as_list = (reduce(join, self.parts[:i], self.parts[0])
for i in range(len(self.parts) - 1, 0, -1))
return tuple(as_list)
@property
def parent(self):
"""Pathlib like parent of the path."""
return self.parents[0]
class RelativePath(object):
"""
Relative paths are the "delta" required to get from one path to another.
    Note that relative paths do not point at anything, and thus are not paths.
Therefore they are system agnostic (but closed under addition)
Paths are always absolute and point at "something", whether existent or not.
Relative paths are created by subtracting paths (``Path.relative_to``)
"""
def __init__(self, parts):
self.parts = parts
def __str__(self):
return "/".join(self.parts)
def __iter__(self):
return iter(self.parts)
def __len__(self):
return len(self.parts)
def __getitem__(self, index):
return self.parts[index]
def __repr__(self):
return "RelativePath(%r)" % (self.parts, )
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __hash__(self):
return hash(str(self))
def __nonzero__(self):
return bool(str(self))
__bool__ = __nonzero__
def up(self, count=1):
return RelativePath(self.parts[:-count])
def __radd__(self, path):
return path.join(*self.parts)
|
from __future__ import absolute_import
from __future__ import print_function
import abc
import numpy as np
import socket
import six.moves.cPickle as pickle
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from ..utils.sockets import determine_master, send, receive
class BaseParameterClient(object):
"""BaseParameterClient
Parameter-server clients can do two things: retrieve the current parameters
from the corresponding server, and send updates (`delta`) to the server.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
raise NotImplementedError
@abc.abstractmethod
def update_parameters(self, delta):
"""Update master parameters with deltas from training process
"""
raise NotImplementedError
@abc.abstractmethod
def get_parameters(self):
"""Retrieve master weights from parameter server
"""
raise NotImplementedError
class HttpClient(BaseParameterClient):
"""HttpClient
Uses HTTP protocol for communication with its corresponding parameter server,
namely HttpServer. The HTTP server provides two endpoints, `/parameters` to
get parameters and `/update` to update the server's parameters.
"""
def __init__(self, port=4000):
self.master_url = determine_master(port=port)
self.headers = {'Content-Type': 'application/elephas'}
def get_parameters(self):
request = urllib2.Request('http://{}/parameters'.format(self.master_url),
headers=self.headers)
pickled_weights = urllib2.urlopen(request).read()
return pickle.loads(pickled_weights)
def update_parameters(self, delta):
request = urllib2.Request('http://{}/update'.format(self.master_url),
pickle.dumps(delta, -1), headers=self.headers)
return urllib2.urlopen(request).read()
class SocketClient(BaseParameterClient):
"""SocketClient
Uses a socket connection to communicate with an instance of `SocketServer`.
The socket server listens to two types of events. Those with a `g` prefix
indicate a get-request, those with a `u` indicate a parameter update.
"""
    def __init__(self, port=4000):
        self.master_url = determine_master(port=port)
        host = self.master_url.split(':')[0]
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
def get_parameters(self):
self.socket.sendall(b'g')
return np.asarray(receive(self.socket))
def update_parameters(self, delta):
data = {'delta': delta}
self.socket.sendall(b'u')
send(self.socket, data)
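# Illustrative usage of these clients (assumes a matching elephas parameter
# server is already running on the chosen port):
#
#     client = HttpClient(port=4000)
#     weights = client.get_parameters()   # fetch current master weights
#     client.update_parameters(delta)     # push a locally computed update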
|
import string
import cloudpickle
import numpy as np
import spacy
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from spacy.lang.en.stop_words import STOP_WORDS
class SklearnIntentClassifier:
def __init__(self):
self.model = None
self.spacynlp = spacy.load('en')
        self.stopwords = set(list(STOP_WORDS) +
                             ["n't", "'s", "'m", "ca"] +
                             list(ENGLISH_STOP_WORDS))
self.punctuations = " ".join(string.punctuation).split(" ") + \
["-----", "---", "...", "'ve"]
def spacy_tokenizer(self, sentence):
"""
        perform basic cleaning, tokenization and lemmatization
:param sentence:
:return list of clean tokens:
"""
tokens = self.spacynlp(sentence)
tokens = [tok.lemma_.lower().strip() if
tok.lemma_ != "-PRON-" else tok.lower_ for tok in tokens]
tokens = [tok for tok in tokens if
(tok not in self.stopwords and tok not in self.punctuations)]
while "" in tokens:
tokens.remove("")
while " " in tokens:
tokens.remove(" ")
while "\n" in tokens:
tokens.remove("\n")
while "\n\n" in tokens:
tokens.remove("\n\n")
return tokens
def train(self, X, y, outpath=None, verbose=True):
"""
Train intent classifier for given training data
:param X:
:param y:
:param outpath:
:param verbose:
:return:
"""
def build(X, y=None):
"""
Inner build function that builds a single model.
:param X:
:param y:
:return:
"""
model = Pipeline([
('vectorizer', TfidfVectorizer(
tokenizer=self.spacy_tokenizer,
preprocessor=None, lowercase=False)
),
('clf', SVC(C=1, kernel="linear",
probability=True, class_weight='balanced')
)])
items, counts = np.unique(y, return_counts=True)
cv_splits = max(2, min(5, np.min(counts) // 5))
Cs = [0.01, 0.25, 1, 2, 5, 10, 20, 100]
param_grid = {'clf__C': Cs, 'clf__kernel': ["linear"]}
grid_search = GridSearchCV(model,
param_grid=param_grid,
scoring='f1_weighted',
cv=cv_splits,
verbose=2,
n_jobs=-1
)
grid_search.fit(X, y)
return grid_search
model = build(X, y)
if outpath:
with open(outpath, 'wb') as f:
cloudpickle.dump(model, f)
if verbose:
print("Model written out to {}".format(outpath))
return model
def load(self, PATH):
"""
load trained model from given path
:param PATH:
:return:
"""
try:
with open(PATH, 'rb') as f:
self.model = cloudpickle.load(f)
except IOError:
return False
def predict(self, text, return_all=False, INTENT_RANKING_LENGTH=5):
"""
Predict class label for given model
"""
return self.process(text, return_all, INTENT_RANKING_LENGTH)
def predict_proba(self, X):
"""Given a bow vector of an input text, predict most probable label.
Returns only the most likely label.
:param X: bow of input text
:return: tuple of first, the most probable label
and second, its probability"""
pred_result = self.model.predict_proba(X)
print(pred_result)
# sort the probabilities retrieving the indices of the elements
sorted_indices = np.fliplr(np.argsort(pred_result, axis=1))
return sorted_indices, pred_result[:, sorted_indices]
def process(self, x, return_all=False, INTENT_RANKING_LENGTH=5):
"""Returns the most likely intent and
its probability for the input text."""
if not self.model:
print("no class")
intent = None
intent_ranking = []
else:
intents, probabilities = self.predict_proba([x])
intents = [self.model.classes_[intent]
for intent in intents.flatten()]
probabilities = probabilities.flatten()
if len(intents) > 0 and len(probabilities) > 0:
ranking = list(zip(list(intents), list(probabilities)))
ranking = ranking[:INTENT_RANKING_LENGTH]
intent = {"intent": intents[0], "confidence": probabilities[0]}
intent_ranking = [{"intent": intent_name, "confidence": score}
for intent_name, score in ranking]
else:
intent = {"name": None, "confidence": 0.0}
intent_ranking = []
if return_all:
return intent_ranking
else:
return intent
|
import haffmpeg.sensor as ffmpeg_sensor
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.components.ffmpeg import (
CONF_EXTRA_ARGUMENTS,
CONF_INITIAL_STATE,
CONF_INPUT,
DATA_FFMPEG,
FFmpegBase,
)
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
CONF_RESET = "reset"
CONF_CHANGES = "changes"
CONF_REPEAT = "repeat"
CONF_REPEAT_TIME = "repeat_time"
DEFAULT_NAME = "FFmpeg Motion"
DEFAULT_INIT_STATE = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_INPUT): cv.string,
vol.Optional(CONF_INITIAL_STATE, default=DEFAULT_INIT_STATE): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_EXTRA_ARGUMENTS): cv.string,
vol.Optional(CONF_RESET, default=10): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_CHANGES, default=10): vol.All(
vol.Coerce(float), vol.Range(min=0, max=99)
),
vol.Inclusive(CONF_REPEAT, "repeat"): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Inclusive(CONF_REPEAT_TIME, "repeat"): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
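# Illustrative configuration.yaml entry for this platform (hypothetical values;
# the platform key is assumed to be "ffmpeg_motion"):
#
#     binary_sensor:
#       - platform: ffmpeg_motion
#         input: rtsp://example.local/stream
#         changes: 10
#         reset: 20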
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the FFmpeg binary motion sensor."""
manager = hass.data[DATA_FFMPEG]
entity = FFmpegMotion(hass, manager, config)
async_add_entities([entity])
class FFmpegBinarySensor(FFmpegBase, BinarySensorEntity):
"""A binary sensor which use FFmpeg for noise detection."""
def __init__(self, config):
"""Init for the binary sensor noise detection."""
super().__init__(config.get(CONF_INITIAL_STATE))
self._state = False
self._config = config
self._name = config.get(CONF_NAME)
@callback
def _async_callback(self, state):
"""HA-FFmpeg callback for noise detection."""
self._state = state
self.async_write_ha_state()
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def name(self):
"""Return the name of the entity."""
return self._name
class FFmpegMotion(FFmpegBinarySensor):
"""A binary sensor which use FFmpeg for noise detection."""
def __init__(self, hass, manager, config):
"""Initialize FFmpeg motion binary sensor."""
super().__init__(config)
self.ffmpeg = ffmpeg_sensor.SensorMotion(
manager.binary, hass.loop, self._async_callback
)
async def _async_start_ffmpeg(self, entity_ids):
"""Start a FFmpeg instance.
This method is a coroutine.
"""
if entity_ids is not None and self.entity_id not in entity_ids:
return
# init config
self.ffmpeg.set_options(
time_reset=self._config.get(CONF_RESET),
time_repeat=self._config.get(CONF_REPEAT_TIME, 0),
repeat=self._config.get(CONF_REPEAT, 0),
changes=self._config.get(CONF_CHANGES),
)
# run
await self.ffmpeg.open_sensor(
input_source=self._config.get(CONF_INPUT),
extra_cmd=self._config.get(CONF_EXTRA_ARGUMENTS),
)
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return DEVICE_CLASS_MOTION
|
import logging
import blebox_uniapi
import pytest
from homeassistant.components.air_quality import ATTR_PM_0_1, ATTR_PM_2_5, ATTR_PM_10
from homeassistant.const import ATTR_ICON, STATE_UNKNOWN
from .conftest import async_setup_entity, mock_feature
from tests.async_mock import AsyncMock, PropertyMock
@pytest.fixture(name="airsensor")
def airsensor_fixture():
"""Return a default air quality fixture."""
feature = mock_feature(
"air_qualities",
blebox_uniapi.air_quality.AirQuality,
unique_id="BleBox-airSensor-1afe34db9437-0.air",
full_name="airSensor-0.air",
device_class=None,
pm1=None,
pm2_5=None,
pm10=None,
)
product = feature.product
type(product).name = PropertyMock(return_value="My air sensor")
type(product).model = PropertyMock(return_value="airSensor")
return (feature, "air_quality.airsensor_0_air")
async def test_init(airsensor, hass, config):
"""Test airSensor default state."""
_, entity_id = airsensor
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-airSensor-1afe34db9437-0.air"
state = hass.states.get(entity_id)
assert state.name == "airSensor-0.air"
assert ATTR_PM_0_1 not in state.attributes
assert ATTR_PM_2_5 not in state.attributes
assert ATTR_PM_10 not in state.attributes
assert state.attributes[ATTR_ICON] == "mdi:blur"
assert state.state == STATE_UNKNOWN
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My air sensor"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "airSensor"
assert device.sw_version == "1.23"
async def test_update(airsensor, hass, config):
"""Test air quality sensor state after update."""
feature_mock, entity_id = airsensor
def initial_update():
feature_mock.pm1 = 49
feature_mock.pm2_5 = 222
feature_mock.pm10 = 333
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_PM_0_1] == 49
assert state.attributes[ATTR_PM_2_5] == 222
assert state.attributes[ATTR_PM_10] == 333
assert state.state == "222"
async def test_update_failure(airsensor, hass, config, caplog):
"""Test that update failures are logged."""
caplog.set_level(logging.ERROR)
feature_mock, entity_id = airsensor
feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError)
await async_setup_entity(hass, config, entity_id)
assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text
|
from __future__ import unicode_literals
import os
import random
from core.BASE import get_base_dic
from core.CHAR import get_char_dic
from core.CHUNK import get_chunk_dic
from core.EXTEND import get_extend_dic
from core.SEDB import SEDB
from lib.data.data import paths, pyoptions
from lib.data.text import pydictor_art_text
from lib.fun.fun import cool
from lib.parse.argsparse import plug_parser, conf_parser, pattern_parser, tool_parser
from lib.parse.command import parse_args
from lib.parse.tricksparse import sedb_tricks
def init():
args = parse_args()
pyoptions.leetmode_code = args.leet
if not (args.len[0] == pyoptions.minlen and args.len[1] == pyoptions.maxlen):
pyoptions.args_pick = True
if pyoptions.leetmode_code:
pyoptions.extend_leet = True
pyoptions.passcraper_leet = True
pyoptions.sedb_leet = True
paths.results_path = os.path.abspath(args.output) \
if '\\' in args.output or '/' in args.output else os.path.join(paths.results_path, args.output)
pyoptions.head = args.head
pyoptions.tail = args.tail
pyoptions.encode = args.encode
pyoptions.minlen = args.len[0]
pyoptions.maxlen = args.len[1]
pyoptions.letter_occur = args.occur[0]
pyoptions.digital_occur = args.occur[1]
pyoptions.special_occur = args.occur[2]
if pyoptions.default_occur * 3 != pyoptions.letter_occur + pyoptions.digital_occur + pyoptions.special_occur:
pyoptions.occur_is_filter = True
pyoptions.letter_types = args.types[0]
pyoptions.digital_types = args.types[1]
pyoptions.special_types = args.types[2]
if pyoptions.default_types * 3 != pyoptions.letter_types + pyoptions.digital_types + pyoptions.special_types:
pyoptions.types_is_filter = True
pyoptions.letter_repeat = args.repeat[0]
pyoptions.digital_repeat = args.repeat[1]
pyoptions.special_repeat = args.repeat[2]
if pyoptions.default_repeat * 3 != pyoptions.letter_repeat + pyoptions.digital_repeat + pyoptions.special_repeat:
pyoptions.repeat_is_filter = True
if pyoptions.filter_regex != args.regex:
pyoptions.regex_is_filter = True
pyoptions.filter_regex = args.regex
if args.more:
pyoptions.more = True
else:
pyoptions.more = False
if args.dmy:
pyoptions.ymd_format = False
pyoptions.args_base = args.base
pyoptions.args_char = args.char
pyoptions.args_chunk = args.chunk
pyoptions.args_extend = args.extend
pyoptions.args_plug = args.plug
pyoptions.args_sedb = args.sedb
pyoptions.args_conf = args.conf
pyoptions.args_pattern = args.pattern
pyoptions.args_tool = args.tool
pyoptions.level = args.level
try:
if os.path.isfile(paths.results_path):
tmp_dirpath, tmp_filename = os.path.split(paths.results_path)
if not os.path.isdir(tmp_dirpath):
os.makedirs(tmp_dirpath)
paths.results_path = tmp_dirpath
paths.results_file_name = ''.join(random.sample('pydictor', 4)) + '_' + tmp_filename
elif not os.path.isdir(paths.results_path):
tmp_dirpath, tmp_filename = os.path.split(paths.results_path)
if '.' in tmp_filename:
if not os.path.isdir(tmp_dirpath):
os.makedirs(tmp_dirpath)
paths.results_path = tmp_dirpath
paths.results_file_name = tmp_filename
else:
if not os.path.isdir(paths.results_path):
os.makedirs(paths.results_path)
    except OSError:
        exit(pyoptions.CRLF + cool.red("[-] Cannot create result file: %s " % paths.results_path))
if __name__ == '__main__':
print("{}".format(cool.green(pydictor_art_text)))
init()
if pyoptions.args_base:
get_base_dic(pyoptions.args_base)
elif pyoptions.args_char:
get_char_dic(pyoptions.args_char)
elif pyoptions.args_chunk:
get_chunk_dic(pyoptions.args_chunk)
elif pyoptions.args_extend:
get_extend_dic(pyoptions.args_extend)
elif pyoptions.args_plug:
plug_parser()
elif pyoptions.args_sedb:
try:
sedb_tricks()
shell = SEDB()
shell.cmdloop()
except Exception as e:
exit(e)
elif pyoptions.args_conf != 'default':
conf_parser()
elif pyoptions.args_pattern != 'default':
pattern_parser()
elif pyoptions.args_tool:
tool_parser()
|
import pytest
from xarray.backends.api import _get_default_engine
from . import requires_netCDF4, requires_scipy
@requires_netCDF4
@requires_scipy
def test__get_default_engine():
engine_remote = _get_default_engine("http://example.org/test.nc", allow_remote=True)
assert engine_remote == "netcdf4"
engine_gz = _get_default_engine("/example.gz")
assert engine_gz == "scipy"
with pytest.raises(ValueError):
_get_default_engine("/example.grib")
engine_default = _get_default_engine("/example")
assert engine_default == "netcdf4"
|
from __future__ import division, print_function
from urwid.widget import (Text, WidgetWrap, delegate_to_widget_mixin, BOX,
FLOW)
from urwid.canvas import CompositeCanvas
from urwid.signals import connect_signal
from urwid.container import Columns, Overlay
from urwid.util import is_mouse_press
from urwid.text_layout import calc_coords
from urwid.signals import disconnect_signal # doctests
from urwid.split_repr import python3_repr
from urwid.decoration import WidgetDecoration
from urwid.command_map import ACTIVATE
class SelectableIcon(Text):
ignore_focus = False
_selectable = True
def __init__(self, text, cursor_position=0):
"""
:param text: markup for this widget; see :class:`Text` for
description of text markup
:param cursor_position: position the cursor will appear in the
text when this widget is in focus
        This is a text widget that is selectable.  A cursor is
        displayed at a fixed location in the text when in focus.
This widget has no special handling of keyboard or mouse input.
"""
self.__super.__init__(text)
self._cursor_position = cursor_position
def render(self, size, focus=False):
"""
Render the text content of this widget with a cursor when
in focus.
>>> si = SelectableIcon(u"[!]")
>>> si
<SelectableIcon selectable flow widget '[!]'>
>>> si.render((4,), focus=True).cursor
(0, 0)
>>> si = SelectableIcon("((*))", 2)
>>> si.render((8,), focus=True).cursor
(2, 0)
>>> si.render((2,), focus=True).cursor
(0, 1)
"""
c = self.__super.render(size, focus)
if focus:
# create a new canvas so we can add a cursor
c = CompositeCanvas(c)
c.cursor = self.get_cursor_coords(size)
return c
def get_cursor_coords(self, size):
"""
Return the position of the cursor if visible. This method
is required for widgets that display a cursor.
"""
if self._cursor_position > len(self.text):
return None
# find out where the cursor will be displayed based on
# the text layout
(maxcol,) = size
trans = self.get_line_translation(maxcol)
x, y = calc_coords(self.text, trans, self._cursor_position)
if maxcol <= x:
return None
return x, y
def keypress(self, size, key):
"""
No keys are handled by this widget. This method is
required for selectable widgets.
"""
return key
class CheckBoxError(Exception):
pass
class CheckBox(WidgetWrap):
def sizing(self):
return frozenset([FLOW])
states = {
True: SelectableIcon("[X]", 1),
False: SelectableIcon("[ ]", 1),
'mixed': SelectableIcon("[#]", 1) }
reserve_columns = 4
# allow users of this class to listen for change events
# sent when the state of this widget is modified
# (this variable is picked up by the MetaSignals metaclass)
signals = ["change", 'postchange']
def __init__(self, label, state=False, has_mixed=False,
on_state_change=None, user_data=None, checked_symbol=None):
"""
:param label: markup for check box label
:param state: False, True or "mixed"
:param has_mixed: True if "mixed" is a state to cycle through
:param on_state_change: shorthand for connect_signal()
function call for a single callback
:param user_data: user_data for on_state_change
        Signals supported: ``'change'``, ``'postchange'``
Register signal handler with::
urwid.connect_signal(check_box, 'change', callback, user_data)
where callback is callback(check_box, new_state [,user_data])
Unregister signal handlers with::
urwid.disconnect_signal(check_box, 'change', callback, user_data)
>>> CheckBox(u"Confirm")
<CheckBox selectable flow widget 'Confirm' state=False>
>>> CheckBox(u"Yogourt", "mixed", True)
<CheckBox selectable flow widget 'Yogourt' state='mixed'>
>>> cb = CheckBox(u"Extra onions", True)
>>> cb
<CheckBox selectable flow widget 'Extra onions' state=True>
>>> cb.render((20,), focus=True).text # ... = b in Python 3
[...'[X] Extra onions ']
"""
self.__super.__init__(None) # self.w set by set_state below
self._label = Text("")
self.has_mixed = has_mixed
self._state = None
if checked_symbol:
self.states[True] = SelectableIcon(u"[%s]" % checked_symbol, 1)
# The old way of listening for a change was to pass the callback
# in to the constructor. Just convert it to the new way:
if on_state_change:
connect_signal(self, 'change', on_state_change, user_data)
self.set_label(label)
self.set_state(state)
def _repr_words(self):
return self.__super._repr_words() + [
python3_repr(self.label)]
def _repr_attrs(self):
return dict(self.__super._repr_attrs(),
state=self.state)
def set_label(self, label):
"""
Change the check box label.
label -- markup for label. See Text widget for description
of text markup.
>>> cb = CheckBox(u"foo")
>>> cb
<CheckBox selectable flow widget 'foo' state=False>
>>> cb.set_label(('bright_attr', u"bar"))
>>> cb
<CheckBox selectable flow widget 'bar' state=False>
"""
self._label.set_text(label)
# no need to call self._invalidate(). WidgetWrap takes care of
# that when self.w changes
def get_label(self):
"""
Return label text.
>>> cb = CheckBox(u"Seriously")
>>> print(cb.get_label())
Seriously
>>> print(cb.label)
Seriously
>>> cb.set_label([('bright_attr', u"flashy"), u" normal"])
>>> print(cb.label) # only text is returned
flashy normal
"""
return self._label.text
label = property(get_label)
def set_state(self, state, do_callback=True):
"""
Set the CheckBox state.
state -- True, False or "mixed"
do_callback -- False to suppress signal from this change
>>> changes = []
>>> def callback_a(cb, state, user_data):
... changes.append("A %r %r" % (state, user_data))
>>> def callback_b(cb, state):
... changes.append("B %r" % state)
>>> cb = CheckBox('test', False, False)
>>> key1 = connect_signal(cb, 'change', callback_a, "user_a")
>>> key2 = connect_signal(cb, 'change', callback_b)
>>> cb.set_state(True) # both callbacks will be triggered
>>> cb.state
True
>>> disconnect_signal(cb, 'change', callback_a, "user_a")
>>> cb.state = False
>>> cb.state
False
>>> cb.set_state(True)
>>> cb.state
True
>>> cb.set_state(False, False) # don't send signal
>>> changes
["A True 'user_a'", 'B True', 'B False', 'B True']
"""
if self._state == state:
return
if state not in self.states:
raise CheckBoxError("%s Invalid state: %s" % (
repr(self), repr(state)))
# self._state is None is a special case when the CheckBox
# has just been created
old_state = self._state
if do_callback and old_state is not None:
self._emit('change', state)
self._state = state
# rebuild the display widget with the new state
self._w = Columns( [
('fixed', self.reserve_columns, self.states[state] ),
self._label ] )
self._w.focus_col = 0
if do_callback and old_state is not None:
self._emit('postchange', old_state)
def get_state(self):
"""Return the state of the checkbox."""
return self._state
state = property(get_state, set_state)
def keypress(self, size, key):
"""
Toggle state on 'activate' command.
>>> assert CheckBox._command_map[' '] == 'activate'
>>> assert CheckBox._command_map['enter'] == 'activate'
>>> size = (10,)
>>> cb = CheckBox('press me')
>>> cb.state
False
>>> cb.keypress(size, ' ')
>>> cb.state
True
>>> cb.keypress(size, ' ')
>>> cb.state
False
"""
if self._command_map[key] != ACTIVATE:
return key
self.toggle_state()
def toggle_state(self):
"""
Cycle to the next valid state.
>>> cb = CheckBox("3-state", has_mixed=True)
>>> cb.state
False
>>> cb.toggle_state()
>>> cb.state
True
>>> cb.toggle_state()
>>> cb.state
'mixed'
>>> cb.toggle_state()
>>> cb.state
False
"""
if self.state == False:
self.set_state(True)
elif self.state == True:
if self.has_mixed:
self.set_state('mixed')
else:
self.set_state(False)
elif self.state == 'mixed':
self.set_state(False)
def mouse_event(self, size, event, button, x, y, focus):
"""
Toggle state on button 1 press.
>>> size = (20,)
>>> cb = CheckBox("clickme")
>>> cb.state
False
>>> cb.mouse_event(size, 'mouse press', 1, 2, 0, True)
True
>>> cb.state
True
"""
if button != 1 or not is_mouse_press(event):
return False
self.toggle_state()
return True
class RadioButton(CheckBox):
states = {
True: SelectableIcon("(X)", 1),
False: SelectableIcon("( )", 1),
'mixed': SelectableIcon("(#)", 1) }
reserve_columns = 4
def __init__(self, group, label, state="first True",
on_state_change=None, user_data=None):
"""
:param group: list for radio buttons in same group
:param label: markup for radio button label
:param state: False, True, "mixed" or "first True"
:param on_state_change: shorthand for connect_signal()
function call for a single 'change' callback
:param user_data: user_data for on_state_change
This function will append the new radio button to group.
"first True" will set to True if group is empty.
        Signals supported: ``'change'``, ``'postchange'``
Register signal handler with::
urwid.connect_signal(radio_button, 'change', callback, user_data)
where callback is callback(radio_button, new_state [,user_data])
Unregister signal handlers with::
urwid.disconnect_signal(radio_button, 'change', callback, user_data)
>>> bgroup = [] # button group
>>> b1 = RadioButton(bgroup, u"Agree")
>>> b2 = RadioButton(bgroup, u"Disagree")
>>> len(bgroup)
2
>>> b1
<RadioButton selectable flow widget 'Agree' state=True>
>>> b2
<RadioButton selectable flow widget 'Disagree' state=False>
>>> b2.render((15,), focus=True).text # ... = b in Python 3
[...'( ) Disagree ']
"""
if state=="first True":
state = not group
self.group = group
self.__super.__init__(label, state, False, on_state_change,
user_data)
group.append(self)
def set_state(self, state, do_callback=True):
"""
Set the RadioButton state.
state -- True, False or "mixed"
do_callback -- False to suppress signal from this change
If state is True all other radio buttons in the same button
group will be set to False.
>>> bgroup = [] # button group
>>> b1 = RadioButton(bgroup, u"Agree")
>>> b2 = RadioButton(bgroup, u"Disagree")
>>> b3 = RadioButton(bgroup, u"Unsure")
>>> b1.state, b2.state, b3.state
(True, False, False)
>>> b2.set_state(True)
>>> b1.state, b2.state, b3.state
(False, True, False)
>>> def relabel_button(radio_button, new_state):
... radio_button.set_label(u"Think Harder!")
>>> key = connect_signal(b3, 'change', relabel_button)
>>> b3
<RadioButton selectable flow widget 'Unsure' state=False>
>>> b3.set_state(True) # this will trigger the callback
>>> b3
<RadioButton selectable flow widget 'Think Harder!' state=True>
"""
if self._state == state:
return
self.__super.set_state(state, do_callback)
# if we're clearing the state we don't have to worry about
# other buttons in the button group
if state is not True:
return
# clear the state of each other radio button
for cb in self.group:
if cb is self: continue
if cb._state:
cb.set_state(False)
def toggle_state(self):
"""
Set state to True.
>>> bgroup = [] # button group
>>> b1 = RadioButton(bgroup, "Agree")
>>> b2 = RadioButton(bgroup, "Disagree")
>>> b1.state, b2.state
(True, False)
>>> b2.toggle_state()
>>> b1.state, b2.state
(False, True)
>>> b2.toggle_state()
>>> b1.state, b2.state
(False, True)
"""
self.set_state(True)
class Button(WidgetWrap):
def sizing(self):
return frozenset([FLOW])
button_left = Text("<")
button_right = Text(">")
signals = ["click"]
def __init__(self, label, on_press=None, user_data=None):
"""
:param label: markup for button label
:param on_press: shorthand for connect_signal()
function call for a single callback
:param user_data: user_data for on_press
Signals supported: ``'click'``
Register signal handler with::
urwid.connect_signal(button, 'click', callback, user_data)
where callback is callback(button [,user_data])
Unregister signal handlers with::
urwid.disconnect_signal(button, 'click', callback, user_data)
>>> Button(u"Ok")
<Button selectable flow widget 'Ok'>
>>> b = Button("Cancel")
>>> b.render((15,), focus=True).text # ... = b in Python 3
[...'< Cancel >']
"""
self._label = SelectableIcon("", 0)
cols = Columns([
('fixed', 1, self.button_left),
self._label,
('fixed', 1, self.button_right)],
dividechars=1)
self.__super.__init__(cols)
# The old way of listening for a change was to pass the callback
# in to the constructor. Just convert it to the new way:
if on_press:
connect_signal(self, 'click', on_press, user_data)
self.set_label(label)
def _repr_words(self):
# include button.label in repr(button)
return self.__super._repr_words() + [
python3_repr(self.label)]
def set_label(self, label):
"""
Change the button label.
label -- markup for button label
>>> b = Button("Ok")
>>> b.set_label(u"Yup yup")
>>> b
<Button selectable flow widget 'Yup yup'>
"""
self._label.set_text(label)
def get_label(self):
"""
Return label text.
>>> b = Button(u"Ok")
>>> print(b.get_label())
Ok
>>> print(b.label)
Ok
"""
return self._label.text
label = property(get_label)
def keypress(self, size, key):
"""
Send 'click' signal on 'activate' command.
>>> assert Button._command_map[' '] == 'activate'
>>> assert Button._command_map['enter'] == 'activate'
>>> size = (15,)
>>> b = Button(u"Cancel")
>>> clicked_buttons = []
>>> def handle_click(button):
... clicked_buttons.append(button.label)
>>> key = connect_signal(b, 'click', handle_click)
>>> b.keypress(size, 'enter')
>>> b.keypress(size, ' ')
>>> clicked_buttons # ... = u in Python 2
[...'Cancel', ...'Cancel']
"""
if self._command_map[key] != ACTIVATE:
return key
self._emit('click')
def mouse_event(self, size, event, button, x, y, focus):
"""
Send 'click' signal on button 1 press.
>>> size = (15,)
>>> b = Button(u"Ok")
>>> clicked_buttons = []
>>> def handle_click(button):
... clicked_buttons.append(button.label)
>>> key = connect_signal(b, 'click', handle_click)
>>> b.mouse_event(size, 'mouse press', 1, 4, 0, True)
True
>>> b.mouse_event(size, 'mouse press', 2, 4, 0, True) # ignored
False
>>> clicked_buttons # ... = u in Python 2
[...'Ok']
"""
if button != 1 or not is_mouse_press(event):
return False
self._emit('click')
return True
class PopUpLauncher(delegate_to_widget_mixin('_original_widget'),
WidgetDecoration):
def __init__(self, original_widget):
self.__super.__init__(original_widget)
self._pop_up_widget = None
def create_pop_up(self):
"""
Subclass must override this method and return a widget
to be used for the pop-up. This method is called once each time
the pop-up is opened.
"""
raise NotImplementedError("Subclass must override this method")
def get_pop_up_parameters(self):
"""
        Subclass must override this method and have it return a dict, e.g.:
{'left':0, 'top':1, 'overlay_width':30, 'overlay_height':4}
This method is called each time this widget is rendered.
"""
raise NotImplementedError("Subclass must override this method")
def open_pop_up(self):
self._pop_up_widget = self.create_pop_up()
self._invalidate()
def close_pop_up(self):
self._pop_up_widget = None
self._invalidate()
def render(self, size, focus=False):
canv = self.__super.render(size, focus)
if self._pop_up_widget:
canv = CompositeCanvas(canv)
canv.set_pop_up(self._pop_up_widget, **self.get_pop_up_parameters())
return canv
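# Editor's note: the class below is an illustrative sketch, not part of the
# original urwid source. It shows the usual PopUpLauncher pattern: wrap a
# trigger widget, build the pop-up in create_pop_up(), and position it via
# get_pop_up_parameters(). The class name is hypothetical, the sketch assumes
# Filler is importable from urwid.decoration as in recent releases, and
# rendering the pop-up also requires a main loop created with pop_ups=True.
class _ExamplePopUpLauncher(PopUpLauncher):
    def __init__(self):
        self.__super.__init__(Button("click to open pop-up"))
        # open the pop-up whenever the wrapped button is activated
        connect_signal(self.original_widget, 'click',
            lambda button: self.open_pop_up())
    def create_pop_up(self):
        # the pop-up must be a box widget, so wrap the flow widget in a Filler
        from urwid.decoration import Filler
        close_button = Button("close")
        connect_signal(close_button, 'click',
            lambda button: self.close_pop_up())
        return Filler(close_button)
    def get_pop_up_parameters(self):
        # one row just below the launcher, 16 columns wide
        return {'left': 0, 'top': 1, 'overlay_width': 16, 'overlay_height': 1}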
class PopUpTarget(WidgetDecoration):
# FIXME: this whole class is a terrible hack and must be fixed
# when layout and rendering are separated
_sizing = set([BOX])
_selectable = True
def __init__(self, original_widget):
self.__super.__init__(original_widget)
self._pop_up = None
self._current_widget = self._original_widget
def _update_overlay(self, size, focus):
canv = self._original_widget.render(size, focus=focus)
self._cache_original_canvas = canv # imperfect performance hack
pop_up = canv.get_pop_up()
if pop_up:
left, top, (
w, overlay_width, overlay_height) = pop_up
if self._pop_up != w:
self._pop_up = w
self._current_widget = Overlay(w, self._original_widget,
('fixed left', left), overlay_width,
('fixed top', top), overlay_height)
else:
self._current_widget.set_overlay_parameters(
('fixed left', left), overlay_width,
('fixed top', top), overlay_height)
else:
self._pop_up = None
self._current_widget = self._original_widget
def render(self, size, focus=False):
self._update_overlay(size, focus)
return self._current_widget.render(size, focus=focus)
def get_cursor_coords(self, size):
self._update_overlay(size, True)
return self._current_widget.get_cursor_coords(size)
def get_pref_col(self, size):
self._update_overlay(size, True)
return self._current_widget.get_pref_col(size)
def keypress(self, size, key):
self._update_overlay(size, True)
return self._current_widget.keypress(size, key)
def move_cursor_to_coords(self, size, x, y):
self._update_overlay(size, True)
return self._current_widget.move_cursor_to_coords(size, x, y)
def mouse_event(self, size, event, button, x, y, focus):
self._update_overlay(size, focus)
return self._current_widget.mouse_event(size, event, button, x, y, focus)
def pack(self, size=None, focus=False):
self._update_overlay(size, focus)
return self._current_widget.pack(size)
def _test():
import doctest
doctest.testmod()
if __name__=='__main__':
_test()
|
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
CONF_ACCESSPOINT,
CONF_AUTHTOKEN,
DOMAIN,
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
)
from .generic_entity import HomematicipGenericEntity # noqa: F401
from .hap import HomematicipAuth, HomematicipHAP # noqa: F401
from .services import async_setup_services, async_unload_services
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Optional(CONF_NAME, default=""): vol.Any(cv.string),
vol.Required(CONF_ACCESSPOINT): cv.string,
vol.Required(CONF_AUTHTOKEN): cv.string,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
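# Editor's note: an illustrative configuration.yaml entry matching the schema
# above. The key names follow the imported constants (DOMAIN, CONF_ACCESSPOINT,
# CONF_AUTHTOKEN, CONF_NAME) and are stated here as assumptions; the values are
# placeholders.
#
#   homematicip_cloud:
#     - name: "Home"
#       accesspoint: 3014F711A0000000000000XX
#       authtoken: YOUR_AUTH_TOKEN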
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the HomematicIP Cloud component."""
hass.data[DOMAIN] = {}
accesspoints = config.get(DOMAIN, [])
for conf in accesspoints:
if conf[CONF_ACCESSPOINT] not in {
entry.data[HMIPC_HAPID]
for entry in hass.config_entries.async_entries(DOMAIN)
}:
hass.async_add_job(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
HMIPC_HAPID: conf[CONF_ACCESSPOINT],
HMIPC_AUTHTOKEN: conf[CONF_AUTHTOKEN],
HMIPC_NAME: conf[CONF_NAME],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up an access point from a config entry."""
# 0.104 introduced config entry unique id, this makes upgrading possible
if entry.unique_id is None:
new_data = dict(entry.data)
hass.config_entries.async_update_entry(
entry, unique_id=new_data[HMIPC_HAPID], data=new_data
)
hap = HomematicipHAP(hass, entry)
hass.data[DOMAIN][entry.unique_id] = hap
if not await hap.async_setup():
return False
await async_setup_services(hass)
# Register on HA stop event to gracefully shutdown HomematicIP Cloud connection
hap.reset_connection_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, hap.shutdown
)
# Register hap as device in registry.
device_registry = await dr.async_get_registry(hass)
home = hap.home
# Add the HAP name from configuration if set.
hapname = home.label if not home.name else f"{home.name} {home.label}"
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, home.id)},
manufacturer="eQ-3",
name=hapname,
model=home.modelType,
sw_version=home.currentAPVersion,
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
hap = hass.data[DOMAIN].pop(entry.unique_id)
hap.reset_connection_listener()
await async_unload_services(hass)
return await hap.async_reset()
|
import datetime
import pytest
from queue import Empty
from unittest.mock import MagicMock, call, patch
from kombu import Connection
pymongo = pytest.importorskip('pymongo')
def _create_mock_connection(url='', **kwargs):
from kombu.transport import mongodb # noqa
class _Channel(mongodb.Channel):
# reset _fanout_queues for each instance
_fanout_queues = {}
collections = {}
now = datetime.datetime.utcnow()
def _create_client(self):
mock = MagicMock(name='client')
# we need new mock object for every collection
def get_collection(name):
try:
return self.collections[name]
except KeyError:
mock = self.collections[name] = MagicMock(
name='collection:%s' % name)
return mock
mock.__getitem__.side_effect = get_collection
return mock
def get_now(self):
return self.now
class Transport(mongodb.Transport):
Channel = _Channel
return Connection(url, transport=Transport, **kwargs)
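# Editor's note: illustrative usage of the helper above, not part of the
# original test module. Every collection accessed through the mocked client
# is a separate MagicMock, so a test can stub results per collection:
#
#   conn = _create_mock_connection('mongodb://localhost/dbname')
#   channel = conn.default_channel
#   channel.client['messages'].find_and_modify.return_value = {
#       '_id': 'docId', 'payload': '{"some": "data"}',
#   }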
class test_mongodb_uri_parsing:
def test_defaults(self):
url = 'mongodb://'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert dbname == 'kombu_default'
assert hostname == 'mongodb://127.0.0.1'
def test_custom_host(self):
url = 'mongodb://localhost'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert dbname == 'kombu_default'
def test_custom_database(self):
url = 'mongodb://localhost/dbname'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert dbname == 'dbname'
def test_custom_credentials(self):
url = 'mongodb://localhost/dbname'
channel = _create_mock_connection(
url, userid='foo', password='bar').default_channel
hostname, dbname, options = channel._parse_uri()
assert hostname == 'mongodb://foo:bar@localhost/dbname'
assert dbname == 'dbname'
def test_correct_readpreference(self):
url = 'mongodb://localhost/dbname?readpreference=nearest'
channel = _create_mock_connection(url).default_channel
hostname, dbname, options = channel._parse_uri()
assert options['readpreference'] == 'nearest'
class BaseMongoDBChannelCase:
def _get_method(self, cname, mname):
collection = getattr(self.channel, cname)
method = getattr(collection, mname.split('.', 1)[0])
for bit in mname.split('.')[1:]:
method = getattr(method.return_value, bit)
return method
def set_operation_return_value(self, cname, mname, *values):
method = self._get_method(cname, mname)
if len(values) == 1:
method.return_value = values[0]
else:
method.side_effect = values
    def declare_broadcast_queue(self, queue):
self.channel.exchange_declare('fanout_exchange', type='fanout')
self.channel._queue_bind('fanout_exchange', 'foo', '*', queue)
assert queue in self.channel._broadcast_cursors
def get_broadcast(self, queue):
return self.channel._broadcast_cursors[queue]
def set_broadcast_return_value(self, queue, *values):
        self.declare_broadcast_queue(queue)
cursor = MagicMock(name='cursor')
cursor.__iter__.return_value = iter(values)
self.channel._broadcast_cursors[queue]._cursor = iter(cursor)
def assert_collection_accessed(self, *collections):
self.channel.client.__getitem__.assert_has_calls(
[call(c) for c in collections], any_order=True)
def assert_operation_has_calls(self, cname, mname, calls, any_order=False):
method = self._get_method(cname, mname)
method.assert_has_calls(calls, any_order=any_order)
def assert_operation_called_with(self, cname, mname, *args, **kwargs):
self.assert_operation_has_calls(cname, mname, [call(*args, **kwargs)])
class test_mongodb_channel(BaseMongoDBChannelCase):
def setup(self):
self.connection = _create_mock_connection()
self.channel = self.connection.default_channel
# Tests for "public" channel interface
def test_new_queue(self):
self.channel._new_queue('foobar')
self.channel.client.assert_not_called()
def test_get(self):
self.set_operation_return_value('messages', 'find_and_modify', {
'_id': 'docId', 'payload': '{"some": "data"}',
})
event = self.channel._get('foobar')
self.assert_collection_accessed('messages')
self.assert_operation_called_with(
'messages', 'find_and_modify',
query={'queue': 'foobar'},
remove=True,
sort=[
('priority', pymongo.ASCENDING),
],
)
assert event == {'some': 'data'}
self.set_operation_return_value('messages', 'find_and_modify', None)
with pytest.raises(Empty):
self.channel._get('foobar')
def test_get_fanout(self):
self.set_broadcast_return_value('foobar', {
'_id': 'docId1', 'payload': '{"some": "data"}',
})
event = self.channel._get('foobar')
self.assert_collection_accessed('messages.broadcast')
assert event == {'some': 'data'}
with pytest.raises(Empty):
self.channel._get('foobar')
def test_put(self):
self.channel._put('foobar', {'some': 'data'})
self.assert_collection_accessed('messages')
self.assert_operation_called_with('messages', 'insert', {
'queue': 'foobar',
'priority': 9,
'payload': '{"some": "data"}',
})
def test_put_fanout(self):
        self.declare_broadcast_queue('foobar')
self.channel._put_fanout('foobar', {'some': 'data'}, 'foo')
self.assert_collection_accessed('messages.broadcast')
self.assert_operation_called_with('broadcast', 'insert', {
'queue': 'foobar', 'payload': '{"some": "data"}',
})
def test_size(self):
self.set_operation_return_value('messages', 'find.count', 77)
result = self.channel._size('foobar')
self.assert_collection_accessed('messages')
self.assert_operation_called_with(
'messages', 'find', {'queue': 'foobar'},
)
assert result == 77
def test_size_fanout(self):
        self.declare_broadcast_queue('foobar')
cursor = MagicMock(name='cursor')
cursor.get_size.return_value = 77
self.channel._broadcast_cursors['foobar'] = cursor
result = self.channel._size('foobar')
assert result == 77
def test_purge(self):
self.set_operation_return_value('messages', 'find.count', 77)
result = self.channel._purge('foobar')
self.assert_collection_accessed('messages')
self.assert_operation_called_with(
'messages', 'remove', {'queue': 'foobar'},
)
assert result == 77
def test_purge_fanout(self):
        self.declare_broadcast_queue('foobar')
cursor = MagicMock(name='cursor')
cursor.get_size.return_value = 77
self.channel._broadcast_cursors['foobar'] = cursor
result = self.channel._purge('foobar')
cursor.purge.assert_any_call()
assert result == 77
def test_get_table(self):
state_table = [('foo', '*', 'foo')]
stored_table = [('bar', '*', 'bar')]
self.channel.exchange_declare('test_exchange')
self.channel.state.exchanges['test_exchange']['table'] = state_table
self.set_operation_return_value('routing', 'find', [{
'_id': 'docId',
'routing_key': stored_table[0][0],
'pattern': stored_table[0][1],
'queue': stored_table[0][2],
}])
result = self.channel.get_table('test_exchange')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'find', {'exchange': 'test_exchange'},
)
assert set(result) == frozenset(state_table) | frozenset(stored_table)
def test_queue_bind(self):
self.channel._queue_bind('test_exchange', 'foo', '*', 'foo')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange'},
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange'},
upsert=True,
)
def test_queue_delete(self):
self.channel.queue_delete('foobar')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'remove', {'queue': 'foobar'},
)
def test_queue_delete_fanout(self):
        self.declare_broadcast_queue('foobar')
cursor = MagicMock(name='cursor')
self.channel._broadcast_cursors['foobar'] = cursor
self.channel.queue_delete('foobar')
cursor.close.assert_any_call()
assert 'foobar' not in self.channel._broadcast_cursors
assert 'foobar' not in self.channel._fanout_queues
# Tests for channel internals
def test_create_broadcast(self):
self.channel._create_broadcast(self.channel.client)
self.channel.client.create_collection.assert_called_with(
'messages.broadcast', capped=True, size=100000,
)
def test_ensure_indexes(self):
self.channel._ensure_indexes(self.channel.client)
self.assert_operation_called_with(
'messages', 'ensure_index',
[('queue', 1), ('priority', 1), ('_id', 1)],
background=True,
)
self.assert_operation_called_with(
'broadcast', 'ensure_index',
[('queue', 1)],
)
self.assert_operation_called_with(
'routing', 'ensure_index', [('queue', 1), ('exchange', 1)],
)
def test_create_broadcast_cursor(self):
with patch.object(pymongo, 'version_tuple', (2, )):
self.channel._create_broadcast_cursor(
'fanout_exchange', 'foo', '*', 'foobar',
)
self.assert_collection_accessed('messages.broadcast')
self.assert_operation_called_with(
'broadcast', 'find',
tailable=True,
query={'queue': 'fanout_exchange'},
)
if pymongo.version_tuple >= (3, ):
self.channel._create_broadcast_cursor(
'fanout_exchange1', 'foo', '*', 'foobar',
)
self.assert_collection_accessed('messages.broadcast')
self.assert_operation_called_with(
'broadcast', 'find',
cursor_type=pymongo.CursorType.TAILABLE,
filter={'queue': 'fanout_exchange1'},
)
def test_open_rc_version(self):
def server_info(self):
return {'version': '3.6.0-rc'}
with patch.object(pymongo.MongoClient, 'server_info', server_info):
self.channel._open()
class test_mongodb_channel_ttl(BaseMongoDBChannelCase):
def setup(self):
self.connection = _create_mock_connection(
transport_options={'ttl': True},
)
self.channel = self.connection.default_channel
self.expire_at = (
self.channel.get_now() + datetime.timedelta(milliseconds=777))
# Tests
def test_new_queue(self):
self.channel._new_queue('foobar')
self.assert_operation_called_with(
'queues', 'update',
{'_id': 'foobar'},
{'_id': 'foobar', 'options': {}, 'expire_at': None},
upsert=True,
)
def test_get(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
self.set_operation_return_value('messages', 'find_and_modify', {
'_id': 'docId', 'payload': '{"some": "data"}',
})
self.channel._get('foobar')
self.assert_collection_accessed('messages', 'messages.queues')
self.assert_operation_called_with(
'messages', 'find_and_modify',
query={'queue': 'foobar'},
remove=True,
sort=[
('priority', pymongo.ASCENDING),
],
)
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foobar'},
{'$set': {'expire_at': self.expire_at}},
multi=True,
)
def test_put(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-message-ttl': 777}},
})
self.channel._put('foobar', {'some': 'data'})
self.assert_collection_accessed('messages')
self.assert_operation_called_with('messages', 'insert', {
'queue': 'foobar',
'priority': 9,
'payload': '{"some": "data"}',
'expire_at': self.expire_at,
})
def test_queue_bind(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
self.channel._queue_bind('test_exchange', 'foo', '*', 'foo')
self.assert_collection_accessed('messages.routing')
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange'},
{'queue': 'foo', 'pattern': '*',
'routing_key': 'foo', 'exchange': 'test_exchange',
'expire_at': self.expire_at},
upsert=True,
)
def test_queue_delete(self):
self.channel.queue_delete('foobar')
self.assert_collection_accessed('messages.queues')
self.assert_operation_called_with(
'queues', 'remove', {'_id': 'foobar'})
def test_ensure_indexes(self):
self.channel._ensure_indexes(self.channel.client)
self.assert_operation_called_with(
'messages', 'ensure_index', [('expire_at', 1)],
expireAfterSeconds=0)
self.assert_operation_called_with(
'routing', 'ensure_index', [('expire_at', 1)],
expireAfterSeconds=0)
self.assert_operation_called_with(
'queues', 'ensure_index', [('expire_at', 1)], expireAfterSeconds=0)
def test_get_expire(self):
result = self.channel._get_expire(
{'arguments': {'x-expires': 777}}, 'x-expires')
self.channel.client.assert_not_called()
assert result == self.expire_at
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
result = self.channel._get_expire('foobar', 'x-expires')
assert result == self.expire_at
def test_update_queues_expire(self):
self.set_operation_return_value('queues', 'find_one', {
'_id': 'docId', 'options': {'arguments': {'x-expires': 777}},
})
self.channel._update_queues_expire('foobar')
self.assert_collection_accessed('messages.routing', 'messages.queues')
self.assert_operation_called_with(
'routing', 'update',
{'queue': 'foobar'},
{'$set': {'expire_at': self.expire_at}},
multi=True,
)
self.assert_operation_called_with(
'queues', 'update',
{'_id': 'foobar'},
{'$set': {'expire_at': self.expire_at}},
multi=True,
)
class test_mongodb_channel_calc_queue_size(BaseMongoDBChannelCase):
def setup(self):
self.connection = _create_mock_connection(
transport_options={'calc_queue_size': False})
self.channel = self.connection.default_channel
self.expire_at = (
self.channel.get_now() + datetime.timedelta(milliseconds=777))
# Tests
def test_size(self):
self.set_operation_return_value('messages', 'find.count', 77)
result = self.channel._size('foobar')
self.assert_operation_has_calls('messages', 'find', [])
assert result == 0
|
from homeassistant.components.homekit import get_accessory
from homeassistant.components.homekit.const import (
DEVICE_CLASS_MOTION,
PROP_CELSIUS,
THRESHOLD_CO,
THRESHOLD_CO2,
)
from homeassistant.components.homekit.type_sensors import (
BINARY_SENSOR_SERVICE_MAP,
AirQualitySensor,
BinarySensor,
CarbonDioxideSensor,
CarbonMonoxideSensor,
HumiditySensor,
LightSensor,
TemperatureSensor,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
EVENT_HOMEASSISTANT_START,
PERCENTAGE,
STATE_HOME,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry
async def test_temperature(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.temperature"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = TemperatureSensor(hass, hk_driver, "Temperature", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_temp.value == 0.0
for key, value in PROP_CELSIUS.items():
assert acc.char_temp.properties[key] == value
hass.states.async_set(
entity_id, STATE_UNKNOWN, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
assert acc.char_temp.value == 0.0
hass.states.async_set(entity_id, "20", {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS})
await hass.async_block_till_done()
assert acc.char_temp.value == 20
hass.states.async_set(
entity_id, "75.2", {ATTR_UNIT_OF_MEASUREMENT: TEMP_FAHRENHEIT}
)
await hass.async_block_till_done()
assert acc.char_temp.value == 24
async def test_humidity(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.humidity"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = HumiditySensor(hass, hk_driver, "Humidity", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_humidity.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_humidity.value == 0
hass.states.async_set(entity_id, "20")
await hass.async_block_till_done()
assert acc.char_humidity.value == 20
async def test_air_quality(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.air_quality"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = AirQualitySensor(hass, hk_driver, "Air Quality", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_density.value == 0
assert acc.char_quality.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_density.value == 0
assert acc.char_quality.value == 0
hass.states.async_set(entity_id, "34")
await hass.async_block_till_done()
assert acc.char_density.value == 34
assert acc.char_quality.value == 1
hass.states.async_set(entity_id, "200")
await hass.async_block_till_done()
assert acc.char_density.value == 200
assert acc.char_quality.value == 5
async def test_co(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.co"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = CarbonMonoxideSensor(hass, hk_driver, "CO", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
value = 32
assert value > THRESHOLD_CO
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 32
assert acc.char_peak.value == 32
assert acc.char_detected.value == 1
value = 10
assert value < THRESHOLD_CO
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 10
assert acc.char_peak.value == 32
assert acc.char_detected.value == 0
async def test_co2(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.co2"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = CarbonDioxideSensor(hass, hk_driver, "CO2", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_level.value == 0
assert acc.char_peak.value == 0
assert acc.char_detected.value == 0
value = 1100
assert value > THRESHOLD_CO2
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 1100
assert acc.char_peak.value == 1100
assert acc.char_detected.value == 1
value = 800
assert value < THRESHOLD_CO2
hass.states.async_set(entity_id, str(value))
await hass.async_block_till_done()
assert acc.char_level.value == 800
assert acc.char_peak.value == 1100
assert acc.char_detected.value == 0
async def test_light(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "sensor.light"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = LightSensor(hass, hk_driver, "Light", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_light.value == 0.0001
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_light.value == 0.0001
hass.states.async_set(entity_id, "300")
await hass.async_block_till_done()
assert acc.char_light.value == 300
async def test_binary(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "binary_sensor.opening"
hass.states.async_set(entity_id, STATE_UNKNOWN, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
acc = BinarySensor(hass, hk_driver, "Window Opening", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_ON, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 1
hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 0
hass.states.async_set(entity_id, STATE_HOME, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 1
hass.states.async_set(entity_id, STATE_NOT_HOME, {ATTR_DEVICE_CLASS: "opening"})
await hass.async_block_till_done()
assert acc.char_detected.value == 0
hass.states.async_remove(entity_id)
await hass.async_block_till_done()
assert acc.char_detected.value == 0
async def test_motion_uses_bool(hass, hk_driver):
"""Test if accessory is updated after state change."""
entity_id = "binary_sensor.motion"
hass.states.async_set(
entity_id, STATE_UNKNOWN, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
acc = BinarySensor(hass, hk_driver, "Motion Sensor", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 10 # Sensor
assert acc.char_detected.value is False
hass.states.async_set(entity_id, STATE_ON, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION})
await hass.async_block_till_done()
assert acc.char_detected.value is True
hass.states.async_set(
entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
assert acc.char_detected.value is False
hass.states.async_set(
entity_id, STATE_HOME, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
assert acc.char_detected.value is True
hass.states.async_set(
entity_id, STATE_NOT_HOME, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
)
await hass.async_block_till_done()
assert acc.char_detected.value is False
hass.states.async_remove(entity_id)
await hass.async_block_till_done()
assert acc.char_detected.value is False
async def test_binary_device_classes(hass, hk_driver):
"""Test if services and characteristics are assigned correctly."""
entity_id = "binary_sensor.demo"
for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items():
hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class})
await hass.async_block_till_done()
acc = BinarySensor(hass, hk_driver, "Binary Sensor", entity_id, 2, None)
assert acc.get_service(service).display_name == service
assert acc.char_detected.display_name == char
async def test_sensor_restore(hass, hk_driver, events):
"""Test setting up an entity from state in the event registry."""
hass.state = CoreState.not_running
registry = await entity_registry.async_get_registry(hass)
registry.async_get_or_create(
"sensor",
"generic",
"1234",
suggested_object_id="temperature",
device_class="temperature",
)
registry.async_get_or_create(
"sensor",
"generic",
"12345",
suggested_object_id="humidity",
device_class="humidity",
unit_of_measurement=PERCENTAGE,
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
await hass.async_block_till_done()
acc = get_accessory(hass, hk_driver, hass.states.get("sensor.temperature"), 2, {})
assert acc.category == 10
acc = get_accessory(hass, hk_driver, hass.states.get("sensor.humidity"), 2, {})
assert acc.category == 10
|