import asyncio
import functools
import json
import logging
import re
import time
from homeassistant.components import mqtt
from homeassistant.const import CONF_DEVICE, CONF_PLATFORM
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import async_get_mqtt
from .abbreviations import ABBREVIATIONS, DEVICE_ABBREVIATIONS
from .const import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_PAYLOAD,
ATTR_DISCOVERY_TOPIC,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
TOPIC_MATCHER = re.compile(
r"(?P<component>\w+)/(?:(?P<node_id>[a-zA-Z0-9_-]+)/)"
r"?(?P<object_id>[a-zA-Z0-9_-]+)/config"
)
SUPPORTED_COMPONENTS = [
"alarm_control_panel",
"binary_sensor",
"camera",
"climate",
"cover",
"device_automation",
"fan",
"light",
"lock",
"sensor",
"switch",
"tag",
"vacuum",
]
ALREADY_DISCOVERED = "mqtt_discovered_components"
CONFIG_ENTRY_IS_SETUP = "mqtt_config_entry_is_setup"
DATA_CONFIG_ENTRY_LOCK = "mqtt_config_entry_lock"
DATA_CONFIG_FLOW_LOCK = "mqtt_discovery_config_flow_lock"
DISCOVERY_UNSUBSCRIBE = "mqtt_discovery_unsubscribe"
INTEGRATION_UNSUBSCRIBE = "mqtt_integration_discovery_unsubscribe"
MQTT_DISCOVERY_UPDATED = "mqtt_discovery_updated_{}"
MQTT_DISCOVERY_NEW = "mqtt_discovery_new_{}_{}"
LAST_DISCOVERY = "mqtt_last_discovery"
TOPIC_BASE = "~"
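# Illustrative note (not part of the module): abbreviated keys and the "~" base
# topic are expanded in async_entity_message_received() below. For example, a
# hypothetical payload
#   {"~": "home/kitchen/light", "stat_t": "~/state", "cmd_t": "~/set"}
# would become
#   {"state_topic": "home/kitchen/light/state", "command_topic": "home/kitchen/light/set"}
# assuming ABBREVIATIONS maps "stat_t"/"cmd_t" to their long-form keys.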
def clear_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
del hass.data[ALREADY_DISCOVERED][discovery_hash]
def set_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
class MQTTConfig(dict):
"""Dummy class to allow adding attributes."""
async def async_start(
hass: HomeAssistantType, discovery_topic, config_entry=None
) -> bool:
"""Start MQTT Discovery."""
mqtt_integrations = {}
async def async_entity_message_received(msg):
"""Process the received message."""
hass.data[LAST_DISCOVERY] = time.time()
payload = msg.payload
topic = msg.topic
topic_trimmed = topic.replace(f"{discovery_topic}/", "", 1)
match = TOPIC_MATCHER.match(topic_trimmed)
if not match:
return
component, node_id, object_id = match.groups()
if component not in SUPPORTED_COMPONENTS:
_LOGGER.warning("Integration %s is not supported", component)
return
if payload:
try:
payload = json.loads(payload)
except ValueError:
_LOGGER.warning("Unable to parse JSON %s: '%s'", object_id, payload)
return
payload = MQTTConfig(payload)
for key in list(payload):
abbreviated_key = key
key = ABBREVIATIONS.get(key, key)
payload[key] = payload.pop(abbreviated_key)
if CONF_DEVICE in payload:
device = payload[CONF_DEVICE]
for key in list(device):
abbreviated_key = key
key = DEVICE_ABBREVIATIONS.get(key, key)
device[key] = device.pop(abbreviated_key)
if TOPIC_BASE in payload:
base = payload.pop(TOPIC_BASE)
for key, value in payload.items():
if isinstance(value, str) and value:
if value[0] == TOPIC_BASE and key.endswith("topic"):
payload[key] = f"{base}{value[1:]}"
if value[-1] == TOPIC_BASE and key.endswith("topic"):
payload[key] = f"{value[:-1]}{base}"
# If present, the node_id will be included in the discovered object id
discovery_id = " ".join((node_id, object_id)) if node_id else object_id
discovery_hash = (component, discovery_id)
if payload:
# Attach MQTT topic to the payload, used for debug prints
setattr(payload, "__configuration_source__", f"MQTT (topic: '{topic}')")
discovery_data = {
ATTR_DISCOVERY_HASH: discovery_hash,
ATTR_DISCOVERY_PAYLOAD: payload,
ATTR_DISCOVERY_TOPIC: topic,
}
setattr(payload, "discovery_data", discovery_data)
payload[CONF_PLATFORM] = "mqtt"
if ALREADY_DISCOVERED not in hass.data:
hass.data[ALREADY_DISCOVERED] = {}
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
# Dispatch update
_LOGGER.info(
"Component has already been discovered: %s %s, sending update",
component,
discovery_id,
)
async_dispatcher_send(
hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload
)
elif payload:
# Add component
_LOGGER.info("Found new component: %s %s", component, discovery_id)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
config_entries_key = f"{component}.mqtt"
async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
if component == "device_automation":
# Local import to avoid circular dependencies
# pylint: disable=import-outside-toplevel
from . import device_automation
await device_automation.async_setup_entry(hass, config_entry)
elif component == "tag":
# Local import to avoid circular dependencies
# pylint: disable=import-outside-toplevel
from . import tag
await tag.async_setup_entry(hass, config_entry)
else:
await hass.config_entries.async_forward_entry_setup(
config_entry, component
)
hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async_dispatcher_send(
hass, MQTT_DISCOVERY_NEW.format(component, "mqtt"), payload
)
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[DATA_CONFIG_FLOW_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
hass.data[DISCOVERY_UNSUBSCRIBE] = await mqtt.async_subscribe(
hass, f"{discovery_topic}/#", async_entity_message_received, 0
)
hass.data[LAST_DISCOVERY] = time.time()
mqtt_integrations = await async_get_mqtt(hass)
hass.data[INTEGRATION_UNSUBSCRIBE] = {}
for (integration, topics) in mqtt_integrations.items():
async def async_integration_message_received(integration, msg):
"""Process the received message."""
key = f"{integration}_{msg.subscribed_topic}"
# Lock to prevent initiating many parallel config flows.
# Note: The lock is not intended to prevent a race, only for performance
async with hass.data[DATA_CONFIG_FLOW_LOCK]:
# Already unsubscribed
if key not in hass.data[INTEGRATION_UNSUBSCRIBE]:
return
result = await hass.config_entries.flow.async_init(
integration, context={"source": DOMAIN}, data=msg
)
if (
result
and result["type"] == "abort"
and result["reason"]
in ["already_configured", "single_instance_allowed"]
):
unsub = hass.data[INTEGRATION_UNSUBSCRIBE].pop(key, None)
if unsub is None:
return
unsub()
for topic in topics:
key = f"{integration}_{topic}"
hass.data[INTEGRATION_UNSUBSCRIBE][key] = await mqtt.async_subscribe(
hass,
topic,
functools.partial(async_integration_message_received, integration),
0,
)
return True
async def async_stop(hass: HomeAssistantType) -> bool:
"""Stop MQTT Discovery."""
if DISCOVERY_UNSUBSCRIBE in hass.data and hass.data[DISCOVERY_UNSUBSCRIBE]:
hass.data[DISCOVERY_UNSUBSCRIBE]()
hass.data[DISCOVERY_UNSUBSCRIBE] = None
if INTEGRATION_UNSUBSCRIBE in hass.data:
for key, unsub in list(hass.data[INTEGRATION_UNSUBSCRIBE].items()):
unsub()
hass.data[INTEGRATION_UNSUBSCRIBE].pop(key)
|
import json
import logging
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Text,
distinct,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm.session import Session
from homeassistant.core import Context, Event, EventOrigin, State, split_entity_id
from homeassistant.helpers.json import JSONEncoder
import homeassistant.util.dt as dt_util
# SQLAlchemy Schema
# pylint: disable=invalid-name
Base = declarative_base()
SCHEMA_VERSION = 9
_LOGGER = logging.getLogger(__name__)
DB_TIMEZONE = "+00:00"
TABLE_EVENTS = "events"
TABLE_STATES = "states"
TABLE_RECORDER_RUNS = "recorder_runs"
TABLE_SCHEMA_CHANGES = "schema_changes"
ALL_TABLES = [TABLE_EVENTS, TABLE_STATES, TABLE_RECORDER_RUNS, TABLE_SCHEMA_CHANGES]
class Events(Base): # type: ignore
"""Event history data."""
__tablename__ = TABLE_EVENTS
event_id = Column(Integer, primary_key=True)
event_type = Column(String(32))
event_data = Column(Text)
origin = Column(String(32))
time_fired = Column(DateTime(timezone=True), index=True)
created = Column(DateTime(timezone=True), default=dt_util.utcnow)
context_id = Column(String(36), index=True)
context_user_id = Column(String(36), index=True)
context_parent_id = Column(String(36), index=True)
__table_args__ = (
# Used for fetching events at a specific time
# see logbook
Index("ix_events_event_type_time_fired", "event_type", "time_fired"),
)
@staticmethod
def from_event(event, event_data=None):
"""Create an event database object from a native event."""
return Events(
event_type=event.event_type,
event_data=event_data or json.dumps(event.data, cls=JSONEncoder),
origin=str(event.origin.value),
time_fired=event.time_fired,
context_id=event.context.id,
context_user_id=event.context.user_id,
context_parent_id=event.context.parent_id,
)
def to_native(self, validate_entity_id=True):
"""Convert to a natve HA Event."""
context = Context(
id=self.context_id,
user_id=self.context_user_id,
parent_id=self.context_parent_id,
)
try:
return Event(
self.event_type,
json.loads(self.event_data),
EventOrigin(self.origin),
process_timestamp(self.time_fired),
context=context,
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting to event: %s", self)
return None
class States(Base): # type: ignore
"""State change history."""
__tablename__ = TABLE_STATES
state_id = Column(Integer, primary_key=True)
domain = Column(String(64))
entity_id = Column(String(255))
state = Column(String(255))
attributes = Column(Text)
event_id = Column(Integer, ForeignKey("events.event_id"), index=True)
last_changed = Column(DateTime(timezone=True), default=dt_util.utcnow)
last_updated = Column(DateTime(timezone=True), default=dt_util.utcnow, index=True)
created = Column(DateTime(timezone=True), default=dt_util.utcnow)
old_state_id = Column(Integer, ForeignKey("states.state_id"))
event = relationship("Events", uselist=False)
old_state = relationship("States", remote_side=[state_id])
__table_args__ = (
# Used for fetching the state of entities at a specific time
# (get_states in history.py)
Index("ix_states_entity_id_last_updated", "entity_id", "last_updated"),
)
@staticmethod
def from_event(event):
"""Create object from a state_changed event."""
entity_id = event.data["entity_id"]
state = event.data.get("new_state")
dbstate = States(entity_id=entity_id)
# State got deleted
if state is None:
dbstate.state = ""
dbstate.domain = split_entity_id(entity_id)[0]
dbstate.attributes = "{}"
dbstate.last_changed = event.time_fired
dbstate.last_updated = event.time_fired
else:
dbstate.domain = state.domain
dbstate.state = state.state
dbstate.attributes = json.dumps(dict(state.attributes), cls=JSONEncoder)
dbstate.last_changed = state.last_changed
dbstate.last_updated = state.last_updated
return dbstate
def to_native(self, validate_entity_id=True):
"""Convert to an HA state object."""
try:
return State(
self.entity_id,
self.state,
json.loads(self.attributes),
process_timestamp(self.last_changed),
process_timestamp(self.last_updated),
# Join the events table on event_id to get the context instead
# as it will always be there for state_changed events
context=Context(id=None),
validate_entity_id=validate_entity_id,
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting row to state: %s", self)
return None
class RecorderRuns(Base): # type: ignore
"""Representation of recorder run."""
__tablename__ = TABLE_RECORDER_RUNS
run_id = Column(Integer, primary_key=True)
start = Column(DateTime(timezone=True), default=dt_util.utcnow)
end = Column(DateTime(timezone=True))
closed_incorrect = Column(Boolean, default=False)
created = Column(DateTime(timezone=True), default=dt_util.utcnow)
__table_args__ = (Index("ix_recorder_runs_start_end", "start", "end"),)
def entity_ids(self, point_in_time=None):
"""Return the entity ids that existed in this run.
Specify point_in_time if you want to know which existed at that point
in time inside the run.
"""
session = Session.object_session(self)
assert session is not None, "RecorderRuns need to be persisted"
query = session.query(distinct(States.entity_id)).filter(
States.last_updated >= self.start
)
if point_in_time is not None:
query = query.filter(States.last_updated < point_in_time)
elif self.end is not None:
query = query.filter(States.last_updated < self.end)
return [row[0] for row in query]
def to_native(self, validate_entity_id=True):
"""Return self, native format is this model."""
return self
class SchemaChanges(Base): # type: ignore
"""Representation of schema version changes."""
__tablename__ = TABLE_SCHEMA_CHANGES
change_id = Column(Integer, primary_key=True)
schema_version = Column(Integer)
changed = Column(DateTime(timezone=True), default=dt_util.utcnow)
def process_timestamp(ts):
"""Process a timestamp into datetime object."""
if ts is None:
return None
if ts.tzinfo is None:
return ts.replace(tzinfo=dt_util.UTC)
return dt_util.as_utc(ts)
def process_timestamp_to_utc_isoformat(ts):
"""Process a timestamp into UTC isotime."""
if ts is None:
return None
if ts.tzinfo == dt_util.UTC:
return ts.isoformat()
if ts.tzinfo is None:
return f"{ts.isoformat()}{DB_TIMEZONE}"
return ts.astimezone(dt_util.UTC).isoformat()
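# Minimal sketch (not part of the schema) of the timestamp helpers above,
# assuming naive datetimes are stored as UTC:
#   process_timestamp(datetime(2020, 1, 1, 12, 0))
#       -> datetime(2020, 1, 1, 12, 0, tzinfo=dt_util.UTC)
#   process_timestamp_to_utc_isoformat(datetime(2020, 1, 1, 12, 0))
#       -> "2020-01-01T12:00:00+00:00"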
|
from datetime import timedelta
import logging
import voluptuous as vol
from volvooncall import Connection
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
DOMAIN = "volvooncall"
DATA_KEY = DOMAIN
_LOGGER = logging.getLogger(__name__)
MIN_UPDATE_INTERVAL = timedelta(minutes=1)
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=1)
CONF_REGION = "region"
CONF_SERVICE_URL = "service_url"
CONF_SCANDINAVIAN_MILES = "scandinavian_miles"
CONF_MUTABLE = "mutable"
SIGNAL_STATE_UPDATED = f"{DOMAIN}.updated"
COMPONENTS = {
"sensor": "sensor",
"binary_sensor": "binary_sensor",
"lock": "lock",
"device_tracker": "device_tracker",
"switch": "switch",
}
RESOURCES = [
"position",
"lock",
"heater",
"odometer",
"trip_meter1",
"trip_meter2",
"average_speed",
"fuel_amount",
"fuel_amount_level",
"average_fuel_consumption",
"distance_to_empty",
"washer_fluid_level",
"brake_fluid",
"service_warning_status",
"bulb_failures",
"battery_range",
"battery_level",
"time_to_fully_charged",
"battery_charge_status",
"engine_start",
"last_trip",
"is_engine_running",
"doors_hood_open",
"doors_tailgate_open",
"doors_front_left_door_open",
"doors_front_right_door_open",
"doors_rear_left_door_open",
"doors_rear_right_door_open",
"windows_front_left_window_open",
"windows_front_right_window_open",
"windows_rear_left_window_open",
"windows_rear_right_window_open",
"tyre_pressure_front_left_tyre_pressure",
"tyre_pressure_front_right_tyre_pressure",
"tyre_pressure_rear_left_tyre_pressure",
"tyre_pressure_rear_right_tyre_pressure",
"any_door_open",
"any_window_open",
]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_UPDATE_INTERVAL
): vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)),
vol.Optional(CONF_NAME, default={}): cv.schema_with_slug_keys(
cv.string
),
vol.Optional(CONF_RESOURCES): vol.All(
cv.ensure_list, [vol.In(RESOURCES)]
),
vol.Optional(CONF_REGION): cv.string,
vol.Optional(CONF_SERVICE_URL): cv.string,
vol.Optional(CONF_MUTABLE, default=True): cv.boolean,
vol.Optional(CONF_SCANDINAVIAN_MILES, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
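# Example configuration.yaml entry accepted by CONFIG_SCHEMA above (illustrative
# values only):
#
#   volvooncall:
#     username: !secret voc_username
#     password: !secret voc_password
#     scan_interval:
#       minutes: 5
#     resources:
#       - odometer
#       - lock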
async def async_setup(hass, config):
"""Set up the Volvo On Call component."""
session = async_get_clientsession(hass)
connection = Connection(
session=session,
username=config[DOMAIN].get(CONF_USERNAME),
password=config[DOMAIN].get(CONF_PASSWORD),
service_url=config[DOMAIN].get(CONF_SERVICE_URL),
region=config[DOMAIN].get(CONF_REGION),
)
interval = config[DOMAIN][CONF_SCAN_INTERVAL]
data = hass.data[DATA_KEY] = VolvoData(config)
def is_enabled(attr):
"""Return true if the user has enabled the resource."""
return attr in config[DOMAIN].get(CONF_RESOURCES, [attr])
def discover_vehicle(vehicle):
"""Load relevant platforms."""
data.vehicles.add(vehicle.vin)
dashboard = vehicle.dashboard(
mutable=config[DOMAIN][CONF_MUTABLE],
scandinavian_miles=config[DOMAIN][CONF_SCANDINAVIAN_MILES],
)
for instrument in (
instrument
for instrument in dashboard.instruments
if instrument.component in COMPONENTS and is_enabled(instrument.slug_attr)
):
data.instruments.add(instrument)
hass.async_create_task(
discovery.async_load_platform(
hass,
COMPONENTS[instrument.component],
DOMAIN,
(vehicle.vin, instrument.component, instrument.attr),
config,
)
)
async def update(now):
"""Update status from the online service."""
try:
if not await connection.update(journal=True):
_LOGGER.warning("Could not query server")
return False
for vehicle in connection.vehicles:
if vehicle.vin not in data.vehicles:
discover_vehicle(vehicle)
async_dispatcher_send(hass, SIGNAL_STATE_UPDATED)
return True
finally:
async_track_point_in_utc_time(hass, update, utcnow() + interval)
_LOGGER.info("Logging in to service")
return await update(utcnow())
class VolvoData:
"""Hold component state."""
def __init__(self, config):
"""Initialize the component state."""
self.vehicles = set()
self.instruments = set()
self.config = config[DOMAIN]
self.names = self.config.get(CONF_NAME)
def instrument(self, vin, component, attr):
"""Return corresponding instrument."""
return next(
(
instrument
for instrument in self.instruments
if instrument.vehicle.vin == vin
and instrument.component == component
and instrument.attr == attr
),
None,
)
def vehicle_name(self, vehicle):
"""Provide a friendly name for a vehicle."""
if (
vehicle.registration_number and vehicle.registration_number.lower()
) in self.names:
return self.names[vehicle.registration_number.lower()]
if vehicle.vin and vehicle.vin.lower() in self.names:
return self.names[vehicle.vin.lower()]
if vehicle.registration_number:
return vehicle.registration_number
if vehicle.vin:
return vehicle.vin
return ""
class VolvoEntity(Entity):
"""Base class for all VOC entities."""
def __init__(self, data, vin, component, attribute):
"""Initialize the entity."""
self.data = data
self.vin = vin
self.component = component
self.attribute = attribute
async def async_added_to_hass(self):
"""Register update dispatcher."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_STATE_UPDATED, self.async_write_ha_state
)
)
@property
def instrument(self):
"""Return corresponding instrument."""
return self.data.instrument(self.vin, self.component, self.attribute)
@property
def icon(self):
"""Return the icon."""
return self.instrument.icon
@property
def vehicle(self):
"""Return vehicle."""
return self.instrument.vehicle
@property
def _entity_name(self):
return self.instrument.name
@property
def _vehicle_name(self):
return self.data.vehicle_name(self.vehicle)
@property
def name(self):
"""Return full name of the entity."""
return f"{self._vehicle_name} {self._entity_name}"
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return dict(
self.instrument.attributes,
model=f"{self.vehicle.vehicle_type}/{self.vehicle.model_year}",
)
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.vin}-{self.component}-{self.attribute}"
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import tensorflow as tf
summary = tf.contrib.summary # TensorFlow Summary API v2.
TpuSummaryEntry = collections.namedtuple(
"TpuSummaryEntry", "summary_fn name tensor reduce_fn")
class TpuSummaries(object):
"""Class to simplify TF summaries on TPU.
  An instance of the class provides simple methods for writing summaries in a
similar way to tf.summary. The difference is that each summary entry must
provide a reduction function that is used to reduce the summary values from
all the TPU cores.
"""
def __init__(self, log_dir, save_summary_steps=250):
self._log_dir = log_dir
self._entries = []
# While False no summary entries will be added. On TPU we unroll the graph
# and don't want to add multiple summaries per step.
self.record = True
self._save_summary_steps = save_summary_steps
def image(self, name, tensor, reduce_fn):
"""Add a summary for images. Tensor must be of 4-D tensor."""
if not self.record:
return
self._entries.append(
TpuSummaryEntry(summary.image, name, tensor, reduce_fn))
def scalar(self, name, tensor, reduce_fn=tf.math.reduce_mean):
"""Add a summary for a scalar tensor."""
if not self.record:
return
tensor = tf.convert_to_tensor(tensor)
if tensor.shape.ndims == 0:
tensor = tf.expand_dims(tensor, 0)
self._entries.append(
TpuSummaryEntry(summary.scalar, name, tensor, reduce_fn))
def get_host_call(self):
"""Returns the tuple (host_call_fn, host_call_args) for TPUEstimatorSpec."""
# All host_call_args must be tensors with batch dimension.
    # All tensors are streamed to the host machine (mind the bandwidth).
global_step = tf.train.get_or_create_global_step()
host_call_args = [tf.expand_dims(global_step, 0)]
host_call_args.extend([e.tensor for e in self._entries])
logging.info("host_call_args: %s", host_call_args)
return (self._host_call_fn, host_call_args)
def _host_call_fn(self, step, *args):
"""Function that will run on the host machine."""
# Host call receives values from all tensor cores (concatenate on the
# batch dimension). Step is the same for all cores.
step = step[0]
logging.info("host_call_fn: args=%s", args)
with summary.create_file_writer(self._log_dir).as_default():
with summary.record_summaries_every_n_global_steps(
self._save_summary_steps, step):
for i, e in enumerate(self._entries):
value = e.reduce_fn(args[i])
e.summary_fn(e.name, value, step=step)
return summary.all_summary_ops()
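# Minimal usage sketch (assumes a TF 1.x tf.contrib environment and a
# TPUEstimator model_fn; variable names are illustrative):
#
#   tpu_summaries = TpuSummaries(log_dir="/tmp/model")
#   tpu_summaries.scalar("loss", loss)
#   host_call = tpu_summaries.get_host_call()
#   spec = tf.contrib.tpu.TPUEstimatorSpec(
#       mode=mode, loss=loss, train_op=train_op, host_call=host_call)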
|
import uuid
P_TYPES = ['ONE', 'TWO']
T_TYPES = ['NO', 'V20']
def to_bool(value):
return value.lower() in ('true', '1')
def allowed_tax(value):
return value if value.upper() in T_TYPES else 'NO'
def allowed_types(value):
return value if value.upper() in P_TYPES else 'ONE'
def none_to_zero(value):
return 0 if value in (None, "") else value
def empty_str_to_null(value):
return None if value == '' else value
product_schema = {
'uuid': {
'type': 'string',
'required': True,
'nullable': False,
        'default_setter': lambda x: str(uuid.uuid4()),
},
'name': {
'type': 'string',
'minlength': 1,
'maxlength': 100,
'required': True,
'nullable': False,
'default': 'noname product',
},
'group': {
'type': 'boolean',
'required': False,
'nullable': False,
'default': False,
},
'parentUuid': {
'type': 'string',
'required': False,
'nullable': True,
'default': None,
},
'hasVariants': {
'type': 'boolean',
'required': False,
'nullable': False,
'default': False,
},
'type': {
'type': 'string',
'required': True,
'allowed': P_TYPES,
'nullable': False,
'default': 'ONE',
'coerce': allowed_types,
},
'quantity': {
'type': 'float',
'required': True,
'nullable': False,
'default': 0,
'coerce': (none_to_zero, float),
},
'measureName': {
'type': 'string',
'required': True,
'nullable': False,
'default': '',
'coerce': str,
},
'tax': {
'type': 'string',
'required': True,
'allowed': T_TYPES,
'nullable': False,
'default': 'NO',
'coerce': allowed_tax,
},
'price': {
'type': 'float',
'required': True,
'min': 0,
'max': 9999999.99,
'nullable': False,
'default': 0,
'coerce': (none_to_zero, float),
},
'allowToSell': {
'type': 'boolean',
'required': True,
'nullable': False,
'default': True,
'coerce': (str, to_bool),
},
'costPrice': {
'type': 'float',
'min': 0,
'max': 9999999.99,
'required': True,
'nullable': False,
'default': 0,
'coerce': (none_to_zero, float),
},
'description': {
'type': 'string',
'minlength': 0,
'required': False,
'nullable': True,
'default': '',
'coerce': str,
},
'articleNumber': {
'type': 'string',
'minlength': 0,
'maxlength': 20,
'required': False,
'nullable': True,
'coerce': (empty_str_to_null, lambda s: str(s)[0:19]),
'default': '',
},
'code': {
'type': 'string',
'minlength': 0,
'maxlength': 10,
'required': True,
'coerce': (empty_str_to_null, lambda s: str(s)[0:9]),
'default': '',
},
'barCodes': {'type': 'list', 'required': True, 'nullable': True, 'default': []},
'alcoCodes': {'type': 'list', 'required': True, 'nullable': True, 'default': []},
'alcoholByVolume': {
'type': ['float', 'string'],
'required': True,
'nullable': True,
'default': None,
'coerce': (empty_str_to_null, float),
},
'alcoholProductKindCode': {
'type': ['float', 'string'],
'required': True,
'nullable': True,
'default': None,
'coerce': (empty_str_to_null, int),
},
'tareVolume': {
'type': ['float', 'string'],
'required': True,
'nullable': True,
'default': None,
'coerce': (empty_str_to_null, float),
},
}
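# Hedged usage sketch: normalizing a document against product_schema with the
# Cerberus library (assumed to be available; not imported by this module):
#
#   from cerberus import Validator
#   v = Validator(product_schema)
#   doc = v.normalized({'name': 'Milk', 'price': '42.5', 'quantity': None})
#   # -> defaults filled in, a fresh 'uuid' generated, price coerced to 42.5,
#   #    quantity coerced to 0.0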
|
import asyncio
from functools import partial
from typing import Optional
from urllib.parse import urlparse
from pyisy import ISY
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import ConfigType
from .const import (
_LOGGER,
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DEFAULT_IGNORE_STRING,
DEFAULT_RESTORE_LIGHT_STATE,
DEFAULT_SENSOR_STRING,
DEFAULT_VAR_SENSOR_STRING,
DOMAIN,
ISY994_ISY,
ISY994_NODES,
ISY994_PROGRAMS,
ISY994_VARIABLES,
MANUFACTURER,
SUPPORTED_PLATFORMS,
SUPPORTED_PROGRAM_PLATFORMS,
UNDO_UPDATE_LISTENER,
)
from .helpers import _categorize_nodes, _categorize_programs, _categorize_variables
from .services import async_setup_services, async_unload_services
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TLS_VER): vol.Coerce(float),
vol.Optional(
CONF_IGNORE_STRING, default=DEFAULT_IGNORE_STRING
): cv.string,
vol.Optional(
CONF_SENSOR_STRING, default=DEFAULT_SENSOR_STRING
): cv.string,
vol.Optional(
CONF_VAR_SENSOR_STRING, default=DEFAULT_VAR_SENSOR_STRING
): cv.string,
vol.Required(
CONF_RESTORE_LIGHT_STATE, default=DEFAULT_RESTORE_LIGHT_STATE
): bool,
}
)
},
extra=vol.ALLOW_EXTRA,
)
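# Example configuration.yaml entry matching the schema above (illustrative values):
#
#   isy994:
#     host: http://192.168.1.10
#     username: admin
#     password: !secret isy_password
#     restore_light_state: false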
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the isy994 integration from YAML."""
isy_config: Optional[ConfigType] = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not isy_config:
return True
# Only import if we haven't before.
config_entry = _async_find_matching_config_entry(hass)
if not config_entry:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=dict(isy_config),
)
)
return True
# Update the entry based on the YAML configuration, in case it changed.
hass.config_entries.async_update_entry(config_entry, data=dict(isy_config))
return True
@callback
def _async_find_matching_config_entry(hass):
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.source == config_entries.SOURCE_IMPORT:
return entry
async def async_setup_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Set up the ISY 994 integration."""
# As there currently is no way to import options from yaml
    # when setting up a config entry, we fall back to adding
# the options to the config entry and pull them out here if
# they are missing from the options
_async_import_options_from_data_if_missing(hass, entry)
hass.data[DOMAIN][entry.entry_id] = {}
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
hass_isy_data[ISY994_NODES] = {}
for platform in SUPPORTED_PLATFORMS:
hass_isy_data[ISY994_NODES][platform] = []
hass_isy_data[ISY994_PROGRAMS] = {}
for platform in SUPPORTED_PROGRAM_PLATFORMS:
hass_isy_data[ISY994_PROGRAMS][platform] = []
hass_isy_data[ISY994_VARIABLES] = []
isy_config = entry.data
isy_options = entry.options
# Required
user = isy_config[CONF_USERNAME]
password = isy_config[CONF_PASSWORD]
host = urlparse(isy_config[CONF_HOST])
# Optional
tls_version = isy_config.get(CONF_TLS_VER)
ignore_identifier = isy_options.get(CONF_IGNORE_STRING, DEFAULT_IGNORE_STRING)
sensor_identifier = isy_options.get(CONF_SENSOR_STRING, DEFAULT_SENSOR_STRING)
variable_identifier = isy_options.get(
CONF_VAR_SENSOR_STRING, DEFAULT_VAR_SENSOR_STRING
)
if host.scheme == "http":
https = False
port = host.port or 80
elif host.scheme == "https":
https = True
port = host.port or 443
else:
_LOGGER.error("isy994 host value in configuration is invalid")
return False
# Connect to ISY controller.
isy = await hass.async_add_executor_job(
partial(
ISY,
host.hostname,
port,
username=user,
password=password,
use_https=https,
tls_ver=tls_version,
webroot=host.path,
)
)
if not isy.connected:
return False
# Trigger a status update for all nodes, not done automatically in PyISY v2.x
await hass.async_add_executor_job(isy.nodes.update)
_categorize_nodes(hass_isy_data, isy.nodes, ignore_identifier, sensor_identifier)
_categorize_programs(hass_isy_data, isy.programs)
_categorize_variables(hass_isy_data, isy.variables, variable_identifier)
# Dump ISY Clock Information. Future: Add ISY as sensor to Hass with attrs
_LOGGER.info(repr(isy.clock))
hass_isy_data[ISY994_ISY] = isy
await _async_get_or_create_isy_device_in_registry(hass, entry, isy)
# Load platforms for the devices in the ISY controller that we support.
for platform in SUPPORTED_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
def _start_auto_update() -> None:
"""Start isy auto update."""
_LOGGER.debug("ISY Starting Event Stream and automatic updates")
isy.auto_update = True
await hass.async_add_executor_job(_start_auto_update)
undo_listener = entry.add_update_listener(_async_update_listener)
hass_isy_data[UNDO_UPDATE_LISTENER] = undo_listener
# Register Integration-wide Services:
async_setup_services(hass)
return True
async def _async_update_listener(
hass: HomeAssistant, entry: config_entries.ConfigEntry
):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
@callback
def _async_import_options_from_data_if_missing(
hass: HomeAssistant, entry: config_entries.ConfigEntry
):
options = dict(entry.options)
modified = False
for importable_option in [
CONF_IGNORE_STRING,
CONF_SENSOR_STRING,
CONF_RESTORE_LIGHT_STATE,
]:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
async def _async_get_or_create_isy_device_in_registry(
hass: HomeAssistant, entry: config_entries.ConfigEntry, isy
) -> None:
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, isy.configuration["uuid"])},
identifiers={(DOMAIN, isy.configuration["uuid"])},
manufacturer=MANUFACTURER,
name=isy.configuration["name"],
model=isy.configuration["model"],
sw_version=isy.configuration["firmware"],
)
async def async_unload_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in SUPPORTED_PLATFORMS
]
)
)
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
isy = hass_isy_data[ISY994_ISY]
def _stop_auto_update() -> None:
"""Start isy auto update."""
_LOGGER.debug("ISY Stopping Event Stream and automatic updates")
isy.auto_update = False
await hass.async_add_executor_job(_stop_auto_update)
hass_isy_data[UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
async_unload_services(hass)
return unload_ok
|
from collections import OrderedDict
from os import SEEK_CUR, path as op
import pickle
import re
import numpy as np
from .constants import KIT, FIFF
from .._digitization import _make_dig_points
from ...transforms import (Transform, apply_trans, get_ras_to_neuromag_trans,
als_ras_trans)
from ...utils import warn, _check_option
INT32 = '<i4'
FLOAT64 = '<f8'
def read_mrk(fname):
r"""Marker Point Extraction in MEG space directly from sqd.
Parameters
----------
fname : str
Absolute path to Marker file.
File formats allowed: \*.sqd, \*.mrk, \*.txt, \*.pickled.
Returns
-------
mrk_points : ndarray, shape (n_points, 3)
Marker points in MEG space [m].
"""
ext = op.splitext(fname)[-1]
if ext in ('.sqd', '.mrk'):
with open(fname, 'rb', buffering=0) as fid:
fid.seek(192)
mrk_offset = np.fromfile(fid, INT32, 1)[0]
fid.seek(mrk_offset)
# skips match_done, meg_to_mri and mri_to_meg
fid.seek(KIT.INT + (2 * KIT.DOUBLE * 4 ** 2), SEEK_CUR)
mrk_count = np.fromfile(fid, INT32, 1)[0]
pts = []
for _ in range(mrk_count):
# skips mri/meg mrk_type and done, mri_marker
fid.seek(KIT.INT * 4 + (KIT.DOUBLE * 3), SEEK_CUR)
pts.append(np.fromfile(fid, dtype=FLOAT64, count=3))
mrk_points = np.array(pts)
elif ext == '.txt':
mrk_points = _read_dig_kit(fname, unit='m')
elif ext == '.pickled':
with open(fname, 'rb') as fid:
food = pickle.load(fid)
try:
mrk_points = food['mrk']
except Exception:
err = ("%r does not contain marker points." % fname)
raise ValueError(err)
else:
raise ValueError('KIT marker file must be *.sqd, *.mrk, *.txt or '
'*.pickled, *%s is not supported.' % ext)
# check output
mrk_points = np.asarray(mrk_points)
if mrk_points.shape != (5, 3):
err = ("%r is no marker file, shape is "
"%s" % (fname, mrk_points.shape))
raise ValueError(err)
return mrk_points
def read_sns(fname):
"""Sensor coordinate extraction in MEG space.
Parameters
----------
fname : str
Absolute path to sensor definition file.
Returns
-------
locs : numpy.array, shape = (n_points, 3)
Sensor coil location.
"""
p = re.compile(r'\d,[A-Za-z]*,([\.\-0-9]+),' +
r'([\.\-0-9]+),([\.\-0-9]+),' +
r'([\.\-0-9]+),([\.\-0-9]+)')
with open(fname) as fid:
locs = np.array(p.findall(fid.read()), dtype=float)
return locs
def _set_dig_kit(mrk, elp, hsp, eeg):
"""Add landmark points and head shape data to the KIT instance.
Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
ALS coordinate system. This is converted to [m].
Parameters
----------
mrk : None | str | array_like, shape (5, 3)
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more
        than 10,000 points are in the head shape, they are automatically
decimated.
eeg : dict
Ordered dict of EEG dig points.
Returns
-------
dig_points : list
List of digitizer points for info['dig'].
dev_head_t : dict
        A dictionary describing the device-head transformation.
"""
from ...coreg import fit_matched_points, _decimate_points
if isinstance(hsp, str):
hsp = _read_dig_kit(hsp)
n_pts = len(hsp)
if n_pts > KIT.DIG_POINTS:
hsp = _decimate_points(hsp, res=0.005)
n_new = len(hsp)
warn("The selected head shape contained {n_in} points, which is "
"more than recommended ({n_rec}), and was automatically "
"downsampled to {n_new} points. The preferred way to "
"downsample is using FastScan.".format(
n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new))
if isinstance(elp, str):
elp_points = _read_dig_kit(elp)
if len(elp_points) != 8:
raise ValueError("File %r should contain 8 points; got shape "
"%s." % (elp, elp_points.shape))
elp = elp_points
elif len(elp) not in (7, 8):
raise ValueError("ELP should contain 7 or 8 points; got shape "
"%s." % (elp.shape,))
if isinstance(mrk, str):
mrk = read_mrk(mrk)
mrk = apply_trans(als_ras_trans, mrk)
nasion, lpa, rpa = elp[:3]
nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
elp = apply_trans(nmtrans, elp)
hsp = apply_trans(nmtrans, hsp)
eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items())
# device head transform
trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
nasion, lpa, rpa = elp[:3]
elp = elp[3:]
dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg)
dev_head_t = Transform('meg', 'head', trans)
return dig_points, dev_head_t
def _read_dig_kit(fname, unit='auto'):
# Read dig points from a file and return ndarray, using FastSCAN for .txt
from ...channels.montage import (
read_polhemus_fastscan, read_dig_polhemus_isotrak, read_custom_montage,
_check_dig_shape)
assert unit in ('auto', 'm', 'mm')
_, ext = op.splitext(fname)
_check_option('file extension', ext[1:], ('hsp', 'elp', 'mat', 'txt'))
if ext == '.txt':
unit = 'mm' if unit == 'auto' else unit
out = read_polhemus_fastscan(fname, unit=unit,
on_header_missing='ignore')
elif ext in ('.hsp', '.elp'):
unit = 'm' if unit == 'auto' else unit
mon = read_dig_polhemus_isotrak(fname, unit=unit)
if fname.endswith('.hsp'):
dig = [d['r'] for d in mon.dig
if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
else:
dig = [d['r'] for d in mon.dig]
if dig and \
mon.dig[0]['kind'] == FIFF.FIFFV_POINT_CARDINAL and \
mon.dig[0]['ident'] == FIFF.FIFFV_POINT_LPA:
# LPA, Nasion, RPA -> NLR
dig[:3] = [dig[1], dig[0], dig[2]]
out = np.array(dig, float)
else:
assert ext == '.mat'
out = np.array([d['r'] for d in read_custom_montage(fname).dig])
_check_dig_shape(out)
return out
|
import logging
import requests
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_NAME, CONF_RESOURCE, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_FUNCTIONS = "functions"
CONF_PINS = "pins"
CONF_INVERT = "invert"
DEFAULT_NAME = "aREST switch"
PIN_FUNCTION_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PINS, default={}): vol.Schema(
{cv.string: PIN_FUNCTION_SCHEMA}
),
vol.Optional(CONF_FUNCTIONS, default={}): vol.Schema(
{cv.string: PIN_FUNCTION_SCHEMA}
),
}
)
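# Example platform configuration accepted by PLATFORM_SCHEMA above (illustrative):
#
#   switch:
#     - platform: arest
#       resource: http://192.168.1.50
#       pins:
#         "13":
#           name: Fan
#           invert: true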
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the aREST switches."""
resource = config[CONF_RESOURCE]
try:
response = requests.get(resource, timeout=10)
except requests.exceptions.MissingSchema:
_LOGGER.error(
"Missing resource or schema in configuration. Add http:// to your URL"
)
return False
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device at %s", resource)
return False
dev = []
pins = config[CONF_PINS]
for pinnum, pin in pins.items():
dev.append(
ArestSwitchPin(
resource,
config.get(CONF_NAME, response.json()[CONF_NAME]),
pin.get(CONF_NAME),
pinnum,
pin[CONF_INVERT],
)
)
functions = config[CONF_FUNCTIONS]
for funcname, func in functions.items():
dev.append(
ArestSwitchFunction(
resource,
config.get(CONF_NAME, response.json()[CONF_NAME]),
func.get(CONF_NAME),
funcname,
)
)
add_entities(dev)
class ArestSwitchBase(SwitchEntity):
"""Representation of an aREST switch."""
def __init__(self, resource, location, name):
"""Initialize the switch."""
self._resource = resource
self._name = f"{location.title()} {name.title()}"
self._state = None
self._available = True
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._available
class ArestSwitchFunction(ArestSwitchBase):
"""Representation of an aREST switch."""
def __init__(self, resource, location, name, func):
"""Initialize the switch."""
super().__init__(resource, location, name)
self._func = func
request = requests.get(f"{self._resource}/{self._func}", timeout=10)
if request.status_code != HTTP_OK:
_LOGGER.error("Can't find function")
return
try:
request.json()["return_value"]
except KeyError:
_LOGGER.error("No return_value received")
except ValueError:
_LOGGER.error("Response invalid")
def turn_on(self, **kwargs):
"""Turn the device on."""
request = requests.get(
f"{self._resource}/{self._func}", timeout=10, params={"params": "1"}
)
if request.status_code == HTTP_OK:
self._state = True
else:
_LOGGER.error("Can't turn on function %s at %s", self._func, self._resource)
def turn_off(self, **kwargs):
"""Turn the device off."""
request = requests.get(
f"{self._resource}/{self._func}", timeout=10, params={"params": "0"}
)
if request.status_code == HTTP_OK:
self._state = False
else:
_LOGGER.error(
"Can't turn off function %s at %s", self._func, self._resource
)
def update(self):
"""Get the latest data from aREST API and update the state."""
try:
request = requests.get(f"{self._resource}/{self._func}", timeout=10)
self._state = request.json()["return_value"] != 0
self._available = True
except requests.exceptions.ConnectionError:
_LOGGER.warning("No route to device %s", self._resource)
self._available = False
class ArestSwitchPin(ArestSwitchBase):
"""Representation of an aREST switch. Based on digital I/O."""
def __init__(self, resource, location, name, pin, invert):
"""Initialize the switch."""
super().__init__(resource, location, name)
self._pin = pin
self.invert = invert
request = requests.get(f"{self._resource}/mode/{self._pin}/o", timeout=10)
if request.status_code != HTTP_OK:
_LOGGER.error("Can't set mode")
self._available = False
def turn_on(self, **kwargs):
"""Turn the device on."""
turn_on_payload = int(not self.invert)
request = requests.get(
f"{self._resource}/digital/{self._pin}/{turn_on_payload}", timeout=10
)
if request.status_code == HTTP_OK:
self._state = True
else:
_LOGGER.error("Can't turn on pin %s at %s", self._pin, self._resource)
def turn_off(self, **kwargs):
"""Turn the device off."""
turn_off_payload = int(self.invert)
request = requests.get(
f"{self._resource}/digital/{self._pin}/{turn_off_payload}", timeout=10
)
if request.status_code == HTTP_OK:
self._state = False
else:
_LOGGER.error("Can't turn off pin %s at %s", self._pin, self._resource)
def update(self):
"""Get the latest data from aREST API and update the state."""
try:
request = requests.get(f"{self._resource}/digital/{self._pin}", timeout=10)
status_value = int(self.invert)
self._state = request.json()["return_value"] != status_value
self._available = True
except requests.exceptions.ConnectionError:
_LOGGER.warning("No route to device %s", self._resource)
self._available = False
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from vmsfs import VMSFSCollector
###############################################################################
class TestVMSFSCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('VMSFSCollector', {
})
self.collector = VMSFSCollector(config, None)
def test_import(self):
self.assertTrue(VMSFSCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
from django.test import TestCase
from weblate.accounts.pipeline import slugify_username
from weblate.accounts.tasks import cleanup_auditlog, cleanup_social_auth
class PipelineTest(TestCase):
def test_slugify(self):
self.assertEqual(slugify_username("zkouska"), "zkouska")
self.assertEqual(slugify_username("Zkouska"), "Zkouska")
self.assertEqual(slugify_username("zkouška"), "zkouska")
self.assertEqual(slugify_username(" zkouska "), "zkouska")
self.assertEqual(slugify_username("ahoj - ahoj"), "ahoj-ahoj")
self.assertEqual(slugify_username("..test"), "test")
class TasksTest(TestCase):
def test_cleanup_social_auth(self):
cleanup_social_auth()
def test_cleanup_auditlog(self):
cleanup_auditlog()
|
import os
import tornado.ioloop
import tornado.httpserver
from tornado.web import RequestHandler
from tornado.gen import coroutine
from react.render import render_component
comments = []
class IndexHandler(RequestHandler):
@coroutine
def get(self):
rendered = render_component(
os.path.join(os.getcwd(), 'static', 'js', 'CommentBox.jsx'),
{
'comments': comments,
'url': '/comments',
'xsrf':self.xsrf_token
},
to_static_markup=False,
)
self.render('index.html', rendered=rendered)
class CommentHandler(RequestHandler):
@coroutine
def post(self):
comments.append({
'author': self.get_argument('author'),
'text': self.get_argument('text'),
})
self.redirect('/')
urls = [
(r"/", IndexHandler),
(r"/comments", CommentHandler),
(r"/(.*)", tornado.web.StaticFileHandler, {"path":r"{0}".format(os.path.join(os.path.dirname(__file__),"static"))}),
]
settings = dict({
"template_path": os.path.join(os.path.dirname(__file__),"templates"),
"static_path": os.path.join(os.path.dirname(__file__),"static"),
"cookie_secret": os.urandom(12),
"xsrf_cookies": True,
"debug": True,
"compress_response": True
})
application = tornado.web.Application(urls,**settings)
if __name__ == "__main__":
server = tornado.httpserver.HTTPServer(application)
server.listen(8000)
tornado.ioloop.IOLoop.instance().start()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
GIT_REPO = 'https://github.com/leverich/mutilate'
MUTILATE_DIR = '%s/mutilate_benchmark' % linux_packages.INSTALL_DIR
MUTILATE_BIN = '%s/mutilate' % MUTILATE_DIR
APT_PACKAGES = 'scons libevent-dev gengetopt libzmq-dev'
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'mutilate_protocol',
'binary', ['binary', 'ascii'],
'Protocol to use. Supported protocols are binary and ascii.')
flags.DEFINE_list(
'mutilate_qps', [],
'Target aggregate QPS. If not set, target for peak qps.')
flags.DEFINE_integer(
'mutilate_time', 300,
'Maximum time to run (seconds).')
flags.DEFINE_string(
'mutilate_keysize', '16',
'Length of memcached keys (distribution).')
flags.DEFINE_string(
'mutilate_valuesize', '128',
'Length of memcached values (distribution).')
flags.DEFINE_integer(
'mutilate_records', 10000,
'Number of memcached records to use.')
flags.DEFINE_float(
'mutilate_ratio', 0.0,
'Ratio of set:get. By default, read only.')
flags.DEFINE_list(
'mutilate_options', [],
'Additional mutilate long-form options (--) in comma separated form. e.g.'
'--mutilate_options=blocking,search=99:1000.'
'See https://github.com/leverich/mutilate for all available options.')
# If more than one value provided for threads, connections, depths, we will
# enumerate all test configurations. e.g.
# threads=1,2; connections=3,4; depths=5,6
# We will test following threads:connections:depths:
# 1,3,5; 1,3,6; 1,4,5; 1,4,6; 2,3,5; 2,3,6; 2,4,5; 2,4,6;
flags.DEFINE_list(
'mutilate_threads', [1],
'Number of total client threads to spawn per client VM.')
flags.DEFINE_list(
'mutilate_connections', [1],
'Number of connections to establish per client thread.')
flags.DEFINE_list(
'mutilate_depths', [1],
'Maximum depth to pipeline requests.')
# Agent mode options.
flags.DEFINE_integer(
'mutilate_measure_connections', None,
'Master client connections.')
flags.DEFINE_integer(
'mutilate_measure_threads', None,
'Master client thread count.')
flags.DEFINE_integer(
'mutilate_measure_qps', None,
'Master client QPS.')
flags.DEFINE_integer(
'mutilate_measure_depth', None,
'Master client connection depth.')
# To use remote agent mode, we need at least 2 VMs.
AGENT_MODE_MIN_CLIENT_VMS = 2
def CheckPrerequisites():
"""Verify flags are correctly specified.
Raises:
errors.Setup.InvalidFlagConfigurationError: On invalid flag configurations.
"""
agent_mode_flags = [FLAGS['mutilate_measure_connections'].present,
FLAGS['mutilate_measure_threads'].present,
FLAGS['mutilate_measure_qps'].present,
FLAGS['mutilate_measure_depth'].present]
error_message = (
'To enable agent mode, set '
'memcached_mutilate_num_client_vms > 1.')
if any(agent_mode_flags) and (
FLAGS.memcached_mutilate_num_client_vms < AGENT_MODE_MIN_CLIENT_VMS):
raise errors.Setup.InvalidFlagConfigurationError(error_message)
def YumInstall(vm):
"""Installs the mutilate package on the VM."""
raise NotImplementedError
def AptInstall(vm):
"""Installs the mutilate package on the VM."""
vm.Install('build_tools')
vm.InstallPackages(APT_PACKAGES)
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, MUTILATE_DIR))
vm.RemoteCommand('cd {0} && sudo scons'.format(MUTILATE_DIR))
def GetMetadata():
"""Returns mutilate metadata."""
metadata = {
'protocol': FLAGS.mutilate_protocol,
'qps': FLAGS.mutilate_qps or 'peak',
'time': FLAGS.mutilate_time,
'keysize': FLAGS.mutilate_keysize,
'valuesize': FLAGS.mutilate_valuesize,
'records': FLAGS.mutilate_records,
'ratio': FLAGS.mutilate_ratio
}
if FLAGS.mutilate_options:
metadata['options'] = FLAGS.mutilate_options
return metadata
def BuildCmd(server_ip, server_port, options):
"""Build base mutilate command in a list."""
cmd = [MUTILATE_BIN,
'--server=%s:%s' % (server_ip, server_port),
'--keysize=%s' % FLAGS.mutilate_keysize,
'--valuesize=%s' % FLAGS.mutilate_valuesize,
'--records=%s' % FLAGS.mutilate_records] + options
if FLAGS.mutilate_protocol == 'binary':
cmd.append('--binary')
return cmd
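# Illustrative example: with the default flag values, BuildCmd('10.0.0.2', 11211,
# ['--loadonly']) yields roughly
#   [MUTILATE_BIN, '--server=10.0.0.2:11211', '--keysize=16', '--valuesize=128',
#    '--records=10000', '--loadonly', '--binary']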
def RestartAgent(vm, threads):
logging.info('Restarting mutilate remote agent on %s', vm.internal_ip)
# Kill existing mutilate agent threads
vm.RemoteCommand('pkill -9 mutilate', ignore_failure=True)
vm.RemoteCommand(' '.join(
['nohup',
MUTILATE_BIN,
'--threads=%s' % threads,
'--agentmode',
'1>/dev/null',
'2>/dev/null',
'&']))
def Load(client_vm, server_ip, server_port):
"""Preload the server with data."""
logging.info('Loading memcached server.')
cmd = BuildCmd(
server_ip, server_port,
['--loadonly'])
client_vm.RemoteCommand(' '.join(cmd))
def Run(vms, server_ip, server_port):
"""Runs the mutilate benchmark on the vm."""
samples = []
master = vms[0]
runtime_options = {}
samples = []
measure_flags = []
additional_flags = ['--%s' % option for option in FLAGS.mutilate_options]
if FLAGS.mutilate_measure_connections:
runtime_options['measure_connections'] = FLAGS.mutilate_measure_connections
measure_flags.append(
'--measure_connections=%s' % FLAGS.mutilate_measure_connections)
if FLAGS.mutilate_measure_threads:
runtime_options['measure_threads'] = FLAGS.mutilate_measure_threads
if FLAGS.mutilate_measure_qps:
runtime_options['measure_qps'] = FLAGS.mutilate_measure_qps
measure_flags.append(
'--measure_qps=%s' % FLAGS.mutilate_measure_qps)
if FLAGS.mutilate_measure_depth:
runtime_options['measure_depth'] = FLAGS.mutilate_measure_depth
measure_flags.append(
'--measure_depth=%s' % FLAGS.mutilate_measure_depth)
for thread_count in FLAGS.mutilate_threads:
runtime_options['threads'] = thread_count
for vm in vms[1:]:
RestartAgent(vm, thread_count)
for connection_count in FLAGS.mutilate_connections:
runtime_options['connections'] = connection_count
for depth in FLAGS.mutilate_depths:
runtime_options['depth'] = depth
for qps in FLAGS.mutilate_qps or [0]: # 0 indicates peak target QPS.
runtime_options['qps'] = int(qps) or 'peak'
remote_agents = ['--agent=%s' % vm.internal_ip for vm in vms[1:]]
cmd = BuildCmd(
server_ip, server_port,
[
'--noload',
'--qps=%s' % qps,
'--time=%s' % FLAGS.mutilate_time,
'--update=%s' % FLAGS.mutilate_ratio,
'--threads=%s' % (
FLAGS.mutilate_measure_threads or thread_count),
'--connections=%s' % connection_count,
'--depth=%s' % depth,
] + remote_agents + measure_flags + additional_flags)
stdout, _ = master.RemoteCommand(' '.join(cmd))
metadata = GetMetadata()
metadata.update(runtime_options)
samples.extend(ParseResults(stdout, metadata))
return samples
LATENCY_HEADER_REGEX = r'#type([\s\w\d]*)\n'
LATENCY_REGEX = r'([\s\d\.]*)'
QPS_REGEX = r'Total QPS = ([\d\.]*)'
MISS_REGEX = r'Misses = \d+ \(([\d\.]*)%\)'
BANDWIDTH_REGEX = r'[\s\d]*bytes :\s*([\d\.]*) MB/s'
def ParseResults(result, metadata):
"""Parse mutilate result into samples.
Sample Output:
#type avg min 1st 5th 10th 90th 95th 99th
read 52.4 41.0 43.1 45.2 48.1 55.8 56.6 71.5
update 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
op_q 1.5 1.0 1.0 1.1 1.1 1.9 2.0 2.0
Total QPS = 18416.6 (92083 / 5.0s)
Misses = 0 (0.0%)
RX 22744501 bytes : 4.3 MB/s
TX 3315024 bytes : 0.6 MB/s
Args:
result: Text output of running mutilate benchmark.
metadata: metadata associated with the results.
Returns:
List of sample.Sample objects.
"""
samples = []
misses = regex_util.ExtractGroup(MISS_REGEX, result)
metadata['miss_rate'] = float(misses)
latency_stats = regex_util.ExtractGroup(LATENCY_HEADER_REGEX, result).split()
# parse latency
for metric in ('read', 'update', 'op_q'):
latency_regex = metric + LATENCY_REGEX
latency_values = regex_util.ExtractGroup(latency_regex, result).split()
for idx, stat in enumerate(latency_stats):
if idx == len(latency_values):
logging.warning(
'Mutilate does not report %s latency for %s.', stat, metric)
break
samples.append(
sample.Sample(metric + '_' + stat,
float(latency_values[idx]),
'usec', metadata))
# parse bandwidth
for metric in ('TX', 'RX'):
bw_regex = metric + BANDWIDTH_REGEX
bw = regex_util.ExtractGroup(bw_regex, result)
samples.append(
sample.Sample(metric, float(bw), 'MB/s', metadata))
qps = regex_util.ExtractFloat(QPS_REGEX, result)
samples.append(sample.Sample('qps', qps, 'ops/s', metadata))
return samples
|
import asyncio
import logging
from libpyfoscam import FoscamCamera
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import async_extract_entity_ids
from .const import (
DATA as FOSCAM_DATA,
DOMAIN as FOSCAM_DOMAIN,
ENTITIES as FOSCAM_ENTITIES,
)
_LOGGER = logging.getLogger(__name__)
CONF_IP = "ip"
CONF_RTSP_PORT = "rtsp_port"
DEFAULT_NAME = "Foscam Camera"
DEFAULT_PORT = 88
SERVICE_PTZ = "ptz"
ATTR_MOVEMENT = "movement"
ATTR_TRAVELTIME = "travel_time"
DEFAULT_TRAVELTIME = 0.125
DIR_UP = "up"
DIR_DOWN = "down"
DIR_LEFT = "left"
DIR_RIGHT = "right"
DIR_TOPLEFT = "top_left"
DIR_TOPRIGHT = "top_right"
DIR_BOTTOMLEFT = "bottom_left"
DIR_BOTTOMRIGHT = "bottom_right"
MOVEMENT_ATTRS = {
DIR_UP: "ptz_move_up",
DIR_DOWN: "ptz_move_down",
DIR_LEFT: "ptz_move_left",
DIR_RIGHT: "ptz_move_right",
DIR_TOPLEFT: "ptz_move_top_left",
DIR_TOPRIGHT: "ptz_move_top_right",
DIR_BOTTOMLEFT: "ptz_move_bottom_left",
DIR_BOTTOMRIGHT: "ptz_move_bottom_right",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_RTSP_PORT): cv.port,
}
)
SERVICE_PTZ_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_MOVEMENT): vol.In(
[
DIR_UP,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_TOPLEFT,
DIR_TOPRIGHT,
DIR_BOTTOMLEFT,
DIR_BOTTOMRIGHT,
]
),
vol.Optional(ATTR_TRAVELTIME, default=DEFAULT_TRAVELTIME): cv.small_float,
}
)
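# Example service call payload accepted by SERVICE_PTZ_SCHEMA above (illustrative):
#
#   service: foscam.ptz
#   data:
#     entity_id: camera.front_door
#     movement: top_right
#     travel_time: 0.25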
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Foscam IP Camera."""
async def async_handle_ptz(service):
"""Handle PTZ service call."""
movement = service.data[ATTR_MOVEMENT]
travel_time = service.data[ATTR_TRAVELTIME]
entity_ids = await async_extract_entity_ids(hass, service)
if not entity_ids:
return
_LOGGER.debug("Moving '%s' camera(s): %s", movement, entity_ids)
all_cameras = hass.data[FOSCAM_DATA][FOSCAM_ENTITIES]
target_cameras = [
camera for camera in all_cameras if camera.entity_id in entity_ids
]
for camera in target_cameras:
await camera.async_perform_ptz(movement, travel_time)
hass.services.async_register(
FOSCAM_DOMAIN, SERVICE_PTZ, async_handle_ptz, schema=SERVICE_PTZ_SCHEMA
)
camera = FoscamCamera(
config[CONF_IP],
config[CONF_PORT],
config[CONF_USERNAME],
config[CONF_PASSWORD],
verbose=False,
)
rtsp_port = config.get(CONF_RTSP_PORT)
if not rtsp_port:
ret, response = await hass.async_add_executor_job(camera.get_port_info)
if ret == 0:
rtsp_port = response.get("rtspPort") or response.get("mediaPort")
ret, response = await hass.async_add_executor_job(camera.get_motion_detect_config)
motion_status = False
if ret != 0 and response == 1:
motion_status = True
async_add_entities(
[
HassFoscamCamera(
camera,
config[CONF_NAME],
config[CONF_USERNAME],
config[CONF_PASSWORD],
rtsp_port,
motion_status,
)
]
)
class HassFoscamCamera(Camera):
"""An implementation of a Foscam IP camera."""
def __init__(self, camera, name, username, password, rtsp_port, motion_status):
"""Initialize a Foscam camera."""
super().__init__()
self._foscam_session = camera
self._name = name
self._username = username
self._password = password
self._rtsp_port = rtsp_port
self._motion_status = motion_status
async def async_added_to_hass(self):
"""Handle entity addition to hass."""
entities = self.hass.data.setdefault(FOSCAM_DATA, {}).setdefault(
FOSCAM_ENTITIES, []
)
entities.append(self)
def camera_image(self):
"""Return a still image response from the camera."""
# Send the request to snap a picture and return raw jpg data
# Handle exception if host is not reachable or url failed
result, response = self._foscam_session.snap_picture_2()
if result != 0:
return None
return response
@property
def supported_features(self):
"""Return supported features."""
if self._rtsp_port:
return SUPPORT_STREAM
return 0
async def stream_source(self):
"""Return the stream source."""
if self._rtsp_port:
return f"rtsp://{self._username}:{self._password}@{self._foscam_session.host}:{self._rtsp_port}/videoMain"
return None
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return self._motion_status
def enable_motion_detection(self):
"""Enable motion detection in camera."""
try:
ret = self._foscam_session.enable_motion_detection()
if ret != 0:
return
self._motion_status = True
except TypeError:
_LOGGER.debug("Communication problem")
def disable_motion_detection(self):
"""Disable motion detection."""
try:
ret = self._foscam_session.disable_motion_detection()
if ret != 0:
return
self._motion_status = False
except TypeError:
_LOGGER.debug("Communication problem")
async def async_perform_ptz(self, movement, travel_time):
"""Perform a PTZ action on the camera."""
_LOGGER.debug("PTZ action '%s' on %s", movement, self._name)
movement_function = getattr(self._foscam_session, MOVEMENT_ATTRS[movement])
ret, _ = await self.hass.async_add_executor_job(movement_function)
if ret != 0:
_LOGGER.error("Error moving %s '%s': %s", movement, self._name, ret)
return
await asyncio.sleep(travel_time)
ret, _ = await self.hass.async_add_executor_job(
self._foscam_session.ptz_stop_run
)
if ret != 0:
_LOGGER.error("Error stopping movement on '%s': %s", self._name, ret)
return
@property
def name(self):
"""Return the name of this camera."""
return self._name
|
import logging
import zeroconf
from homeassistant.helpers.frame import (
MissingIntegrationFrame,
get_integration_frame,
report_integration,
)
_LOGGER = logging.getLogger(__name__)
def install_multiple_zeroconf_catcher(hass_zc) -> None:
"""Wrap the Zeroconf class to return the shared instance if multiple instances are detected."""
def new_zeroconf_new(self, *k, **kw):
_report(
"attempted to create another Zeroconf instance. Please use the shared Zeroconf via await homeassistant.components.zeroconf.async_get_instance(hass)",
)
return hass_zc
def new_zeroconf_init(self, *k, **kw):
return
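    # Monkeypatch the class so any further instantiation silently returns the shared instance.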
zeroconf.Zeroconf.__new__ = new_zeroconf_new
zeroconf.Zeroconf.__init__ = new_zeroconf_init
def _report(what: str) -> None:
"""Report incorrect usage.
Async friendly.
"""
integration_frame = None
try:
integration_frame = get_integration_frame(exclude_integrations={"zeroconf"})
except MissingIntegrationFrame:
pass
if not integration_frame:
_LOGGER.warning(
"Detected code that %s. Please report this issue.", what, stack_info=True
)
return
report_integration(what, integration_frame)
|
import flatbuffers
class CallerFeatures(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsCallerFeatures(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = CallerFeatures()
x.Init(buf, n + offset)
return x
# CallerFeatures
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# CallerFeatures
def CallerIdentification(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# CallerFeatures
def CallTimeout(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# CallerFeatures
def CallCanceling(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# CallerFeatures
def ProgressiveCallResults(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# CallerFeatures
def PayloadTransparency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# CallerFeatures
def PayloadEncryptionCryptobox(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def CallerFeaturesStart(builder): builder.StartObject(6)
def CallerFeaturesAddCallerIdentification(builder, callerIdentification): builder.PrependBoolSlot(0, callerIdentification, 0)
def CallerFeaturesAddCallTimeout(builder, callTimeout): builder.PrependBoolSlot(1, callTimeout, 0)
def CallerFeaturesAddCallCanceling(builder, callCanceling): builder.PrependBoolSlot(2, callCanceling, 0)
def CallerFeaturesAddProgressiveCallResults(builder, progressiveCallResults): builder.PrependBoolSlot(3, progressiveCallResults, 0)
def CallerFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(4, payloadTransparency, 0)
def CallerFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(5, payloadEncryptionCryptobox, 0)
def CallerFeaturesEnd(builder): return builder.EndObject()
|
from datetime import timedelta
from typing import Any, Dict, List, Optional
from pymelcloud import DEVICE_TYPE_ATA, DEVICE_TYPE_ATW, AtaDevice, AtwDevice
import pymelcloud.ata_device as ata
import pymelcloud.atw_device as atw
from pymelcloud.atw_device import (
PROPERTY_ZONE_1_OPERATION_MODE,
PROPERTY_ZONE_2_OPERATION_MODE,
Zone,
)
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.typing import HomeAssistantType
from . import MelCloudDevice
from .const import (
ATTR_STATUS,
ATTR_VANE_HORIZONTAL,
ATTR_VANE_HORIZONTAL_POSITIONS,
ATTR_VANE_VERTICAL,
ATTR_VANE_VERTICAL_POSITIONS,
CONF_POSITION,
DOMAIN,
SERVICE_SET_VANE_HORIZONTAL,
SERVICE_SET_VANE_VERTICAL,
)
SCAN_INTERVAL = timedelta(seconds=60)
ATA_HVAC_MODE_LOOKUP = {
ata.OPERATION_MODE_HEAT: HVAC_MODE_HEAT,
ata.OPERATION_MODE_DRY: HVAC_MODE_DRY,
ata.OPERATION_MODE_COOL: HVAC_MODE_COOL,
ata.OPERATION_MODE_FAN_ONLY: HVAC_MODE_FAN_ONLY,
ata.OPERATION_MODE_HEAT_COOL: HVAC_MODE_HEAT_COOL,
}
ATA_HVAC_MODE_REVERSE_LOOKUP = {v: k for k, v in ATA_HVAC_MODE_LOOKUP.items()}
ATW_ZONE_HVAC_MODE_LOOKUP = {
atw.ZONE_OPERATION_MODE_HEAT: HVAC_MODE_HEAT,
atw.ZONE_OPERATION_MODE_COOL: HVAC_MODE_COOL,
}
ATW_ZONE_HVAC_MODE_REVERSE_LOOKUP = {v: k for k, v in ATW_ZONE_HVAC_MODE_LOOKUP.items()}
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
):
"""Set up MelCloud device climate based on config_entry."""
mel_devices = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
AtaDeviceClimate(mel_device, mel_device.device)
for mel_device in mel_devices[DEVICE_TYPE_ATA]
]
+ [
AtwDeviceZoneClimate(mel_device, mel_device.device, zone)
for mel_device in mel_devices[DEVICE_TYPE_ATW]
for zone in mel_device.device.zones
],
True,
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SET_VANE_HORIZONTAL,
{vol.Required(CONF_POSITION): cv.string},
"async_set_vane_horizontal",
)
platform.async_register_entity_service(
SERVICE_SET_VANE_VERTICAL,
{vol.Required(CONF_POSITION): cv.string},
"async_set_vane_vertical",
)
class MelCloudClimate(ClimateEntity):
"""Base climate device."""
def __init__(self, device: MelCloudDevice):
"""Initialize the climate."""
self.api = device
self._base_device = self.api.device
self._name = device.name
async def async_update(self):
"""Update state from MELCloud."""
await self.api.async_update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self.api.device_info
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return self._base_device.temperature_increment
class AtaDeviceClimate(MelCloudClimate):
"""Air-to-Air climate device."""
def __init__(self, device: MelCloudDevice, ata_device: AtaDevice) -> None:
"""Initialize the climate."""
super().__init__(device)
self._device = ata_device
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return f"{self.api.device.serial}-{self.api.device.mac}"
@property
def name(self):
"""Return the display name of this entity."""
return self._name
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the optional state attributes with device specific additions."""
attr = {}
vane_horizontal = self._device.vane_horizontal
if vane_horizontal:
attr.update(
{
ATTR_VANE_HORIZONTAL: vane_horizontal,
ATTR_VANE_HORIZONTAL_POSITIONS: self._device.vane_horizontal_positions,
}
)
vane_vertical = self._device.vane_vertical
if vane_vertical:
attr.update(
{
ATTR_VANE_VERTICAL: vane_vertical,
ATTR_VANE_VERTICAL_POSITIONS: self._device.vane_vertical_positions,
}
)
return attr
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
mode = self._device.operation_mode
if not self._device.power or mode is None:
return HVAC_MODE_OFF
return ATA_HVAC_MODE_LOOKUP.get(mode)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._device.set({"power": False})
return
operation_mode = ATA_HVAC_MODE_REVERSE_LOOKUP.get(hvac_mode)
if operation_mode is None:
raise ValueError(f"Invalid hvac_mode [{hvac_mode}]")
props = {"operation_mode": operation_mode}
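        # Turning the unit on is implied when switching out of the off state.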
if self.hvac_mode == HVAC_MODE_OFF:
props["power"] = True
await self._device.set(props)
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_OFF] + [
ATA_HVAC_MODE_LOOKUP.get(mode) for mode in self._device.operation_modes
]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._device.room_temperature
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self._device.target_temperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self._device.set(
{"target_temperature": kwargs.get("temperature", self.target_temperature)}
)
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting."""
return self._device.fan_speed
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
await self._device.set({"fan_speed": fan_mode})
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes."""
return self._device.fan_speeds
async def async_set_vane_horizontal(self, position: str) -> None:
"""Set horizontal vane position."""
if position not in self._device.vane_horizontal_positions:
raise ValueError(
f"Invalid horizontal vane position {position}. Valid positions: [{self._device.vane_horizontal_positions}]."
)
await self._device.set({ata.PROPERTY_VANE_HORIZONTAL: position})
async def async_set_vane_vertical(self, position: str) -> None:
"""Set vertical vane position."""
if position not in self._device.vane_vertical_positions:
raise ValueError(
f"Invalid vertical vane position {position}. Valid positions: [{self._device.vane_vertical_positions}]."
)
await self._device.set({ata.PROPERTY_VANE_VERTICAL: position})
@property
def swing_mode(self) -> Optional[str]:
"""Return vertical vane position or mode."""
return self._device.vane_vertical
async def async_set_swing_mode(self, swing_mode) -> None:
"""Set vertical vane position or mode."""
await self.async_set_vane_vertical(swing_mode)
@property
    def swing_modes(self) -> Optional[List[str]]:
"""Return a list of available vertical vane positions and modes."""
return self._device.vane_vertical_positions
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_SWING_MODE
async def async_turn_on(self) -> None:
"""Turn the entity on."""
await self._device.set({"power": True})
async def async_turn_off(self) -> None:
"""Turn the entity off."""
await self._device.set({"power": False})
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
min_value = self._device.target_temperature_min
if min_value is not None:
return min_value
return DEFAULT_MIN_TEMP
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
max_value = self._device.target_temperature_max
if max_value is not None:
return max_value
return DEFAULT_MAX_TEMP
class AtwDeviceZoneClimate(MelCloudClimate):
"""Air-to-Water zone climate device."""
def __init__(
self, device: MelCloudDevice, atw_device: AtwDevice, atw_zone: Zone
) -> None:
"""Initialize the climate."""
super().__init__(device)
self._device = atw_device
self._zone = atw_zone
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return f"{self.api.device.serial}-{self._zone.zone_index}"
@property
def name(self) -> str:
"""Return the display name of this entity."""
return f"{self._name} {self._zone.name}"
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the optional state attributes with device specific additions."""
data = {
ATTR_STATUS: ATW_ZONE_HVAC_MODE_LOOKUP.get(
self._zone.status, self._zone.status
)
}
return data
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
mode = self._zone.operation_mode
if not self._device.power or mode is None:
return HVAC_MODE_OFF
return ATW_ZONE_HVAC_MODE_LOOKUP.get(mode, HVAC_MODE_OFF)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._device.set({"power": False})
return
operation_mode = ATW_ZONE_HVAC_MODE_REVERSE_LOOKUP.get(hvac_mode)
if operation_mode is None:
raise ValueError(f"Invalid hvac_mode [{hvac_mode}]")
if self._zone.zone_index == 1:
props = {PROPERTY_ZONE_1_OPERATION_MODE: operation_mode}
else:
props = {PROPERTY_ZONE_2_OPERATION_MODE: operation_mode}
if self.hvac_mode == HVAC_MODE_OFF:
props["power"] = True
await self._device.set(props)
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return [self.hvac_mode]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._zone.room_temperature
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self._zone.target_temperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self._zone.set_target_temperature(
kwargs.get("temperature", self.target_temperature)
)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def min_temp(self) -> float:
"""Return the minimum temperature.
MELCloud API does not expose radiator zone temperature limits.
"""
return 10
@property
def max_temp(self) -> float:
"""Return the maximum temperature.
MELCloud API does not expose radiator zone temperature limits.
"""
return 30
|
from itertools import filterfalse
import logging
from pynello.private import Nello
import voluptuous as vol
from homeassistant.components.lock import PLATFORM_SCHEMA, LockEntity
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_ADDRESS = "address"
ATTR_LOCATION_ID = "location_id"
EVENT_DOOR_BELL = "nello_bell_ring"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nello lock platform."""
nello = Nello(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))
add_entities([NelloLock(lock) for lock in nello.locations], True)
class NelloLock(LockEntity):
"""Representation of a Nello lock."""
def __init__(self, nello_lock):
"""Initialize the lock."""
self._nello_lock = nello_lock
self._device_attrs = None
self._activity = None
self._name = None
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def is_locked(self):
"""Return true if lock is locked."""
return True
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return self._device_attrs
def update(self):
"""Update the nello lock properties."""
self._nello_lock.update()
# Location identifiers
location_id = self._nello_lock.location_id
short_id = self._nello_lock.short_id
address = self._nello_lock.address
self._name = f"Nello {short_id}"
self._device_attrs = {ATTR_ADDRESS: address, ATTR_LOCATION_ID: location_id}
# Process recent activity
activity = self._nello_lock.activity
if self._activity:
# Filter out old events
new_activity = list(filterfalse(lambda x: x in self._activity, activity))
if new_activity:
for act in new_activity:
activity_type = act.get("type")
if activity_type == "bell.ring.denied":
event_data = {
"address": address,
"date": act.get("date"),
"description": act.get("description"),
"location_id": location_id,
"short_id": short_id,
}
self.hass.bus.fire(EVENT_DOOR_BELL, event_data)
# Save the activity history so that we don't trigger an event twice
self._activity = activity
def unlock(self, **kwargs):
"""Unlock the device."""
if not self._nello_lock.open_door():
_LOGGER.error("Failed to unlock")
|
import unittest
import numpy as np
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.faster_rcnn import ProposalTargetCreator
from chainercv.utils import generate_random_bbox
@testing.parameterize(
{'n_sample': 128}, {'n_sample': None}
)
class TestProposalTargetCreator(unittest.TestCase):
n_class = 21
pos_ratio = 0.25
n_roi = 1024
n_bbox = 10
def setUp(self):
self.roi = generate_random_bbox(self.n_roi, (392, 512), 16, 250)
self.bbox = generate_random_bbox(self.n_bbox, (392, 512), 16, 250)
self.label = np.random.randint(
0, self.n_class - 1, size=(self.n_bbox,), dtype=np.int32)
self.proposal_target_creator = ProposalTargetCreator(
n_sample=self.n_sample,
pos_ratio=self.pos_ratio,
)
def check_proposal_target_creator(
self, bbox, label, roi, proposal_target_creator):
xp = cuda.get_array_module(roi)
sample_roi, gt_roi_loc, gt_roi_label =\
proposal_target_creator(roi, bbox, label)
# Test types
self.assertIsInstance(sample_roi, xp.ndarray)
self.assertIsInstance(gt_roi_loc, xp.ndarray)
self.assertIsInstance(gt_roi_label, xp.ndarray)
sample_roi = cuda.to_cpu(sample_roi)
gt_roi_loc = cuda.to_cpu(gt_roi_loc)
gt_roi_label = cuda.to_cpu(gt_roi_label)
# Test shapes
n_sample = len(sample_roi)
if self.n_sample is None:
self.assertGreaterEqual(self.n_roi + self.n_bbox, n_sample)
else:
self.assertGreaterEqual(self.n_sample, n_sample)
self.assertEqual(sample_roi.shape, (n_sample, 4))
self.assertEqual(gt_roi_loc.shape, (n_sample, 4))
self.assertEqual(gt_roi_label.shape, (n_sample,))
# Test foreground and background labels
np.testing.assert_equal(np.sum(gt_roi_label >= 0), n_sample)
n_pos = np.sum(gt_roi_label >= 1)
n_neg = np.sum(gt_roi_label == 0)
self.assertLessEqual(n_pos, n_sample * self.pos_ratio)
self.assertLessEqual(n_neg, n_sample - n_pos)
def test_proposal_target_creator_cpu(self):
self.check_proposal_target_creator(
self.bbox, self.label, self.roi,
self.proposal_target_creator)
@attr.gpu
def test_proposal_target_creator_gpu(self):
self.check_proposal_target_creator(
cuda.to_gpu(self.bbox),
cuda.to_gpu(self.label),
cuda.to_gpu(self.roi),
self.proposal_target_creator)
testing.run_module(__name__, __file__)
|
from xknx.devices import Switch as XknxSwitch
from homeassistant.components.switch import SwitchEntity
from . import DOMAIN
from .knx_entity import KnxEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up switch(es) for KNX platform."""
entities = []
for device in hass.data[DOMAIN].xknx.devices:
if isinstance(device, XknxSwitch):
entities.append(KNXSwitch(device))
async_add_entities(entities)
class KNXSwitch(KnxEntity, SwitchEntity):
"""Representation of a KNX switch."""
def __init__(self, device: XknxSwitch):
"""Initialize of KNX switch."""
super().__init__(device)
@property
def is_on(self):
"""Return true if device is on."""
return self._device.state
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._device.set_on()
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._device.set_off()
|
import binascii
import logging
from pysnmp.entity import config as cfg
from pysnmp.entity.rfc3413.oneliner import cmdgen
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_AUTH_KEY,
CONF_BASEOID,
CONF_COMMUNITY,
CONF_PRIV_KEY,
DEFAULT_COMMUNITY,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BASEOID): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Inclusive(CONF_AUTH_KEY, "keys"): cv.string,
vol.Inclusive(CONF_PRIV_KEY, "keys"): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return an SNMP scanner."""
scanner = SnmpScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SnmpScanner(DeviceScanner):
"""Queries any SNMP capable Access Point for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
self.snmp = cmdgen.CommandGenerator()
self.host = cmdgen.UdpTransportTarget((config[CONF_HOST], 161))
if CONF_AUTH_KEY not in config or CONF_PRIV_KEY not in config:
self.auth = cmdgen.CommunityData(config[CONF_COMMUNITY])
else:
self.auth = cmdgen.UsmUserData(
config[CONF_COMMUNITY],
config[CONF_AUTH_KEY],
config[CONF_PRIV_KEY],
authProtocol=cfg.usmHMACSHAAuthProtocol,
privProtocol=cfg.usmAesCfb128Protocol,
)
self.baseoid = cmdgen.MibVariable(config[CONF_BASEOID])
self.last_results = []
# Test the router is accessible
data = self.get_snmp_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client["mac"] for client in self.last_results if client.get("mac")]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
# We have no names
return None
def _update_info(self):
"""Ensure the information from the device is up to date.
Return boolean if scanning successful.
"""
if not self.success_init:
return False
data = self.get_snmp_data()
if not data:
return False
self.last_results = data
return True
def get_snmp_data(self):
"""Fetch MAC addresses from access point via SNMP."""
devices = []
errindication, errstatus, errindex, restable = self.snmp.nextCmd(
self.auth, self.host, self.baseoid
)
if errindication:
_LOGGER.error("SNMPLIB error: %s", errindication)
return
if errstatus:
_LOGGER.error(
"SNMP error: %s at %s",
errstatus.prettyPrint(),
errindex and restable[int(errindex) - 1][0] or "?",
)
return
for resrow in restable:
for _, val in resrow:
try:
mac = binascii.hexlify(val.asOctets()).decode("utf-8")
except AttributeError:
continue
_LOGGER.debug("Found MAC address: %s", mac)
mac = ":".join([mac[i : i + 2] for i in range(0, len(mac), 2)])
devices.append({"mac": mac})
return devices
|
import json
import time
import pytest
from homeassistant.components.flo.const import DOMAIN as FLO_DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, CONTENT_TYPE_JSON
from .common import TEST_EMAIL_ADDRESS, TEST_PASSWORD, TEST_TOKEN, TEST_USER_ID
from tests.common import MockConfigEntry, load_fixture
@pytest.fixture
def config_entry(hass):
"""Config entry version 1 fixture."""
return MockConfigEntry(
domain=FLO_DOMAIN,
data={CONF_USERNAME: TEST_USER_ID, CONF_PASSWORD: TEST_PASSWORD},
version=1,
)
@pytest.fixture
def aioclient_mock_fixture(aioclient_mock):
"""Fixture to provide a aioclient mocker."""
now = round(time.time())
# Mocks the login response for flo.
aioclient_mock.post(
"https://api.meetflo.com/api/v1/users/auth",
text=json.dumps(
{
"token": TEST_TOKEN,
"tokenPayload": {
"user": {"user_id": TEST_USER_ID, "email": TEST_EMAIL_ADDRESS},
"timestamp": now,
},
"tokenExpiration": 86400,
"timeNow": now,
}
),
headers={"Content-Type": CONTENT_TYPE_JSON},
status=200,
)
# Mocks the device for flo.
aioclient_mock.get(
"https://api-gw.meetflo.com/api/v2/devices/98765",
text=load_fixture("flo/device_info_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
)
# Mocks the water consumption for flo.
aioclient_mock.get(
"https://api-gw.meetflo.com/api/v2/water/consumption",
text=load_fixture("flo/water_consumption_info_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
)
# Mocks the location info for flo.
aioclient_mock.get(
"https://api-gw.meetflo.com/api/v2/locations/mmnnoopp",
text=load_fixture("flo/location_info_expand_devices_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
)
# Mocks the user info for flo.
aioclient_mock.get(
"https://api-gw.meetflo.com/api/v2/users/12345abcde",
text=load_fixture("flo/user_info_expand_locations_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
params={"expand": "locations"},
)
    # Mocks the user info for flo (without the expand parameter).
aioclient_mock.get(
"https://api-gw.meetflo.com/api/v2/users/12345abcde",
text=load_fixture("flo/user_info_expand_locations_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
)
# Mocks the valve open call for flo.
aioclient_mock.post(
"https://api-gw.meetflo.com/api/v2/devices/98765",
text=load_fixture("flo/device_info_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
json={"valve": {"target": "open"}},
)
# Mocks the valve close call for flo.
aioclient_mock.post(
"https://api-gw.meetflo.com/api/v2/devices/98765",
text=load_fixture("flo/device_info_response_closed.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
json={"valve": {"target": "closed"}},
)
# Mocks the health test call for flo.
aioclient_mock.post(
"https://api-gw.meetflo.com/api/v2/devices/98765/healthTest/run",
text=load_fixture("flo/user_info_expand_locations_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
)
    # Mocks setting the system mode to home for flo.
aioclient_mock.post(
"https://api-gw.meetflo.com/api/v2/locations/mmnnoopp/systemMode",
text=load_fixture("flo/user_info_expand_locations_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
json={"systemMode": {"target": "home"}},
)
    # Mocks setting the system mode to away for flo.
aioclient_mock.post(
"https://api-gw.meetflo.com/api/v2/locations/mmnnoopp/systemMode",
text=load_fixture("flo/user_info_expand_locations_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
json={"systemMode": {"target": "away"}},
)
    # Mocks setting the system mode to sleep for flo.
aioclient_mock.post(
"https://api-gw.meetflo.com/api/v2/locations/mmnnoopp/systemMode",
text=load_fixture("flo/user_info_expand_locations_response.json"),
status=200,
headers={"Content-Type": CONTENT_TYPE_JSON},
json={
"systemMode": {
"target": "sleep",
"revertMinutes": 120,
"revertMode": "home",
}
},
)
|
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN
import homeassistant.components.automation as automation
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_no_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, [])
async def test_get_minimum_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
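    # The state value itself is irrelevant here; only the supported_features attribute drives the conditions.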
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 0}
)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_disarmed",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_triggered",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_get_maximum_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 31}
)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_disarmed",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_triggered",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_armed_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_armed_away",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_armed_night",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_armed_custom_bypass",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
"""Test for all conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "is_triggered",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_triggered - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "is_disarmed",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_disarmed - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event3"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "is_armed_home",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_armed_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event4"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "is_armed_away",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_armed_away - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event5"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "is_armed_night",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_armed_night - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event6"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "is_armed_custom_bypass",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_armed_custom_bypass - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_TRIGGERED)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
hass.bus.async_fire("test_event6")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_triggered - event - test_event1"
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_DISARMED)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
hass.bus.async_fire("test_event6")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_disarmed - event - test_event2"
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_HOME)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
hass.bus.async_fire("test_event6")
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].data["some"] == "is_armed_home - event - test_event3"
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_AWAY)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
hass.bus.async_fire("test_event6")
await hass.async_block_till_done()
assert len(calls) == 4
assert calls[3].data["some"] == "is_armed_away - event - test_event4"
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_NIGHT)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
hass.bus.async_fire("test_event6")
await hass.async_block_till_done()
assert len(calls) == 5
assert calls[4].data["some"] == "is_armed_night - event - test_event5"
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_CUSTOM_BYPASS)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
hass.bus.async_fire("test_event6")
await hass.async_block_till_done()
assert len(calls) == 6
assert calls[5].data["some"] == "is_armed_custom_bypass - event - test_event6"
|
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
from itertools import compress
from mne import io, pick_types, pick_channels, read_events, Epochs
from mne.channels.interpolation import _make_interpolation_matrix
from mne.datasets import testing
from mne.utils import run_tests_if_main
from mne.preprocessing.nirs import optical_density, scalp_coupling_index
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx
from mne.io.proj import _has_eeg_average_ref_proj
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
raw_fname_ctf = op.join(base_dir, 'test_ctf_raw.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _load_data(kind):
"""Load data."""
# It is more memory efficient to load data in a separate
# function so it's loaded on-demand
raw = io.read_raw_fif(raw_fname)
events = read_events(event_name)
# subselect channels for speed
if kind == 'eeg':
picks = pick_types(raw.info, meg=False, eeg=True, exclude=[])[:15]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, reject=dict(eeg=80e-6))
else:
picks = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1:200:2]
assert kind == 'meg'
with pytest.warns(RuntimeWarning, match='projection'):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True,
reject=dict(grad=1000e-12, mag=4e-12))
return raw, epochs
@pytest.mark.parametrize('offset', (0., 0.1))
@pytest.mark.parametrize('avg_proj, ctol', [
(True, (0.86, 0.93)),
(False, (0.97, 0.99)),
])
@pytest.mark.parametrize('method, atol', [
(None, 3e-6),
(dict(eeg='MNE'), 4e-6),
])
@pytest.mark.filterwarnings('ignore:.*than 20 mm from head frame origin.*')
def test_interpolation_eeg(offset, avg_proj, ctol, atol, method):
"""Test interpolation of EEG channels."""
raw, epochs_eeg = _load_data('eeg')
epochs_eeg = epochs_eeg.copy()
assert not _has_eeg_average_ref_proj(epochs_eeg.info['projs'])
# Offsetting the coordinate frame should have no effect on the output
for inst in (raw, epochs_eeg):
for ch in inst.info['chs']:
if ch['kind'] == io.constants.FIFF.FIFFV_EEG_CH:
ch['loc'][:3] += offset
ch['loc'][3:6] += offset
for d in inst.info['dig']:
d['r'] += offset
# check that interpolation does nothing if no bads are marked
epochs_eeg.info['bads'] = []
evoked_eeg = epochs_eeg.average()
kw = dict(method=method)
with pytest.warns(RuntimeWarning, match='Doing nothing'):
evoked_eeg.interpolate_bads(**kw)
# create good and bad channels for EEG
epochs_eeg.info['bads'] = []
goods_idx = np.ones(len(epochs_eeg.ch_names), dtype=bool)
goods_idx[epochs_eeg.ch_names.index('EEG 012')] = False
bads_idx = ~goods_idx
pos = epochs_eeg._get_channel_positions()
evoked_eeg = epochs_eeg.average()
if avg_proj:
evoked_eeg.set_eeg_reference(projection=True).apply_proj()
assert_allclose(evoked_eeg.data.mean(0), 0., atol=1e-20)
ave_before = evoked_eeg.data[bads_idx]
# interpolate bad channels for EEG
epochs_eeg.info['bads'] = ['EEG 012']
evoked_eeg = epochs_eeg.average()
if avg_proj:
evoked_eeg.set_eeg_reference(projection=True).apply_proj()
good_picks = pick_types(evoked_eeg.info, meg=False, eeg=True)
assert_allclose(evoked_eeg.data[good_picks].mean(0), 0., atol=1e-20)
evoked_eeg_bad = evoked_eeg.copy()
evoked_eeg_bad.data[
evoked_eeg.ch_names.index(epochs_eeg.info['bads'][0])] = 1e10
evoked_eeg_interp = evoked_eeg_bad.copy().interpolate_bads(
origin=(0., 0., 0.), **kw)
if avg_proj:
assert_allclose(evoked_eeg_interp.data.mean(0), 0., atol=1e-6)
interp_zero = evoked_eeg_interp.data[bads_idx]
    if method is None:  # the default method uses spherical splines
pos_good = pos[goods_idx]
pos_bad = pos[bads_idx]
interpolation = _make_interpolation_matrix(pos_good, pos_bad)
assert interpolation.shape == (1, len(epochs_eeg.ch_names) - 1)
interp_manual = np.dot(interpolation, evoked_eeg_bad.data[goods_idx])
assert_array_equal(interp_manual, interp_zero)
del interp_manual, interpolation, pos, pos_good, pos_bad
assert_allclose(ave_before, interp_zero, atol=atol)
assert ctol[0] < np.corrcoef(ave_before, interp_zero)[0, 1] < ctol[1]
interp_fit = evoked_eeg_bad.copy().interpolate_bads(**kw).data[bads_idx]
assert_allclose(ave_before, interp_fit, atol=2.5e-6)
assert ctol[1] < np.corrcoef(ave_before, interp_fit)[0, 1] # better
# check that interpolation fails when preload is False
epochs_eeg.preload = False
with pytest.raises(RuntimeError, match='requires epochs data to be loade'):
epochs_eeg.interpolate_bads(**kw)
epochs_eeg.preload = True
# check that interpolation changes the data in raw
raw_eeg = io.RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info)
raw_before = raw_eeg._data[bads_idx]
raw_after = raw_eeg.interpolate_bads(**kw)._data[bads_idx]
assert not np.all(raw_before == raw_after)
# check that interpolation fails when preload is False
for inst in [raw, epochs_eeg]:
assert hasattr(inst, 'preload')
inst.preload = False
inst.info['bads'] = [inst.ch_names[1]]
with pytest.raises(RuntimeError, match='requires.*data to be loaded'):
inst.interpolate_bads(**kw)
# check that interpolation works with few channels
raw_few = raw.copy().crop(0, 0.1).load_data()
raw_few.pick_channels(raw_few.ch_names[:1] + raw_few.ch_names[3:4])
assert len(raw_few.ch_names) == 2
raw_few.del_proj()
raw_few.info['bads'] = [raw_few.ch_names[-1]]
orig_data = raw_few[1][0]
with pytest.warns(None) as w:
raw_few.interpolate_bads(reset_bads=False, **kw)
assert len([ww for ww in w if 'more than' not in str(ww.message)]) == 0
new_data = raw_few[1][0]
assert (new_data == 0).mean() < 0.5
assert np.corrcoef(new_data, orig_data)[0, 1] > 0.2
@pytest.mark.slowtest
def test_interpolation_meg():
"""Test interpolation of MEG channels."""
# speed accuracy tradeoff: channel subselection is faster but the
# correlation drops
thresh = 0.68
raw, epochs_meg = _load_data('meg')
# check that interpolation works when non M/EEG channels are present
# before MEG channels
raw.crop(0, 0.1).load_data().pick_channels(epochs_meg.ch_names)
raw.info.normalize_proj()
with pytest.warns(RuntimeWarning, match='unit .* changed from .* to .*'):
raw.set_channel_types({raw.ch_names[0]: 'stim'})
raw.info['bads'] = [raw.ch_names[1]]
raw.load_data()
raw.interpolate_bads(mode='fast')
del raw
# check that interpolation works for MEG
epochs_meg.info['bads'] = ['MEG 0141']
evoked = epochs_meg.average()
pick = pick_channels(epochs_meg.info['ch_names'], epochs_meg.info['bads'])
# MEG -- raw
raw_meg = io.RawArray(data=epochs_meg._data[0], info=epochs_meg.info)
raw_meg.info['bads'] = ['MEG 0141']
data1 = raw_meg[pick, :][0][0]
raw_meg.info.normalize_proj()
data2 = raw_meg.interpolate_bads(reset_bads=False,
mode='fast')[pick, :][0][0]
assert np.corrcoef(data1, data2)[0, 1] > thresh
    # reset_bads=False should leave the single bad channel marked as before
    assert len(raw_meg.info['bads']) == 1
# MEG -- epochs
data1 = epochs_meg.get_data()[:, pick, :].ravel()
epochs_meg.info.normalize_proj()
epochs_meg.interpolate_bads(mode='fast')
data2 = epochs_meg.get_data()[:, pick, :].ravel()
assert np.corrcoef(data1, data2)[0, 1] > thresh
assert len(epochs_meg.info['bads']) == 0
# MEG -- evoked (plus auto origin)
data1 = evoked.data[pick]
evoked.info.normalize_proj()
data2 = evoked.interpolate_bads(origin='auto').data[pick]
assert np.corrcoef(data1, data2)[0, 1] > thresh
def _this_interpol(inst, ref_meg=False):
from mne.channels.interpolation import _interpolate_bads_meg
_interpolate_bads_meg(inst, ref_meg=ref_meg, mode='fast')
return inst
@pytest.mark.slowtest
def test_interpolate_meg_ctf():
"""Test interpolation of MEG channels from CTF system."""
thresh = .85
tol = .05 # assert the new interpol correlates at least .05 "better"
bad = 'MLC22-2622' # select a good channel to test the interpolation
    raw = io.read_raw_fif(raw_fname_ctf).crop(0, 1.0).load_data()  # crop to 1 s for speed
raw.apply_gradient_compensation(3)
# Show that we have to exclude ref_meg for interpolating CTF MEG-channels
# (fixed in #5965):
raw.info['bads'] = [bad]
pick_bad = pick_channels(raw.info['ch_names'], raw.info['bads'])
data_orig = raw[pick_bad, :][0]
# mimic old behavior (the ref_meg-arg in _interpolate_bads_meg only serves
# this purpose):
data_interp_refmeg = _this_interpol(raw, ref_meg=True)[pick_bad, :][0]
# new:
data_interp_no_refmeg = _this_interpol(raw, ref_meg=False)[pick_bad, :][0]
R = dict()
R['no_refmeg'] = np.corrcoef(data_orig, data_interp_no_refmeg)[0, 1]
R['with_refmeg'] = np.corrcoef(data_orig, data_interp_refmeg)[0, 1]
print('Corrcoef of interpolated with original channel: ', R)
assert R['no_refmeg'] > R['with_refmeg'] + tol
assert R['no_refmeg'] > thresh
@testing.requires_testing_data
def test_interpolation_ctf_comp():
"""Test interpolation with compensated CTF data."""
ctf_dir = op.join(testing.data_path(download=False), 'CTF')
raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds')
raw = io.read_raw_ctf(raw_fname, preload=True)
raw.info['bads'] = [raw.ch_names[5], raw.ch_names[-5]]
raw.interpolate_bads(mode='fast', origin=(0., 0., 0.04))
assert raw.info['bads'] == []
@testing.requires_testing_data
def test_interpolation_nirs():
"""Test interpolating bad nirs channels."""
fname = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_2_recording_w_overlap')
raw_intensity = read_raw_nirx(fname, preload=False)
raw_od = optical_density(raw_intensity)
sci = scalp_coupling_index(raw_od)
raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))
bad_0 = np.where([name == raw_od.info['bads'][0] for
name in raw_od.ch_names])[0][0]
bad_0_std_pre_interp = np.std(raw_od._data[bad_0])
raw_od.interpolate_bads()
assert raw_od.info['bads'] == []
assert bad_0_std_pre_interp > np.std(raw_od._data[bad_0])
run_tests_if_main()
|
import asyncio
from datetime import date, datetime
import logging
import pyotgw
import pyotgw.vars as gw_vars
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as COMP_CLIMATE
from homeassistant.components.sensor import DOMAIN as COMP_SENSOR
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_DATE,
ATTR_ID,
ATTR_MODE,
ATTR_TEMPERATURE,
ATTR_TIME,
CONF_DEVICE,
CONF_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
ATTR_CH_OVRD,
ATTR_DHW_OVRD,
ATTR_GW_ID,
ATTR_LEVEL,
CONF_CLIMATE,
CONF_FLOOR_TEMP,
CONF_PRECISION,
DATA_GATEWAYS,
DATA_OPENTHERM_GW,
DOMAIN,
SERVICE_RESET_GATEWAY,
SERVICE_SET_CH_OVRD,
SERVICE_SET_CLOCK,
SERVICE_SET_CONTROL_SETPOINT,
SERVICE_SET_GPIO_MODE,
SERVICE_SET_HOT_WATER_OVRD,
SERVICE_SET_HOT_WATER_SETPOINT,
SERVICE_SET_LED_MODE,
SERVICE_SET_MAX_MOD,
SERVICE_SET_OAT,
SERVICE_SET_SB_TEMP,
)
_LOGGER = logging.getLogger(__name__)
CLIMATE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
{
vol.Required(CONF_DEVICE): cv.string,
vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def options_updated(hass, entry):
"""Handle options update."""
gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
async_dispatcher_send(hass, gateway.options_update_signal, entry)
async def async_setup_entry(hass, config_entry):
"""Set up the OpenTherm Gateway component."""
if DATA_OPENTHERM_GW not in hass.data:
hass.data[DATA_OPENTHERM_GW] = {DATA_GATEWAYS: {}}
gateway = OpenThermGatewayDevice(hass, config_entry)
hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]] = gateway
config_entry.add_update_listener(options_updated)
# Schedule directly on the loop to avoid blocking HA startup.
hass.loop.create_task(gateway.connect_and_subscribe())
for comp in [COMP_BINARY_SENSOR, COMP_CLIMATE, COMP_SENSOR]:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, comp)
)
register_services(hass)
return True
async def async_setup(hass, config):
"""Set up the OpenTherm Gateway component."""
if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
conf = config[DOMAIN]
for device_id, device_config in conf.items():
device_config[CONF_ID] = device_id
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=device_config
)
)
return True
def register_services(hass):
"""Register services for the component."""
service_reset_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
)
}
)
service_set_central_heating_ovrd_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_CH_OVRD): cv.boolean,
}
)
service_set_clock_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
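            # Note: these defaults are evaluated once when the schema is built, not per service call.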
vol.Optional(ATTR_DATE, default=date.today()): cv.date,
vol.Optional(ATTR_TIME, default=datetime.now().time()): cv.time,
}
)
service_set_control_setpoint_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=90)
),
}
)
service_set_hot_water_setpoint_schema = service_set_control_setpoint_schema
service_set_hot_water_ovrd_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_DHW_OVRD): vol.Any(
vol.Equal("A"), vol.All(vol.Coerce(int), vol.Range(min=0, max=1))
),
}
)
service_set_gpio_mode_schema = vol.Schema(
vol.Any(
vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.Equal("A"),
vol.Required(ATTR_MODE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=6)
),
}
),
vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.Equal("B"),
vol.Required(ATTR_MODE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=7)
),
}
),
)
)
service_set_led_mode_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.In("ABCDEF"),
vol.Required(ATTR_MODE): vol.In("RXTBOFHWCEMP"),
}
)
service_set_max_mod_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_LEVEL): vol.All(
vol.Coerce(int), vol.Range(min=-1, max=100)
),
}
)
service_set_oat_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=-40, max=99)
),
}
)
service_set_sb_temp_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=30)
),
}
)
async def reset_gateway(call):
"""Reset the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
mode_rst = gw_vars.OTGW_MODE_RESET
status = await gw_dev.gateway.set_mode(mode_rst)
gw_dev.status = status
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway, service_reset_schema
)
async def set_ch_ovrd(call):
"""Set the central heating override on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
await gw_dev.gateway.set_ch_enable_bit(1 if call.data[ATTR_CH_OVRD] else 0)
hass.services.async_register(
DOMAIN,
SERVICE_SET_CH_OVRD,
set_ch_ovrd,
service_set_central_heating_ovrd_schema,
)
async def set_control_setpoint(call):
"""Set the control setpoint on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_CONTROL_SETPOINT
value = await gw_dev.gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_CONTROL_SETPOINT,
set_control_setpoint,
service_set_control_setpoint_schema,
)
async def set_dhw_ovrd(call):
"""Set the domestic hot water override on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.OTGW_DHW_OVRD
value = await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_HOT_WATER_OVRD,
set_dhw_ovrd,
service_set_hot_water_ovrd_schema,
)
async def set_dhw_setpoint(call):
"""Set the domestic hot water setpoint on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_DHW_SETPOINT
value = await gw_dev.gateway.set_dhw_setpoint(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_HOT_WATER_SETPOINT,
set_dhw_setpoint,
service_set_hot_water_setpoint_schema,
)
async def set_device_clock(call):
"""Set the clock on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
attr_date = call.data[ATTR_DATE]
attr_time = call.data[ATTR_TIME]
await gw_dev.gateway.set_clock(datetime.combine(attr_date, attr_time))
hass.services.async_register(
DOMAIN, SERVICE_SET_CLOCK, set_device_clock, service_set_clock_schema
)
async def set_gpio_mode(call):
"""Set the OpenTherm Gateway GPIO modes."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gpio_id = call.data[ATTR_ID]
gpio_mode = call.data[ATTR_MODE]
mode = await gw_dev.gateway.set_gpio_mode(gpio_id, gpio_mode)
gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}")
gw_dev.status.update({gpio_var: mode})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, service_set_gpio_mode_schema
)
async def set_led_mode(call):
"""Set the OpenTherm Gateway LED modes."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
led_id = call.data[ATTR_ID]
led_mode = call.data[ATTR_MODE]
mode = await gw_dev.gateway.set_led_mode(led_id, led_mode)
led_var = getattr(gw_vars, f"OTGW_LED_{led_id}")
gw_dev.status.update({led_var: mode})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_LED_MODE, set_led_mode, service_set_led_mode_schema
)
async def set_max_mod(call):
"""Set the max modulation level."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD
level = call.data[ATTR_LEVEL]
if level == -1:
# Backend only clears setting on non-numeric values.
level = "-"
value = await gw_dev.gateway.set_max_relative_mod(level)
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, service_set_max_mod_schema
)
async def set_outside_temp(call):
"""Provide the outside temperature to the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_OUTSIDE_TEMP
value = await gw_dev.gateway.set_outside_temp(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_OAT, set_outside_temp, service_set_oat_schema
)
async def set_setback_temp(call):
"""Set the OpenTherm Gateway SetBack temperature."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.OTGW_SB_TEMP
value = await gw_dev.gateway.set_setback_temp(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, service_set_sb_temp_schema
)
async def async_unload_entry(hass, entry):
"""Cleanup and disconnect from gateway."""
await asyncio.gather(
hass.config_entries.async_forward_entry_unload(entry, COMP_BINARY_SENSOR),
hass.config_entries.async_forward_entry_unload(entry, COMP_CLIMATE),
hass.config_entries.async_forward_entry_unload(entry, COMP_SENSOR),
)
gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
await gateway.cleanup()
return True
class OpenThermGatewayDevice:
"""OpenTherm Gateway device class."""
def __init__(self, hass, config_entry):
"""Initialize the OpenTherm Gateway."""
self.hass = hass
self.device_path = config_entry.data[CONF_DEVICE]
self.gw_id = config_entry.data[CONF_ID]
self.name = config_entry.data[CONF_NAME]
self.climate_config = config_entry.options
self.status = {}
self.update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_update"
self.options_update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_options_update"
self.gateway = pyotgw.pyotgw()
self.gw_version = None
async def cleanup(self, event=None):
"""Reset overrides on the gateway."""
await self.gateway.set_control_setpoint(0)
await self.gateway.set_max_relative_mod("-")
await self.gateway.disconnect()
async def connect_and_subscribe(self):
"""Connect to serial device and subscribe report handler."""
self.status = await self.gateway.connect(self.hass.loop, self.device_path)
_LOGGER.debug("Connected to OpenTherm Gateway at %s", self.device_path)
self.gw_version = self.status.get(gw_vars.OTGW_BUILD)
self.hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.cleanup)
async def handle_report(status):
"""Handle reports from the OpenTherm Gateway."""
_LOGGER.debug("Received report: %s", status)
self.status = status
async_dispatcher_send(self.hass, self.update_signal, status)
self.gateway.subscribe(handle_report)
|
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from django.test import TestCase
from import_export.results import InvalidRow
class InvalidRowTest(TestCase):
def setUp(self):
# Create a ValidationError with a mix of field-specific and non-field-specific errors
self.non_field_errors = ValidationError(['Error 1', 'Error 2', 'Error 3'])
self.field_errors = ValidationError({
'name': ['Error 4', 'Error 5'],
'birthday': ['Error 6', 'Error 7'],
})
combined_error_dict = self.non_field_errors.update_error_dict(
self.field_errors.error_dict.copy()
)
e = ValidationError(combined_error_dict)
# Create an InvalidRow instance to use in tests
self.obj = InvalidRow(
number=1,
validation_error=e,
values=['ABC', '123']
)
def test_error_count(self):
self.assertEqual(self.obj.error_count, 7)
def test_non_field_specific_errors(self):
result = self.obj.non_field_specific_errors
self.assertIsInstance(result, list)
self.assertEqual(result, ['Error 1', 'Error 2', 'Error 3'])
def test_field_specific_errors(self):
result = self.obj.field_specific_errors
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
self.assertEqual(result['name'], ['Error 4', 'Error 5'])
self.assertEqual(result['birthday'], ['Error 6', 'Error 7'])
def test_creates_error_dict_from_error_list_if_validation_error_only_has_error_list(self):
obj = InvalidRow(
number=1,
validation_error=self.non_field_errors,
values=[]
)
self.assertIsInstance(obj.error_dict, dict)
self.assertIn(NON_FIELD_ERRORS, obj.error_dict)
self.assertEqual(obj.error_dict[NON_FIELD_ERRORS], ['Error 1', 'Error 2', 'Error 3'])
|
import os
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_FILENAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
CONF_TIMESTAMP = "timestamp"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_FILENAME): cv.string,
vol.Optional(CONF_TIMESTAMP, default=False): cv.boolean,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the file notification service."""
filename = config[CONF_FILENAME]
timestamp = config[CONF_TIMESTAMP]
return FileNotificationService(hass, filename, timestamp)
class FileNotificationService(BaseNotificationService):
"""Implement the notification service for the File service."""
def __init__(self, hass, filename, add_timestamp):
"""Initialize the service."""
self.filepath = os.path.join(hass.config.config_dir, filename)
self.add_timestamp = add_timestamp
def send_message(self, message="", **kwargs):
"""Send a message to a file."""
with open(self.filepath, "a") as file:
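            # Write a header line the first time the file is used (i.e. while it is still empty).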
if os.stat(self.filepath).st_size == 0:
title = f"{kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)} notifications (Log started: {dt_util.utcnow().isoformat()})\n{'-' * 80}\n"
file.write(title)
if self.add_timestamp:
text = f"{dt_util.utcnow().isoformat()} {message}\n"
else:
text = f"{message}\n"
file.write(text)
|
from __future__ import print_function
import argparse
import string
import sys
import fileinput
def filter_non_printable(s):
return ''.join([c if c.isalnum() or c.isspace() or c in string.punctuation else ' ' for c in s])
def main(args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("files", action="store", nargs="*", help="files to print")
ns = p.parse_args(args)
status = 0
fileinput.close() # in case it is not closed
try:
for line in fileinput.input(ns.files, openhook=fileinput.hook_encoded("utf-8")):
print(filter_non_printable(line), end='')
except Exception as e:
print('cat: %s' % str(e))
status = 1
finally:
fileinput.close()
sys.exit(status)
if __name__ == "__main__":
main(sys.argv[1:])
|
from __future__ import unicode_literals
import hashlib
def sha256_encode(item):
"""sha-256 message digest algorithm"""
try:
return (hashlib.sha256(item.encode("utf-8"))).hexdigest()
    except Exception:
return ''
|
import abc
from absl import flags
from perfkitbenchmarker import resource
# List of memory store types
REDIS = 'REDIS'
MEMCACHED = 'MEMCACHED'
FLAGS = flags.FLAGS
class Failover(object):
"""Enum for redis failover options."""
FAILOVER_NONE = 'failover_none'
FAILOVER_SAME_ZONE = 'failover_same_zone'
FAILOVER_SAME_REGION = 'failover_same_region'
flags.DEFINE_enum(
'redis_failover_style',
Failover.FAILOVER_NONE,
[Failover.FAILOVER_NONE,
Failover.FAILOVER_SAME_ZONE,
Failover.FAILOVER_SAME_REGION],
'Failover behavior of cloud redis cluster. Acceptable values are:'
'failover_none, failover_same_zone, and failover_same_region')
# List of redis versions
REDIS_3_2 = 'redis_3_2'
REDIS_4_0 = 'redis_4_0'
REDIS_VERSIONS = [REDIS_3_2, REDIS_4_0]
flags.DEFINE_string('managed_memory_store_version',
None,
'The version of managed memory store to use. This flag '
                    'overrides Redis or Memcached version defaults that are set '
'in benchmark config. Defaults to None so that benchmark '
'config defaults are used.')
MEMCACHED_NODE_COUNT = 1
def GetManagedMemoryStoreClass(cloud, memory_store):
"""Gets the cloud managed memory store class corresponding to 'cloud'.
Args:
cloud: String. Name of cloud to get the class for.
memory_store: String. Type of memory store to get the class for.
Returns:
Implementation class corresponding to the argument cloud
Raises:
Exception: An invalid cloud was provided
"""
return resource.GetResourceClass(BaseManagedMemoryStore,
CLOUD=cloud,
MEMORY_STORE=memory_store)
class BaseManagedMemoryStore(resource.BaseResource):
"""Object representing a cloud managed memory store."""
REQUIRED_ATTRS = ['CLOUD', 'MEMORY_STORE']
RESOURCE_TYPE = 'BaseManagedMemoryStore'
def __init__(self, spec):
"""Initialize the cloud managed memory store object.
Args:
spec: spec of the managed memory store.
"""
super(BaseManagedMemoryStore, self).__init__()
self.spec = spec
self.name = 'pkb-%s' % FLAGS.run_uri
self._ip = None
self._port = None
self._password = None
def GetMemoryStoreIp(self):
"""Returns the Ip address of the managed memory store."""
if not self._ip:
self._PopulateEndpoint()
return self._ip
def GetMemoryStorePort(self):
"""Returns the port number of the managed memory store."""
if not self._port:
self._PopulateEndpoint()
return self._port
@abc.abstractmethod
def _PopulateEndpoint(self):
"""Populates the endpoint information for the managed memory store."""
raise NotImplementedError()
def GetMemoryStorePassword(self):
"""Returns the access password of the managed memory store, if any."""
return self._password
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from libvirtkvm import LibvirtKVMCollector
###############################################################################
class TestLibvirtKVMCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('LibvirtKVMCollector', {
})
self.collector = LibvirtKVMCollector(config, None)
def test_import(self):
self.assertTrue(LibvirtKVMCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
from typing import Any, Dict
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TIME_MILLISECONDS
from homeassistant.helpers.typing import HomeAssistantType
from . import MinecraftServer, MinecraftServerEntity
from .const import (
ATTR_PLAYERS_LIST,
DOMAIN,
ICON_LATENCY_TIME,
ICON_PLAYERS_MAX,
ICON_PLAYERS_ONLINE,
ICON_PROTOCOL_VERSION,
ICON_VERSION,
NAME_LATENCY_TIME,
NAME_PLAYERS_MAX,
NAME_PLAYERS_ONLINE,
NAME_PROTOCOL_VERSION,
NAME_VERSION,
UNIT_PLAYERS_MAX,
UNIT_PLAYERS_ONLINE,
UNIT_PROTOCOL_VERSION,
UNIT_VERSION,
)
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Minecraft Server sensor platform."""
server = hass.data[DOMAIN][config_entry.unique_id]
# Create entities list.
entities = [
MinecraftServerVersionSensor(server),
MinecraftServerProtocolVersionSensor(server),
MinecraftServerLatencyTimeSensor(server),
MinecraftServerPlayersOnlineSensor(server),
MinecraftServerPlayersMaxSensor(server),
]
# Add sensor entities.
async_add_entities(entities, True)
class MinecraftServerSensorEntity(MinecraftServerEntity):
"""Representation of a Minecraft Server sensor base entity."""
def __init__(
self,
server: MinecraftServer,
type_name: str,
icon: str = None,
unit: str = None,
device_class: str = None,
) -> None:
"""Initialize sensor base entity."""
super().__init__(server, type_name, icon, device_class)
self._state = None
self._unit = unit
@property
def available(self) -> bool:
"""Return sensor availability."""
return self._server.online
@property
def state(self) -> Any:
"""Return sensor state."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return sensor measurement unit."""
return self._unit
class MinecraftServerVersionSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server version sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize version sensor."""
super().__init__(
server=server, type_name=NAME_VERSION, icon=ICON_VERSION, unit=UNIT_VERSION
)
async def async_update(self) -> None:
"""Update version."""
self._state = self._server.version
class MinecraftServerProtocolVersionSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server protocol version sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize protocol version sensor."""
super().__init__(
server=server,
type_name=NAME_PROTOCOL_VERSION,
icon=ICON_PROTOCOL_VERSION,
unit=UNIT_PROTOCOL_VERSION,
)
async def async_update(self) -> None:
"""Update protocol version."""
self._state = self._server.protocol_version
class MinecraftServerLatencyTimeSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server latency time sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize latency time sensor."""
super().__init__(
server=server,
type_name=NAME_LATENCY_TIME,
icon=ICON_LATENCY_TIME,
unit=TIME_MILLISECONDS,
)
async def async_update(self) -> None:
"""Update latency time."""
self._state = self._server.latency_time
class MinecraftServerPlayersOnlineSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server online players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize online players sensor."""
super().__init__(
server=server,
type_name=NAME_PLAYERS_ONLINE,
icon=ICON_PLAYERS_ONLINE,
unit=UNIT_PLAYERS_ONLINE,
)
async def async_update(self) -> None:
"""Update online players state and device state attributes."""
self._state = self._server.players_online
device_state_attributes = None
players_list = self._server.players_list
if players_list is not None:
if len(players_list) != 0:
device_state_attributes = {ATTR_PLAYERS_LIST: self._server.players_list}
self._device_state_attributes = device_state_attributes
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return players list in device state attributes."""
return self._device_state_attributes
class MinecraftServerPlayersMaxSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server maximum number of players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize maximum number of players sensor."""
super().__init__(
server=server,
type_name=NAME_PLAYERS_MAX,
icon=ICON_PLAYERS_MAX,
unit=UNIT_PLAYERS_MAX,
)
async def async_update(self) -> None:
"""Update maximum number of players."""
self._state = self._server.players_max
|
import ctypes
import numpy as np
try:
from elephas.java import java_classes
except ImportError:
pass
def get_context_dtype():
"""Returns the nd4j dtype
"""
dtype = java_classes.DataTypeUtil.getDtypeFromContext()
return java_classes.DataTypeUtil.getDTypeForName(dtype)
def to_numpy(nd4j_array):
""" Convert an ND4J array to a numpy array
:param nd4j_array:
:return:
"""
buff = nd4j_array.data()
address = buff.pointer().address()
type_name = java_classes.DataTypeUtil.getDtypeFromContext()
data_type = java_classes.DataTypeUtil.getDTypeForName(type_name)
mapping = {
'double': ctypes.c_double,
'float': ctypes.c_float
}
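    # Wrap the off-heap JVM buffer as a numpy array without copying: cast the raw
    # buffer address to a ctypes pointer of the matching element type.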
Pointer = ctypes.POINTER(mapping[data_type])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
def retrieve_keras_weights(java_model):
"""For a previously imported Keras model, after training it with DL4J Spark,
we want to set the resulting weights back to the original Keras model.
    :param java_model: DL4J model (MultiLayerNetwork or ComputationGraph)
:return: list of numpy arrays in correct order for model.set_weights(...) of a corresponding Keras model
"""
weights = []
layers = java_model.getLayers()
for layer in layers:
params = layer.paramTable()
keys = params.keySet()
key_list = java_classes.ArrayList(keys)
for key in key_list:
weight = params.get(key)
np_weight = np.squeeze(to_numpy(weight))
weights.append(np_weight)
return weights
|
from __future__ import absolute_import
from pyspark.mllib.linalg import Matrices, Vectors
def from_matrix(matrix):
"""Convert MLlib Matrix to numpy array """
return matrix.toArray()
def to_matrix(np_array):
"""Convert numpy array to MLlib Matrix
"""
if len(np_array.shape) == 2:
return Matrices.dense(np_array.shape[0],
np_array.shape[1],
np_array.ravel())
else:
raise Exception("An MLLib Matrix can only be created from a two-dimensional " +
"numpy array, got {}".format(len(np_array.shape)))
def from_vector(vector):
"""Convert MLlib Vector to numpy array
"""
return vector.toArray()
def to_vector(np_array):
"""Convert numpy array to MLlib Vector
"""
if len(np_array.shape) == 1:
return Vectors.dense(np_array)
else:
raise Exception("An MLLib Vector can only be created from a one-dimensional " +
"numpy array, got {}".format(len(np_array.shape)))
|
import warnings
from distutils.version import LooseVersion
from typing import Iterable
import numpy as np
try:
import dask.array as da
from dask import __version__ as dask_version
except ImportError:
dask_version = "0.0.0"
da = None
def _validate_pad_output_shape(input_shape, pad_width, output_shape):
"""Validates the output shape of dask.array.pad, raising a RuntimeError if they do not match.
In the current versions of dask (2.2/2.4), dask.array.pad with mode='reflect' sometimes returns
an invalid shape.
"""
isint = lambda i: isinstance(i, int)
if isint(pad_width):
pass
elif len(pad_width) == 2 and all(map(isint, pad_width)):
pad_width = sum(pad_width)
elif (
len(pad_width) == len(input_shape)
and all(map(lambda x: len(x) == 2, pad_width))
and all(isint(i) for p in pad_width for i in p)
):
pad_width = np.sum(pad_width, axis=1)
else:
# unreachable: dask.array.pad should already have thrown an error
raise ValueError("Invalid value for `pad_width`")
if not np.array_equal(np.array(input_shape) + pad_width, output_shape):
raise RuntimeError(
"There seems to be something wrong with the shape of the output of dask.array.pad, "
"try upgrading Dask, use a different pad mode e.g. mode='constant' or first convert "
"your DataArray/Dataset to one backed by a numpy array by calling the `compute()` method."
"See: https://github.com/dask/dask/issues/5303"
)
def pad(array, pad_width, mode="constant", **kwargs):
padded = da.pad(array, pad_width, mode=mode, **kwargs)
# workaround for inconsistency between numpy and dask: https://github.com/dask/dask/issues/5303
if mode == "mean" and issubclass(array.dtype.type, np.integer):
warnings.warn(
'dask.array.pad(mode="mean") converts integers to floats. xarray converts '
"these floats back to integers to keep the interface consistent. There is a chance that "
"this introduces rounding errors. If you wish to keep the values as floats, first change "
"the dtype to a float before calling pad.",
UserWarning,
)
return da.round(padded).astype(array.dtype)
_validate_pad_output_shape(array.shape, pad_width, padded.shape)
return padded
if LooseVersion(dask_version) > LooseVersion("2.9.0"):
nanmedian = da.nanmedian
else:
def nanmedian(a, axis=None, keepdims=False):
"""
This works by automatically chunking the reduced axes to a single chunk
and then calling ``numpy.nanmedian`` function across the remaining dimensions
"""
if axis is None:
raise NotImplementedError(
"The da.nanmedian function only works along an axis. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
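        # Collapse each reduced axis into a single chunk so numpy.nanmedian can be
        # applied blockwise over the remaining dimensions.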
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = da.map_blocks(
np.nanmedian,
a,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
return result
|
import functools as ft
from homeassistant.const import (
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
EVENT_TIME_CHANGED,
)
from homeassistant.core import Event, callback as async_callback
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
_KEY_INSTANCE = "configurator"
DATA_REQUESTS = "configurator_requests"
ATTR_CONFIGURE_ID = "configure_id"
ATTR_DESCRIPTION = "description"
ATTR_DESCRIPTION_IMAGE = "description_image"
ATTR_ERRORS = "errors"
ATTR_FIELDS = "fields"
ATTR_LINK_NAME = "link_name"
ATTR_LINK_URL = "link_url"
ATTR_SUBMIT_CAPTION = "submit_caption"
DOMAIN = "configurator"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SERVICE_CONFIGURE = "configure"
STATE_CONFIGURE = "configure"
STATE_CONFIGURED = "configured"
@bind_hass
@async_callback
def async_request_config(
hass,
name,
callback=None,
description=None,
description_image=None,
submit_caption=None,
fields=None,
link_name=None,
link_url=None,
entity_picture=None,
):
"""Create a new request for configuration.
    Will return an ID to be used for subsequent calls.
"""
if link_name is not None and link_url is not None:
description += f"\n\n[{link_name}]({link_url})"
if description_image is not None:
        description += f"\n\n![Description image]({description_image})"
instance = hass.data.get(_KEY_INSTANCE)
if instance is None:
instance = hass.data[_KEY_INSTANCE] = Configurator(hass)
request_id = instance.async_request_config(
name, callback, description, submit_caption, fields, entity_picture
)
if DATA_REQUESTS not in hass.data:
hass.data[DATA_REQUESTS] = {}
hass.data[DATA_REQUESTS][request_id] = instance
return request_id
@bind_hass
def request_config(hass, *args, **kwargs):
"""Create a new request for configuration.
    Will return an ID to be used for subsequent calls.
"""
return run_callback_threadsafe(
hass.loop, ft.partial(async_request_config, hass, *args, **kwargs)
).result()
@bind_hass
@async_callback
def async_notify_errors(hass, request_id, error):
"""Add errors to a config request."""
try:
hass.data[DATA_REQUESTS][request_id].async_notify_errors(request_id, error)
except KeyError:
# If request_id does not exist
pass
@bind_hass
def notify_errors(hass, request_id, error):
"""Add errors to a config request."""
return run_callback_threadsafe(
hass.loop, async_notify_errors, hass, request_id, error
).result()
@bind_hass
@async_callback
def async_request_done(hass, request_id):
"""Mark a configuration request as done."""
try:
hass.data[DATA_REQUESTS].pop(request_id).async_request_done(request_id)
except KeyError:
# If request_id does not exist
pass
@bind_hass
def request_done(hass, request_id):
"""Mark a configuration request as done."""
return run_callback_threadsafe(
hass.loop, async_request_done, hass, request_id
).result()
async def async_setup(hass, config):
"""Set up the configurator component."""
return True
class Configurator:
"""The class to keep track of current configuration requests."""
def __init__(self, hass):
"""Initialize the configurator."""
self.hass = hass
self._cur_id = 0
self._requests = {}
hass.services.async_register(
DOMAIN, SERVICE_CONFIGURE, self.async_handle_service_call
)
@async_callback
def async_request_config(
self, name, callback, description, submit_caption, fields, entity_picture
):
"""Set up a request for configuration."""
entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, name, hass=self.hass)
if fields is None:
fields = []
request_id = self._generate_unique_id()
self._requests[request_id] = (entity_id, fields, callback)
data = {
ATTR_CONFIGURE_ID: request_id,
ATTR_FIELDS: fields,
ATTR_FRIENDLY_NAME: name,
ATTR_ENTITY_PICTURE: entity_picture,
}
data.update(
{
key: value
for key, value in [
(ATTR_DESCRIPTION, description),
(ATTR_SUBMIT_CAPTION, submit_caption),
]
if value is not None
}
)
self.hass.states.async_set(entity_id, STATE_CONFIGURE, data)
return request_id
@async_callback
def async_notify_errors(self, request_id, error):
"""Update the state with errors."""
if not self._validate_request_id(request_id):
return
entity_id = self._requests[request_id][0]
state = self.hass.states.get(entity_id)
new_data = dict(state.attributes)
new_data[ATTR_ERRORS] = error
self.hass.states.async_set(entity_id, STATE_CONFIGURE, new_data)
@async_callback
def async_request_done(self, request_id):
"""Remove the configuration request."""
if not self._validate_request_id(request_id):
return
entity_id = self._requests.pop(request_id)[0]
# If we remove the state right away, it will not be included with
# the result of the service call (current design limitation).
# Instead, we will set it to configured to give as feedback but delete
# it shortly after so that it is deleted when the client updates.
self.hass.states.async_set(entity_id, STATE_CONFIGURED)
def deferred_remove(event: Event):
"""Remove the request state."""
self.hass.states.async_remove(entity_id, context=event.context)
self.hass.bus.async_listen_once(EVENT_TIME_CHANGED, deferred_remove)
async def async_handle_service_call(self, call):
"""Handle a configure service call."""
request_id = call.data.get(ATTR_CONFIGURE_ID)
if not self._validate_request_id(request_id):
return
# pylint: disable=unused-variable
entity_id, fields, callback = self._requests[request_id]
# field validation goes here?
if callback:
await self.hass.async_add_job(callback, call.data.get(ATTR_FIELDS, {}))
def _generate_unique_id(self):
"""Generate a unique configurator ID."""
self._cur_id += 1
return f"{id(self)}-{self._cur_id}"
def _validate_request_id(self, request_id):
"""Validate that the request belongs to this instance."""
return request_id in self._requests
|
from homeassistant.helpers.entity import ToggleEntity
from . import DATA_KEY, VolvoEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Volvo switch."""
if discovery_info is None:
return
async_add_entities([VolvoSwitch(hass.data[DATA_KEY], *discovery_info)])
class VolvoSwitch(VolvoEntity, ToggleEntity):
"""Representation of a Volvo switch."""
@property
def is_on(self):
"""Return true if switch is on."""
return self.instrument.state
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
await self.instrument.turn_on()
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
await self.instrument.turn_off()
self.async_write_ha_state()
|
import os
import sys
import threading
import weakref
import ctypes
from collections import OrderedDict
from .shcommon import M_64, _SYS_STDOUT, python_capi
_STATE_STR_TEMPLATE = """enclosed_cwd: {}
aliases: {}
sys.stdin: {}
sys.stdout: {}
sys.stderr: {}
temporary_environ: {}
environ: {}
"""
class ShState(object):
""" State of the current worker thread
"""
def __init__(
self,
aliases=None,
environ=None,
enclosed_cwd=None,
sys_stdin=None,
sys_stdout=None,
sys_stderr=None,
sys_path=None
):
self.aliases = aliases or {}
self.environ = environ or {}
self.enclosed_cwd = enclosed_cwd
self.sys_stdin__ = self.sys_stdin = sys_stdin or sys.stdin
self.sys_stdout__ = self.sys_stdout = sys_stdout or sys.stdout
self.sys_stderr__ = self.sys_stderr = sys_stderr or sys.stderr
self.sys_path = sys_path or sys.path[:]
self.temporary_environ = {}
self.enclosing_aliases = None
self.enclosing_environ = None
self.enclosing_cwd = None
def __str__(self):
s = _STATE_STR_TEMPLATE.format(
self.enclosed_cwd,
self.aliases,
self.sys_stdin,
self.sys_stdout,
self.sys_stderr,
self.temporary_environ,
self.environ
)
return s
@property
def return_value(self):
return self.environ.get('?', 0)
@return_value.setter
def return_value(self, value):
self.environ['?'] = value
def environ_get(self, name):
return self.environ[name]
def environ_set(self, name, value):
self.environ[name] = value
def persist_child(self, child_state, persistent_level=0):
"""
This is used to carry child shell state to its parent shell
:param ShState child_state: Child state
"""
if persistent_level == 0:
# restore old state
if os.getcwd() != child_state.enclosed_cwd:
os.chdir(child_state.enclosed_cwd)
# TODO: return status?
elif persistent_level == 1:
# update state
self.aliases = dict(child_state.aliases)
self.enclosing_aliases = child_state.aliases
self.enclosed_cwd = self.enclosing_cwd = os.getcwd()
self.environ = dict(child_state.environ)
self.enclosing_environ = child_state.environ
self.sys_path = child_state.sys_path[:]
elif persistent_level == 2:
# ensure future children will have child state
self.enclosing_aliases = child_state.aliases
self.enclosing_environ = child_state.environ
self.enclosing_cwd = os.getcwd()
# TODO: return status?
if self.enclosed_cwd is not None:
os.chdir(self.enclosed_cwd)
@staticmethod
def new_from_parent(parent_state):
"""
        Create new state from parent state. The parent's enclosing environ is merged
        into the child's environ.
:param ShState parent_state: Parent state
:return:
"""
if parent_state.enclosing_aliases:
aliases = parent_state.enclosing_aliases
else:
aliases = dict(parent_state.aliases)
if parent_state.enclosing_environ:
environ = parent_state.enclosing_environ
else:
environ = dict(parent_state.environ)
environ.update(parent_state.temporary_environ)
if parent_state.enclosing_cwd:
os.chdir(parent_state.enclosing_cwd)
return ShState(
aliases=aliases,
environ=environ,
enclosed_cwd=os.getcwd(),
sys_stdin=parent_state.sys_stdin__,
sys_stdout=parent_state.sys_stdout__,
sys_stderr=parent_state.sys_stderr__,
sys_path=parent_state.sys_path[:]
)
class ShWorkerRegistry(object):
""" Bookkeeping for all worker threads (both foreground and background).
This is useful to provide an overview of all running threads.
"""
def __init__(self):
self.registry = OrderedDict()
self._count = 1
self._lock = threading.Lock()
def __repr__(self):
ret = []
for job_id, thread in self.registry.items():
ret.append('{:>5d} {}'.format(job_id, thread))
return '\n'.join(ret)
def __iter__(self):
return self.registry.values().__iter__()
def __len__(self):
return len(self.registry)
def __contains__(self, item):
return item in self.registry
def _get_job_id(self):
try:
self._lock.acquire()
job_id = self._count
self._count += 1
return job_id
finally:
self._lock.release()
def add_worker(self, worker):
worker.job_id = self._get_job_id()
self.registry[worker.job_id] = worker
def remove_worker(self, worker):
self.registry.pop(worker.job_id)
def get_worker(self, job_id):
return self.registry.get(job_id, None)
def get_first_bg_worker(self):
for worker in self.registry.values():
if worker.is_background:
return worker
else:
return None
def purge(self):
"""
        Kill all registered threads and clear the entire registry.
:return:
"""
for worker in self.registry.values():
worker.kill()
# The worker removes itself from the registry when killed.
class ShBaseThread(threading.Thread):
""" The basic Thread class provides life cycle management.
"""
CREATED = 1
STARTED = 2
STOPPED = 3
def __init__(self, registry, parent, command, target=None, is_background=False, environ={}, cwd=None):
super(ShBaseThread, self).__init__(group=None, target=target, name='_shthread', args=(), kwargs=None)
# Registry management
self.registry = weakref.proxy(registry)
self.job_id = None # to be set by the registry
registry.add_worker(self)
# The command that the thread runs
if command.__class__.__name__ == 'ShIO':
self.command = ''.join(command._buffer)[::-1].strip()
else:
self.command = command
self.parent = weakref.proxy(parent)
# Set up the state based on parent's state
self.state = ShState.new_from_parent(parent.state)
self.state.environ.update(environ)
if cwd is not None:
self.state.enclosed_cwd = cwd
os.chdir(cwd)
self.killed = False
self.killer = 0
self.child_thread = None
self.set_background(is_background)
def __repr__(self):
command_str = str(self.command)
return '[{}] {} {}'.format(
self.job_id,
{
self.CREATED: 'Created',
self.STARTED: 'Started',
self.STOPPED: 'Stopped'
}[self.status()],
command_str[:20] + ('...' if len(command_str) > 20 else '')
)
def status(self):
"""
Status of the thread. Created, Started or Stopped.
"""
# STATES
# isAlive() | self.ident | Meaning
# ----------+-------------+--------
# False | None | created
# False | not None | stopped
# True | None | impossible
# True | not None | running
        if self.is_alive():
            return self.STARTED
        elif self.ident is not None:
            return self.STOPPED
        else:
            return self.CREATED
def set_background(self, is_background=True):
self.is_background = is_background
if is_background:
if self.parent.child_thread is self:
self.parent.child_thread = None
else:
assert self.parent.child_thread is None, 'parent must have no existing child thread'
self.parent.child_thread = self
def is_top_level(self):
"""
Whether or not the thread is directly under the runtime, aka top level.
A top level thread has the runtime as its parent
"""
return not isinstance(self.parent, ShBaseThread) and not self.is_background
def cleanup(self):
"""
        End-of-life-cycle management: remove this thread from the registry and
        unlink it from its parent, if any.
"""
self.registry.remove_worker(self)
if not self.is_background:
assert self.parent.child_thread is self
self.parent.child_thread = None
def on_kill(self):
"""
        This should be called when the thread is killed.
Calling this method will set self.killer to the job_id of the current Thread.
"""
ct = threading.current_thread()
if not isinstance(ct, ShBaseThread):
self.killer = 0
else:
self.killer = ct.job_id
# noinspection PyAttributeOutsideInit
class ShTracedThread(ShBaseThread):
""" Killable thread implementation with trace """
def __init__(self, registry, parent, command, target=None, is_background=False, environ={}, cwd=None):
super(ShTracedThread,
self).__init__(
registry,
parent,
command,
target=target,
is_background=is_background,
environ=environ,
cwd=cwd
)
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
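    # The trace hooks below implement kill(): once self.killed is set, the next
    # traced line raises KeyboardInterrupt inside the target thread.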
def globaltrace(self, frame, why, arg):
return self.localtrace if why == 'call' else None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
if self.child_thread:
self.child_thread.kill()
raise KeyboardInterrupt()
return self.localtrace
def kill(self):
if not self.killed:
self.killed = True
self.on_kill()
class ShCtypesThread(ShBaseThread):
"""
A thread class that supports raising exception in the thread from
another thread (with ctypes).
"""
def __init__(self, registry, parent, command, target=None, is_background=False, environ={}, cwd=None):
super(ShCtypesThread,
self).__init__(
registry,
parent,
command,
target=target,
is_background=is_background,
environ=environ,
cwd=cwd
)
def _async_raise(self):
tid = self.ident
res = python_capi.PyThreadState_SetAsyncExc(ctypes.c_long(tid) if M_64 else tid, ctypes.py_object(KeyboardInterrupt))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
python_capi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
return res
def kill(self):
if not self.killed:
self.killed = True
if self.child_thread:
self.child_thread.kill()
try:
res = self._async_raise()
except (ValueError, SystemError):
self.killed = False
else:
self.on_kill()
|
import unittest
import numpy as np
from pgmpy.factors.distributions import GaussianDistribution as JGD
from pgmpy.sampling import (
HamiltonianMC as HMC,
HamiltonianMCDA as HMCda,
GradLogPDFGaussian,
NoUTurnSampler as NUTS,
NoUTurnSamplerDA as NUTSda,
)
class TestHMCInference(unittest.TestCase):
def setUp(self):
mean = [-1, 1, -1]
covariance = np.array([[3, 0.8, 0.2], [0.8, 2, 0.3], [0.2, 0.3, 1]])
self.test_model = JGD(["x", "y", "z"], mean, covariance)
self.hmc_sampler = HMCda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian)
def test_errors(self):
with self.assertRaises(TypeError):
HMCda(model=self.test_model, grad_log_pdf=1)
with self.assertRaises(TypeError):
HMCda(
model=self.test_model,
grad_log_pdf=GradLogPDFGaussian,
simulate_dynamics=1,
)
with self.assertRaises(ValueError):
HMCda(model=self.test_model, delta=-1)
with self.assertRaises(TypeError):
self.hmc_sampler.sample(
initial_pos=1, num_adapt=1, num_samples=1, trajectory_length=1
)
with self.assertRaises(TypeError):
self.hmc_sampler.generate_sample(1, 1, 1, 1).send(None)
with self.assertRaises(TypeError):
HMC(model=self.test_model).sample(
initial_pos=1, num_samples=1, trajectory_length=1
)
with self.assertRaises(TypeError):
HMC(model=self.test_model).generate_sample(1, 1, 1).send(None)
def test_acceptance_prob(self):
acceptance_probability = self.hmc_sampler._acceptance_prob(
np.array([1, 2, 3]),
np.array([2, 3, 4]),
np.array([1, -1, 1]),
np.array([0, 0, 0]),
)
np.testing.assert_almost_equal(acceptance_probability, 0.0347363)
    def test_find_reasonable_stepsize(self):
np.random.seed(987654321)
stepsize = self.hmc_sampler._find_reasonable_stepsize(np.array([-1, 1, -1]))
np.testing.assert_almost_equal(stepsize, 2.0)
def test_adapt_params(self):
stepsize, stepsize_bar, h_bar = self.hmc_sampler._adapt_params(
0.0025, 1, 1, np.log(0.025), 2, 1
)
np.testing.assert_almost_equal(stepsize, 3.13439452e-13)
np.testing.assert_almost_equal(stepsize_bar, 3.6742481e-08)
np.testing.assert_almost_equal(h_bar, 0.8875)
def test_sample(self):
# Seeding is done for _find_reasonable_stepsize method
        # Testing sample method of simple HMC
np.random.seed(3124141)
samples = self.hmc_sampler.sample(
initial_pos=[0.3, 0.4, 0.2],
num_adapt=0,
num_samples=10000,
trajectory_length=4,
)
covariance = np.cov(samples.values.T)
self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)
        # Testing sample method of HMCda
np.random.seed(3124141)
samples = self.hmc_sampler.sample(
initial_pos=[0.6, 0.2, 0.8],
num_adapt=10000,
num_samples=10000,
trajectory_length=4,
)
covariance = np.cov(samples.values.T)
self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)
# Testing generate_sample method of simple HMC
np.random.seed(3124141)
gen_samples = self.hmc_sampler.generate_sample(
initial_pos=[0.3, 0.4, 0.2],
num_adapt=0,
num_samples=10000,
trajectory_length=4,
)
samples = np.array([sample for sample in gen_samples])
covariance = np.cov(samples.T)
self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)
        # Testing sample method of HMCda
np.random.seed(3124141)
gen_samples = self.hmc_sampler.generate_sample(
initial_pos=[0.6, 0.2, 0.8],
num_adapt=10000,
num_samples=10000,
trajectory_length=4,
)
samples = np.array([sample for sample in gen_samples])
covariance = np.cov(samples.T)
self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)
def tearDown(self):
del self.hmc_sampler
del self.test_model
class TestNUTSInference(unittest.TestCase):
def setUp(self):
mean = np.array([-1, 1, 0])
covariance = np.array([[6, 0.7, 0.2], [0.7, 3, 0.9], [0.2, 0.9, 1]])
self.test_model = JGD(["x", "y", "z"], mean, covariance)
self.nuts_sampler = NUTSda(
model=self.test_model, grad_log_pdf=GradLogPDFGaussian
)
def test_errors(self):
with self.assertRaises(TypeError):
NUTS(model=self.test_model, grad_log_pdf=JGD)
with self.assertRaises(TypeError):
NUTS(
model=self.test_model,
grad_log_pdf=None,
simulate_dynamics=GradLogPDFGaussian,
)
with self.assertRaises(ValueError):
NUTSda(model=self.test_model, delta=-0.2, grad_log_pdf=None)
with self.assertRaises(ValueError):
NUTSda(model=self.test_model, delta=1.1, grad_log_pdf=GradLogPDFGaussian)
with self.assertRaises(TypeError):
NUTS(self.test_model, GradLogPDFGaussian).sample(
initial_pos={1, 1, 1}, num_samples=1
)
with self.assertRaises(ValueError):
NUTS(self.test_model, GradLogPDFGaussian).sample(
initial_pos=[1, 1], num_samples=1
)
with self.assertRaises(TypeError):
NUTSda(self.test_model, GradLogPDFGaussian).sample(
initial_pos=1, num_samples=1, num_adapt=1
)
with self.assertRaises(ValueError):
NUTSda(self.test_model, GradLogPDFGaussian).sample(
initial_pos=[1, 1, 1, 1], num_samples=1, num_adapt=1
)
with self.assertRaises(TypeError):
NUTS(self.test_model, GradLogPDFGaussian).generate_sample(
initial_pos=0.1, num_samples=1
).send(None)
with self.assertRaises(ValueError):
NUTS(self.test_model, GradLogPDFGaussian).generate_sample(
initial_pos=(0, 1, 1, 1), num_samples=1
).send(None)
with self.assertRaises(TypeError):
NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(
initial_pos=[[1, 2, 3]], num_samples=1, num_adapt=1
).send(None)
with self.assertRaises(ValueError):
NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(
initial_pos=[1], num_samples=1, num_adapt=1
).send(None)
def test_sampling(self):
np.random.seed(1010101)
samples = self.nuts_sampler.sample(
initial_pos=[-0.4, 1, 3.6],
num_adapt=0,
num_samples=10000,
return_type="recarray",
)
sample_array = np.array(
[samples[var_name] for var_name in self.test_model.variables]
)
sample_covariance = np.cov(sample_array)
self.assertTrue(
np.linalg.norm(sample_covariance - self.test_model.covariance) < 3
)
np.random.seed(1210161)
samples = self.nuts_sampler.generate_sample(
initial_pos=[-0.4, 1, 3.6], num_adapt=0, num_samples=10000
)
samples_array = np.array([sample for sample in samples])
sample_covariance = np.cov(samples_array.T)
self.assertTrue(
np.linalg.norm(sample_covariance - self.test_model.covariance) < 3
)
np.random.seed(12313131)
samples = self.nuts_sampler.sample(
initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=10000
)
sample_covariance = np.cov(samples.values.T)
self.assertTrue(
np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4
)
np.random.seed(921312312)
samples = self.nuts_sampler.generate_sample(
initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=30000
)
samples_array = np.array([sample for sample in samples])
sample_covariance = np.cov(samples_array.T)
self.assertTrue(
np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4
)
def tearDown(self):
del self.test_model
del self.nuts_sampler
|
from collections import OrderedDict
from ..fixes import _get_args
from ..utils import _check_fname, logger
def what(fname):
"""Try to determine the type of the FIF file.
Parameters
----------
fname : str
The filename. Should end in .fif or .fif.gz.
Returns
-------
what : str | None
The type of the file. Will be 'unknown' if it could not be determined.
Notes
-----
.. versionadded:: 0.19
"""
from .fiff import read_raw_fif
from ..epochs import read_epochs
from ..evoked import read_evokeds
from ..preprocessing import read_ica
from ..forward import read_forward_solution
from ..minimum_norm import read_inverse_operator
from ..source_space import read_source_spaces
from ..bem import read_bem_solution, read_bem_surfaces
from ..cov import read_cov
from ..transforms import read_trans
from ..event import read_events
from ..proj import read_proj
from .meas_info import read_fiducials
_check_fname(fname, overwrite='read', must_exist=True)
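    # Try each reader in turn; the first one that parses the file without raising
    # determines the reported type.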
checks = OrderedDict()
checks['raw'] = read_raw_fif
checks['ica'] = read_ica
checks['epochs'] = read_epochs
checks['evoked'] = read_evokeds
checks['forward'] = read_forward_solution
checks['inverse'] = read_inverse_operator
checks['src'] = read_source_spaces
checks['bem solution'] = read_bem_solution
checks['bem surfaces'] = read_bem_surfaces
checks['cov'] = read_cov
checks['transform'] = read_trans
checks['events'] = read_events
checks['fiducials'] = read_fiducials
checks['proj'] = read_proj
for what, func in checks.items():
args = _get_args(func)
assert 'verbose' in args, func
kwargs = dict(verbose='error')
if 'preload' in args:
kwargs['preload'] = False
try:
func(fname, **kwargs)
except Exception as exp:
logger.debug('Not %s: %s' % (what, exp))
else:
return what
return 'unknown'
|
import concurrent
import json
import logging
import time
from pathlib import Path
from types import SimpleNamespace
from typing import TYPE_CHECKING, List, Union
import lavalink
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.commands import Cog
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from redbot.core.utils.dbtools import APSWConnectionWrapper
from ..audio_logging import debug_exc_log
from ..sql_statements import (
PERSIST_QUEUE_BULK_PLAYED,
PERSIST_QUEUE_CREATE_INDEX,
PERSIST_QUEUE_CREATE_TABLE,
PERSIST_QUEUE_DELETE_SCHEDULED,
PERSIST_QUEUE_DROP_TABLE,
PERSIST_QUEUE_FETCH_ALL,
PERSIST_QUEUE_PLAYED,
PERSIST_QUEUE_UPSERT,
PRAGMA_FETCH_user_version,
PRAGMA_SET_journal_mode,
PRAGMA_SET_read_uncommitted,
PRAGMA_SET_temp_store,
PRAGMA_SET_user_version,
)
from .api_utils import QueueFetchResult
log = logging.getLogger("red.cogs.Audio.api.PersistQueueWrapper")
_ = Translator("Audio", Path(__file__))
if TYPE_CHECKING:
from .. import Audio
class QueueInterface:
def __init__(
self, bot: Red, config: Config, conn: APSWConnectionWrapper, cog: Union["Audio", Cog]
):
self.bot = bot
self.database = conn
self.config = config
self.cog = cog
self.statement = SimpleNamespace()
self.statement.pragma_temp_store = PRAGMA_SET_temp_store
self.statement.pragma_journal_mode = PRAGMA_SET_journal_mode
self.statement.pragma_read_uncommitted = PRAGMA_SET_read_uncommitted
self.statement.set_user_version = PRAGMA_SET_user_version
self.statement.get_user_version = PRAGMA_FETCH_user_version
self.statement.create_table = PERSIST_QUEUE_CREATE_TABLE
self.statement.create_index = PERSIST_QUEUE_CREATE_INDEX
self.statement.upsert = PERSIST_QUEUE_UPSERT
self.statement.update_bulk_player = PERSIST_QUEUE_BULK_PLAYED
self.statement.delete_scheduled = PERSIST_QUEUE_DELETE_SCHEDULED
self.statement.drop_table = PERSIST_QUEUE_DROP_TABLE
self.statement.get_all = PERSIST_QUEUE_FETCH_ALL
self.statement.get_player = PERSIST_QUEUE_PLAYED
async def init(self) -> None:
"""Initialize the PersistQueue table"""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.database.cursor().execute, self.statement.pragma_temp_store)
executor.submit(self.database.cursor().execute, self.statement.pragma_journal_mode)
executor.submit(self.database.cursor().execute, self.statement.pragma_read_uncommitted)
executor.submit(self.database.cursor().execute, self.statement.create_table)
executor.submit(self.database.cursor().execute, self.statement.create_index)
async def fetch_all(self) -> List[QueueFetchResult]:
"""Fetch all playlists"""
output = []
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
for future in concurrent.futures.as_completed(
[
executor.submit(
self.database.cursor().execute,
self.statement.get_all,
)
]
):
try:
row_result = future.result()
except Exception as exc:
                    debug_exc_log(log, exc, "Failed to complete persisted queue fetch from database")
return []
async for index, row in AsyncIter(row_result).enumerate(start=1):
output.append(QueueFetchResult(*row))
return output
async def played(self, guild_id: int, track_id: str) -> None:
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute,
PERSIST_QUEUE_PLAYED,
{"guild_id": guild_id, "track_id": track_id},
)
async def delete_scheduled(self):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.database.cursor().execute, PERSIST_QUEUE_DELETE_SCHEDULED)
async def drop(self, guild_id: int):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute, PERSIST_QUEUE_BULK_PLAYED, ({"guild_id": guild_id})
)
async def enqueued(self, guild_id: int, room_id: int, track: lavalink.Track):
enqueue_time = track.extras.get("enqueue_time", 0)
if enqueue_time == 0:
track.extras["enqueue_time"] = int(time.time())
track_identifier = track.track_identifier
track = self.cog.track_to_json(track)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute,
PERSIST_QUEUE_UPSERT,
{
"guild_id": int(guild_id),
"room_id": int(room_id),
"played": False,
"time": enqueue_time,
"track": json.dumps(track),
"track_id": track_identifier,
},
)
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
BinarySensorEntity,
)
from homeassistant.const import STATE_OFF, STATE_ON
from . import ATTR_DISCOVER_DEVICES, EGARDIA_DEVICE
EGARDIA_TYPE_TO_DEVICE_CLASS = {
"IR Sensor": DEVICE_CLASS_MOTION,
"Door Contact": DEVICE_CLASS_OPENING,
"IR": DEVICE_CLASS_MOTION,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Initialize the platform."""
if discovery_info is None or discovery_info[ATTR_DISCOVER_DEVICES] is None:
return
disc_info = discovery_info[ATTR_DISCOVER_DEVICES]
async_add_entities(
(
EgardiaBinarySensor(
sensor_id=disc_info[sensor]["id"],
name=disc_info[sensor]["name"],
egardia_system=hass.data[EGARDIA_DEVICE],
device_class=EGARDIA_TYPE_TO_DEVICE_CLASS.get(
disc_info[sensor]["type"], None
),
)
for sensor in disc_info
),
True,
)
class EgardiaBinarySensor(BinarySensorEntity):
"""Represents a sensor based on an Egardia sensor (IR, Door Contact)."""
def __init__(self, sensor_id, name, egardia_system, device_class):
"""Initialize the sensor device."""
self._id = sensor_id
self._name = name
self._state = None
self._device_class = device_class
self._egardia_system = egardia_system
def update(self):
"""Update the status."""
egardia_input = self._egardia_system.getsensorstate(self._id)
self._state = STATE_ON if egardia_input else STATE_OFF
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def is_on(self):
"""Whether the device is switched on."""
return self._state == STATE_ON
@property
def device_class(self):
"""Return the device class."""
return self._device_class
|
from collections import namedtuple
import pytest
from homeassistant.components.arlo import DATA_ARLO, sensor as arlo
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
)
from tests.async_mock import patch
def _get_named_tuple(input_dict):
return namedtuple("Struct", input_dict.keys())(*input_dict.values())
def _get_sensor(name="Last", sensor_type="last_capture", data=None):
if data is None:
data = {}
return arlo.ArloSensor(name, data, sensor_type)
@pytest.fixture()
def default_sensor():
"""Create an ArloSensor with default values."""
return _get_sensor()
@pytest.fixture()
def battery_sensor():
"""Create an ArloSensor with battery data."""
data = _get_named_tuple({"battery_level": 50})
return _get_sensor("Battery Level", "battery_level", data)
@pytest.fixture()
def temperature_sensor():
"""Create a temperature ArloSensor."""
return _get_sensor("Temperature", "temperature")
@pytest.fixture()
def humidity_sensor():
"""Create a humidity ArloSensor."""
return _get_sensor("Humidity", "humidity")
@pytest.fixture()
def cameras_sensor():
"""Create a total cameras ArloSensor."""
data = _get_named_tuple({"cameras": [0, 0]})
return _get_sensor("Arlo Cameras", "total_cameras", data)
@pytest.fixture()
def captured_sensor():
"""Create a captured today ArloSensor."""
data = _get_named_tuple({"captured_today": [0, 0, 0, 0, 0]})
return _get_sensor("Captured Today", "captured_today", data)
class PlatformSetupFixture:
"""Fixture for testing platform setup call to add_entities()."""
def __init__(self):
"""Instantiate the platform setup fixture."""
self.sensors = None
self.update = False
def add_entities(self, sensors, update):
"""Mock method for adding devices."""
self.sensors = sensors
self.update = update
@pytest.fixture()
def platform_setup():
"""Create an instance of the PlatformSetupFixture class."""
return PlatformSetupFixture()
@pytest.fixture()
def sensor_with_hass_data(default_sensor, hass):
"""Create a sensor with async_dispatcher_connected mocked."""
hass.data = {}
default_sensor.hass = hass
return default_sensor
@pytest.fixture()
def mock_dispatch():
"""Mock the dispatcher connect method."""
target = "homeassistant.components.arlo.sensor.async_dispatcher_connect"
with patch(target) as _mock:
yield _mock
def test_setup_with_no_data(platform_setup, hass):
"""Test setup_platform with no data."""
arlo.setup_platform(hass, None, platform_setup.add_entities)
assert platform_setup.sensors is None
assert not platform_setup.update
def test_setup_with_valid_data(platform_setup, hass):
"""Test setup_platform with valid data."""
config = {
"monitored_conditions": [
"last_capture",
"total_cameras",
"captured_today",
"battery_level",
"signal_strength",
"temperature",
"humidity",
"air_quality",
]
}
hass.data[DATA_ARLO] = _get_named_tuple(
{
"cameras": [_get_named_tuple({"name": "Camera", "model_id": "ABC1000"})],
"base_stations": [
_get_named_tuple({"name": "Base Station", "model_id": "ABC1000"})
],
}
)
arlo.setup_platform(hass, config, platform_setup.add_entities)
assert len(platform_setup.sensors) == 8
assert platform_setup.update
def test_sensor_name(default_sensor):
"""Test the name property."""
assert default_sensor.name == "Last"
async def test_async_added_to_hass(sensor_with_hass_data, mock_dispatch):
"""Test dispatcher called when added."""
await sensor_with_hass_data.async_added_to_hass()
assert len(mock_dispatch.mock_calls) == 1
kall = mock_dispatch.call_args
args, kwargs = kall
assert len(args) == 3
assert args[0] == sensor_with_hass_data.hass
assert args[1] == "arlo_update"
assert not kwargs
def test_sensor_state_default(default_sensor):
"""Test the state property."""
assert default_sensor.state is None
def test_sensor_icon_battery(battery_sensor):
"""Test the battery icon."""
assert battery_sensor.icon == "mdi:battery-50"
def test_sensor_icon(temperature_sensor):
"""Test the icon property."""
assert temperature_sensor.icon == "mdi:thermometer"
def test_unit_of_measure(default_sensor, battery_sensor):
"""Test the unit_of_measurement property."""
assert default_sensor.unit_of_measurement is None
assert battery_sensor.unit_of_measurement == PERCENTAGE
def test_device_class(default_sensor, temperature_sensor, humidity_sensor):
"""Test the device_class property."""
assert default_sensor.device_class is None
assert temperature_sensor.device_class == DEVICE_CLASS_TEMPERATURE
assert humidity_sensor.device_class == DEVICE_CLASS_HUMIDITY
def test_update_total_cameras(cameras_sensor):
"""Test update method for total_cameras sensor type."""
cameras_sensor.update()
assert cameras_sensor.state == 2
def test_update_captured_today(captured_sensor):
"""Test update method for captured_today sensor type."""
captured_sensor.update()
assert captured_sensor.state == 5
def _test_attributes(sensor_type):
data = _get_named_tuple({"model_id": "TEST123"})
sensor = _get_sensor("test", sensor_type, data)
attrs = sensor.device_state_attributes
assert attrs.get(ATTR_ATTRIBUTION) == "Data provided by arlo.netgear.com"
assert attrs.get("brand") == "Netgear Arlo"
assert attrs.get("model") == "TEST123"
def test_state_attributes():
"""Test attributes for camera sensor types."""
_test_attributes("battery_level")
_test_attributes("signal_strength")
_test_attributes("temperature")
_test_attributes("humidity")
_test_attributes("air_quality")
def test_attributes_total_cameras(cameras_sensor):
"""Test attributes for total cameras sensor type."""
attrs = cameras_sensor.device_state_attributes
assert attrs.get(ATTR_ATTRIBUTION) == "Data provided by arlo.netgear.com"
assert attrs.get("brand") == "Netgear Arlo"
assert attrs.get("model") is None
def _test_update(sensor_type, key, value):
data = _get_named_tuple({key: value})
sensor = _get_sensor("test", sensor_type, data)
sensor.update()
assert sensor.state == value
def test_update():
"""Test update method for direct transcription sensor types."""
_test_update("battery_level", "battery_level", 100)
_test_update("signal_strength", "signal_strength", 100)
_test_update("temperature", "ambient_temperature", 21.4)
_test_update("humidity", "ambient_humidity", 45.1)
_test_update("air_quality", "ambient_air_quality", 14.2)
|
import xmlrpclib
try:
import supervisor.xmlrpc
except ImportError:
supervisor = None
import diamond.collector
class SupervisordCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(SupervisordCollector,
self).get_default_config_help()
config_help.update({
'xmlrpc_server_protocol': 'XML-RPC server protocol. Options: unix, http', # NOQA
'xmlrpc_server_path': 'XML-RPC server path.'
})
return config_help
def get_default_config(self):
default_config = super(SupervisordCollector, self).get_default_config()
default_config['path'] = 'supervisor'
default_config['xmlrpc_server_protocol'] = 'unix'
default_config['xmlrpc_server_path'] = '/var/run/supervisor.sock'
return default_config
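    # Illustrative override of the defaults above (a sketch; the collector
    # config file location and exact syntax depend on how diamond is deployed).
    # Only keys defined by this collector are shown:
    #
    #     xmlrpc_server_protocol = http
    #     xmlrpc_server_path = 127.0.0.1:9001/RPC2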
def getAllProcessInfo(self):
server = None
protocol = self.config['xmlrpc_server_protocol']
path = self.config['xmlrpc_server_path']
uri = '{}://{}'.format(protocol, path)
self.log.debug(
'Attempting to connect to XML-RPC server "%s"', uri)
if protocol == 'unix':
server = xmlrpclib.ServerProxy(
'http://127.0.0.1',
supervisor.xmlrpc.SupervisorTransport(None, None, uri)
).supervisor
elif protocol == 'http':
server = xmlrpclib.Server(uri).supervisor
else:
self.log.debug(
'Invalid xmlrpc_server_protocol config setting "%s"',
protocol)
return None
return server.getAllProcessInfo()
    def collect(self):
        processes = self.getAllProcessInfo()
        if not processes:
            return
        self.log.debug('Found %s supervisord processes', len(processes))
for process in processes:
statPrefix = "%s.%s" % (process["group"], process["name"])
# state
self.publish(statPrefix + ".state", process["state"])
# uptime
uptime = 0
if process["statename"] == "RUNNING":
uptime = process["now"] - process["start"]
self.publish(statPrefix + ".uptime", uptime)
|
import pytest
import pywilight
from homeassistant.components.wilight.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.components.wilight import (
HOST,
UPNP_MAC_ADDRESS,
UPNP_MODEL_NAME_P_B,
UPNP_MODEL_NUMBER,
UPNP_SERIAL,
setup_integration,
)
@pytest.fixture(name="dummy_device_from_host")
def mock_dummy_device_from_host():
"""Mock a valid api_devce."""
device = pywilight.wilight_from_discovery(
f"http://{HOST}:45995/wilight.xml",
UPNP_MAC_ADDRESS,
UPNP_MODEL_NAME_P_B,
UPNP_SERIAL,
UPNP_MODEL_NUMBER,
)
device.set_dummy(True)
with patch(
"pywilight.device_from_host",
return_value=device,
):
yield device
async def test_config_entry_not_ready(hass: HomeAssistantType) -> None:
"""Test the WiLight configuration entry not ready."""
entry = await setup_integration(hass)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistantType, dummy_device_from_host
) -> None:
"""Test the WiLight configuration entry unloading."""
entry = await setup_integration(hass)
assert entry.entry_id in hass.data[DOMAIN]
assert entry.state == ENTRY_STATE_LOADED
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
if DOMAIN in hass.data:
assert entry.entry_id not in hass.data[DOMAIN]
assert entry.state == ENTRY_STATE_NOT_LOADED
|
import os.path as op
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
from mne import read_source_spaces
from mne.datasets import testing
from mne.simulation import simulate_sparse_stc, source_estimate_quantification
from mne.utils import run_tests_if_main
data_path = testing.data_path(download=False)
src_fname = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-6-src.fif')
@testing.requires_testing_data
def test_metrics():
"""Test simulation metrics."""
src = read_source_spaces(src_fname)
times = np.arange(600) / 1000.
rng = np.random.RandomState(42)
stc1 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng)
stc2 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng)
E1_rms = source_estimate_quantification(stc1, stc1, metric='rms')
E2_rms = source_estimate_quantification(stc2, stc2, metric='rms')
E1_cos = source_estimate_quantification(stc1, stc1, metric='cosine')
E2_cos = source_estimate_quantification(stc2, stc2, metric='cosine')
# ### Tests to add
assert (E1_rms == 0.)
assert (E2_rms == 0.)
assert_almost_equal(E1_cos, 0.)
assert_almost_equal(E2_cos, 0.)
stc_bad = stc2.copy().crop(0, 0.5)
pytest.raises(ValueError, source_estimate_quantification, stc1, stc_bad)
stc_bad = stc2.copy()
stc_bad.tmin -= 0.1
pytest.raises(ValueError, source_estimate_quantification, stc1, stc_bad)
pytest.raises(ValueError, source_estimate_quantification, stc1, stc2,
metric='foo')
run_tests_if_main()
|
from bs4 import BeautifulSoup as bs
from publicsuffixlist import PublicSuffixList
from urllib.parse import urlparse
from httpobs.conf import SCANNER_MOZILLA_DOMAINS
from httpobs.scanner.analyzer.decorators import scored_test
from httpobs.scanner.analyzer.utils import only_if_worse
from httpobs.scanner.retriever.retriever import HTML_TYPES
import json
# Compat between Python 3.4 and Python 3.5 (see: https://github.com/mozilla/http-observatory-website/issues/14)
if not hasattr(json, 'JSONDecodeError'): # pragma: no cover
json.JSONDecodeError = ValueError
@scored_test
def contribute(reqs: dict, expectation='contribute-json-with-required-keys') -> dict:
"""
:param reqs: dictionary containing all the request and response objects
:param expectation: test expectation
contribute-json-with-required-keys: contribute.json exists, with all the required_keys [default]
contribute-json-missing-required-keys: contribute.json exists, but missing some of the required_keys (A-)
contribute-json-only-required-on-mozilla-properties: contribute.json isn't required,
since it's not a Mozilla domain
contribute-json-not-implemented: contribute.json file missing (B+)
:return: dictionary with:
data: the parsed contribute.json file
expectation: test expectation
pass: whether the site's configuration met its expectation (null for non-Mozilla sites)
result: short string describing the result of the test
"""
# TODO: allow a bonus if you have a contribute.json on a non-Mozilla website
output = {
'data': None,
'expectation': expectation,
'pass': False,
'result': None,
}
# The keys that are required to be in contribute.json
required_keys = ('name', 'description', 'participate', 'bugs', 'urls')
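    # Illustrative minimal contribute.json that would satisfy required_keys
    # (the values below are made up for the example):
    # {
    #     "name": "Example Project",
    #     "description": "An example open source project",
    #     "participate": {"home": "https://example.com/contribute"},
    #     "bugs": {"list": "https://example.com/bugs"},
    #     "urls": {"prod": "https://example.com"}
    # }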
response = reqs['responses']['auto']
# This finds the SLD ('mozilla' out of 'mozilla.org') if it exists
if '.' in urlparse(response.url).netloc:
second_level_domain = urlparse(response.url).netloc.split('.')[-2]
else:
second_level_domain = ''
if second_level_domain not in SCANNER_MOZILLA_DOMAINS:
output['expectation'] = output['result'] = 'contribute-json-only-required-on-mozilla-properties'
# If there's a contribute.json file
elif reqs['resources']['/contribute.json']:
try:
contrib = json.loads(reqs['resources']['/contribute.json'])
if all(key in contrib for key in required_keys):
output['result'] = 'contribute-json-with-required-keys'
else:
output['result'] = 'contribute-json-missing-required-keys'
except (json.JSONDecodeError, TypeError):
contrib = {}
output['result'] = 'contribute-json-invalid-json'
# Store the contribute.json file
if any(key in contrib for key in required_keys):
contrib = {key: contrib.get(key) for key in required_keys if key in contrib}
# Store contribute.json in the database if it's under a certain size
if len(str(contrib)) < 32768:
output['data'] = contrib
else:
output['data'] = {}
else:
output['result'] = 'contribute-json-not-implemented'
# Check to see if the test passed or failed
if expectation == output['result']:
output['pass'] = True
elif output['result'] == 'contribute-json-only-required-on-mozilla-properties':
output['pass'] = True
return output
@scored_test
def subresource_integrity(reqs: dict, expectation='sri-implemented-and-external-scripts-loaded-securely') -> dict:
"""
:param reqs: dictionary containing all the request and response objects
:param expectation: test expectation
sri-implemented-and-all-scripts-loaded-securely: all same origin, and uses SRI
sri-implemented-and-external-scripts-loaded-securely: integrity attribute exists on all external scripts,
and scripts loaded [default for HTML]
sri-implemented-but-external-scripts-not-loaded-securely: SRI implemented, but with scripts loaded over HTTP
sri-not-implemented-but-external-scripts-loaded-securely: SRI isn't implemented,
but all scripts are loaded over HTTPS
sri-not-implemented-and-external-scripts-not-loaded-securely: SRI isn't implemented,
and scripts are downloaded over HTTP
sri-not-implemented-but-all-scripts-loaded-from-secure-origin: SRI isn't implemented,
but all scripts come from secure origins (self)
sri-not-implemented-but-no-scripts-loaded: SRI isn't implemented, because the page doesn't load any scripts
sri-not-implemented-response-not-html: SRI isn't needed, because the page isn't HTML [default for non-HTML]
request-did-not-return-status-code-200: Only look for SRI on pages that returned 200, not things like 404s
html-not-parsable: Can't parse the page's content
:return: dictionary with:
data: all external scripts and their integrity / crossorigin attributes
expectation: test expectation
pass: whether the site's external scripts met expectations
result: short string describing the result of the test
"""
output = {
'data': {},
'expectation': expectation,
'pass': False,
'result': None,
}
response = reqs['responses']['auto']
# The order of how "good" the results are
goodness = ['sri-implemented-and-all-scripts-loaded-securely',
'sri-implemented-and-external-scripts-loaded-securely',
'sri-implemented-but-external-scripts-not-loaded-securely',
'sri-not-implemented-but-external-scripts-loaded-securely',
'sri-not-implemented-and-external-scripts-not-loaded-securely',
'sri-not-implemented-response-not-html']
    # If the content isn't HTML, there are no scripts to load; this is okay
if response.headers.get('Content-Type', '').split(';')[0] not in HTML_TYPES:
output['result'] = 'sri-not-implemented-response-not-html'
else:
# Try to parse the HTML
try:
soup = bs(reqs['resources']['__path__'], 'html.parser')
        except Exception:
output['result'] = 'html-not-parsable'
return output
# Track to see if any scripts were on foreign TLDs
scripts_on_foreign_origin = False
# Get all the scripts
scripts = soup.find_all('script')
for script in scripts:
if script.has_attr('src'):
# Script tag parameters
src = urlparse(script['src'])
integrity = script.get('integrity')
crossorigin = script.get('crossorigin')
# Check to see if they're on the same second-level domain
# TODO: update the PSL list on startup
psl = PublicSuffixList()
                samesld = (psl.privatesuffix(urlparse(response.url).netloc) ==
                           psl.privatesuffix(src.netloc))
if src.scheme == '':
if src.netloc == '':
# Relative URL (src="/path")
relativeorigin = True
relativeprotocol = False
else:
# Relative protocol (src="//host/path")
relativeorigin = False
relativeprotocol = True
else:
relativeorigin = False
relativeprotocol = False
# Check to see if it's the same origin or second-level domain
if relativeorigin or (samesld and not relativeprotocol):
secureorigin = True
else:
secureorigin = False
scripts_on_foreign_origin = True
# See if it's a secure scheme
if src.scheme == 'https' or (relativeorigin and urlparse(response.url).scheme == 'https'):
securescheme = True
else:
securescheme = False
# Add it to the scripts data result, if it's not a relative URI
if not secureorigin:
output['data'][script['src']] = {
'crossorigin': crossorigin,
'integrity': integrity
}
if integrity and not securescheme:
output['result'] = only_if_worse('sri-implemented-but-external-scripts-not-loaded-securely',
output['result'],
goodness)
elif not integrity and securescheme:
output['result'] = only_if_worse('sri-not-implemented-but-external-scripts-loaded-securely',
output['result'],
goodness)
elif not integrity and not securescheme and samesld:
output['result'] = only_if_worse('sri-not-implemented-and-external-scripts'
'-not-loaded-securely',
output['result'],
goodness)
elif not integrity and not securescheme:
output['result'] = only_if_worse('sri-not-implemented-and-external-scripts'
'-not-loaded-securely',
output['result'],
goodness)
# Grant bonus even if they use SRI on the same origin
else:
if integrity and securescheme and not output['result']:
output['result'] = 'sri-implemented-and-all-scripts-loaded-securely'
# If the page doesn't load any scripts
if not scripts:
output['result'] = 'sri-not-implemented-but-no-scripts-loaded'
# If all the scripts are loaded from a secure origin, not triggering a need for SRI
elif scripts and not scripts_on_foreign_origin and not output['result']:
output['result'] = 'sri-not-implemented-but-all-scripts-loaded-from-secure-origin'
# If the page loaded from a foreign origin, but everything included SRI
elif scripts and scripts_on_foreign_origin and not output['result']:
output['result'] = only_if_worse('sri-implemented-and-external-scripts-loaded-securely',
output['result'],
goodness)
# Code defensively on the size of the data
output['data'] = output['data'] if len(str(output['data'])) < 32768 else {}
# Check to see if the test passed or failed
if output['result'] in ('sri-implemented-and-all-scripts-loaded-securely',
'sri-implemented-and-external-scripts-loaded-securely',
'sri-not-implemented-response-not-html',
'sri-not-implemented-but-all-scripts-loaded-from-secure-origin',
'sri-not-implemented-but-no-scripts-loaded',
expectation):
output['pass'] = True
return output
|
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import CONF_HYDROMETERS, CONF_MOUSE, CONF_THERMOMETERS, HUB as hub
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure platform."""
sensors = []
hub.update_overview()
if int(hub.config.get(CONF_THERMOMETERS, 1)):
sensors.extend(
[
VerisureThermometer(device_label)
for device_label in hub.get(
"$.climateValues[?(@.temperature)].deviceLabel"
)
]
)
if int(hub.config.get(CONF_HYDROMETERS, 1)):
sensors.extend(
[
VerisureHygrometer(device_label)
for device_label in hub.get(
"$.climateValues[?(@.humidity)].deviceLabel"
)
]
)
if int(hub.config.get(CONF_MOUSE, 1)):
sensors.extend(
[
VerisureMouseDetection(device_label)
for device_label in hub.get(
"$.eventCounts[?(@.deviceType=='MOUSE1')].deviceLabel"
)
]
)
add_entities(sensors)
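# Illustrative shape of the overview data the JSONPath queries above expect
# (field names are inferred from the queries in this file; values are made up):
# {
#     "climateValues": [
#         {"deviceLabel": "ABCD 1234", "deviceArea": "Kitchen",
#          "temperature": 21.5, "humidity": 45.0}
#     ],
#     "eventCounts": [
#         {"deviceLabel": "EFGH 5678", "deviceType": "MOUSE1",
#          "area": "Basement", "detections": 2}
#     ]
# }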
class VerisureThermometer(Entity):
"""Representation of a Verisure thermometer."""
def __init__(self, device_label):
"""Initialize the sensor."""
self._device_label = device_label
@property
def name(self):
"""Return the name of the device."""
return (
hub.get_first(
"$.climateValues[?(@.deviceLabel=='%s')].deviceArea", self._device_label
)
+ " temperature"
)
@property
def state(self):
"""Return the state of the device."""
return hub.get_first(
"$.climateValues[?(@.deviceLabel=='%s')].temperature", self._device_label
)
@property
def available(self):
"""Return True if entity is available."""
return (
hub.get_first(
"$.climateValues[?(@.deviceLabel=='%s')].temperature",
self._device_label,
)
is not None
)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return TEMP_CELSIUS
# pylint: disable=no-self-use
def update(self):
"""Update the sensor."""
hub.update_overview()
class VerisureHygrometer(Entity):
"""Representation of a Verisure hygrometer."""
def __init__(self, device_label):
"""Initialize the sensor."""
self._device_label = device_label
@property
def name(self):
"""Return the name of the device."""
return (
hub.get_first(
"$.climateValues[?(@.deviceLabel=='%s')].deviceArea", self._device_label
)
+ " humidity"
)
@property
def state(self):
"""Return the state of the device."""
return hub.get_first(
"$.climateValues[?(@.deviceLabel=='%s')].humidity", self._device_label
)
@property
def available(self):
"""Return True if entity is available."""
return (
hub.get_first(
"$.climateValues[?(@.deviceLabel=='%s')].humidity", self._device_label
)
is not None
)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return PERCENTAGE
# pylint: disable=no-self-use
def update(self):
"""Update the sensor."""
hub.update_overview()
class VerisureMouseDetection(Entity):
"""Representation of a Verisure mouse detector."""
def __init__(self, device_label):
"""Initialize the sensor."""
self._device_label = device_label
@property
def name(self):
"""Return the name of the device."""
return (
hub.get_first(
"$.eventCounts[?(@.deviceLabel=='%s')].area", self._device_label
)
+ " mouse"
)
@property
def state(self):
"""Return the state of the device."""
return hub.get_first(
"$.eventCounts[?(@.deviceLabel=='%s')].detections", self._device_label
)
@property
def available(self):
"""Return True if entity is available."""
return (
hub.get_first("$.eventCounts[?(@.deviceLabel=='%s')]", self._device_label)
is not None
)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return "Mice"
# pylint: disable=no-self-use
def update(self):
"""Update the sensor."""
hub.update_overview()
|
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from .const import (
DATA_CONNECTIONS,
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
)
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the API streams platform."""
entity = APICount()
async_add_entities([entity])
class APICount(Entity):
"""Entity to represent how many people are connected to the stream API."""
def __init__(self):
"""Initialize the API count."""
self.count = 0
async def async_added_to_hass(self):
"""Added to hass."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_WEBSOCKET_CONNECTED, self._update_count
)
)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_WEBSOCKET_DISCONNECTED, self._update_count
)
)
@property
def name(self):
"""Return name of entity."""
return "Connected clients"
@property
def state(self):
"""Return current API count."""
return self.count
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "clients"
@callback
def _update_count(self):
self.count = self.hass.data.get(DATA_CONNECTIONS, 0)
self.async_write_ha_state()
|
import jwt
import pytest
from homeassistant.components.cloud import const, prefs
from . import mock_cloud, mock_cloud_prefs
from tests.async_mock import patch
@pytest.fixture(autouse=True)
def mock_user_data():
"""Mock os module."""
with patch("hass_nabucasa.Cloud.write_user_info") as writer:
yield writer
@pytest.fixture
def mock_cloud_fixture(hass):
"""Fixture for cloud component."""
hass.loop.run_until_complete(mock_cloud(hass))
return mock_cloud_prefs(hass)
@pytest.fixture
async def cloud_prefs(hass):
"""Fixture for cloud preferences."""
cloud_prefs = prefs.CloudPreferences(hass)
await cloud_prefs.async_initialize()
return cloud_prefs
@pytest.fixture
async def mock_cloud_setup(hass):
"""Set up the cloud."""
await mock_cloud(hass)
@pytest.fixture
def mock_cloud_login(hass, mock_cloud_setup):
"""Mock cloud is logged in."""
hass.data[const.DOMAIN].id_token = jwt.encode(
{
"email": "[email protected]",
"custom:sub-exp": "2018-01-03",
"cognito:username": "abcdefghjkl",
},
"test",
)
|
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, List, Optional, Union
from pyvizio import VizioAsync
from pyvizio.api.apps import find_app_name
from pyvizio.const import APP_HOME, INPUT_APPS, NO_APP_RUNNING, UNKNOWN_APP
from homeassistant.components.media_player import (
DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV,
SUPPORT_SELECT_SOUND_MODE,
MediaPlayerEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_HOST,
CONF_INCLUDE,
CONF_NAME,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import entity_platform
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_ADDITIONAL_CONFIGS,
CONF_APPS,
CONF_VOLUME_STEP,
DEFAULT_TIMEOUT,
DEFAULT_VOLUME_STEP,
DEVICE_ID,
DOMAIN,
ICON,
SERVICE_UPDATE_SETTING,
SUPPORTED_COMMANDS,
UPDATE_SETTING_SCHEMA,
VIZIO_AUDIO_SETTINGS,
VIZIO_DEVICE_CLASSES,
VIZIO_MUTE,
VIZIO_MUTE_ON,
VIZIO_SOUND_MODE,
VIZIO_VOLUME,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up a Vizio media player entry."""
host = config_entry.data[CONF_HOST]
token = config_entry.data.get(CONF_ACCESS_TOKEN)
name = config_entry.data[CONF_NAME]
device_class = config_entry.data[CONF_DEVICE_CLASS]
    # If the config entry options are not set up yet, set them up;
    # otherwise use the values managed in options
volume_step = config_entry.options.get(
CONF_VOLUME_STEP, config_entry.data.get(CONF_VOLUME_STEP, DEFAULT_VOLUME_STEP)
)
params = {}
if not config_entry.options:
params["options"] = {CONF_VOLUME_STEP: volume_step}
include_or_exclude_key = next(
(
key
for key in config_entry.data.get(CONF_APPS, {})
if key in [CONF_INCLUDE, CONF_EXCLUDE]
),
None,
)
if include_or_exclude_key:
params["options"][CONF_APPS] = {
include_or_exclude_key: config_entry.data[CONF_APPS][
include_or_exclude_key
].copy()
}
if not config_entry.data.get(CONF_VOLUME_STEP):
new_data = config_entry.data.copy()
new_data.update({CONF_VOLUME_STEP: volume_step})
params["data"] = new_data
if params:
hass.config_entries.async_update_entry(config_entry, **params)
device = VizioAsync(
DEVICE_ID,
host,
name,
auth_token=token,
device_type=VIZIO_DEVICE_CLASSES[device_class],
session=async_get_clientsession(hass, False),
timeout=DEFAULT_TIMEOUT,
)
if not await device.can_connect_with_auth_check():
_LOGGER.warning("Failed to connect to %s", host)
raise PlatformNotReady
apps_coordinator = hass.data[DOMAIN].get(CONF_APPS)
entity = VizioDevice(config_entry, device, name, device_class, apps_coordinator)
async_add_entities([entity], update_before_add=True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_UPDATE_SETTING, UPDATE_SETTING_SCHEMA, "async_update_setting"
)
class VizioDevice(MediaPlayerEntity):
"""Media Player implementation which performs REST requests to device."""
def __init__(
self,
config_entry: ConfigEntry,
device: VizioAsync,
name: str,
device_class: str,
apps_coordinator: DataUpdateCoordinator,
) -> None:
"""Initialize Vizio device."""
self._config_entry = config_entry
self._apps_coordinator = apps_coordinator
self._name = name
self._state = None
self._volume_level = None
self._volume_step = config_entry.options[CONF_VOLUME_STEP]
self._is_volume_muted = None
self._current_input = None
self._current_app = None
self._current_app_config = None
self._current_sound_mode = None
self._available_sound_modes = []
self._available_inputs = []
self._available_apps = []
self._all_apps = apps_coordinator.data if apps_coordinator else None
self._conf_apps = config_entry.options.get(CONF_APPS, {})
self._additional_app_configs = config_entry.data.get(CONF_APPS, {}).get(
CONF_ADDITIONAL_CONFIGS, []
)
self._device_class = device_class
self._supported_commands = SUPPORTED_COMMANDS[device_class]
self._device = device
self._max_volume = float(self._device.get_max_volume())
self._icon = ICON[device_class]
self._available = True
self._model = None
self._sw_version = None
def _apps_list(self, apps: List[str]) -> List[str]:
"""Return process apps list based on configured filters."""
if self._conf_apps.get(CONF_INCLUDE):
return [app for app in apps if app in self._conf_apps[CONF_INCLUDE]]
if self._conf_apps.get(CONF_EXCLUDE):
return [app for app in apps if app not in self._conf_apps[CONF_EXCLUDE]]
return apps
async def async_update(self) -> None:
"""Retrieve latest state of the device."""
if not self._model:
self._model = await self._device.get_model_name()
if not self._sw_version:
self._sw_version = await self._device.get_version()
is_on = await self._device.get_power_state(log_api_exception=False)
if is_on is None:
if self._available:
_LOGGER.warning(
"Lost connection to %s", self._config_entry.data[CONF_HOST]
)
self._available = False
return
if not self._available:
_LOGGER.info(
"Restored connection to %s", self._config_entry.data[CONF_HOST]
)
self._available = True
if not is_on:
self._state = STATE_OFF
self._volume_level = None
self._is_volume_muted = None
self._current_input = None
self._current_app = None
self._current_app_config = None
self._current_sound_mode = None
return
self._state = STATE_ON
audio_settings = await self._device.get_all_settings(
VIZIO_AUDIO_SETTINGS, log_api_exception=False
)
if audio_settings:
self._volume_level = float(audio_settings[VIZIO_VOLUME]) / self._max_volume
if VIZIO_MUTE in audio_settings:
self._is_volume_muted = (
audio_settings[VIZIO_MUTE].lower() == VIZIO_MUTE_ON
)
else:
self._is_volume_muted = None
if VIZIO_SOUND_MODE in audio_settings:
self._supported_commands |= SUPPORT_SELECT_SOUND_MODE
self._current_sound_mode = audio_settings[VIZIO_SOUND_MODE]
if not self._available_sound_modes:
self._available_sound_modes = (
await self._device.get_setting_options(
VIZIO_AUDIO_SETTINGS, VIZIO_SOUND_MODE
)
)
else:
# Explicitly remove SUPPORT_SELECT_SOUND_MODE from supported features
self._supported_commands &= ~SUPPORT_SELECT_SOUND_MODE
input_ = await self._device.get_current_input(log_api_exception=False)
if input_:
self._current_input = input_
inputs = await self._device.get_inputs_list(log_api_exception=False)
# If no inputs returned, end update
if not inputs:
return
self._available_inputs = [input_.name for input_ in inputs]
# Return before setting app variables if INPUT_APPS isn't in available inputs
if self._device_class == DEVICE_CLASS_SPEAKER or not any(
app for app in INPUT_APPS if app in self._available_inputs
):
return
# Create list of available known apps from known app list after
# filtering by CONF_INCLUDE/CONF_EXCLUDE
self._available_apps = self._apps_list([app["name"] for app in self._all_apps])
self._current_app_config = await self._device.get_current_app_config(
log_api_exception=False
)
self._current_app = find_app_name(
self._current_app_config,
[APP_HOME, *self._all_apps, *self._additional_app_configs],
)
if self._current_app == NO_APP_RUNNING:
self._current_app = None
def _get_additional_app_names(self) -> List[Dict[str, Any]]:
"""Return list of additional apps that were included in configuration.yaml."""
return [
additional_app["name"] for additional_app in self._additional_app_configs
]
@staticmethod
async def _async_send_update_options_signal(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> None:
"""Send update event when Vizio config entry is updated."""
# Move this method to component level if another entity ever gets added for a single config entry.
# See here: https://github.com/home-assistant/core/pull/30653#discussion_r366426121
async_dispatcher_send(hass, config_entry.entry_id, config_entry)
async def _async_update_options(self, config_entry: ConfigEntry) -> None:
"""Update options if the update signal comes from this entity."""
self._volume_step = config_entry.options[CONF_VOLUME_STEP]
# Update so that CONF_ADDITIONAL_CONFIGS gets retained for imports
self._conf_apps.update(config_entry.options.get(CONF_APPS, {}))
async def async_update_setting(
self, setting_type: str, setting_name: str, new_value: Union[int, str]
) -> None:
"""Update a setting when update_setting service is called."""
await self._device.set_setting(
setting_type,
setting_name,
new_value,
)
async def async_added_to_hass(self) -> None:
"""Register callbacks when entity is added."""
# Register callback for when config entry is updated.
self.async_on_remove(
self._config_entry.add_update_listener(
self._async_send_update_options_signal
)
)
# Register callback for update event
self.async_on_remove(
async_dispatcher_connect(
self.hass, self._config_entry.entry_id, self._async_update_options
)
)
# Register callback for app list updates if device is a TV
@callback
def apps_list_update():
"""Update list of all apps."""
self._all_apps = self._apps_coordinator.data
self.async_write_ha_state()
if self._device_class == DEVICE_CLASS_TV:
self.async_on_remove(
self._apps_coordinator.async_add_listener(apps_list_update)
)
@property
def available(self) -> bool:
"""Return the availabiliity of the device."""
return self._available
@property
def state(self) -> Optional[str]:
"""Return the state of the device."""
return self._state
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def icon(self) -> str:
"""Return the icon of the device."""
return self._icon
@property
def volume_level(self) -> Optional[float]:
"""Return the volume level of the device."""
return self._volume_level
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._is_volume_muted
@property
def source(self) -> Optional[str]:
"""Return current input of the device."""
if self._current_app is not None and self._current_input in INPUT_APPS:
return self._current_app
return self._current_input
@property
def source_list(self) -> List[str]:
"""Return list of available inputs of the device."""
        # If a Smartcast app is in the input list and the app list has been retrieved,
        # show the combination of the two, otherwise just return the inputs
if self._available_apps:
return [
*[
_input
for _input in self._available_inputs
if _input not in INPUT_APPS
],
*self._available_apps,
*[
app
for app in self._get_additional_app_names()
if app not in self._available_apps
],
]
return self._available_inputs
@property
def app_id(self) -> Optional[str]:
"""Return the ID of the current app if it is unknown by pyvizio."""
if self._current_app_config and self.app_name == UNKNOWN_APP:
return {
"APP_ID": self._current_app_config.APP_ID,
"NAME_SPACE": self._current_app_config.NAME_SPACE,
"MESSAGE": self._current_app_config.MESSAGE,
}
return None
@property
def app_name(self) -> Optional[str]:
"""Return the friendly name of the current app."""
return self._current_app
@property
def supported_features(self) -> int:
"""Flag device features that are supported."""
return self._supported_commands
@property
def unique_id(self) -> str:
"""Return the unique id of the device."""
return self._config_entry.unique_id
@property
def device_info(self) -> Dict[str, Any]:
"""Return device registry information."""
return {
"identifiers": {(DOMAIN, self._config_entry.unique_id)},
"name": self.name,
"manufacturer": "VIZIO",
"model": self._model,
"sw_version": self._sw_version,
}
@property
def device_class(self) -> str:
"""Return device class for entity."""
return self._device_class
@property
def sound_mode(self) -> Optional[str]:
"""Name of the current sound mode."""
return self._current_sound_mode
@property
def sound_mode_list(self) -> Optional[List[str]]:
"""List of available sound modes."""
return self._available_sound_modes
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
if sound_mode in self._available_sound_modes:
await self._device.set_setting(
VIZIO_AUDIO_SETTINGS, VIZIO_SOUND_MODE, sound_mode
)
async def async_turn_on(self) -> None:
"""Turn the device on."""
await self._device.pow_on()
async def async_turn_off(self) -> None:
"""Turn the device off."""
await self._device.pow_off()
async def async_mute_volume(self, mute: bool) -> None:
"""Mute the volume."""
if mute:
await self._device.mute_on()
self._is_volume_muted = True
else:
await self._device.mute_off()
self._is_volume_muted = False
async def async_media_previous_track(self) -> None:
"""Send previous channel command."""
await self._device.ch_down()
async def async_media_next_track(self) -> None:
"""Send next channel command."""
await self._device.ch_up()
async def async_select_source(self, source: str) -> None:
"""Select input source."""
if source in self._available_inputs:
await self._device.set_input(source)
elif source in self._get_additional_app_names():
await self._device.launch_app_config(
**next(
app["config"]
for app in self._additional_app_configs
if app["name"] == source
)
)
elif source in self._available_apps:
await self._device.launch_app(source, self._all_apps)
async def async_volume_up(self) -> None:
"""Increase volume of the device."""
await self._device.vol_up(num=self._volume_step)
if self._volume_level is not None:
self._volume_level = min(
1.0, self._volume_level + self._volume_step / self._max_volume
)
async def async_volume_down(self) -> None:
"""Decrease volume of the device."""
await self._device.vol_down(num=self._volume_step)
if self._volume_level is not None:
self._volume_level = max(
0.0, self._volume_level - self._volume_step / self._max_volume
)
async def async_set_volume_level(self, volume: float) -> None:
"""Set volume level."""
if self._volume_level is not None:
if volume > self._volume_level:
num = int(self._max_volume * (volume - self._volume_level))
await self._device.vol_up(num=num)
self._volume_level = volume
elif volume < self._volume_level:
num = int(self._max_volume * (self._volume_level - volume))
await self._device.vol_down(num=num)
self._volume_level = volume
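    # Worked example of the volume conversion above (values are illustrative):
    # with _max_volume = 100, a current _volume_level of 0.2 and a requested
    # volume of 0.5, num = int(100 * (0.5 - 0.2)) == 30, so vol_up(num=30) is
    # sent and _volume_level is recorded as 0.5; lowering the volume uses
    # vol_down with the analogous difference.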
|
from cerberus import Validator, errors
from cerberus.tests import assert_fail
ValidationError = errors.ValidationError
def test__error_1():
v = Validator(schema={'foo': {'type': 'string'}})
v.document = {'foo': 42}
v._error('foo', errors.TYPE, 'string')
error = v._errors[0]
assert error.document_path == ('foo',)
assert error.schema_path == ('foo', 'type')
assert error.code == 0x24
assert error.rule == 'type'
assert error.constraint == ('string',)
assert error.value == 42
assert error.info == ('string',)
assert not error.is_group_error
assert not error.is_logic_error
def test__error_2():
v = Validator(schema={'foo': {'keysrules': {'type': 'integer'}}})
v.document = {'foo': {'0': 'bar'}}
v._error('foo', errors.KEYSRULES, ())
error = v._errors[0]
assert error.document_path == ('foo',)
assert error.schema_path == ('foo', 'keysrules')
assert error.code == 0x83
assert error.rule == 'keysrules'
assert error.constraint == {'type': ('integer',)}
assert error.value == {'0': 'bar'}
assert error.info == ((),)
assert error.is_group_error
assert not error.is_logic_error
def test__error_3():
valids = (
{'type': ('string',), 'regex': '0x[0-9a-f]{2}'},
{'type': ('integer',), 'min': 0, 'max': 255},
)
v = Validator(schema={'foo': {'oneof': valids}})
v.document = {'foo': '0x100'}
v._error('foo', errors.ONEOF, (), 0, 2)
error = v._errors[0]
assert error.document_path == ('foo',)
assert error.schema_path == ('foo', 'oneof')
assert error.code == 0x92
assert error.rule == 'oneof'
assert error.constraint == valids
assert error.value == '0x100'
assert error.info == ((), 0, 2)
assert error.is_group_error
assert error.is_logic_error
def test_error_tree_from_subschema(validator):
schema = {'foo': {'schema': {'bar': {'type': 'string'}}}}
document = {'foo': {'bar': 0}}
assert_fail(document, schema, validator=validator)
d_error_tree = validator.document_error_tree
s_error_tree = validator.schema_error_tree
assert 'foo' in d_error_tree
assert len(d_error_tree['foo'].errors) == 1, d_error_tree['foo']
assert d_error_tree['foo'].errors[0].code == errors.SCHEMA.code
assert 'bar' in d_error_tree['foo']
assert d_error_tree['foo']['bar'].errors[0].value == 0
assert d_error_tree.fetch_errors_from(('foo', 'bar'))[0].value == 0
assert 'foo' in s_error_tree
assert 'schema' in s_error_tree['foo']
assert 'bar' in s_error_tree['foo']['schema']
assert 'type' in s_error_tree['foo']['schema']['bar']
assert s_error_tree['foo']['schema']['bar']['type'].errors[0].value == 0
assert (
s_error_tree.fetch_errors_from(('foo', 'schema', 'bar', 'type'))[0].value == 0
)
def test_error_tree_from_anyof(validator):
schema = {'foo': {'anyof': [{'type': 'string'}, {'type': 'integer'}]}}
document = {'foo': []}
assert_fail(document, schema, validator=validator)
d_error_tree = validator.document_error_tree
s_error_tree = validator.schema_error_tree
assert 'foo' in d_error_tree
assert d_error_tree['foo'].errors[0].value == []
assert 'foo' in s_error_tree
assert 'anyof' in s_error_tree['foo']
assert 0 in s_error_tree['foo']['anyof']
assert 1 in s_error_tree['foo']['anyof']
assert 'type' in s_error_tree['foo']['anyof'][0]
assert s_error_tree['foo']['anyof'][0]['type'].errors[0].value == []
def test_nested_error_paths(validator):
schema = {
'a_dict': {
'keysrules': {'type': 'integer'},
'valuesrules': {'regex': '[a-z]*'},
},
'a_list': {
'itemsrules': {'type': 'string', 'oneof_regex': ['[a-z]*$', '[A-Z]*']}
},
}
document = {
'a_dict': {0: 'abc', 'one': 'abc', 2: 'aBc', 'three': 'abC'},
'a_list': [0, 'abc', 'abC'],
}
assert_fail(document, schema, validator=validator)
_det = validator.document_error_tree
_set = validator.schema_error_tree
assert len(_det.errors) == 0
assert len(_set.errors) == 0
assert len(_det['a_dict'].errors) == 2
assert len(_set['a_dict'].errors) == 0
assert _det['a_dict'][0] is None
assert len(_det['a_dict']['one'].errors) == 1
assert len(_det['a_dict'][2].errors) == 1
assert len(_det['a_dict']['three'].errors) == 2
assert len(_set['a_dict']['keysrules'].errors) == 1
assert len(_set['a_dict']['valuesrules'].errors) == 1
assert len(_set['a_dict']['keysrules']['type'].errors) == 2
assert len(_set['a_dict']['valuesrules']['regex'].errors) == 2
_ref_err = ValidationError(
('a_dict', 'one'),
('a_dict', 'keysrules', 'type'),
errors.TYPE.code,
'type',
'integer',
'one',
(),
)
assert _det['a_dict']['one'].errors[0] == _ref_err
assert _set['a_dict']['keysrules']['type'].errors[0] == _ref_err
_ref_err = ValidationError(
('a_dict', 2),
('a_dict', 'valuesrules', 'regex'),
errors.REGEX_MISMATCH.code,
'regex',
'[a-z]*$',
'aBc',
(),
)
assert _det['a_dict'][2].errors[0] == _ref_err
assert _set['a_dict']['valuesrules']['regex'].errors[0] == _ref_err
_ref_err = ValidationError(
('a_dict', 'three'),
('a_dict', 'keysrules', 'type'),
errors.TYPE.code,
'type',
'integer',
'three',
(),
)
assert _det['a_dict']['three'].errors[0] == _ref_err
assert _set['a_dict']['keysrules']['type'].errors[1] == _ref_err
_ref_err = ValidationError(
('a_dict', 'three'),
('a_dict', 'valuesrules', 'regex'),
errors.REGEX_MISMATCH.code,
'regex',
'[a-z]*$',
'abC',
(),
)
assert _det['a_dict']['three'].errors[1] == _ref_err
assert _set['a_dict']['valuesrules']['regex'].errors[1] == _ref_err
assert len(_det['a_list'].errors) == 1
assert len(_det['a_list'][0].errors) == 1
assert _det['a_list'][1] is None
assert len(_det['a_list'][2].errors) == 3
assert len(_set['a_list'].errors) == 0
assert len(_set['a_list']['itemsrules'].errors) == 1
assert len(_set['a_list']['itemsrules']['type'].errors) == 1
assert len(_set['a_list']['itemsrules']['oneof'][0]['regex'].errors) == 1
assert len(_set['a_list']['itemsrules']['oneof'][1]['regex'].errors) == 1
_ref_err = ValidationError(
('a_list', 0),
('a_list', 'itemsrules', 'type'),
errors.TYPE.code,
'type',
'string',
0,
(),
)
assert _det['a_list'][0].errors[0] == _ref_err
assert _set['a_list']['itemsrules']['type'].errors[0] == _ref_err
_ref_err = ValidationError(
('a_list', 2),
('a_list', 'itemsrules', 'oneof'),
errors.ONEOF.code,
'oneof',
'irrelevant_at_this_point',
'abC',
(),
)
assert _det['a_list'][2].errors[0] == _ref_err
assert _set['a_list']['itemsrules']['oneof'].errors[0] == _ref_err
_ref_err = ValidationError(
('a_list', 2),
('a_list', 'itemsrules', 'oneof', 0, 'regex'),
errors.REGEX_MISMATCH.code,
'regex',
'[a-z]*$',
'abC',
(),
)
assert _det['a_list'][2].errors[1] == _ref_err
assert _set['a_list']['itemsrules']['oneof'][0]['regex'].errors[0] == _ref_err
_ref_err = ValidationError(
('a_list', 2),
('a_list', 'itemsrules', 'oneof', 1, 'regex'),
errors.REGEX_MISMATCH.code,
'regex',
'[a-z]*$',
'abC',
(),
)
assert _det['a_list'][2].errors[2] == _ref_err
assert _set['a_list']['itemsrules']['oneof'][1]['regex'].errors[0] == _ref_err
def test_queries():
schema = {'foo': {'type': 'dict', 'schema': {'bar': {'type': 'number'}}}}
document = {'foo': {'bar': 'zero'}}
validator = Validator(schema)
validator(document)
assert 'foo' in validator.document_error_tree
assert 'bar' in validator.document_error_tree['foo']
assert 'foo' in validator.schema_error_tree
assert 'schema' in validator.schema_error_tree['foo']
assert errors.SCHEMA in validator.document_error_tree['foo'].errors
assert errors.SCHEMA in validator.document_error_tree['foo']
assert errors.TYPE in validator.document_error_tree['foo']['bar']
assert errors.SCHEMA in validator.schema_error_tree['foo']['schema']
assert errors.TYPE in validator.schema_error_tree['foo']['schema']['bar']['type']
assert (
validator.document_error_tree['foo'][errors.SCHEMA].child_errors[0].code
== errors.TYPE.code
)
def test_basic_error_handler():
handler = errors.BasicErrorHandler()
_errors, ref = [], {}
_errors.append(ValidationError(['foo'], ['foo'], 0x63, 'readonly', True, None, ()))
ref.update({'foo': [handler.messages[0x63]]})
assert handler(_errors) == ref
_errors.append(ValidationError(['bar'], ['foo'], 0x42, 'min', 1, 2, ()))
ref.update({'bar': [handler.messages[0x42].format(constraint=1)]})
assert handler(_errors) == ref
_errors.append(
ValidationError(
['zap', 'foo'], ['zap', 'schema', 'foo'], 0x24, 'type', 'string', True, ()
)
)
ref.update({'zap': [{'foo': [handler.messages[0x24].format(constraint='string')]}]})
assert handler(_errors) == ref
_errors.append(
ValidationError(
['zap', 'foo'],
['zap', 'schema', 'foo'],
0x41,
'regex',
'^p[äe]ng$',
'boom',
(),
)
)
ref['zap'][0]['foo'].append(handler.messages[0x41].format(constraint='^p[äe]ng$'))
assert handler(_errors) == ref
def test_basic_error_of_errors(validator):
schema = {'foo': {'oneof': ({'type': ('integer',)}, {'type': ('string',)})}}
document = {'foo': 23.42}
error = ('foo', ('foo', 'oneof'), errors.ONEOF, schema['foo']['oneof'], ())
child_errors = [
(error[0], error[1] + (0, 'type'), errors.TYPE, ('integer',)),
(error[0], error[1] + (1, 'type'), errors.TYPE, ('string',)),
]
assert_fail(
document, schema, validator=validator, error=error, child_errors=child_errors
)
assert validator.errors == {
'foo': [
errors.BasicErrorHandler.messages[0x92],
{
'oneof definition 0': ["must be one of these types: ('integer',)"],
'oneof definition 1': ["must be one of these types: ('string',)"],
},
]
}
def test_wrong_amount_of_items(validator):
# https://github.com/pyeve/cerberus/issues/505
validator.schema = {
'test_list': {
'type': 'list',
'required': True,
'items': [{'type': 'string'}, {'type': 'string'}],
}
}
validator({'test_list': ['test']})
assert validator.errors == {'test_list': ["length of list should be 2, it is 1"]}
|
from diamond.collector import Collector
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from mock import Mock
from etcdstat import EtcdCollector
try:
import simplejson as json
except ImportError:
import json
##########################################################################
class TestEtcdCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('EtcdCollector', {
'interval': 10
})
self.collector = EtcdCollector(config, None)
def test_import(self):
self.assertTrue(EtcdCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_follower_data(self, publish_mock):
patch1_collector = patch.object(
EtcdCollector,
'get_self_metrics',
Mock(return_value=json.loads(
self.getFixture('follower-self-metrics.json').getvalue())))
patch2_collector = patch.object(
EtcdCollector,
'get_store_metrics',
Mock(return_value=json.loads(
self.getFixture('store-metrics2.json').getvalue())))
patch1_collector.start()
patch2_collector.start()
self.collector.collect()
patch2_collector.stop()
patch1_collector.stop()
metrics = {
'self.is_leader': 0,
'self.sendAppendRequestCnt': 0,
'self.recvAppendRequestCnt': 79367,
'self.recvPkgRate': 6.557436727874493,
'self.recvBandwidthRate': 527.021189819273,
'store.compareAndDeleteFail': 0,
'store.watchers': 0,
'store.setsFail': 12,
'store.createSuccess': 1294,
'store.compareAndSwapFail': 136,
'store.compareAndSwapSuccess': 4839,
'store.deleteSuccess': 6,
'store.updateSuccess': 2,
'store.createFail': 0,
'store.getsSuccess': 396632,
'store.expireCount': 0,
'store.deleteFail': 6,
'store.updateFail': 0,
'store.getsFail': 255837,
'store.compareAndDeleteSuccess': 1239,
'store.setsSuccess': 98571,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_leader_data(self, publish_mock):
patch1_collector = patch.object(
EtcdCollector,
'get_self_metrics',
Mock(return_value=json.loads(
self.getFixture('leader-self-metrics.json').getvalue())))
patch2_collector = patch.object(
EtcdCollector,
'get_store_metrics',
Mock(return_value=json.loads(
self.getFixture('store-metrics.json').getvalue())))
patch1_collector.start()
patch2_collector.start()
self.collector.collect()
patch2_collector.stop()
patch1_collector.stop()
metrics = {
'self.is_leader': 1,
'self.sendAppendRequestCnt': 2097127,
'self.recvAppendRequestCnt': 5870,
'self.sendPkgRate': 11.763588080610418,
'self.sendBandwidthRate': 901.0908469747579,
'store.compareAndDeleteFail': 0,
'store.watchers': 51,
'store.setsFail': 123,
'store.createSuccess': 6468,
'store.compareAndSwapFail': 355,
'store.compareAndSwapSuccess': 9156,
'store.deleteSuccess': 2468,
'store.updateSuccess': 4576,
'store.createFail': 2508,
'store.getsSuccess': 1685131,
'store.expireCount': 0,
'store.deleteFail': 2138,
'store.updateFail': 0,
'store.getsFail': 922428,
'store.compareAndDeleteSuccess': 2047,
'store.setsSuccess': 733,
}
self.assertPublishedMany(publish_mock, metrics)
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import numpy as np
from ..filter import next_fast_len
from ..source_estimate import _BaseSourceEstimate
from ..utils import verbose, _check_combine, _check_option
@verbose
def envelope_correlation(data, combine='mean', orthogonalize="pairwise",
log=False, absolute=True, verbose=None):
"""Compute the envelope correlation.
Parameters
----------
data : array-like, shape=(n_epochs, n_signals, n_times) | generator
The data from which to compute connectivity.
The array-like object can also be a list/generator of array,
each with shape (n_signals, n_times), or a :class:`~mne.SourceEstimate`
object (and ``stc.data`` will be used). If it's float data,
the Hilbert transform will be applied; if it's complex data,
it's assumed the Hilbert has already been applied.
combine : 'mean' | callable | None
How to combine correlation estimates across epochs.
Default is 'mean'. Can be None to return without combining.
If callable, it must accept one positional input.
For example::
combine = lambda data: np.median(data, axis=0)
orthogonalize : 'pairwise' | False
Whether to orthogonalize with the pairwise method or not.
Defaults to 'pairwise'. Note that when False,
the correlation matrix will not be returned with
absolute values.
.. versionadded:: 0.19
log : bool
        If True (default False), square and take the log before orthogonalizing
envelopes or computing correlations.
.. versionadded:: 0.22
absolute : bool
If True (default), then take the absolute value of correlation
coefficients before making each epoch's correlation matrix
symmetric (and thus before combining matrices across epochs).
        Only used when ``orthogonalize='pairwise'``.
.. versionadded:: 0.22
%(verbose)s
Returns
-------
corr : ndarray, shape ([n_epochs, ]n_nodes, n_nodes)
The pairwise orthogonal envelope correlations.
This matrix is symmetric. If combine is None, the array
        will have three dimensions, the first of which is ``n_epochs``.
Notes
-----
This function computes the power envelope correlation between
orthogonalized signals [1]_ [2]_.
.. versionchanged:: 0.22
Computations fixed for ``orthogonalize=True`` and diagonal entries are
set explicitly to zero.
References
----------
.. [1] Hipp JF, Hawellek DJ, Corbetta M, Siegel M, Engel AK (2012)
Large-scale cortical correlation structure of spontaneous
oscillatory activity. Nature Neuroscience 15:884–890
.. [2] Khan S et al. (2018). Maturation trajectories of cortical
resting-state networks depend on the mediating frequency band.
Neuroimage 174:57–68
"""
_check_option('orthogonalize', orthogonalize, (False, 'pairwise'))
from scipy.signal import hilbert
n_nodes = None
if combine is not None:
fun = _check_combine(combine, valid=('mean',))
else: # None
fun = np.array
corrs = list()
    # Note: This is embarrassingly parallel, but the overhead of sending
# the data to different workers is roughly the same as the gain of
# using multiple CPUs. And we require too much GIL for prefer='threading'
# to help.
for ei, epoch_data in enumerate(data):
if isinstance(epoch_data, _BaseSourceEstimate):
epoch_data = epoch_data.data
if epoch_data.ndim != 2:
raise ValueError('Each entry in data must be 2D, got shape %s'
% (epoch_data.shape,))
n_nodes, n_times = epoch_data.shape
if ei > 0 and n_nodes != corrs[0].shape[0]:
raise ValueError('n_nodes mismatch between data[0] and data[%d], '
'got %s and %s'
% (ei, n_nodes, corrs[0].shape[0]))
# Get the complex envelope (allowing complex inputs allows people
# to do raw.apply_hilbert if they want)
if epoch_data.dtype in (np.float32, np.float64):
n_fft = next_fast_len(n_times)
epoch_data = hilbert(epoch_data, N=n_fft, axis=-1)[..., :n_times]
if epoch_data.dtype not in (np.complex64, np.complex128):
raise ValueError('data.dtype must be float or complex, got %s'
% (epoch_data.dtype,))
data_mag = np.abs(epoch_data)
data_conj_scaled = epoch_data.conj()
data_conj_scaled /= data_mag
if log:
data_mag *= data_mag
np.log(data_mag, out=data_mag)
# subtract means
data_mag_nomean = data_mag - np.mean(data_mag, axis=-1, keepdims=True)
# compute variances using linalg.norm (square, sum, sqrt) since mean=0
data_mag_std = np.linalg.norm(data_mag_nomean, axis=-1)
data_mag_std[data_mag_std == 0] = 1
corr = np.empty((n_nodes, n_nodes))
for li, label_data in enumerate(epoch_data):
            if orthogonalize is False:  # no orthogonalization requested
label_data_orth = data_mag[li]
label_data_orth_std = data_mag_std[li]
else:
label_data_orth = (label_data * data_conj_scaled).imag
np.abs(label_data_orth, out=label_data_orth)
# protect against invalid value -- this will be zero
# after (log and) mean subtraction
label_data_orth[li] = 1.
if log:
label_data_orth *= label_data_orth
np.log(label_data_orth, out=label_data_orth)
label_data_orth -= np.mean(label_data_orth, axis=-1,
keepdims=True)
label_data_orth_std = np.linalg.norm(label_data_orth, axis=-1)
label_data_orth_std[label_data_orth_std == 0] = 1
# correlation is dot product divided by variances
corr[li] = np.sum(label_data_orth * data_mag_nomean, axis=1)
corr[li] /= data_mag_std
corr[li] /= label_data_orth_std
if orthogonalize is not False:
# Make it symmetric (it isn't at this point)
if absolute:
corr = np.abs(corr)
corr = (corr.T + corr) / 2.
corrs.append(corr)
del corr
corr = fun(corrs)
return corr
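# Minimal usage sketch (illustrative; ``epochs`` is assumed to be an mne.Epochs
# object already band-pass filtered to the frequency band of interest):
#
#     data = epochs.get_data()            # shape (n_epochs, n_signals, n_times)
#     corr = envelope_correlation(data)   # shape (n_signals, n_signals)
#
# Complex-valued input (e.g. after raw.apply_hilbert) skips the internal
# Hilbert transform, as described in the docstring above.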
|
from django.db import migrations
from weblate.addons.events import EVENT_PRE_COMMIT
from weblate.addons.utils import adjust_addon_events
def update_cleanup_addon(apps, schema_editor):
"""Update events setup for weblate.git.squash addon."""
adjust_addon_events(
apps,
schema_editor,
["weblate.cleanup.generic"],
[EVENT_PRE_COMMIT],
[],
)
class Migration(migrations.Migration):
dependencies = [
("addons", "0001_squashed_0021_linguas_daily"),
]
operations = [migrations.RunPython(update_cleanup_addon, elidable=True)]
|
import asyncio
import logging
from aioimaplib import IMAP4_SSL, AioImapException
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_SERVER = "server"
CONF_FOLDER = "folder"
CONF_SEARCH = "search"
CONF_CHARSET = "charset"
DEFAULT_PORT = 993
ICON = "mdi:email-outline"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_SERVER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_CHARSET, default="utf-8"): cv.string,
vol.Optional(CONF_FOLDER, default="INBOX"): cv.string,
vol.Optional(CONF_SEARCH, default="UnSeen UnDeleted"): cv.string,
}
)
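# Illustrative configuration.yaml entry for this platform (keys match the
# schema above; the server and credentials are placeholders):
#
#     sensor:
#       - platform: imap
#         server: imap.example.com
#         username: user@example.com
#         password: !secret imap_password
#         folder: INBOX
#         search: "UnSeen UnDeleted"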
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the IMAP platform."""
sensor = ImapSensor(
config.get(CONF_NAME),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
config.get(CONF_SERVER),
config.get(CONF_PORT),
config.get(CONF_CHARSET),
config.get(CONF_FOLDER),
config.get(CONF_SEARCH),
)
if not await sensor.connection():
raise PlatformNotReady
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, sensor.shutdown)
async_add_entities([sensor], True)
class ImapSensor(Entity):
"""Representation of an IMAP sensor."""
def __init__(self, name, user, password, server, port, charset, folder, search):
"""Initialize the sensor."""
self._name = name or user
self._user = user
self._password = password
self._server = server
self._port = port
self._charset = charset
self._folder = folder
self._email_count = None
self._search = search
self._connection = None
self._does_push = None
self._idle_loop_task = None
async def async_added_to_hass(self):
"""Handle when an entity is about to be added to Home Assistant."""
if not self.should_poll:
self._idle_loop_task = self.hass.loop.create_task(self.idle_loop())
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
@property
def state(self):
"""Return the number of emails found."""
return self._email_count
@property
def available(self):
"""Return the availability of the device."""
return self._connection is not None
@property
def should_poll(self):
"""Return if polling is needed."""
return not self._does_push
async def connection(self):
"""Return a connection to the server, establishing it if necessary."""
if self._connection is None:
try:
self._connection = IMAP4_SSL(self._server, self._port)
await self._connection.wait_hello_from_server()
await self._connection.login(self._user, self._password)
await self._connection.select(self._folder)
self._does_push = self._connection.has_capability("IDLE")
except (AioImapException, asyncio.TimeoutError):
self._connection = None
return self._connection
async def idle_loop(self):
"""Wait for data pushed from server."""
while True:
try:
if await self.connection():
await self.refresh_email_count()
self.async_write_ha_state()
idle = await self._connection.idle_start()
await self._connection.wait_server_push()
self._connection.idle_done()
with async_timeout.timeout(10):
await idle
else:
self.async_write_ha_state()
except (AioImapException, asyncio.TimeoutError):
self.disconnected()
async def async_update(self):
"""Periodic polling of state."""
try:
if await self.connection():
await self.refresh_email_count()
except (AioImapException, asyncio.TimeoutError):
self.disconnected()
async def refresh_email_count(self):
"""Check the number of found emails."""
if self._connection:
await self._connection.noop()
result, lines = await self._connection.search(
self._search, charset=self._charset
)
if result == "OK":
self._email_count = len(lines[0].split())
else:
_LOGGER.error(
"Can't parse IMAP server response to search '%s': %s / %s",
self._search,
result,
lines[0],
)
def disconnected(self):
"""Forget the connection after it was lost."""
_LOGGER.warning("Lost %s (will attempt to reconnect)", self._server)
self._connection = None
async def shutdown(self, *_):
"""Close resources."""
if self._connection:
if self._connection.has_pending_idle():
self._connection.idle_done()
await self._connection.logout()
if self._idle_loop_task:
self._idle_loop_task.cancel()
|
import sys
import os
import time
import struct
# Fix Path for locating the SNMPCollector
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../',
'snmp',
)))
from diamond.metric import Metric
from snmp import SNMPCollector as parent_SNMPCollector
class IODriveSNMPCollector(parent_SNMPCollector):
"""
SNMPCollector for a single Fusion IO Drive
"""
IODRIVE_STATS = {
"InternalTemp": "1.3.6.1.4.1.30018.1.2.1.1.1.24.5",
"MilliVolts": "1.3.6.1.4.1.30018.1.2.1.1.1.32.5",
"MilliWatts": "1.3.6.1.4.1.30018.1.2.1.1.1.35.5",
"MilliAmps": "1.3.6.1.4.1.30018.1.2.1.1.1.37.5",
}
IODRIVE_BYTE_STATS = {
"BytesReadU": "1.3.6.1.4.1.30018.1.2.2.1.1.12.5",
"BytesReadL": "1.3.6.1.4.1.30018.1.2.2.1.1.13.5",
"BytesWrittenU": "1.3.6.1.4.1.30018.1.2.2.1.1.14.5",
"BytesWrittenL": "1.3.6.1.4.1.30018.1.2.2.1.1.15.5",
}
MAX_VALUE = 18446744073709551615
def get_default_config_help(self):
config_help = super(IODriveSNMPCollector,
self).get_default_config_help()
config_help.update({
'host': 'Host address',
'port': 'SNMP port to collect snmp data',
'community': 'SNMP community',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(IODriveSNMPCollector, self).get_default_config()
config.update({
'path': 'iodrive',
'timeout': 15,
})
return config
def get_string_index_oid(self, s):
"""Turns a string into an oid format is length of name followed by
name chars in ascii"""
return (len(self.get_bytes(s)), ) + self.get_bytes(s)
def get_bytes(self, s):
"""Turns a string into a list of byte values"""
return struct.unpack('%sB' % len(s), s)
def collect_snmp(self, device, host, port, community):
"""
        Collect Fusion IO Drive SNMP stats from a device.
        host and device come from the config file. In the future, device
        should identify which IODrive device is being checked,
        e.g. fioa or fiob.
"""
# Set timestamp
timestamp = time.time()
for k, v in self.IODRIVE_STATS.items():
# Get Metric Name and Value
metricName = '.'.join([k])
metricValue = int(self.get(v, host, port, community)[v])
# Get Metric Path
metricPath = '.'.join(['servers', host, device, metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 0)
# Publish Metric
self.publish_metric(metric)
for k, v in self.IODRIVE_BYTE_STATS.items():
# Get Metric Name and Value
metricName = '.'.join([k])
metricValue = int(self.get(v, host, port, community)[v])
# Get Metric Path
metricPath = '.'.join(['servers', host, device, metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 0)
# Publish Metric
self.publish_metric(metric)
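# A minimal illustrative sketch, not part of the collector: it assumes the
# diamond/snmp imports above resolve and a Python 2 interpreter (get_bytes
# unpacks a plain byte string). It shows how a device name is encoded as an
# SNMP string-index OID suffix: the name length followed by the ASCII value
# of each character.
if __name__ == "__main__":
    collector = IODriveSNMPCollector.__new__(IODriveSNMPCollector)
    # "fioa" -> (4, 102, 105, 111, 97)
    print(collector.get_string_index_oid("fioa"))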
|
from homeassistant.components import aws
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, MagicMock, patch as async_patch
class MockAioSession:
"""Mock AioSession."""
def __init__(self, *args, **kwargs):
"""Init a mock session."""
self.get_user = AsyncMock()
self.invoke = AsyncMock()
self.publish = AsyncMock()
self.send_message = AsyncMock()
def create_client(self, *args, **kwargs): # pylint: disable=no-self-use
"""Create a mocked client."""
return MagicMock(
__aenter__=AsyncMock(
return_value=AsyncMock(
get_user=self.get_user, # iam
invoke=self.invoke, # lambda
publish=self.publish, # sns
send_message=self.send_message, # sqs
)
),
__aexit__=AsyncMock(),
)
async def test_empty_config(hass):
"""Test a default config will be create for empty config."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(hass, "aws", {"aws": {}})
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 1
session = sessions.get("default")
assert isinstance(session, MockAioSession)
# we don't validate auto-created default profile
session.get_user.assert_not_awaited()
async def test_empty_credential(hass):
"""Test a default config will be create for empty credential section."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(
hass,
"aws",
{
"aws": {
"notify": [
{
"service": "lambda",
"name": "New Lambda Test",
"region_name": "us-east-1",
}
]
}
},
)
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 1
session = sessions.get("default")
assert isinstance(session, MockAioSession)
assert hass.services.has_service("notify", "new_lambda_test") is True
await hass.services.async_call(
"notify", "new_lambda_test", {"message": "test", "target": "ARN"}, blocking=True
)
session.invoke.assert_awaited_once()
async def test_profile_credential(hass):
"""Test credentials with profile name."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(
hass,
"aws",
{
"aws": {
"credentials": {"name": "test", "profile_name": "test-profile"},
"notify": [
{
"service": "sns",
"credential_name": "test",
"name": "SNS Test",
"region_name": "us-east-1",
}
],
}
},
)
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 1
session = sessions.get("test")
assert isinstance(session, MockAioSession)
assert hass.services.has_service("notify", "sns_test") is True
await hass.services.async_call(
"notify",
"sns_test",
{"title": "test", "message": "test", "target": "ARN"},
blocking=True,
)
session.publish.assert_awaited_once()
async def test_access_key_credential(hass):
"""Test credentials with access key."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(
hass,
"aws",
{
"aws": {
"credentials": [
{"name": "test", "profile_name": "test-profile"},
{
"name": "key",
"aws_access_key_id": "test-key",
"aws_secret_access_key": "test-secret",
},
],
"notify": [
{
"service": "sns",
"credential_name": "key",
"name": "SNS Test",
"region_name": "us-east-1",
}
],
}
},
)
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 2
session = sessions.get("key")
assert isinstance(session, MockAioSession)
assert hass.services.has_service("notify", "sns_test") is True
await hass.services.async_call(
"notify",
"sns_test",
{"title": "test", "message": "test", "target": "ARN"},
blocking=True,
)
session.publish.assert_awaited_once()
async def test_notify_credential(hass):
"""Test notify service can use access key directly."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(
hass,
"aws",
{
"aws": {
"notify": [
{
"service": "sqs",
"credential_name": "test",
"name": "SQS Test",
"region_name": "us-east-1",
"aws_access_key_id": "some-key",
"aws_secret_access_key": "some-secret",
}
]
}
},
)
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 1
assert isinstance(sessions.get("default"), MockAioSession)
assert hass.services.has_service("notify", "sqs_test") is True
await hass.services.async_call(
"notify", "sqs_test", {"message": "test", "target": "ARN"}, blocking=True
)
async def test_notify_credential_profile(hass):
"""Test notify service can use profile directly."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(
hass,
"aws",
{
"aws": {
"notify": [
{
"service": "sqs",
"name": "SQS Test",
"region_name": "us-east-1",
"profile_name": "test",
}
]
}
},
)
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 1
assert isinstance(sessions.get("default"), MockAioSession)
assert hass.services.has_service("notify", "sqs_test") is True
await hass.services.async_call(
"notify", "sqs_test", {"message": "test", "target": "ARN"}, blocking=True
)
async def test_credential_skip_validate(hass):
"""Test credential can skip validate."""
with async_patch("aiobotocore.AioSession", new=MockAioSession):
await async_setup_component(
hass,
"aws",
{
"aws": {
"credentials": [
{
"name": "key",
"aws_access_key_id": "not-valid",
"aws_secret_access_key": "dont-care",
"validate": False,
}
]
}
},
)
await hass.async_block_till_done()
sessions = hass.data[aws.DATA_SESSIONS]
assert sessions is not None
assert len(sessions) == 1
session = sessions.get("key")
assert isinstance(session, MockAioSession)
session.get_user.assert_not_awaited()
|
import os
from molecule import config
from molecule import interpolation
from molecule import util
def from_yaml(data):
"""
Interpolate the provided data and return a dict.
Currently, this is used to reinterpolate the `molecule.yml` inside an
Ansible playbook. If there were any interpolation errors, they would
have been found and raised earlier.
:return: dict
"""
molecule_env_file = os.environ['MOLECULE_ENV_FILE']
env = os.environ.copy()
env = config.set_env_from_file(env, molecule_env_file)
i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)
interpolated_data = i.interpolate(data)
return util.safe_load(interpolated_data)
def to_yaml(data):
return str(util.safe_dump(data))
def header(content):
return util.molecule_prepender(content)
def get_docker_networks(data):
network_list = []
for platform in data:
if "networks" in platform:
for network in platform['networks']:
if "name" in network:
name = network['name']
network_list.append(name)
return network_list
class FilterModule(object):
""" Core Molecule filter plugins. """
def filters(self):
return {
'molecule_from_yaml': from_yaml,
'molecule_to_yaml': to_yaml,
'molecule_header': header,
'molecule_get_docker_networks': get_docker_networks,
}
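# Illustrative sketch (assumes molecule is installed so the imports above
# resolve): molecule_get_docker_networks collects the names of all named
# networks across the configured platforms.
if __name__ == "__main__":
    platforms = [
        {"name": "instance1", "networks": [{"name": "frontend"}]},
        {"name": "instance2", "networks": [{"name": "backend"}, {}]},
    ]
    print(get_docker_networks(platforms))  # -> ['frontend', 'backend']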
|
from __future__ import absolute_import
from __future__ import print_function
import pyspark
import h5py
import json
from keras.optimizers import serialize as serialize_optimizer
from keras.optimizers import get as get_optimizer
from keras.models import load_model
from .utils import subtract_params
from .utils import lp_to_simple_rdd
from .utils import model_to_dict
from .mllib import to_matrix, from_matrix, to_vector, from_vector
from .worker import AsynchronousSparkWorker, SparkWorker
from .parameter import HttpServer, SocketServer
from .parameter import HttpClient, SocketClient
class SparkModel(object):
def __init__(self, model, mode='asynchronous', frequency='epoch', parameter_server_mode='http', num_workers=None,
custom_objects=None, batch_size=32, port=4000, *args, **kwargs):
"""SparkModel
Base class for distributed training on RDDs. Spark model takes a Keras
model as master network, an optimization scheme, a parallelisation mode
and an averaging frequency.
:param model: Compiled Keras model
:param mode: String, choose from `asynchronous`, `synchronous` and `hogwild`
:param frequency: String, either `epoch` or `batch`
:param parameter_server_mode: String, either `http` or `socket`
:param num_workers: int, number of workers used for training (defaults to None)
:param custom_objects: Keras custom objects
:param batch_size: batch size used for training and inference
:param port: port used in case of 'http' parameter server mode
"""
self._master_network = model
if not hasattr(model, "loss"):
raise Exception(
"Compile your Keras model before initializing an Elephas model with it")
metrics = model.metrics
loss = model.loss
optimizer = serialize_optimizer(model.optimizer)
if custom_objects is None:
custom_objects = {}
if metrics is None:
metrics = ["accuracy"]
self.mode = mode
self.frequency = frequency
self.num_workers = num_workers
self.weights = self._master_network.get_weights()
self.pickled_weights = None
self.master_optimizer = optimizer
self.master_loss = loss
self.master_metrics = metrics
self.custom_objects = custom_objects
self.parameter_server_mode = parameter_server_mode
self.batch_size = batch_size
self.port = port
self.kwargs = kwargs
self.serialized_model = model_to_dict(model)
        if self.mode != 'synchronous':
if self.parameter_server_mode == 'http':
self.parameter_server = HttpServer(
self.serialized_model, self.mode, self.port)
self.client = HttpClient(self.port)
elif self.parameter_server_mode == 'socket':
self.parameter_server = SocketServer(self.serialized_model)
self.client = SocketClient()
else:
raise ValueError("Parameter server mode has to be either `http` or `socket`, "
"got {}".format(self.parameter_server_mode))
@staticmethod
def get_train_config(epochs, batch_size, verbose, validation_split):
return {'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'validation_split': validation_split}
def get_config(self):
base_config = {
'parameter_server_mode': self.parameter_server_mode,
'mode': self.mode,
'frequency': self.frequency,
'num_workers': self.num_workers,
'batch_size': self.batch_size}
config = base_config.copy()
config.update(self.kwargs)
return config
def save(self, file_name):
model = self._master_network
model.save(file_name)
f = h5py.File(file_name, mode='a')
f.attrs['distributed_config'] = json.dumps({
'class_name': self.__class__.__name__,
'config': self.get_config()
}).encode('utf8')
f.flush()
f.close()
@property
def master_network(self):
return self._master_network
@master_network.setter
def master_network(self, network):
self._master_network = network
def start_server(self):
self.parameter_server.start()
def stop_server(self):
self.parameter_server.stop()
def predict(self, data):
"""Get prediction probabilities for a numpy array of features
"""
return self._master_network.predict(data)
def predict_classes(self, data):
""" Predict classes for a numpy array of features
"""
return self._master_network.predict_classes(data)
def fit(self, rdd, epochs=10, batch_size=32,
verbose=0, validation_split=0.1):
"""
Train an elephas model on an RDD. The Keras model configuration as specified
        in the elephas model is sent to Spark workers, and each worker is
        trained on its own data partition.
:param rdd: RDD with features and labels
:param epochs: number of epochs used for training
:param batch_size: batch size used for training
:param verbose: logging verbosity level (0, 1 or 2)
:param validation_split: percentage of data set aside for validation
"""
print('>>> Fit model')
if self.num_workers:
rdd = rdd.repartition(self.num_workers)
if self.mode in ['asynchronous', 'synchronous', 'hogwild']:
self._fit(rdd, epochs, batch_size, verbose, validation_split)
else:
raise ValueError(
"Choose from one of the modes: asynchronous, synchronous or hogwild")
def _fit(self, rdd, epochs, batch_size, verbose, validation_split):
"""Protected train method to make wrapping of modes easier
"""
self._master_network.compile(optimizer=get_optimizer(self.master_optimizer),
loss=self.master_loss,
metrics=self.master_metrics)
if self.mode in ['asynchronous', 'hogwild']:
self.start_server()
train_config = self.get_train_config(
epochs, batch_size, verbose, validation_split)
mode = self.parameter_server_mode
freq = self.frequency
optimizer = self.master_optimizer
loss = self.master_loss
metrics = self.master_metrics
custom = self.custom_objects
yaml = self._master_network.to_yaml()
init = self._master_network.get_weights()
parameters = rdd.context.broadcast(init)
if self.mode in ['asynchronous', 'hogwild']:
print('>>> Initialize workers')
worker = AsynchronousSparkWorker(
yaml, parameters, mode, train_config, freq, optimizer, loss, metrics, custom)
print('>>> Distribute load')
rdd.mapPartitions(worker.train).collect()
print('>>> Async training complete.')
new_parameters = self.client.get_parameters()
elif self.mode == 'synchronous':
worker = SparkWorker(yaml, parameters, train_config,
optimizer, loss, metrics, custom)
gradients = rdd.mapPartitions(worker.train).collect()
new_parameters = self._master_network.get_weights()
for grad in gradients: # simply accumulate gradients one by one
new_parameters = subtract_params(new_parameters, grad)
print('>>> Synchronous training complete.')
else:
raise ValueError("Unsupported mode {}".format(self.mode))
self._master_network.set_weights(new_parameters)
if self.mode in ['asynchronous', 'hogwild']:
self.stop_server()
def load_spark_model(file_name):
model = load_model(file_name)
f = h5py.File(file_name, mode='r')
elephas_conf = json.loads(f.attrs.get('distributed_config'))
class_name = elephas_conf.get('class_name')
config = elephas_conf.get('config')
if class_name == "SparkModel":
return SparkModel(model=model, **config)
elif class_name == "SparkMLlibModel":
return SparkMLlibModel(model=model, **config)
class SparkMLlibModel(SparkModel):
def __init__(self, model, mode='asynchronous', frequency='epoch', parameter_server_mode='http',
num_workers=4, elephas_optimizer=None, custom_objects=None, batch_size=32, port=4000, *args, **kwargs):
"""SparkMLlibModel
The Spark MLlib model takes RDDs of LabeledPoints for training.
:param model: Compiled Keras model
:param mode: String, choose from `asynchronous`, `synchronous` and `hogwild`
:param frequency: String, either `epoch` or `batch`
:param parameter_server_mode: String, either `http` or `socket`
        :param num_workers: int, number of workers used for training (defaults to 4)
:param custom_objects: Keras custom objects
:param batch_size: batch size used for training and inference
:param port: port used in case of 'http' parameter server mode
"""
SparkModel.__init__(self, model=model, mode=mode, frequency=frequency,
parameter_server_mode=parameter_server_mode, num_workers=num_workers,
custom_objects=custom_objects,
batch_size=batch_size, port=port, *args, **kwargs)
def fit(self, labeled_points, epochs=10, batch_size=32, verbose=0, validation_split=0.1,
categorical=False, nb_classes=None):
"""Train an elephas model on an RDD of LabeledPoints
"""
rdd = lp_to_simple_rdd(labeled_points, categorical, nb_classes)
rdd = rdd.repartition(self.num_workers)
self._fit(rdd=rdd, epochs=epochs, batch_size=batch_size,
verbose=verbose, validation_split=validation_split)
def predict(self, mllib_data):
"""Predict probabilities for an RDD of features
"""
if isinstance(mllib_data, pyspark.mllib.linalg.Matrix):
return to_matrix(self._master_network.predict(from_matrix(mllib_data)))
elif isinstance(mllib_data, pyspark.mllib.linalg.Vector):
return to_vector(self._master_network.predict(from_vector(mllib_data)))
else:
raise ValueError(
                'Provide either an MLlib matrix or vector, got {}'.format(
                    type(mllib_data).__name__))
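# Illustrative usage sketch, not part of the library. It assumes a running
# SparkContext `sc`, a compiled Keras model `model`, and an RDD of
# (features, label) pairs `rdd` (e.g. built with elephas' to_simple_rdd):
#
#   spark_model = SparkModel(model, frequency='epoch',
#                            mode='asynchronous', num_workers=2)
#   spark_model.fit(rdd, epochs=10, batch_size=32,
#                   verbose=0, validation_split=0.1)
#   predictions = spark_model.predict(x_test)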
|
from .packages.ordereddict import OrderedDict as _OrderedDict
# ListDict: OrderedDict subclass with insertion methods for modifying the order of the linked list in O(1) time
# https://gist.github.com/jaredks/6276032
class ListDict(_OrderedDict):
def __insertion(self, link_prev, key_value):
key, value = key_value
if link_prev[2] != key:
if key in self:
del self[key]
link_next = link_prev[1]
self._OrderedDict__map[key] = link_prev[1] = link_next[0] = [link_prev, link_next, key]
dict.__setitem__(self, key, value)
def insert_after(self, existing_key, key_value):
self.__insertion(self._OrderedDict__map[existing_key], key_value)
def insert_before(self, existing_key, key_value):
self.__insertion(self._OrderedDict__map[existing_key][0], key_value)
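# Illustrative usage sketch, not executed here because the relative import
# above needs the containing package: insert_after/insert_before splice a
# (key, value) pair into the ordering in O(1) by relinking the underlying
# doubly linked list.
#
#   d = ListDict([("a", 1), ("c", 3)])
#   d.insert_after("a", ("b", 2))       # order: a, b, c
#   d.insert_before("a", ("zero", 0))   # order: zero, a, b, c
#   list(d.items())  # [("zero", 0), ("a", 1), ("b", 2), ("c", 3)]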
|
from unittest import TestCase
from httpobs.scanner.analyzer.content import contribute, subresource_integrity
from httpobs.tests.utils import empty_requests
class TestContribute(TestCase):
def setUp(self):
self.reqs = empty_requests()
def tearDown(self):
self.reqs = None
def test_no_contribute_mozilla(self):
result = contribute(self.reqs)
self.assertEquals('contribute-json-not-implemented', result['result'])
self.assertFalse(result['pass'])
def test_no_contribute_not_mozilla(self):
self.reqs['responses']['auto'].url = 'https://github.com'
result = contribute(self.reqs)
self.assertEquals('contribute-json-only-required-on-mozilla-properties', result['result'])
self.assertTrue(result['pass'])
def test_invalid_json(self):
self.reqs['resources']['/contribute.json'] = 'foobar'
result = contribute(self.reqs)
self.assertEquals('contribute-json-invalid-json', result['result'])
self.assertFalse(result['pass'])
def test_contribute_too_large(self):
self.reqs['resources']['/contribute.json'] = '{"name": "' + 'foo' * 100000 + '"}'
result = contribute(self.reqs)
self.assertEquals(result['data'], {})
def test_with_required_keys(self):
self.reqs['resources']['/contribute.json'] = """
{
"name": "Bedrock",
"description": "The app powering www.mozilla.org.",
"repository": {
"url": "https://github.com/mozilla/bedrock",
"license": "MPL2",
"tests": "https://travis-ci.org/mozilla/bedrock/"
},
"participate": {
"home": "https://wiki.mozilla.org/Webdev/GetInvolved/mozilla.org",
"docs": "http://bedrock.readthedocs.org/",
"mailing-list": "https://www.mozilla.org/about/forums/#dev-mozilla-org",
"irc": "irc://irc.mozilla.org/#www"
},
"bugs": {
"list": "https://bugzilla.mozilla.org/describecomponents.cgi?product=www.mozilla.org",
"report": "https://bugzilla.mozilla.org/enter_bug.cgi?product=www.mozilla.org",
"mentored": "https://bugzilla.mozilla.org/buglist.cgi?f1=bug_mentor&o1=..."
},
"urls": {
"prod": "https://www.mozilla.org",
"stage": "https://www.allizom.org",
"dev": "https://www-dev.allizom.org",
"demo1": "https://www-demo1.allizom.org",
"demo2": "https://www-demo2.allizom.org",
"demo3": "https://www-demo3.allizom.org",
"demo4": "https://www-demo4.allizom.org",
"demo5": "https://www-demo5.allizom.org"
},
"keywords": [
"python",
"less-css",
"django",
"html5",
"jquery"
]
}"""
result = contribute(self.reqs)
self.assertEquals('contribute-json-with-required-keys', result['result'])
self.assertTrue(result['pass'])
def test_missing_required_keys(self):
self.reqs['resources']['/contribute.json'] = """
{
"name": "Bedrock",
"description": "The app powering www.mozilla.org.",
"repository": {
"url": "https://github.com/mozilla/bedrock",
"license": "MPL2",
"tests": "https://travis-ci.org/mozilla/bedrock/"
},
"participate": {
"home": "https://wiki.mozilla.org/Webdev/GetInvolved/mozilla.org",
"docs": "http://bedrock.readthedocs.org/",
"mailing-list": "https://www.mozilla.org/about/forums/#dev-mozilla-org",
"irc": "irc://irc.mozilla.org/#www"
},
"urls": {
"prod": "https://www.mozilla.org",
"stage": "https://www.allizom.org",
"dev": "https://www-dev.allizom.org",
"demo1": "https://www-demo1.allizom.org",
"demo2": "https://www-demo2.allizom.org",
"demo3": "https://www-demo3.allizom.org",
"demo4": "https://www-demo4.allizom.org",
"demo5": "https://www-demo5.allizom.org"
},
"keywords": [
"python",
"less-css",
"django",
"html5",
"jquery"
]
}"""
result = contribute(self.reqs)
self.assertEquals('contribute-json-missing-required-keys', result['result'])
self.assertFalse(result['pass'])
class TestSubResourceIntegrity(TestCase):
def setUp(self):
self.reqs = empty_requests()
def tearDown(self):
self.reqs = None
def test_no_scripts(self):
self.reqs = empty_requests('test_content_sri_no_scripts.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-but-no-scripts-loaded', result['result'])
self.assertTrue(result['pass'])
def test_not_html(self):
# invalid html
self.reqs['resources']['__path__'] = '<![..]>'
result = subresource_integrity(self.reqs)
self.assertEquals('html-not-parsable', result['result'])
self.assertFalse(result['pass'])
# json, like what an API might return
self.reqs['responses']['auto'].headers['Content-Type'] = 'application/json'
self.reqs['resources']['__path__'] = """
{
'foo': 'bar'
}
"""
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-response-not-html', result['result'])
self.assertTrue(result['pass'])
def test_same_origin(self):
self.reqs = empty_requests('test_content_sri_sameorigin1.html')
result = subresource_integrity(self.reqs)
self.assertEquals(result['result'], 'sri-not-implemented-but-all-scripts-loaded-from-secure-origin')
self.assertTrue(result['pass'])
# On the same second-level domain, but without a protocol
self.reqs = empty_requests('test_content_sri_sameorigin3.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass'])
# On the same second-level domain, with https:// specified
self.reqs = empty_requests('test_content_sri_sameorigin2.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
self.assertTrue(result['pass'])
# And the same, but with a 404 status code
self.reqs['responses']['auto'].status_code = 404
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
self.assertTrue(result['pass'])
def test_implemented_external_scripts_https(self):
# load from a remote site
self.reqs = empty_requests('test_content_sri_impl_external_https1.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-implemented-and-external-scripts-loaded-securely', result['result'])
self.assertTrue(result['pass'])
# load from an intranet / localhost
self.reqs = empty_requests('test_content_sri_impl_external_https2.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-implemented-and-external-scripts-loaded-securely', result['result'])
self.assertTrue(result['pass'])
def test_implemented_same_origin(self):
self.reqs = empty_requests('test_content_sri_impl_sameorigin.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-implemented-and-all-scripts-loaded-securely', result['result'])
self.assertTrue(result['pass'])
def test_not_implemented_external_scripts_https(self):
self.reqs = empty_requests('test_content_sri_notimpl_external_https.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-but-external-scripts-loaded-securely', result['result'])
self.assertFalse(result['pass'])
def test_implemented_external_scripts_http(self):
self.reqs = empty_requests('test_content_sri_impl_external_http.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-implemented-but-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass'])
def test_implemented_external_scripts_noproto(self):
self.reqs = empty_requests('test_content_sri_impl_external_noproto.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-implemented-but-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass'])
def test_not_implemented_external_scripts_http(self):
self.reqs = empty_requests('test_content_sri_notimpl_external_http.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass'])
def test_not_implemented_external_scripts_noproto(self):
self.reqs = empty_requests('test_content_sri_notimpl_external_noproto.html')
result = subresource_integrity(self.reqs)
self.assertEquals('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass'])
|
import logging
from typing import List, Optional
from iaqualink import AqualinkHeater, AqualinkPump, AqualinkSensor, AqualinkState
from iaqualink.const import (
AQUALINK_TEMP_CELSIUS_HIGH,
AQUALINK_TEMP_CELSIUS_LOW,
AQUALINK_TEMP_FAHRENHEIT_HIGH,
AQUALINK_TEMP_FAHRENHEIT_LOW,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
DOMAIN,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.typing import HomeAssistantType
from . import AqualinkEntity, refresh_system
from .const import CLIMATE_SUPPORTED_MODES, DOMAIN as AQUALINK_DOMAIN
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered switches."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkThermostat(dev))
async_add_entities(devs, True)
class HassAqualinkThermostat(AqualinkEntity, ClimateEntity):
"""Representation of a thermostat."""
@property
def name(self) -> str:
"""Return the name of the thermostat."""
return self.dev.label.split(" ")[0]
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def hvac_modes(self) -> List[str]:
"""Return the list of supported HVAC modes."""
return CLIMATE_SUPPORTED_MODES
@property
def pump(self) -> AqualinkPump:
"""Return the pump device for the current thermostat."""
pump = f"{self.name.lower()}_pump"
return self.dev.system.devices[pump]
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode."""
state = AqualinkState(self.heater.state)
if state == AqualinkState.ON:
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@refresh_system
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Turn the underlying heater switch on or off."""
if hvac_mode == HVAC_MODE_HEAT:
await self.heater.turn_on()
elif hvac_mode == HVAC_MODE_OFF:
await self.heater.turn_off()
else:
_LOGGER.warning("Unknown operation mode: %s", hvac_mode)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def min_temp(self) -> int:
"""Return the minimum temperature supported by the thermostat."""
if self.temperature_unit == TEMP_FAHRENHEIT:
return AQUALINK_TEMP_FAHRENHEIT_LOW
return AQUALINK_TEMP_CELSIUS_LOW
@property
def max_temp(self) -> int:
"""Return the minimum temperature supported by the thermostat."""
if self.temperature_unit == TEMP_FAHRENHEIT:
return AQUALINK_TEMP_FAHRENHEIT_HIGH
return AQUALINK_TEMP_CELSIUS_HIGH
@property
def target_temperature(self) -> float:
"""Return the current target temperature."""
return float(self.dev.state)
@refresh_system
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self.dev.set_temperature(int(kwargs[ATTR_TEMPERATURE]))
@property
def sensor(self) -> AqualinkSensor:
"""Return the sensor device for the current thermostat."""
sensor = f"{self.name.lower()}_temp"
return self.dev.system.devices[sensor]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
if self.sensor.state != "":
return float(self.sensor.state)
return None
@property
def heater(self) -> AqualinkHeater:
"""Return the heater device for the current thermostat."""
heater = f"{self.name.lower()}_heater"
return self.dev.system.devices[heater]
|
import logging
from temperusb.temper import TemperHandler
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME, TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_SCALE = "scale"
CONF_OFFSET = "offset"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): vol.Coerce(str),
vol.Optional(CONF_SCALE, default=1): vol.Coerce(float),
vol.Optional(CONF_OFFSET, default=0): vol.Coerce(float),
}
)
TEMPER_SENSORS = []
def get_temper_devices():
"""Scan the Temper devices from temperusb."""
return TemperHandler().get_devices()
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Temper sensors."""
temp_unit = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
scaling = {"scale": config.get(CONF_SCALE), "offset": config.get(CONF_OFFSET)}
temper_devices = get_temper_devices()
for idx, dev in enumerate(temper_devices):
        # Suffix each additional device with its index instead of repeatedly
        # renaming the shared base name.
        dev_name = name if idx == 0 else f"{name}_{idx!s}"
        TEMPER_SENSORS.append(TemperSensor(dev, temp_unit, dev_name, scaling))
add_entities(TEMPER_SENSORS)
def reset_devices():
"""
Re-scan for underlying Temper sensors and assign them to our devices.
This assumes the same sensor devices are present in the same order.
"""
temper_devices = get_temper_devices()
for sensor, device in zip(TEMPER_SENSORS, temper_devices):
sensor.set_temper_device(device)
class TemperSensor(Entity):
"""Representation of a Temper temperature sensor."""
def __init__(self, temper_device, temp_unit, name, scaling):
"""Initialize the sensor."""
self.temp_unit = temp_unit
self.scale = scaling["scale"]
self.offset = scaling["offset"]
self.current_value = None
self._name = name
self.set_temper_device(temper_device)
@property
def name(self):
"""Return the name of the temperature sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self.temp_unit
def set_temper_device(self, temper_device):
"""Assign the underlying device for this sensor."""
self.temper_device = temper_device
# set calibration data
self.temper_device.set_calibration_data(scale=self.scale, offset=self.offset)
def update(self):
"""Retrieve latest state."""
try:
format_str = (
"fahrenheit" if self.temp_unit == TEMP_FAHRENHEIT else "celsius"
)
sensor_value = self.temper_device.get_temperature(format_str)
self.current_value = round(sensor_value, 1)
except OSError:
_LOGGER.error(
"Failed to get temperature. The device address may"
"have changed. Attempting to reset device"
)
reset_devices()
|
import httpx
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
from .data import DEFAULT_TIMEOUT, RestData
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Binary Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): {cv.string: cv.string},
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(["POST", "GET"]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the REST binary sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
resource_template = config.get(CONF_RESOURCE_TEMPLATE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
timeout = config.get(CONF_TIMEOUT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
force_update = config.get(CONF_FORCE_UPDATE)
if resource_template is not None:
resource_template.hass = hass
resource = resource_template.render(parse_result=False)
if value_template is not None:
value_template.hass = hass
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
async_add_entities(
[
RestBinarySensor(
hass,
rest,
name,
device_class,
value_template,
force_update,
resource_template,
)
],
True,
)
class RestBinarySensor(BinarySensorEntity):
"""Representation of a REST binary sensor."""
def __init__(
self,
hass,
rest,
name,
device_class,
value_template,
force_update,
resource_template,
):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._device_class = device_class
self._state = False
self._previous_data = None
self._value_template = value_template
self._force_update = force_update
self._resource_template = resource_template
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return the availability of this sensor."""
return self.rest.data is not None
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
response = self.rest.data
if self._value_template is not None:
response = self._value_template.async_render_with_possible_json_value(
self.rest.data, False
)
try:
return bool(int(response))
except ValueError:
return {"true": True, "on": True, "open": True, "yes": True}.get(
response.lower(), False
)
@property
def force_update(self):
"""Force update."""
return self._force_update
async def async_will_remove_from_hass(self):
"""Shutdown the session."""
await self.rest.async_remove()
async def async_update(self):
"""Get the latest data from REST API and updates the state."""
if self._resource_template is not None:
self.rest.set_url(self._resource_template.render(parse_result=False))
await self.rest.async_update()
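# Illustrative configuration sketch (YAML shown as a comment, not part of
# this module): a REST binary sensor matching the PLATFORM_SCHEMA above.
# The URL and template are made-up placeholders.
#
#   binary_sensor:
#     - platform: rest
#       resource: "http://192.168.1.2/api/door"
#       method: GET
#       name: Front Door
#       device_class: door
#       value_template: "{{ value_json.open }}"
#       verify_ssl: false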
|
from redbot.core import commands
from redbot.core.i18n import Translator
__all__ = ("trivia_stop_check",)
_ = Translator("Trivia", __file__)
def trivia_stop_check():
async def predicate(ctx: commands.GuildContext) -> bool:
session = ctx.cog._get_trivia_session(ctx.channel)
if session is None:
raise commands.CheckFailure(_("There is no ongoing trivia session in this channel."))
author = ctx.author
auth_checks = (
await ctx.bot.is_owner(author),
await ctx.bot.is_mod(author),
await ctx.bot.is_admin(author),
author == ctx.guild.owner,
author == session.ctx.author,
)
return any(auth_checks)
return commands.permissions_check(predicate)
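# Illustrative usage sketch (hypothetical command, not from this module):
# the check is applied to a trivia command so that only the bot owner, a
# mod/admin, the guild owner, or the member who started the session may
# run it.
#
#   @trivia_stop_check()
#   @commands.command()
#   async def trivia_stop(self, ctx: commands.GuildContext):
#       ...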
|
import difflib
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from django.views.generic import DetailView, ListView
from PIL import Image
from weblate.screenshots.forms import ScreenshotEditForm, ScreenshotForm, SearchForm
from weblate.screenshots.models import Screenshot
from weblate.trans.models import Unit
from weblate.utils import messages
from weblate.utils.locale import c_locale
from weblate.utils.search import parse_query
from weblate.utils.views import ComponentViewMixin
try:
with c_locale():
from tesserocr import RIL, PyTessBaseAPI
HAS_OCR = True
except ImportError:
HAS_OCR = False
def try_add_source(request, obj):
if "source" not in request.POST:
return False
try:
source = obj.translation.unit_set.get(pk=int(request.POST["source"]))
except (Unit.DoesNotExist, ValueError):
return False
obj.units.add(source)
return True
class ScreenshotList(ListView, ComponentViewMixin):
paginate_by = 25
model = Screenshot
_add_form = None
def get_queryset(self):
self.kwargs["component"] = self.get_component()
return (
Screenshot.objects.filter(translation__component=self.kwargs["component"])
.prefetch_related("translation__language")
.order()
)
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
component = self.kwargs["component"]
result["object"] = component
if self.request.user.has_perm("screenshot.add", component):
if self._add_form is not None:
result["add_form"] = self._add_form
else:
result["add_form"] = ScreenshotForm(component)
return result
def post(self, request, **kwargs):
component = self.get_component()
if not request.user.has_perm("screenshot.add", component):
raise PermissionDenied()
self._add_form = ScreenshotForm(component, request.POST, request.FILES)
if self._add_form.is_valid():
obj = Screenshot.objects.create(
user=request.user, **self._add_form.cleaned_data
)
request.user.profile.increase_count("uploaded")
try_add_source(request, obj)
messages.success(
request,
_(
"Screenshot has been uploaded, "
"you can now assign it to source strings."
),
)
return redirect(obj)
messages.error(
request, _("Failed to upload screenshot, please fix errors below.")
)
return self.get(request, **kwargs)
class ScreenshotDetail(DetailView):
model = Screenshot
_edit_form = None
def get_object(self, *args, **kwargs):
obj = super().get_object(*args, **kwargs)
self.request.user.check_access_component(obj.translation.component)
return obj
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
component = result["object"].translation.component
if self.request.user.has_perm("screenshot.edit", component):
if self._edit_form is not None:
result["edit_form"] = self._edit_form
else:
result["edit_form"] = ScreenshotEditForm(instance=result["object"])
return result
def post(self, request, **kwargs):
obj = self.get_object()
if request.user.has_perm("screenshot.edit", obj.translation):
self._edit_form = ScreenshotEditForm(
request.POST, request.FILES, instance=obj
)
if self._edit_form.is_valid():
if request.FILES:
obj.user = request.user
request.user.profile.increase_count("uploaded")
self._edit_form.save()
else:
return self.get(request, **kwargs)
return redirect(obj)
@require_POST
@login_required
def delete_screenshot(request, pk):
obj = get_object_or_404(Screenshot, pk=pk)
component = obj.translation.component
if not request.user.has_perm("screenshot.delete", obj.translation):
raise PermissionDenied()
kwargs = {"project": component.project.slug, "component": component.slug}
obj.delete()
messages.success(request, _("Screenshot %s has been deleted.") % obj.name)
return redirect("screenshots", **kwargs)
def get_screenshot(request, pk):
obj = get_object_or_404(Screenshot, pk=pk)
if not request.user.has_perm("screenshot.edit", obj.translation.component):
raise PermissionDenied()
return obj
@require_POST
@login_required
def remove_source(request, pk):
obj = get_screenshot(request, pk)
obj.units.remove(request.POST["source"])
messages.success(request, _("Source has been removed."))
return redirect(obj)
def search_results(code, obj, units=None):
if units is None:
units = []
else:
units = units.exclude(id__in=obj.units.values_list("id", flat=True))
results = [
{
"text": unit.source_string,
"pk": unit.pk,
"context": unit.context,
"location": unit.location,
"assigned": unit.screenshots.count(),
}
for unit in units
]
return JsonResponse(data={"responseCode": code, "results": results})
@login_required
@require_POST
def search_source(request, pk):
obj = get_screenshot(request, pk)
translation = obj.translation
form = SearchForm(request.POST)
if not form.is_valid():
return search_results(400, obj)
return search_results(
200, obj, translation.unit_set.filter(parse_query(form.cleaned_data["q"]))
)
def ocr_extract(api, image, strings):
"""Extract closes matches from an image."""
api.SetImage(image)
for item in api.GetComponentImages(RIL.TEXTLINE, True):
api.SetRectangle(item[1]["x"], item[1]["y"], item[1]["w"], item[1]["h"])
ocr_result = api.GetUTF8Text()
parts = [ocr_result] + ocr_result.split("|") + ocr_result.split()
for part in parts:
yield from difflib.get_close_matches(part, strings, cutoff=0.9)
api.Clear()
@login_required
@require_POST
def ocr_search(request, pk):
obj = get_screenshot(request, pk)
if not HAS_OCR:
return search_results(500, obj)
translation = obj.translation
# Load image
original_image = Image.open(obj.image.path)
# Convert to greyscale
original_image = original_image.convert("L")
# Resize image (tesseract works best around 300dpi)
scaled_image = original_image.copy().resize(
[size * 4 for size in original_image.size], Image.BICUBIC
)
# Find all our strings
sources = dict(translation.unit_set.values_list("source", "pk"))
strings = tuple(sources.keys())
results = set()
# Extract and match strings
with c_locale(), PyTessBaseAPI() as api:
for image in (original_image, scaled_image):
for match in ocr_extract(api, image, strings):
results.add(sources[match])
# Close images
original_image.close()
scaled_image.close()
return search_results(200, obj, translation.unit_set.filter(pk__in=results))
@login_required
@require_POST
def add_source(request, pk):
obj = get_screenshot(request, pk)
result = try_add_source(request, obj)
return JsonResponse(data={"responseCode": 200, "status": result})
@login_required
def get_sources(request, pk):
obj = get_screenshot(request, pk)
return render(
request,
"screenshots/screenshot_sources_body.html",
{"sources": obj.units.order(), "object": obj},
)
|
import time
import pytest
from paasta_tools.utils import SystemPaastaConfig
def time_to_feel_bad(*args, **kwarg):
raise Exception(
"This test called time.sleep() which is bad and slows down our test suite"
)
time.true_slow_sleep = time.sleep
time.sleep = time_to_feel_bad
@pytest.fixture
def system_paasta_config():
return SystemPaastaConfig(
{
"cluster": "fake_cluster",
"api_endpoints": {"fake_cluster": "http://fake_cluster:5054"},
"docker_registry": "fake_registry",
"volumes": [
{
"hostPath": "/hostPath",
"containerPath": "/containerPath",
"mode": "RO",
}
],
"service_discovery_providers": {"smartstack": {}, "envoy": {}},
},
"/fake_dir/",
)
|
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import StoreBaseAddon
from weblate.addons.forms import YAMLCustomizeForm
BREAKS = {"dos": "\r\n", "mac": "\r", "unix": "\n"}
class YAMLCustomizeAddon(StoreBaseAddon):
name = "weblate.yaml.customize"
verbose = _("Customize YAML output")
description = _(
"Allows adjusting YAML output behavior, for example line-length or newlines."
)
settings_form = YAMLCustomizeForm
compat = {"file_format": {"yaml", "ruby-yaml"}}
def store_post_load(self, translation, store):
config = self.instance.configuration
args = store.store.dump_args
args["indent"] = int(config.get("indent", 2))
args["width"] = int(config.get("width", 80))
args["line_break"] = BREAKS[config.get("line_break", "unix")]
|
from absl import flags
from perfkitbenchmarker.linux_packages import speccpu
FLAGS = flags.FLAGS
_PACKAGE_NAME = 'speccpu2006'
_MOUNT_DIR = 'cpu2006_mnt'
_SPECCPU2006_DIR = 'cpu2006'
_SPECCPU2006_ISO = 'cpu2006-1.2.iso'
_SPECCPU2006_TAR = 'cpu2006v1.2.tgz'
_TAR_REQUIRED_MEMBERS = 'cpu2006', 'cpu2006/bin/runspec'
_LOG_FORMAT = r'Est. (SPEC.*_base2006)\s*(\S*)'
_DEFAULT_RUNSPEC_CONFIG = 'linux64-x64-gcc47.cfg'
# This benchmark can be run with an .iso file in the data directory, a tar file
# in the data directory, or a tar file preprovisioned in cloud storage. To run
# this benchmark with tar file preprovisioned in cloud storage, update the
# following dict with sha256sum of the file in cloud storage.
PREPROVISIONED_DATA = {_SPECCPU2006_TAR: None}
def GetSpecInstallConfig(scratch_dir):
"""Returns a SpecInstallConfigurations() for SPEC CPU 2006.
Args:
scratch_dir: The scratch directory on the VM that SPEC is installed on.
"""
install_config = speccpu.SpecInstallConfigurations()
install_config.package_name = _PACKAGE_NAME
install_config.base_mount_dir = _MOUNT_DIR
install_config.base_spec_dir = _SPECCPU2006_DIR
install_config.base_iso_file_path = _SPECCPU2006_ISO
install_config.base_tar_file_path = _SPECCPU2006_TAR
install_config.required_members = _TAR_REQUIRED_MEMBERS
install_config.log_format = _LOG_FORMAT
install_config.runspec_config = (FLAGS.runspec_config or
_DEFAULT_RUNSPEC_CONFIG)
install_config.UpdateConfig(scratch_dir)
return install_config
def Install(vm):
"""Installs SPECCPU 2006."""
speccpu.InstallSPECCPU(vm, GetSpecInstallConfig(vm.GetScratchDir()))
|
import time
from typing import Any, Callable, Mapping, Optional, Sequence
import attr
from PyQt5.QtCore import QObject
from qutebrowser.utils import usertypes
@attr.s
class _CallArgs:
args: Sequence[Any] = attr.ib()
kwargs: Mapping[str, Any] = attr.ib()
class Throttle(QObject):
"""A throttle to throttle calls.
    If a request comes in, it is processed immediately. If another request
    comes in too soon, it is deferred and processed once the delay has passed.
    Any further request before then simply replaces the pending arguments.
"""
def __init__(self,
func: Callable,
delay_ms: int,
parent: QObject = None) -> None:
"""Constructor.
Args:
            func: The function/method to call on __call__.
            delay_ms: The time to wait before allowing another call of the
                      function. -1 disables the wrapper.
            parent: The parent object.
"""
super().__init__(parent)
self._delay_ms = delay_ms
self._func = func
self._pending_call: Optional[_CallArgs] = None
self._last_call_ms: Optional[int] = None
self._timer = usertypes.Timer(self, 'throttle-timer')
self._timer.setSingleShot(True)
def _call_pending(self) -> None:
"""Start a pending call."""
assert self._pending_call is not None
self._func(*self._pending_call.args, **self._pending_call.kwargs)
self._pending_call = None
self._last_call_ms = int(time.monotonic() * 1000)
def __call__(self, *args: Any, **kwargs: Any) -> Any:
cur_time_ms = int(time.monotonic() * 1000)
if self._pending_call is None:
if (self._last_call_ms is None or
cur_time_ms - self._last_call_ms > self._delay_ms):
# Call right now
self._last_call_ms = cur_time_ms
self._func(*args, **kwargs)
return
self._timer.setInterval(self._delay_ms -
(cur_time_ms - self._last_call_ms))
# Disconnect any existing calls, continue if no connections.
try:
self._timer.timeout.disconnect()
except TypeError:
pass
self._timer.timeout.connect(self._call_pending)
self._timer.start()
# Update arguments for an existing pending call
self._pending_call = _CallArgs(args=args, kwargs=kwargs)
def set_delay(self, delay_ms: int) -> None:
"""Set the delay to wait between invocation of this function."""
self._delay_ms = delay_ms
def cancel(self) -> None:
"""Cancel any pending instance of this timer."""
self._timer.stop()
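# Illustrative usage sketch (assumes a running Qt event loop, as inside
# qutebrowser): wrap a callable so rapid calls are coalesced to at most one
# call per 100 ms, with the most recent arguments winning.
#
#   throttled = Throttle(do_resize, delay_ms=100, parent=some_qobject)
#   throttled(640, 480)   # runs immediately
#   throttled(800, 600)   # too soon: stored and run once the delay ends
#   throttled.cancel()    # drop any pending call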
|
import argparse
import matplotlib.pyplot as plt
import chainer
from chainercv.datasets import camvid_label_colors
from chainercv.datasets import camvid_label_names
from chainercv.links import SegNetBasic
from chainercv import utils
from chainercv.visualizations import vis_image
from chainercv.visualizations import vis_semantic_segmentation
def main():
chainer.config.train = False
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument('--dataset', choices=('camvid',), default='camvid')
parser.add_argument('image')
args = parser.parse_args()
if args.dataset == 'camvid':
if args.pretrained_model is None:
args.pretrained_model = 'camvid'
label_names = camvid_label_names
colors = camvid_label_colors
model = SegNetBasic(
n_class=len(label_names),
pretrained_model=args.pretrained_model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
img = utils.read_image(args.image, color=True)
labels = model.predict([img])
label = labels[0]
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
vis_image(img, ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
# Do not overlay the label image on the color image
vis_semantic_segmentation(None, label, label_names, colors, ax=ax2)
plt.show()
if __name__ == '__main__':
main()
|
from __future__ import print_function
import argparse
import collections
import fileinput
import os
import re
import sys
def main(args):
global _stash
ap = argparse.ArgumentParser()
ap.add_argument('pattern', help='the pattern to match')
ap.add_argument('files', nargs='*', help='files to be searched')
ap.add_argument('-i', '--ignore-case', action='store_true', help='ignore case while searching')
ap.add_argument('-v', '--invert', action='store_true', help='invert the search result')
ap.add_argument('-c', '--count', action='store_true', help='count the search results instead of normal output')
ns = ap.parse_args(args)
flags = 0
if ns.ignore_case:
flags |= re.IGNORECASE
pattern = re.compile(ns.pattern, flags=flags)
# Do not try to grep directories
files = [f for f in ns.files if not os.path.isdir(f)]
fileinput.close() # in case it is not closed
try:
counts = collections.defaultdict(int)
for line in fileinput.input(files, openhook=fileinput.hook_encoded("utf-8")):
if bool(pattern.search(line)) != ns.invert:
if ns.count:
counts[fileinput.filename()] += 1
else:
if ns.invert: # optimize: if ns.invert, then no match, so no highlight color needed
newline = line
else:
newline = re.sub(pattern, lambda m: _stash.text_color(m.group(), 'red'), line)
if fileinput.isstdin():
fmt = u'{lineno}: {line}'
else:
fmt = u'{filename}: {lineno}: {line}'
print(fmt.format(filename=fileinput.filename(), lineno=fileinput.filelineno(), line=newline.rstrip()))
if ns.count:
for filename, count in counts.items():
fmt = u'{count:6} {filename}'
print(fmt.format(filename=filename, count=count))
except Exception as err:
print("grep: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
finally:
fileinput.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
import socket
DEFAULT_SPARK_SERVICE = "spark"
def get_webui_url(port: str) -> str:
return f"http://{socket.getfqdn()}:{port}"
def inject_spark_conf_str(original_docker_cmd: str, spark_conf_str: str) -> str:
for base_cmd in ("pyspark", "spark-shell", "spark-submit"):
if base_cmd in original_docker_cmd:
return original_docker_cmd.replace(
base_cmd, base_cmd + " " + spark_conf_str, 1
)
return original_docker_cmd
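# Minimal illustrative sketch: the spark conf string is spliced in right
# after the first recognized Spark command found in the docker command.
if __name__ == "__main__":
    cmd = "spark-submit --deploy-mode client my_job.py"
    conf = "--conf spark.ui.port=33000"
    print(inject_spark_conf_str(cmd, conf))
    # -> spark-submit --conf spark.ui.port=33000 --deploy-mode client my_job.py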
|
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.homekit_controller.const import DOMAIN
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
from tests.components.homekit_controller.common import setup_test_component
# pylint: disable=redefined-outer-name
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
def create_remote(accessory):
"""Define characteristics for a button (that is inn a group)."""
service_label = accessory.add_service(ServicesTypes.SERVICE_LABEL)
char = service_label.add_char(CharacteristicsTypes.SERVICE_LABEL_NAMESPACE)
char.value = 1
for i in range(4):
button = accessory.add_service(ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH)
button.linked.append(service_label)
char = button.add_char(CharacteristicsTypes.INPUT_EVENT)
char.value = 0
char.perms = ["pw", "pr", "ev"]
char = button.add_char(CharacteristicsTypes.NAME)
char.value = f"Button {i + 1}"
char = button.add_char(CharacteristicsTypes.SERVICE_LABEL_INDEX)
char.value = i
battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
battery.add_char(CharacteristicsTypes.BATTERY_LEVEL)
def create_button(accessory):
"""Define a button (that is not in a group)."""
button = accessory.add_service(ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH)
char = button.add_char(CharacteristicsTypes.INPUT_EVENT)
char.value = 0
char.perms = ["pw", "pr", "ev"]
char = button.add_char(CharacteristicsTypes.NAME)
char.value = "Button 1"
battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
battery.add_char(CharacteristicsTypes.BATTERY_LEVEL)
def create_doorbell(accessory):
"""Define a button (that is not in a group)."""
button = accessory.add_service(ServicesTypes.DOORBELL)
char = button.add_char(CharacteristicsTypes.INPUT_EVENT)
char.value = 0
char.perms = ["pw", "pr", "ev"]
char = button.add_char(CharacteristicsTypes.NAME)
char.value = "Doorbell"
battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
battery.add_char(CharacteristicsTypes.BATTERY_LEVEL)
async def test_enumerate_remote(hass, utcnow):
"""Test that remote is correctly enumerated."""
await setup_test_component(hass, create_remote)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("sensor.testdevice_battery")
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
expected = [
{
"device_id": device.id,
"domain": "sensor",
"entity_id": "sensor.testdevice_battery",
"platform": "device",
"type": "battery_level",
}
]
for button in ("button1", "button2", "button3", "button4"):
for subtype in ("single_press", "double_press", "long_press"):
expected.append(
{
"device_id": device.id,
"domain": "homekit_controller",
"platform": "device",
"type": button,
"subtype": subtype,
}
)
triggers = await async_get_device_automations(hass, "trigger", device.id)
assert_lists_same(triggers, expected)
async def test_enumerate_button(hass, utcnow):
"""Test that a button is correctly enumerated."""
await setup_test_component(hass, create_button)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("sensor.testdevice_battery")
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
expected = [
{
"device_id": device.id,
"domain": "sensor",
"entity_id": "sensor.testdevice_battery",
"platform": "device",
"type": "battery_level",
}
]
for subtype in ("single_press", "double_press", "long_press"):
expected.append(
{
"device_id": device.id,
"domain": "homekit_controller",
"platform": "device",
"type": "button1",
"subtype": subtype,
}
)
triggers = await async_get_device_automations(hass, "trigger", device.id)
assert_lists_same(triggers, expected)
async def test_enumerate_doorbell(hass, utcnow):
"""Test that a button is correctly enumerated."""
await setup_test_component(hass, create_doorbell)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("sensor.testdevice_battery")
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
expected = [
{
"device_id": device.id,
"domain": "sensor",
"entity_id": "sensor.testdevice_battery",
"platform": "device",
"type": "battery_level",
}
]
for subtype in ("single_press", "double_press", "long_press"):
expected.append(
{
"device_id": device.id,
"domain": "homekit_controller",
"platform": "device",
"type": "doorbell",
"subtype": subtype,
}
)
triggers = await async_get_device_automations(hass, "trigger", device.id)
assert_lists_same(triggers, expected)
async def test_handle_events(hass, utcnow, calls):
"""Test that events are handled."""
helper = await setup_test_component(hass, create_remote)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("sensor.testdevice_battery")
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"alias": "single_press",
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "button1",
"subtype": "single_press",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"{{ trigger.platform}} - "
"{{ trigger.type }} - {{ trigger.subtype }}"
)
},
},
},
{
"alias": "long_press",
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "button2",
"subtype": "long_press",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"{{ trigger.platform}} - "
"{{ trigger.type }} - {{ trigger.subtype }}"
)
},
},
},
]
},
)
# Make sure first automation (only) fires for single press
helper.pairing.testing.update_named_service(
"Button 1", {CharacteristicsTypes.INPUT_EVENT: 0}
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "device - button1 - single_press"
    # Make sure no automation triggers for a double press of button 1
helper.pairing.testing.update_named_service(
"Button 1", {CharacteristicsTypes.INPUT_EVENT: 1}
)
await hass.async_block_till_done()
assert len(calls) == 1
    # Make sure no automation triggers for a long press of button 1
helper.pairing.testing.update_named_service(
"Button 1", {CharacteristicsTypes.INPUT_EVENT: 2}
)
await hass.async_block_till_done()
assert len(calls) == 1
# Make sure second automation fires for long press
helper.pairing.testing.update_named_service(
"Button 2", {CharacteristicsTypes.INPUT_EVENT: 2}
)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "device - button2 - long_press"
# Turn the automations off
await hass.services.async_call(
"automation",
"turn_off",
{"entity_id": "automation.long_press"},
blocking=True,
)
await hass.services.async_call(
"automation",
"turn_off",
{"entity_id": "automation.single_press"},
blocking=True,
)
# Make sure event no longer fires
helper.pairing.testing.update_named_service(
"Button 2", {CharacteristicsTypes.INPUT_EVENT: 2}
)
await hass.async_block_till_done()
assert len(calls) == 2
|
import shlex
import subprocess as sp
from pathlib import Path
from typing import Tuple
import click
MAIN_DIRECTORY = Path(__file__).absolute().parent.parent
TEST_REPO_EXPORT_PTH: Path = MAIN_DIRECTORY / "redbot" / "pytest" / "downloader_testrepo.export"
class ClickCustomPath(click.Path):
"""Similar to `click.Path` but returns `Path` object instead."""
def convert(self, value, param, ctx):
path_string = super().convert(value, param, ctx)
return Path(path_string)
class EmptyDirectory(ClickCustomPath):
"""Similar to `ClickCustomPath`, but only allows empty or non-existent directories.
Unlike `ClickCustomPath`, this type doesn't accept
'file_okay', 'dir_okay' and 'readable' keyword arguments.
"""
def __init__(self, **kwargs):
super().__init__(readable=True, dir_okay=True, file_okay=False, **kwargs)
def convert(self, value, param, ctx):
path = super().convert(value, param, ctx)
if path.exists() and next(path.glob("*"), None) is not None:
self.fail(f'Directory "{str(path)}" is not empty!')
return path
class GitRepoDirectory(ClickCustomPath):
"""Similar to `ClickCustomPath`, but only allows git repo directories.
Unlike `ClickCustomPath`, this type doesn't accept
'file_okay', 'dir_okay' and 'readable' keyword arguments.
"""
def __init__(self, **kwargs):
super().__init__(readable=True, dir_okay=True, file_okay=False, **kwargs)
def convert(self, value, param, ctx):
path = super().convert(value, param, ctx)
git_path = path / ".git"
if not git_path.exists():
self.fail(f"A git repo does not exist at path: {str(path)}")
return path
@click.group()
def cli():
"""Downloader test repo commands."""
@cli.command(name="init", short_help="Init a new test repo in chosen directory.")
@click.argument("destination", type=EmptyDirectory(writable=True, resolve_path=True))
def git_init(destination: Path):
"""Init a new test repo in chosen directory. This might be useful
if someone will ever want to make a completely new test repo without importing it."""
init_test_repo(destination)
click.echo(f'New test repo successfully initialized at "{str(destination)}".')
@cli.command(name="import", short_help="Import test repo into chosen directory.")
@click.argument("destination", type=EmptyDirectory(writable=True, resolve_path=True))
def git_import(destination: Path):
"""Import test repo into chosen directory."""
if not TEST_REPO_EXPORT_PTH.is_file():
raise click.ClickException(f'File "{str(TEST_REPO_EXPORT_PTH)}" can\'t be found.')
git_dirparams = init_test_repo(destination)
fast_import = sp.Popen((*git_dirparams, "fast-import", "--quiet"), stdin=sp.PIPE)
with TEST_REPO_EXPORT_PTH.open(mode="rb") as f:
fast_import.communicate(f.read())
return_code = fast_import.wait()
if return_code:
raise click.ClickException(f"git fast-import failed with code {return_code}")
_run((*git_dirparams, "reset", "--hard"))
click.echo(
f'Test repo successfully imported at "{str(destination)}"\n'
        'After you update it, use "edit_testrepo.py export" to update the test repo file.'
)
@cli.command(name="export", short_help="Export repo to test repo file.")
@click.argument("source", type=GitRepoDirectory(resolve_path=True))
@click.option("--yes", is_flag=True)
def git_export(source: Path, yes: bool):
    """Export the repo from the given directory to the test repo export file."""
if not yes and TEST_REPO_EXPORT_PTH.is_file():
click.confirm(
f"Test repo file ({str(TEST_REPO_EXPORT_PTH)}) already exists, "
"are you sure you want to replace it?",
abort=True,
)
p = _run(
("git", "-C", str(source), "fast-export", "--all", "--show-original-ids"), stdout=sp.PIPE
)
with TEST_REPO_EXPORT_PTH.open(mode="wb") as f:
f.write(
b"# THIS FILE SHOULDN'T BE EDITED MANUALLY. "
b"USE `edit_testrepo.py` TOOL TO UPDATE THE REPO.\n" + p.stdout
)
click.echo("Test repo successfully exported.")
def init_test_repo(destination: Path):
destination.mkdir(exist_ok=True)
git_dirparams = ("git", "-C", str(destination))
init_commands: Tuple[Tuple[str, ...], ...] = (
(*git_dirparams, "init"),
(*git_dirparams, "config", "--local", "user.name", "Cog-Creators"),
(*git_dirparams, "config", "--local", "user.email", "[email protected]"),
(*git_dirparams, "config", "--local", "commit.gpgSign", "false"),
)
for args in init_commands:
_run(args)
return git_dirparams
def _run(args, stderr=None, stdout=sp.DEVNULL) -> sp.CompletedProcess:
try:
return sp.run(args, stderr=stderr, stdout=stdout, check=True)
except sp.CalledProcessError as exc:
cmd = " ".join(map(lambda c: shlex.quote(str(c)), exc.cmd))
raise click.ClickException(
f"The following command failed with code {exc.returncode}:\n {cmd}"
)
if __name__ == "__main__":
cli()
|
import pytest
from mock import patch
from arctic.scripts import arctic_init_library as mil
from arctic.scripts.arctic_init_library import Arctic as ar
from ...util import run_as_main
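# run_as_main (from the shared test utilities) is assumed here to invoke the
# given entry point with sys.argv patched to the supplied arguments, i.e. as if
# the script had been started from the command line.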
def test_init_library():
    # Create the user against the current mongo database
with patch('pymongo.MongoClient') as MongoClient, \
patch('arctic.scripts.arctic_init_library.logger', autospec=True) as logger, \
patch('arctic.scripts.arctic_init_library.Arctic', spec=ar) as Arctic, \
patch('arctic.scripts.arctic_init_library.get_mongodb_uri', autospec=True) as get_mongodb_uri, \
patch('arctic.scripts.arctic_init_library.do_db_auth', autospec=True) as do_db_auth:
run_as_main(mil.main, '--host', 'hostname', '--library', 'arctic_user.library', '--type', 'VersionStore')
get_mongodb_uri.assert_called_once_with('hostname')
MongoClient.assert_called_once_with(get_mongodb_uri.return_value)
do_db_auth.assert_called_once_with('hostname', MongoClient.return_value, 'arctic_user')
Arctic.assert_called_once_with(MongoClient.return_value)
Arctic.return_value.initialize_library.assert_called_once_with('arctic_user.library', 'VersionStore', hashed=False)
assert logger.warn.call_count == 0
def test_init_library_no_admin():
    # Create the user against the current mongo database
with patch('pymongo.MongoClient') as MongoClient, \
patch('arctic.scripts.arctic_init_library.logger', autospec=True), \
patch('arctic.scripts.arctic_init_library.Arctic', spec=ar) as Arctic, \
patch('arctic.scripts.arctic_init_library.get_mongodb_uri', autospec=True) as get_mongodb_uri, \
patch('arctic.scripts.arctic_init_library.do_db_auth', autospec=True) as do_db_auth:
run_as_main(mil.main, '--host', 'hostname', '--library', 'arctic_user.library', '--type', 'VersionStore')
get_mongodb_uri.assert_called_once_with('hostname')
MongoClient.assert_called_once_with(get_mongodb_uri.return_value)
Arctic.assert_called_once_with(MongoClient.return_value)
Arctic.return_value.initialize_library.assert_called_once_with('arctic_user.library', 'VersionStore', hashed=False)
def test_init_library_hashed():
    # Create the user against the current mongo database
with patch('pymongo.MongoClient') as MongoClient, \
patch('arctic.scripts.arctic_init_library.logger', autospec=True) as logger, \
patch('arctic.scripts.arctic_init_library.Arctic', spec=ar) as Arctic, \
patch('arctic.scripts.arctic_init_library.get_mongodb_uri', autospec=True) as get_mongodb_uri, \
patch('arctic.scripts.arctic_init_library.do_db_auth', autospec=True) as do_db_auth:
run_as_main(mil.main, '--host', 'hostname', '--library', 'arctic_user.library', '--type', 'VersionStore', '--hashed')
get_mongodb_uri.assert_called_once_with('hostname')
MongoClient.assert_called_once_with(get_mongodb_uri.return_value)
do_db_auth.assert_called_once_with('hostname', MongoClient.return_value, 'arctic_user')
Arctic.assert_called_once_with(MongoClient.return_value)
Arctic.return_value.initialize_library.assert_called_once_with('arctic_user.library', 'VersionStore', hashed=True)
assert logger.warn.call_count == 0
def test_init_library_no_admin_no_user_creds():
with patch('pymongo.MongoClient') as MongoClient, \
patch('arctic.scripts.arctic_init_library.logger', autospec=True) as logger, \
patch('arctic.scripts.arctic_init_library.Arctic', spec=ar) as Arctic, \
patch('arctic.scripts.arctic_init_library.get_mongodb_uri', autospec=True) as get_mongodb_uri, \
patch('arctic.scripts.arctic_init_library.do_db_auth', return_value=False, autospec=True) as do_db_auth:
MongoClient.return_value['arctic_user'].authenticate.return_value = False
run_as_main(mil.main, '--host', 'hostname', '--library', 'arctic_user.library', '--type', 'VersionStore')
get_mongodb_uri.assert_called_once_with('hostname')
MongoClient.assert_called_once_with(get_mongodb_uri.return_value)
assert Arctic.call_count == 0
def test_bad_library_name():
with pytest.raises(Exception):
with patch('argparse.ArgumentParser.error', side_effect=Exception) as error:
run_as_main(mil.main, '--library', 'arctic_jblackburn')
error.assert_called_once_with('Must specify the full path of the library e.g. user.library!')
with pytest.raises(Exception):
with patch('argparse.ArgumentParser.error', side_effect=Exception) as error:
run_as_main(mil.main)
error.assert_called_once_with('Must specify the full path of the library e.g. user.library!')
|
import pandas as pd
# these are generated via _rebuild_suffixes
# valid emoji combos for spec 5.0
from scattertext.emojis.ProcessedEmojiStructure import VALID_EMOJIS
def _rebuild_suffixes(emoji_spec_url='http://www.unicode.org/Public/emoji/5.0/emoji-test.txt'):
valid_seqs = (pd.DataFrame(pd.read_csv(emoji_spec_url,
sep=';', comment='#', names=['code_points', 'status'])
['code_points'].apply(
lambda x: pd.Series({'seq': [int(c, 16) for c in x.split()], 'len': len(x.split())})))
.sort_values(by=['len'], ascending=False)
['seq'])
suffixes_construct = {}
for x in valid_seqs:
suffix = tuple(x[1:])
suffix_holder = suffixes_construct.setdefault(x[0], {})
suffix_set = suffix_holder.setdefault(len(suffix), set())
suffix_set.add(suffix)
for k, v in suffixes_construct.items():
suffixes_construct[k] = list(reversed(sorted(v.items())))
return suffixes_construct
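# The structure built above (and stored in VALID_EMOJIS) maps the first code
# point of an emoji sequence to a list of (suffix_length, set_of_suffix_tuples)
# pairs, ordered by decreasing suffix length so that the longest match wins.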
# Some numbers and other non-letter characters slip through the lookup table,
# so only keep candidates containing at least one code point above 1000.
def _append_if_valid(found_emojis, candidate):
for c in candidate:
if ord(c) > 1000:
found_emojis.append(candidate)
return
def extract_emoji(text):
'''
Parameters
----------
    text : str
Returns
-------
List of 5.0-compliant emojis that occur in text.
'''
found_emojis = []
len_text = len(text)
i = 0
while i < len_text:
cur_char = ord(text[i])
try:
VALID_EMOJIS[cur_char]
except:
i += 1
continue
found = False
for dict_len, candidates in VALID_EMOJIS[cur_char]:
if i + dict_len <= len_text:
if dict_len == 0:
_append_if_valid(found_emojis,text[i])
i += 1
found = True
break
candidate = tuple(ord(c) for c in text[i + 1:i + 1 + dict_len])
if candidate in candidates:
_append_if_valid(found_emojis,text[i:i + 1 + dict_len])
i += 1 + dict_len
found = True
break
if found:
break
if not found:
_append_if_valid(found_emojis,text[i])
i += 1
return found_emojis
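# A minimal usage sketch (the exact output depends on the emoji 5.0 data in
# VALID_EMOJIS):
#   extract_emoji('I love 🍕!')  # -> ['🍕']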
|
from asynctest.mock import patch
from pyownet.protocol import ConnError, OwnetError
from homeassistant.components.onewire.const import CONF_TYPE_OWSERVER, DOMAIN
from homeassistant.config_entries import (
CONN_CLASS_LOCAL_POLL,
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_TYPE
from . import setup_onewire_owserver_integration, setup_onewire_sysbus_integration
from tests.common import MockConfigEntry
async def test_owserver_connect_failure(hass):
"""Test connection failure raises ConfigEntryNotReady."""
config_entry_owserver = MockConfigEntry(
domain=DOMAIN,
source="user",
data={
CONF_TYPE: CONF_TYPE_OWSERVER,
CONF_HOST: "1.2.3.4",
CONF_PORT: "1234",
},
unique_id=f"{CONF_TYPE_OWSERVER}:1.2.3.4:1234",
connection_class=CONN_CLASS_LOCAL_POLL,
options={},
entry_id="2",
)
config_entry_owserver.add_to_hass(hass)
with patch(
"homeassistant.components.onewire.onewirehub.protocol.proxy",
side_effect=ConnError,
):
await hass.config_entries.async_setup(config_entry_owserver.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert config_entry_owserver.state == ENTRY_STATE_SETUP_RETRY
assert not hass.data.get(DOMAIN)
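# A ConnError from protocol.proxy is expected to make async_setup_entry raise
# ConfigEntryNotReady, which is why the entry above ends up in
# ENTRY_STATE_SETUP_RETRY instead of ENTRY_STATE_LOADED.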
async def test_failed_owserver_listing(hass):
"""Create the 1-Wire integration."""
config_entry_owserver = MockConfigEntry(
domain=DOMAIN,
source="user",
data={
CONF_TYPE: CONF_TYPE_OWSERVER,
CONF_HOST: "1.2.3.4",
CONF_PORT: "1234",
},
unique_id=f"{CONF_TYPE_OWSERVER}:1.2.3.4:1234",
connection_class=CONN_CLASS_LOCAL_POLL,
options={},
entry_id="2",
)
config_entry_owserver.add_to_hass(hass)
with patch("homeassistant.components.onewire.onewirehub.protocol.proxy") as owproxy:
owproxy.return_value.dir.side_effect = OwnetError
await hass.config_entries.async_setup(config_entry_owserver.entry_id)
await hass.async_block_till_done()
return config_entry_owserver
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
config_entry_owserver = await setup_onewire_owserver_integration(hass)
config_entry_sysbus = await setup_onewire_sysbus_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
assert config_entry_owserver.state == ENTRY_STATE_LOADED
assert config_entry_sysbus.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(config_entry_owserver.entry_id)
assert await hass.config_entries.async_unload(config_entry_sysbus.entry_id)
await hass.async_block_till_done()
assert config_entry_owserver.state == ENTRY_STATE_NOT_LOADED
assert config_entry_sysbus.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
|
from homeassistant.components import camera
from homeassistant.components.axis.const import (
CONF_STREAM_PROFILE,
DOMAIN as AXIS_DOMAIN,
)
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.const import STATE_IDLE
from homeassistant.setup import async_setup_component
from .test_device import ENTRY_OPTIONS, NAME, setup_axis_integration
from tests.async_mock import patch
async def test_platform_manually_configured(hass):
"""Test that nothing happens when platform is manually configured."""
assert (
await async_setup_component(
hass, CAMERA_DOMAIN, {CAMERA_DOMAIN: {"platform": AXIS_DOMAIN}}
)
is True
)
assert AXIS_DOMAIN not in hass.data
async def test_camera(hass):
"""Test that Axis camera platform is loaded properly."""
await setup_axis_integration(hass)
assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 1
entity_id = f"{CAMERA_DOMAIN}.{NAME}"
cam = hass.states.get(entity_id)
assert cam.state == STATE_IDLE
assert cam.name == NAME
camera_entity = camera._get_camera_from_entity_id(hass, entity_id)
assert camera_entity.image_source == "http://1.2.3.4:80/axis-cgi/jpg/image.cgi"
assert camera_entity.mjpeg_source == "http://1.2.3.4:80/axis-cgi/mjpg/video.cgi"
assert (
await camera_entity.stream_source()
== "rtsp://root:[email protected]/axis-media/media.amp?videocodec=h264"
)
async def test_camera_with_stream_profile(hass):
"""Test that Axis camera entity is using the correct path with stream profike."""
with patch.dict(ENTRY_OPTIONS, {CONF_STREAM_PROFILE: "profile_1"}):
await setup_axis_integration(hass)
assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 1
entity_id = f"{CAMERA_DOMAIN}.{NAME}"
cam = hass.states.get(entity_id)
assert cam.state == STATE_IDLE
assert cam.name == NAME
camera_entity = camera._get_camera_from_entity_id(hass, entity_id)
assert camera_entity.image_source == "http://1.2.3.4:80/axis-cgi/jpg/image.cgi"
assert (
camera_entity.mjpeg_source
== "http://1.2.3.4:80/axis-cgi/mjpg/video.cgi?&streamprofile=profile_1"
)
assert (
await camera_entity.stream_source()
== "rtsp://root:[email protected]/axis-media/media.amp?videocodec=h264&streamprofile=profile_1"
)
async def test_camera_disabled(hass):
"""Test that Axis camera platform is loaded properly but does not create camera entity."""
with patch("axis.vapix.Params.image_format", new=None):
await setup_axis_integration(hass)
assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 0
|
import logging
from scsgate.tasks import (
HaltRollerShutterTask,
LowerRollerShutterTask,
RaiseRollerShutterTask,
)
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverEntity
from homeassistant.const import CONF_DEVICES, CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_SCS_ID, DOMAIN, SCSGATE_SCHEMA
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_DEVICES): cv.schema_with_slug_keys(SCSGATE_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SCSGate cover."""
devices = config.get(CONF_DEVICES)
covers = []
logger = logging.getLogger(__name__)
scsgate = hass.data[DOMAIN]
if devices:
for entity_info in devices.values():
if entity_info[CONF_SCS_ID] in scsgate.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[CONF_SCS_ID]
logger.info("Adding %s scsgate.cover", name)
cover = SCSGateCover(
name=name, scs_id=scs_id, logger=logger, scsgate=scsgate
)
scsgate.add_device(cover)
covers.append(cover)
add_entities(covers)
class SCSGateCover(CoverEntity):
"""Representation of SCSGate cover."""
def __init__(self, scs_id, name, logger, scsgate):
"""Initialize the cover."""
self._scs_id = scs_id
self._name = name
self._logger = logger
self._scsgate = scsgate
@property
def scs_id(self):
"""Return the SCSGate ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def is_closed(self):
"""Return if the cover is closed."""
return None
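    # Returning None for is_closed tells Home Assistant that the position is
    # unknown; SCSGate roller shutters do not report their state.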
def open_cover(self, **kwargs):
"""Move the cover."""
self._scsgate.append_task(RaiseRollerShutterTask(target=self._scs_id))
def close_cover(self, **kwargs):
"""Move the cover down."""
self._scsgate.append_task(LowerRollerShutterTask(target=self._scs_id))
def stop_cover(self, **kwargs):
"""Stop the cover."""
self._scsgate.append_task(HaltRollerShutterTask(target=self._scs_id))
def process_event(self, message):
"""Handle a SCSGate message related with this cover."""
self._logger.debug("Cover %s, got message %s", self._scs_id, message.toggled)
|
import enum
from PyQt5.QtCore import (pyqtSlot, pyqtProperty, # type: ignore[attr-defined]
QUrl)
from qutebrowser.mainwindow.statusbar import textbase
from qutebrowser.config import stylesheet
from qutebrowser.utils import usertypes, urlutils
class UrlType(enum.Enum):
"""The type/color of the URL being shown.
Note this has entries for success/error/warn from widgets.webview:LoadStatus.
"""
success = enum.auto()
success_https = enum.auto()
error = enum.auto()
warn = enum.auto()
hover = enum.auto()
normal = enum.auto()
class UrlText(textbase.TextBase):
"""URL displayed in the statusbar.
Attributes:
        _normal_url: The normal URL to be displayed.
_normal_url_type: The type of the normal URL as a UrlType instance.
_hover_url: The URL we're currently hovering over.
_urltype: The URL type to show currently (normal/ok/error/warn/hover).
Accessed via the urltype property.
"""
STYLESHEET = """
QLabel#UrlText[urltype="normal"] {
color: {{ conf.colors.statusbar.url.fg }};
}
QLabel#UrlText[urltype="success"] {
color: {{ conf.colors.statusbar.url.success.http.fg }};
}
QLabel#UrlText[urltype="success_https"] {
color: {{ conf.colors.statusbar.url.success.https.fg }};
}
QLabel#UrlText[urltype="error"] {
color: {{ conf.colors.statusbar.url.error.fg }};
}
QLabel#UrlText[urltype="warn"] {
color: {{ conf.colors.statusbar.url.warn.fg }};
}
QLabel#UrlText[urltype="hover"] {
color: {{ conf.colors.statusbar.url.hover.fg }};
}
"""
def __init__(self, parent=None):
super().__init__(parent)
self._urltype = None
self.setObjectName(self.__class__.__name__)
stylesheet.set_register(self)
self._hover_url = None
self._normal_url = None
self._normal_url_type = UrlType.normal
@pyqtProperty(str)
def urltype(self):
"""Getter for self.urltype, so it can be used as Qt property.
Return:
The urltype as a string (!)
"""
if self._urltype is None:
return ""
else:
return self._urltype.name
def _update_url(self):
"""Update the displayed URL if the url or the hover url changed."""
old_urltype = self._urltype
if self._hover_url is not None:
self.setText(self._hover_url)
self._urltype = UrlType.hover
elif self._normal_url is not None:
self.setText(self._normal_url)
self._urltype = self._normal_url_type
else:
self.setText('')
self._urltype = UrlType.normal
if old_urltype != self._urltype:
# We can avoid doing an unpolish here because the new style will
# always override the old one.
self.style().polish(self)
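            # polish() re-evaluates the stylesheet so the QLabel picks up the
            # color selector matching the new "urltype" property value.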
@pyqtSlot(usertypes.LoadStatus)
def on_load_status_changed(self, status):
"""Slot for load_status_changed. Sets URL color accordingly.
Args:
status: The usertypes.LoadStatus.
"""
assert isinstance(status, usertypes.LoadStatus), status
if status in [usertypes.LoadStatus.success,
usertypes.LoadStatus.success_https,
usertypes.LoadStatus.error,
usertypes.LoadStatus.warn]:
self._normal_url_type = UrlType[status.name]
else:
self._normal_url_type = UrlType.normal
self._update_url()
@pyqtSlot(QUrl)
def set_url(self, url):
"""Setter to be used as a Qt slot.
Args:
url: The URL to set as QUrl, or None.
"""
if url is None:
self._normal_url = None
elif not url.isValid():
self._normal_url = "Invalid URL!"
else:
self._normal_url = urlutils.safe_display_string(url)
self._normal_url_type = UrlType.normal
self._update_url()
@pyqtSlot(str)
def set_hover_url(self, link):
"""Setter to be used as a Qt slot.
        Sets the hover URL to be shown; when it gets called with an empty link
        (i.e. the link was "un-hovered"), the normal URL is restored instead.
Args:
link: The link which was hovered (string)
"""
if link:
qurl = QUrl(link)
if qurl.isValid():
self._hover_url = urlutils.safe_display_string(qurl)
else:
self._hover_url = '(invalid URL!) {}'.format(link)
else:
self._hover_url = None
self._update_url()
def on_tab_changed(self, tab):
"""Update URL if the tab changed."""
self._hover_url = None
if tab.url().isValid():
self._normal_url = urlutils.safe_display_string(tab.url())
else:
self._normal_url = ''
self.on_load_status_changed(tab.load_status())
self._update_url()
|
import datetime
from env_canada import ECRadar # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
ATTR_UPDATED = "updated"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
CONF_LOOP = "loop"
CONF_PRECIP_TYPE = "precip_type"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LOOP, default=True): cv.boolean,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): cv.matches_regex(r"^C[A-Z]{4}$|^[A-Z]{3}$"),
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
vol.Optional(CONF_PRECIP_TYPE): ["RAIN", "SNOW"],
}
)
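# A minimal, illustrative configuration.yaml entry (station ID and precip type
# are examples only):
#
#   camera:
#     - platform: environment_canada
#       station: XFT
#       precip_type: RAIN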
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada camera."""
if config.get(CONF_STATION):
radar_object = ECRadar(
station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE)
)
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
radar_object = ECRadar(
coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE)
)
add_devices(
[ECCamera(radar_object, config.get(CONF_NAME), config[CONF_LOOP])], True
)
class ECCamera(Camera):
"""Implementation of an Environment Canada radar camera."""
def __init__(self, radar_object, camera_name, is_loop):
"""Initialize the camera."""
super().__init__()
self.radar_object = radar_object
self.camera_name = camera_name
self.is_loop = is_loop
self.content_type = "image/gif"
self.image = None
self.timestamp = None
def camera_image(self):
"""Return bytes of camera image."""
self.update()
return self.image
@property
def name(self):
"""Return the name of the camera."""
if self.camera_name is not None:
return self.camera_name
return "Environment Canada Radar"
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {ATTR_ATTRIBUTION: CONF_ATTRIBUTION, ATTR_UPDATED: self.timestamp}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update radar image."""
if self.is_loop:
self.image = self.radar_object.get_loop()
else:
self.image = self.radar_object.get_latest_frame()
self.timestamp = self.radar_object.timestamp
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from compare_gan.architectures import dcgan
from compare_gan.architectures import infogan
from compare_gan.architectures import resnet30
from compare_gan.architectures import resnet5
from compare_gan.architectures import resnet_biggan
from compare_gan.architectures import resnet_cifar
from compare_gan.architectures import resnet_stl
from compare_gan.architectures import sndcgan
import tensorflow as tf
class ArchitectureTest(parameterized.TestCase, tf.test.TestCase):
def assertArchitectureBuilds(self, gen, disc, image_shape, z_dim=120):
with tf.Graph().as_default():
batch_size = 2
num_classes = 10
# Prepare inputs
z = tf.random.normal((batch_size, z_dim), name="z")
y = tf.one_hot(tf.range(batch_size), num_classes)
      # Run G and D and check their output shapes.
x = gen(z=z, y=y, is_training=True, reuse=False)
self.assertAllEqual(x.shape.as_list()[1:], image_shape)
out, _, _ = disc(
x, y=y, is_training=True, reuse=False)
self.assertAllEqual(out.shape.as_list(), (batch_size, 1))
# Check that G outputs valid pixel values (we use [0, 1] everywhere) and
      # D outputs a probability.
with self.session() as sess:
sess.run(tf.global_variables_initializer())
image, pred = sess.run([x, out])
self.assertAllGreaterEqual(image, 0)
self.assertAllLessEqual(image, 1)
self.assertAllGreaterEqual(pred, 0)
self.assertAllLessEqual(pred, 1)
@parameterized.parameters(
{"image_shape": (28, 28, 1)},
{"image_shape": (32, 32, 1)},
{"image_shape": (32, 32, 3)},
{"image_shape": (64, 64, 3)},
{"image_shape": (128, 128, 3)},
)
def testDcGan(self, image_shape):
self.assertArchitectureBuilds(
gen=dcgan.Generator(image_shape=image_shape),
disc=dcgan.Discriminator(),
image_shape=image_shape)
@parameterized.parameters(
{"image_shape": (28, 28, 1)},
{"image_shape": (32, 32, 1)},
{"image_shape": (32, 32, 3)},
{"image_shape": (64, 64, 3)},
{"image_shape": (128, 128, 3)},
)
def testInfoGan(self, image_shape):
self.assertArchitectureBuilds(
gen=infogan.Generator(image_shape=image_shape),
disc=infogan.Discriminator(),
image_shape=image_shape)
def testResNet30(self, image_shape=(128, 128, 3)):
self.assertArchitectureBuilds(
gen=resnet30.Generator(image_shape=image_shape),
disc=resnet30.Discriminator(),
image_shape=image_shape)
@parameterized.parameters(
{"image_shape": (32, 32, 1)},
{"image_shape": (32, 32, 3)},
{"image_shape": (64, 64, 3)},
{"image_shape": (128, 128, 3)},
)
def testResNet5(self, image_shape):
self.assertArchitectureBuilds(
gen=resnet5.Generator(image_shape=image_shape),
disc=resnet5.Discriminator(),
image_shape=image_shape)
@parameterized.parameters(
{"image_shape": (32, 32, 3)},
{"image_shape": (64, 64, 3)},
{"image_shape": (128, 128, 3)},
{"image_shape": (256, 256, 3)},
{"image_shape": (512, 512, 3)},
)
def testResNet5BigGan(self, image_shape):
if image_shape[0] == 512:
z_dim = 160
elif image_shape[0] == 256:
z_dim = 140
else:
z_dim = 120
    # Use a reduced channel multiplier (ch=16) to avoid OOM errors.
self.assertArchitectureBuilds(
gen=resnet_biggan.Generator(image_shape=image_shape, ch=16),
disc=resnet_biggan.Discriminator(ch=16),
image_shape=image_shape,
z_dim=z_dim)
@parameterized.parameters(
{"image_shape": (32, 32, 1)},
{"image_shape": (32, 32, 3)},
)
def testResNetCifar(self, image_shape):
self.assertArchitectureBuilds(
gen=resnet_cifar.Generator(image_shape=image_shape),
disc=resnet_cifar.Discriminator(),
image_shape=image_shape)
@parameterized.parameters(
{"image_shape": (48, 48, 1)},
{"image_shape": (48, 48, 3)},
)
def testResNetStl(self, image_shape):
self.assertArchitectureBuilds(
gen=resnet_stl.Generator(image_shape=image_shape),
disc=resnet_stl.Discriminator(),
image_shape=image_shape)
@parameterized.parameters(
{"image_shape": (28, 28, 1)},
{"image_shape": (32, 32, 1)},
{"image_shape": (32, 32, 3)},
{"image_shape": (64, 64, 3)},
{"image_shape": (128, 128, 3)},
)
def testSnDcGan(self, image_shape):
self.assertArchitectureBuilds(
gen=sndcgan.Generator(image_shape=image_shape),
disc=sndcgan.Discriminator(),
image_shape=image_shape)
if __name__ == "__main__":
tf.test.main()
|
import os
import sys
import platform
import functools
import threading
import ctypes
from itertools import chain
import six
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0
if IN_PYTHONISTA:
import plistlib
_properties = plistlib.readPlist(os.path.join(os.path.dirname(sys.executable), 'Info.plist'))
PYTHONISTA_VERSION = _properties['CFBundleShortVersionString']
PYTHONISTA_VERSION_LONG = _properties['CFBundleVersion']
if PYTHONISTA_VERSION < '3.0':
python_capi = ctypes.pythonapi
else:
# The default pythonapi always points to Python 3 in Pythonista 3
if six.PY3:
python_capi = ctypes.pythonapi
else:
# We need to load the Python 2 API manually
try:
python_capi = ctypes.PyDLL(os.path.join(os.path.dirname(sys.executable), 'Frameworks/Py2Kit.framework/Py2Kit'))
except OSError:
python_capi = ctypes.PyDLL(
os.path.join(os.path.dirname(sys.executable),
'Frameworks/PythonistaKit.framework/PythonistaKit')
)
else:
PYTHONISTA_VERSION = '0.0'
PYTHONISTA_VERSION_LONG = '000000'
python_capi = ctypes.pythonapi
platform_string = platform.platform()
ON_IPAD = platform_string.find('iPad') >= 0
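# The Darwin release in platform.platform() starting with '14' corresponds to
# iOS 8 (e.g. 'Darwin-14.0.0-...'), which is what the check below relies on.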
ON_IOS_8 = platform_string.split('-')[1].startswith('14')
M_64 = platform_string.find('64bit') != -1
CTRL_KEY_FLAG = (1 << 18) # Control key for keyCommands
CMD_KEY_FLAG = (1 << 20) # Command key
K_CC, K_CD, K_HUP, K_HDN, K_LEFT, K_RIGHT, K_CU, K_TAB, K_HIST, K_CZ, K_KB = range(11)
_STASH_ROOT = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
_STASH_CONFIG_FILES = ('.stash_config', 'stash.cfg')
_STASH_HISTORY_FILE = '.stash_history'
# directory for stash extensions
_STASH_EXTENSION_PATH = os.path.abspath(os.path.join(os.getenv("HOME"), "Documents", "stash_extensions"), )
# directory for stash bin extensions
_STASH_EXTENSION_BIN_PATH = os.path.join(_STASH_EXTENSION_PATH, "bin")
# directory for stash man extensions
_STASH_EXTENSION_MAN_PATH = os.path.join(_STASH_EXTENSION_PATH, "man")
# directory for stash FSI extensions
_STASH_EXTENSION_FSI_PATH = os.path.join(_STASH_EXTENSION_PATH, "fsi")
# directory for stash patch extensions
_STASH_EXTENSION_PATCH_PATH = os.path.join(_STASH_EXTENSION_PATH, "patches")
# list of directories outside of _STASH_ROOT, used for simple mkdir
_EXTERNAL_DIRS = [
_STASH_EXTENSION_PATH,
_STASH_EXTENSION_BIN_PATH,
_STASH_EXTENSION_MAN_PATH,
_STASH_EXTENSION_FSI_PATH,
_STASH_EXTENSION_PATCH_PATH,
]
# Python 3 or not Python 3
PY3 = six.PY3
# Save the true IOs
if IN_PYTHONISTA:
# The stdio catchers recreation is copied from code written by @dgelessus
# https://forum.omz-software.com/topic/1946/pythonista-1-6-beta/167
# In pythonista beta 301006, _outputcapture was replaced with pykit_io
try:
import _outputcapture
except ImportError:
import pykit_io
class _outputcapture(object):
ReadStdin = pykit_io.read_stdin
CaptureStdout = pykit_io.write_stdout
CaptureStderr = pykit_io.write_stderr
if sys.stdin.__class__.__name__ == 'StdinCatcher':
_SYS_STDIN = sys.__stdin__ = sys.stdin
elif sys.__stdin__.__class__.__name__ == 'StdinCatcher':
_SYS_STDIN = sys.__stdin__
else:
class StdinCatcher(object):
def __init__(self):
self.encoding = 'utf8'
def read(self, limit=-1):
return _outputcapture.ReadStdin(limit)
def readline(self):
return _outputcapture.ReadStdin()
_SYS_STDIN = StdinCatcher()
if sys.stdout.__class__.__name__ == 'StdoutCatcher':
_SYS_STDOUT = sys.__stdout__ = sys.stdout
elif sys.__stdout__.__class__.__name__ == 'StdoutCatcher':
_SYS_STDOUT = sys.__stdout__
else:
class StdoutCatcher(object):
def __init__(self):
self.encoding = 'utf8'
def flush(self):
pass
def write(self, s):
if isinstance(s, str):
_outputcapture.CaptureStdout(s)
elif isinstance(s, six.text_type):
_outputcapture.CaptureStdout(s.encode('utf8'))
def writelines(self, lines):
self.write(''.join(lines))
_SYS_STDOUT = StdoutCatcher()
if sys.stderr.__class__.__name__ == 'StderrCatcher':
_SYS_STDERR = sys.__stderr__ = sys.stderr
    elif sys.__stderr__.__class__.__name__ == 'StderrCatcher':
_SYS_STDERR = sys.__stderr__
else:
class StderrCatcher(object):
def __init__(self):
self.encoding = 'utf8'
def flush(self):
pass
def write(self, s):
if isinstance(s, str):
_outputcapture.CaptureStderr(s)
elif isinstance(s, six.text_type):
_outputcapture.CaptureStderr(s.encode('utf8'))
def writelines(self, lines):
self.write(''.join(lines))
_SYS_STDERR = StderrCatcher()
else:
_SYS_STDOUT = sys.stdout
_SYS_STDERR = sys.stderr
_SYS_STDIN = sys.stdin
_SYS_PATH = sys.path
_OS_ENVIRON = os.environ
def is_binary_file(filename, nbytes=1024):
"""
An approximate way to tell whether a file is binary.
:param str filename: The name of the file to be tested.
:param int nbytes: number of bytes to read for test
:return:
"""
with open(filename, 'rb') as ins:
for c in ins.read(nbytes):
if isinstance(c, six.integer_types):
oc = c
else:
oc = ord(c)
if 127 < oc < 256 or (oc < 32 and oc not in (9, 10, 13)):
return True
else:
return False
def sh_delay(func, nseconds):
t = threading.Timer(nseconds, func)
t.start()
return t
def sh_background(name=None):
def wrap(func):
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
t = threading.Thread(name=name, target=func, args=args, kwargs=kwargs)
t.start()
return t
return wrapped_func
return wrap
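# Hypothetical usage sketch for sh_background: run a slow task without blocking
# the calling thread.
#
#   @sh_background('worker')
#   def long_copy(src, dst):
#       ...  # runs in its own thread; the wrapped call returns the Thread object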
class ShFileNotFound(Exception):
pass
class ShIsDirectory(Exception):
pass
class ShNotExecutable(Exception):
def __init__(self, filename):
super(Exception, self).__init__('{}: not executable\n'.format(filename))
class ShSingleExpansionRequired(Exception):
pass
class ShEventNotFound(Exception):
pass
class ShBadSubstitution(Exception):
pass
class ShSyntaxError(Exception):
pass
class ShInternalError(Exception):
pass
class Control(object):
"""
pyte.control
~~~~~~~~~~~~
This module defines simple control sequences, recognized by
:class:`~pyte.streams.Stream`, the set of codes here is for
``TERM=linux`` which is a superset of VT102.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
    #: *Space*: Not surprisingly -- ``" "``.
SP = u" "
#: *Null*: Does nothing.
NUL = u"\u0000"
#: *Bell*: Beeps.
BEL = u"\u0007"
#: *Backspace*: Backspace one column, but not past the beginning of the
#: line.
BS = u"\u0008"
#: *Horizontal tab*: Move cursor to the next tab stop, or to the end
#: of the line if there is no earlier tab stop.
HT = u"\u0009"
#: *Linefeed*: Give a line feed, and, if :data:`pyte.modes.LNM` (new
#: line mode) is set also a carriage return.
LF = u"\n"
#: *Vertical tab*: Same as :data:`LF`.
VT = u"\u000b"
#: *Form feed*: Same as :data:`LF`.
FF = u"\u000c"
#: *Carriage return*: Move cursor to left margin on current line.
CR = u"\r"
#: *Shift out*: Activate G1 character set.
SO = u"\u000e"
#: *Shift in*: Activate G0 character set.
SI = u"\u000f"
#: *Cancel*: Interrupt escape sequence. If received during an escape or
#: control sequence, cancels the sequence and displays substitution
#: character.
CAN = u"\u0018"
#: *Substitute*: Same as :data:`CAN`.
SUB = u"\u001a"
#: *Escape*: Starts an escape sequence.
ESC = u"\u001b"
#: *Delete*: Is ignored.
DEL = u"\u007f"
#: *Control sequence introducer*: An equivalent for ``ESC [``.
CSI = u"\u009b"
class Escape(object):
"""
pyte.escape
~~~~~~~~~~~
This module defines both CSI and non-CSI escape sequences, recognized
by :class:`~pyte.streams.Stream` and subclasses.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
#: *Reset*.
RIS = u"c"
#: *Index*: Move cursor down one line in same column. If the cursor is
#: at the bottom margin, the screen performs a scroll-up.
IND = u"D"
#: *Next line*: Same as :data:`pyte.control.LF`.
NEL = u"E"
#: Tabulation set: Set a horizontal tab stop at cursor position.
HTS = u"H"
#: *Reverse index*: Move cursor up one line in same column. If the
#: cursor is at the top margin, the screen performs a scroll-down.
RI = u"M"
#: Save cursor: Save cursor position, character attribute (graphic
#: rendition), character set, and origin mode selection (see
#: :data:`DECRC`).
DECSC = u"7"
#: *Restore cursor*: Restore previously saved cursor position, character
#: attribute (graphic rendition), character set, and origin mode
#: selection. If none were saved, move cursor to home position.
DECRC = u"8"
# "Percent" escape sequences.
# ---------------------------
#: *Select default (ISO 646 / ISO 8859-1)*.
DEFAULT = u"@"
#: *Select UTF-8*.
UTF8 = u"G"
#: *Select UTF-8 (obsolete)*.
UTF8_OBSOLETE = u"8"
# "Sharp" escape sequences.
# -------------------------
#: *Alignment display*: Fill screen with uppercase E's for testing
#: screen focus and alignment.
DECALN = u"8"
# ECMA-48 CSI sequences.
# ---------------------
#: *Insert character*: Insert the indicated # of blank characters.
ICH = u"@"
#: *Cursor up*: Move cursor up the indicated # of lines in same column.
#: Cursor stops at top margin.
CUU = u"A"
#: *Cursor down*: Move cursor down the indicated # of lines in same
#: column. Cursor stops at bottom margin.
CUD = u"B"
#: *Cursor forward*: Move cursor right the indicated # of columns.
#: Cursor stops at right margin.
CUF = u"C"
#: *Cursor back*: Move cursor left the indicated # of columns. Cursor
#: stops at left margin.
CUB = u"D"
#: *Cursor next line*: Move cursor down the indicated # of lines to
#: column 1.
CNL = u"E"
#: *Cursor previous line*: Move cursor up the indicated # of lines to
#: column 1.
CPL = u"F"
#: *Cursor horizontal align*: Move cursor to the indicated column in
#: current line.
CHA = u"G"
#: *Cursor position*: Move cursor to the indicated line, column (origin
#: at ``1, 1``).
CUP = u"H"
#: *Erase data* (default: from cursor to end of line).
ED = u"J"
#: *Erase in line* (default: from cursor to end of line).
EL = u"K"
#: *Insert line*: Insert the indicated # of blank lines, starting from
#: the current line. Lines displayed below cursor move down. Lines moved
#: past the bottom margin are lost.
IL = u"L"
#: *Delete line*: Delete the indicated # of lines, starting from the
#: current line. As lines are deleted, lines displayed below cursor
#: move up. Lines added to bottom of screen have spaces with same
#: character attributes as last line move up.
DL = u"M"
#: *Delete character*: Delete the indicated # of characters on the
#: current line. When character is deleted, all characters to the right
#: of cursor move left.
DCH = u"P"
#: *Erase character*: Erase the indicated # of characters on the
#: current line.
ECH = u"X"
#: *Horizontal position relative*: Same as :data:`CUF`.
HPR = u"a"
#: *Vertical position adjust*: Move cursor to the indicated line,
#: current column.
VPA = u"d"
#: *Vertical position relative*: Same as :data:`CUD`.
VPR = u"e"
#: *Horizontal / Vertical position*: Same as :data:`CUP`.
HVP = u"f"
#: *Tabulation clear*: Clears a horizontal tab stop at cursor position.
TBC = u"g"
#: *Set mode*.
SM = u"h"
#: *Reset mode*.
RM = u"l"
#: *Select graphics rendition*: The terminal can display the following
#: character attributes that change the character display without
#: changing the character (see :mod:`pyte.graphics`).
SGR = u"m"
#: *Select top and bottom margins*: Selects margins, defining the
#: scrolling region; parameters are top and bottom line. If called
#: without any arguments, whole screen is used.
DECSTBM = u"r"
#: *Horizontal position adjust*: Same as :data:`CHA`.
HPA = u"'"
class Graphics(object):
# -*- coding: utf-8 -*-
"""
pyte.graphics
~~~~~~~~~~~~~
This module defines graphic-related constants, mostly taken from
:manpage:`console_codes(4)` and
http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
#: A mapping of ANSI text style codes to style names, "+" means the:
#: attribute is set, "-" -- reset; example:
#:
#: >>> text[1]
#: '+bold'
#: >>> text[9]
#: '+strikethrough'
TEXT = {
1: "+bold",
3: "+italics",
4: "+underscore",
7: "+reverse",
9: "+strikethrough",
22: "-bold",
23: "-italics",
24: "-underscore",
27: "-reverse",
29: "-strikethrough"
}
#: A mapping of ANSI foreground color codes to color names, example:
#:
#: >>> FG[30]
#: 'black'
#: >>> FG[38]
#: 'default'
FG = {
30: "black",
31: "red",
32: "green",
33: "brown",
34: "blue",
35: "magenta",
36: "cyan",
37: "white",
39: "default", # white.
50: "gray",
51: "yellow",
52: "smoke",
}
#: A mapping of ANSI background color codes to color names, example:
#:
#: >>> BG[40]
#: 'black'
#: >>> BG[48]
#: 'default'
BG = {
40: "black",
41: "red",
42: "green",
43: "brown",
44: "blue",
45: "magenta",
46: "cyan",
47: "white",
49: "default", # black.
60: "gray",
61: "yellow",
62: "smoke",
}
# Reverse mapping of all available attributes -- keep this private!
_SGR = {v: k for k, v in chain(FG.items(), TEXT.items())}
_SGR.update({'bg-' + v: k for k, v in BG.items()})
|
import os.path
import functools
import posixpath
import zipfile
import logging
import pathlib
from typing import cast, IO, List, Set
from PyQt5.QtCore import QUrl
from qutebrowser.api import (cmdutils, hook, config, message, downloads,
interceptor, apitypes, qtutils)
logger = logging.getLogger('network')
_host_blocker = cast('HostBlocker', None)
def _guess_zip_filename(zf: zipfile.ZipFile) -> str:
"""Guess which file to use inside a zip file."""
files = zf.namelist()
if len(files) == 1:
return files[0]
else:
for e in files:
if posixpath.splitext(e)[0].lower() == 'hosts':
return e
raise FileNotFoundError("No hosts file found in zip")
def get_fileobj(byte_io: IO[bytes]) -> IO[bytes]:
"""Get a usable file object to read the hosts file from."""
byte_io.seek(0) # rewind downloaded file
if zipfile.is_zipfile(byte_io):
byte_io.seek(0) # rewind what zipfile.is_zipfile did
zf = zipfile.ZipFile(byte_io)
filename = _guess_zip_filename(zf)
byte_io = zf.open(filename, mode='r')
else:
byte_io.seek(0) # rewind what zipfile.is_zipfile did
return byte_io
def _is_whitelisted_url(url: QUrl) -> bool:
"""Check if the given URL is on the adblock whitelist."""
for pattern in config.val.content.host_blocking.whitelist:
if pattern.matches(url):
return True
return False
class _FakeDownload(downloads.TempDownload):
"""A download stub to use on_download_finished with local files."""
def __init__(self, # pylint: disable=super-init-not-called
fileobj: IO[bytes]) -> None:
self.fileobj = fileobj
self.successful = True
class HostBlocker:
"""Manage blocked hosts based from /etc/hosts-like files.
Attributes:
_blocked_hosts: A set of blocked hosts.
_config_blocked_hosts: A set of blocked hosts from ~/.config.
_in_progress: The DownloadItems which are currently downloading.
_done_count: How many files have been read successfully.
_local_hosts_file: The path to the blocked-hosts file.
_config_hosts_file: The path to a blocked-hosts in ~/.config
_has_basedir: Whether a custom --basedir is set.
"""
def __init__(self, *, data_dir: pathlib.Path, config_dir: pathlib.Path,
has_basedir: bool = False) -> None:
self._has_basedir = has_basedir
self._blocked_hosts: Set[str] = set()
self._config_blocked_hosts: Set[str] = set()
self._in_progress: List[downloads.TempDownload] = []
self._done_count = 0
self._local_hosts_file = str(data_dir / 'blocked-hosts')
self.update_files()
self._config_hosts_file = str(config_dir / 'blocked-hosts')
def _is_blocked(self, request_url: QUrl,
first_party_url: QUrl = None) -> bool:
"""Check whether the given request is blocked."""
if first_party_url is not None and not first_party_url.isValid():
first_party_url = None
qtutils.ensure_valid(request_url)
if not config.get('content.host_blocking.enabled',
url=first_party_url):
return False
host = request_url.host()
return ((host in self._blocked_hosts or
host in self._config_blocked_hosts) and
not _is_whitelisted_url(request_url))
def filter_request(self, info: interceptor.Request) -> None:
"""Block the given request if necessary."""
if self._is_blocked(request_url=info.request_url,
first_party_url=info.first_party_url):
logger.debug("Request to {} blocked by host blocker."
.format(info.request_url.host()))
info.block()
def _read_hosts_line(self, raw_line: bytes) -> Set[str]:
"""Read hosts from the given line.
Args:
            raw_line: The bytes object to read.
Returns:
A set containing valid hosts found
in the line.
"""
if raw_line.startswith(b'#'):
# Ignoring comments early so we don't have to care about
# encoding errors in them
return set()
line = raw_line.decode('utf-8')
# Remove comments
hash_idx = line.find('#')
line = line if hash_idx == -1 else line[:hash_idx]
parts = line.strip().split()
if len(parts) == 1:
# "one host per line" format
hosts = parts
else:
# /etc/hosts format
hosts = parts[1:]
filtered_hosts = set()
for host in hosts:
if ('.' in host and
not host.endswith('.localdomain') and
host != '0.0.0.0'):
filtered_hosts.update([host])
return filtered_hosts
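    # For example, the /etc/hosts-style line
    # b"0.0.0.0 ads.example.com tracker.example.com" yields
    # {"ads.example.com", "tracker.example.com"}: the redirect address itself
    # and purely local entries are filtered out.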
def _read_hosts_file(self, filename: str, target: Set[str]) -> bool:
"""Read hosts from the given filename.
Args:
filename: The file to read.
target: The set to store the hosts in.
Return:
True if a read was attempted, False otherwise
"""
if not os.path.exists(filename):
return False
try:
with open(filename, 'rb') as f:
for line in f:
target |= self._read_hosts_line(line)
except (OSError, UnicodeDecodeError):
logger.exception("Failed to read host blocklist!")
return True
def read_hosts(self) -> None:
"""Read hosts from the existing blocked-hosts file."""
self._blocked_hosts = set()
self._read_hosts_file(self._config_hosts_file,
self._config_blocked_hosts)
found = self._read_hosts_file(self._local_hosts_file,
self._blocked_hosts)
if not found:
if (config.val.content.host_blocking.lists and
not self._has_basedir and
config.val.content.host_blocking.enabled):
message.info("Run :adblock-update to get adblock lists.")
def adblock_update(self) -> None:
"""Update the adblock block lists."""
self._read_hosts_file(self._config_hosts_file,
self._config_blocked_hosts)
self._blocked_hosts = set()
self._done_count = 0
for url in config.val.content.host_blocking.lists:
if url.scheme() == 'file':
filename = url.toLocalFile()
if os.path.isdir(filename):
for entry in os.scandir(filename):
if entry.is_file():
self._import_local(entry.path)
else:
self._import_local(filename)
else:
download = downloads.download_temp(url)
self._in_progress.append(download)
download.finished.connect(
functools.partial(self._on_download_finished, download))
def _import_local(self, filename: str) -> None:
"""Adds the contents of a file to the blocklist.
Args:
filename: path to a local file to import.
"""
try:
fileobj = open(filename, 'rb')
except OSError as e:
message.error("adblock: Error while reading {}: {}".format(
filename, e.strerror))
return
download = _FakeDownload(fileobj)
self._in_progress.append(download)
self._on_download_finished(download)
def _merge_file(self, byte_io: IO[bytes]) -> None:
"""Read and merge host files.
Args:
byte_io: The BytesIO object of the completed download.
"""
error_count = 0
line_count = 0
try:
f = get_fileobj(byte_io)
except (OSError, zipfile.BadZipFile, zipfile.LargeZipFile,
LookupError) as e:
message.error("adblock: Error while reading {}: {} - {}".format(
byte_io.name, e.__class__.__name__, e))
return
for line in f:
line_count += 1
try:
self._blocked_hosts |= self._read_hosts_line(line)
except UnicodeDecodeError:
logger.error("Failed to decode: {!r}".format(line))
error_count += 1
logger.debug("{}: read {} lines".format(byte_io.name, line_count))
if error_count > 0:
message.error("adblock: {} read errors for {}".format(
error_count, byte_io.name))
def _on_lists_downloaded(self) -> None:
"""Install block lists after files have been downloaded."""
with open(self._local_hosts_file, 'w', encoding='utf-8') as f:
for host in sorted(self._blocked_hosts):
f.write(host + '\n')
message.info("adblock: Read {} hosts from {} sources.".format(
len(self._blocked_hosts), self._done_count))
def update_files(self) -> None:
"""Update files when the config changed."""
if not config.val.content.host_blocking.lists:
try:
os.remove(self._local_hosts_file)
except FileNotFoundError:
pass
except OSError as e:
logger.exception("Failed to delete hosts file: {}".format(e))
def _on_download_finished(self, download: downloads.TempDownload) -> None:
"""Check if all downloads are finished and if so, trigger reading.
Arguments:
download: The finished download.
"""
self._in_progress.remove(download)
if download.successful:
self._done_count += 1
assert not isinstance(download.fileobj,
downloads.UnsupportedAttribute)
assert download.fileobj is not None
try:
self._merge_file(download.fileobj)
finally:
download.fileobj.close()
if not self._in_progress:
try:
self._on_lists_downloaded()
except OSError:
logger.exception("Failed to write host block list!")
@cmdutils.register()
def adblock_update() -> None:
"""Update the adblock block lists.
This updates `~/.local/share/qutebrowser/blocked-hosts` with downloaded
host lists and re-reads `~/.config/qutebrowser/blocked-hosts`.
"""
# FIXME: As soon as we can register instances again, we should move this
# back to the class.
_host_blocker.adblock_update()
@hook.config_changed('content.host_blocking.lists')
def on_config_changed() -> None:
_host_blocker.update_files()
@hook.init()
def init(context: apitypes.InitContext) -> None:
"""Initialize the host blocker."""
global _host_blocker
_host_blocker = HostBlocker(data_dir=context.data_dir,
config_dir=context.config_dir,
has_basedir=context.args.basedir is not None)
_host_blocker.read_hosts()
interceptor.register(_host_blocker.filter_request)
|
import logging
import voluptuous as vol
from zengge import zengge
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import CONF_DEVICES, CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
SUPPORT_ZENGGE_LED = SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_WHITE_VALUE
DEVICE_SCHEMA = vol.Schema({vol.Optional(CONF_NAME): cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Zengge platform."""
lights = []
for address, device_config in config[CONF_DEVICES].items():
device = {}
device["name"] = device_config[CONF_NAME]
device["address"] = address
light = ZenggeLight(device)
if light.is_valid:
lights.append(light)
add_entities(lights, True)
class ZenggeLight(LightEntity):
"""Representation of a Zengge light."""
def __init__(self, device):
"""Initialize the light."""
self._name = device["name"]
self._address = device["address"]
self.is_valid = True
self._bulb = zengge(self._address)
self._white = 0
self._brightness = 0
self._hs_color = (0, 0)
self._state = False
if self._bulb.connect() is False:
self.is_valid = False
_LOGGER.error("Failed to connect to bulb %s, %s", self._address, self._name)
return
@property
def unique_id(self):
"""Return the ID of this light."""
return self._address
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def brightness(self):
"""Return the brightness property."""
return self._brightness
@property
def hs_color(self):
"""Return the color property."""
return self._hs_color
@property
def white_value(self):
"""Return the white property."""
return self._white
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_ZENGGE_LED
@property
def assumed_state(self):
"""We can report the actual state."""
return False
def set_rgb(self, red, green, blue):
"""Set the rgb state."""
return self._bulb.set_rgb(red, green, blue)
def set_white(self, white):
"""Set the white state."""
return self._bulb.set_white(white)
def turn_on(self, **kwargs):
"""Turn the specified light on."""
self._state = True
self._bulb.on()
hs_color = kwargs.get(ATTR_HS_COLOR)
white = kwargs.get(ATTR_WHITE_VALUE)
brightness = kwargs.get(ATTR_BRIGHTNESS)
if white is not None:
self._white = white
self._hs_color = (0, 0)
if hs_color is not None:
self._white = 0
self._hs_color = hs_color
if brightness is not None:
self._white = 0
self._brightness = brightness
if self._white != 0:
self.set_white(self._white)
else:
rgb = color_util.color_hsv_to_RGB(
self._hs_color[0], self._hs_color[1], self._brightness / 255 * 100
)
self.set_rgb(*rgb)
def turn_off(self, **kwargs):
"""Turn the specified light off."""
self._state = False
self._bulb.off()
def update(self):
"""Synchronise internal state with the actual light state."""
rgb = self._bulb.get_colour()
hsv = color_util.color_RGB_to_hsv(*rgb)
self._hs_color = hsv[:2]
self._brightness = (hsv[2] / 100) * 255
self._white = self._bulb.get_white()
self._state = self._bulb.get_on()
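# Illustrative only, not part of the original platform file: a hedged sketch of
# the YAML this PLATFORM_SCHEMA accepts, assuming the platform is registered
# under the name "zengge" (the MAC address and name below are placeholders).
#
#   light:
#     - platform: zengge
#       devices:
#         "08:66:98:2A:5F:3B":
#           name: Living room strip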
|
from datetime import timedelta
from epsonprinter_pkg.epsonprinterapi import EpsonPrinterAPI
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_MONITORED_CONDITIONS, PERCENTAGE
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
MONITORED_CONDITIONS = {
"black": ["Ink level Black", PERCENTAGE, "mdi:water"],
"photoblack": ["Ink level Photoblack", PERCENTAGE, "mdi:water"],
"magenta": ["Ink level Magenta", PERCENTAGE, "mdi:water"],
"cyan": ["Ink level Cyan", PERCENTAGE, "mdi:water"],
"yellow": ["Ink level Yellow", PERCENTAGE, "mdi:water"],
"clean": ["Cleaning level", PERCENTAGE, "mdi:water"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]
),
}
)
SCAN_INTERVAL = timedelta(minutes=60)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the cartridge sensor."""
host = config.get(CONF_HOST)
api = EpsonPrinterAPI(host)
if not api.available:
raise PlatformNotReady()
sensors = [
EpsonPrinterCartridge(api, condition)
for condition in config[CONF_MONITORED_CONDITIONS]
]
add_devices(sensors, True)
class EpsonPrinterCartridge(Entity):
"""Representation of a cartridge sensor."""
def __init__(self, api, cartridgeidx):
"""Initialize a cartridge sensor."""
self._api = api
self._id = cartridgeidx
self._name = MONITORED_CONDITIONS[self._id][0]
self._unit = MONITORED_CONDITIONS[self._id][1]
self._icon = MONITORED_CONDITIONS[self._id][2]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@property
def state(self):
"""Return the state of the device."""
return self._api.getSensorValue(self._id)
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._api.available
def update(self):
"""Get the latest data from the Epson printer."""
self._api.update()
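# Illustrative only, not part of the original platform file: a hedged sketch of
# the YAML this PLATFORM_SCHEMA accepts. The platform name "epsonworkforce" is
# an assumption (it is not stated in this file); host and conditions are
# placeholders.
#
#   sensor:
#     - platform: epsonworkforce
#       host: 192.168.1.25
#       monitored_conditions:
#         - black
#         - magenta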
|
import warnings
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from .common import (
_contains_datetime_like_objects,
is_np_datetime_like,
is_np_timedelta_like,
)
from .pycompat import is_duck_dask_array
def _season_from_months(months):
"""Compute season (DJF, MAM, JJA, SON) from month ordinal"""
# TODO: Move "season" accessor upstream into pandas
seasons = np.array(["DJF", "MAM", "JJA", "SON"])
months = np.asarray(months)
return seasons[(months // 3) % 4]
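# Worked example (added for clarity, not in the original source): with the
# (months // 3) % 4 mapping above, months 12/1/2 -> index 0 -> "DJF",
# 3/4/5 -> 1 -> "MAM", 6/7/8 -> 2 -> "JJA", 9/10/11 -> 3 -> "SON", e.g.
#     _season_from_months([1, 4, 7, 10])  # -> ['DJF', 'MAM', 'JJA', 'SON']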
def _access_through_cftimeindex(values, name):
"""Coerce an array of datetime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
if name == "season":
months = values_as_cftimeindex.month
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_cftimeindex, name)
return field_values.reshape(values.shape)
def _access_through_series(values, name):
"""Coerce an array of datetime-like values to a pandas Series and
access requested datetime component
"""
values_as_series = pd.Series(values.ravel())
if name == "season":
months = values_as_series.dt.month.values
field_values = _season_from_months(months)
elif name == "isocalendar":
        # isocalendar returns ISO year, week, and weekday -> reshape
field_values = np.array(values_as_series.dt.isocalendar(), dtype=np.int64)
return field_values.T.reshape(3, *values.shape)
else:
field_values = getattr(values_as_series.dt, name).values
return field_values.reshape(values.shape)
def _get_date_field(values, name, dtype):
"""Indirectly access pandas' libts.get_date_field by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str
Name of datetime field to access
dtype : dtype-like
dtype for output date field values
Returns
-------
datetime_fields : same type as values
Array-like of datetime fields accessed for each element in values
"""
if is_np_datetime_like(values.dtype):
access_method = _access_through_series
else:
access_method = _access_through_cftimeindex
if is_duck_dask_array(values):
from dask.array import map_blocks
new_axis = chunks = None
        # isocalendar adds an axis
if name == "isocalendar":
chunks = (3,) + values.chunksize
new_axis = 0
return map_blocks(
access_method, values, name, dtype=dtype, new_axis=new_axis, chunks=chunks
)
else:
return access_method(values, name)
def _round_through_series_or_index(values, name, freq):
"""Coerce an array of datetime-like values to a pandas Series or xarray
CFTimeIndex and apply requested rounding
"""
from ..coding.cftimeindex import CFTimeIndex
if is_np_datetime_like(values.dtype):
values_as_series = pd.Series(values.ravel())
method = getattr(values_as_series.dt, name)
else:
values_as_cftimeindex = CFTimeIndex(values.ravel())
method = getattr(values_as_cftimeindex, name)
field_values = method(freq=freq).values
return field_values.reshape(values.shape)
def _round_field(values, name, freq):
"""Indirectly access rounding functions by wrapping data
as a Series or CFTimeIndex
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : {"ceil", "floor", "round"}
Name of rounding function
freq : str
a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
if is_duck_dask_array(values):
from dask.array import map_blocks
dtype = np.datetime64 if is_np_datetime_like(values.dtype) else np.dtype("O")
return map_blocks(
_round_through_series_or_index, values, name, freq=freq, dtype=dtype
)
else:
return _round_through_series_or_index(values, name, freq)
def _strftime_through_cftimeindex(values, date_format):
"""Coerce an array of cftime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
field_values = values_as_cftimeindex.strftime(date_format)
return field_values.values.reshape(values.shape)
def _strftime_through_series(values, date_format):
"""Coerce an array of datetime-like values to a pandas Series and
apply string formatting
"""
values_as_series = pd.Series(values.ravel())
strs = values_as_series.dt.strftime(date_format)
return strs.values.reshape(values.shape)
def _strftime(values, date_format):
if is_np_datetime_like(values.dtype):
access_method = _strftime_through_series
else:
access_method = _strftime_through_cftimeindex
if is_duck_dask_array(values):
from dask.array import map_blocks
return map_blocks(access_method, values, date_format)
else:
return access_method(values, date_format)
class Properties:
def __init__(self, obj):
self._obj = obj
def _tslib_field_accessor( # type: ignore
name: str, docstring: str = None, dtype: np.dtype = None
):
def f(self, dtype=dtype):
if dtype is None:
dtype = self._obj.dtype
obj_type = type(self._obj)
result = _get_date_field(self._obj.data, name, dtype)
return obj_type(
result, name=name, coords=self._obj.coords, dims=self._obj.dims
)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _tslib_round_accessor(self, name, freq):
obj_type = type(self._obj)
result = _round_field(self._obj.data, name, freq)
return obj_type(result, name=name, coords=self._obj.coords, dims=self._obj.dims)
def floor(self, freq):
"""
Round timestamps downward to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
floor-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("floor", freq)
def ceil(self, freq):
"""
Round timestamps upward to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
ceil-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("ceil", freq)
def round(self, freq):
"""
Round timestamps to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("round", freq)
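    # Illustrative only (hedged addition, not in the original source): for a
    # datetime64 DataArray ``ts`` the rounding accessors above are used as, e.g.,
    #     ts.dt.floor("D"); ts.dt.ceil("H"); ts.dt.round("30T")
    # each returning a same-shaped array of rounded timestamps.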
class DatetimeAccessor(Properties):
"""Access datetime fields for DataArrays with datetime-like dtypes.
Fields can be accessed through the `.dt` attribute
for applicable DataArrays.
Examples
    --------
>>> import xarray as xr
>>> import pandas as pd
>>> dates = pd.date_range(start="2000/01/01", freq="D", periods=10)
>>> ts = xr.DataArray(dates, dims=("time"))
>>> ts
<xarray.DataArray (time: 10)>
array(['2000-01-01T00:00:00.000000000', '2000-01-02T00:00:00.000000000',
'2000-01-03T00:00:00.000000000', '2000-01-04T00:00:00.000000000',
'2000-01-05T00:00:00.000000000', '2000-01-06T00:00:00.000000000',
'2000-01-07T00:00:00.000000000', '2000-01-08T00:00:00.000000000',
'2000-01-09T00:00:00.000000000', '2000-01-10T00:00:00.000000000'],
dtype='datetime64[ns]')
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
>>> ts.dt # doctest: +ELLIPSIS
<xarray.core.accessor_dt.DatetimeAccessor object at 0x...>
>>> ts.dt.dayofyear
<xarray.DataArray 'dayofyear' (time: 10)>
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
>>> ts.dt.quarter
<xarray.DataArray 'quarter' (time: 10)>
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
"""
def strftime(self, date_format):
"""
Return an array of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc
<https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
Parameters
----------
date_format : str
date format string (e.g. "%Y-%m-%d")
Returns
-------
formatted strings : same type as values
Array-like of strings formatted for each element in values
Examples
--------
>>> import datetime
>>> rng = xr.Dataset({"time": datetime.datetime(2000, 1, 1)})
>>> rng["time"].dt.strftime("%B %d, %Y, %r")
<xarray.DataArray 'strftime' ()>
array('January 01, 2000, 12:00:00 AM', dtype=object)
"""
obj_type = type(self._obj)
result = _strftime(self._obj.data, date_format)
return obj_type(
result, name="strftime", coords=self._obj.coords, dims=self._obj.dims
)
def isocalendar(self):
"""Dataset containing ISO year, week number, and weekday.
Note
----
The iso year and weekday differ from the nominal year and weekday.
"""
from .dataset import Dataset
if not is_np_datetime_like(self._obj.data.dtype):
raise AttributeError("'CFTimeIndex' object has no attribute 'isocalendar'")
if LooseVersion(pd.__version__) < "1.1.0":
raise AttributeError("'isocalendar' not available in pandas < 1.1.0")
values = _get_date_field(self._obj.data, "isocalendar", np.int64)
obj_type = type(self._obj)
data_vars = {}
for i, name in enumerate(["year", "week", "weekday"]):
data_vars[name] = obj_type(
values[i], name=name, coords=self._obj.coords, dims=self._obj.dims
)
return Dataset(data_vars)
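    # Illustrative only (not in the original source): for a datetime64 DataArray
    # ``ts``, ``ts.dt.isocalendar()`` returns a Dataset whose "year", "week" and
    # "weekday" variables share ``ts``'s dims and coords (pandas >= 1.1.0 only).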
year = Properties._tslib_field_accessor(
"year", "The year of the datetime", np.int64
)
month = Properties._tslib_field_accessor(
"month", "The month as January=1, December=12", np.int64
)
day = Properties._tslib_field_accessor("day", "The days of the datetime", np.int64)
hour = Properties._tslib_field_accessor(
"hour", "The hours of the datetime", np.int64
)
minute = Properties._tslib_field_accessor(
"minute", "The minutes of the datetime", np.int64
)
second = Properties._tslib_field_accessor(
"second", "The seconds of the datetime", np.int64
)
microsecond = Properties._tslib_field_accessor(
"microsecond", "The microseconds of the datetime", np.int64
)
nanosecond = Properties._tslib_field_accessor(
"nanosecond", "The nanoseconds of the datetime", np.int64
)
@property
def weekofyear(self):
"The week ordinal of the year"
warnings.warn(
"dt.weekofyear and dt.week have been deprecated. Please use "
"dt.isocalendar().week instead.",
FutureWarning,
)
if LooseVersion(pd.__version__) < "1.1.0":
weekofyear = Properties._tslib_field_accessor(
"weekofyear", "The week ordinal of the year", np.int64
).fget(self)
else:
weekofyear = self.isocalendar().week
return weekofyear
week = weekofyear
dayofweek = Properties._tslib_field_accessor(
"dayofweek", "The day of the week with Monday=0, Sunday=6", np.int64
)
weekday = dayofweek
weekday_name = Properties._tslib_field_accessor(
"weekday_name", "The name of day in a week", object
)
dayofyear = Properties._tslib_field_accessor(
"dayofyear", "The ordinal day of the year", np.int64
)
quarter = Properties._tslib_field_accessor("quarter", "The quarter of the date")
days_in_month = Properties._tslib_field_accessor(
"days_in_month", "The number of days in the month", np.int64
)
daysinmonth = days_in_month
season = Properties._tslib_field_accessor("season", "Season of the year", object)
time = Properties._tslib_field_accessor(
"time", "Timestamps corresponding to datetimes", object
)
is_month_start = Properties._tslib_field_accessor(
"is_month_start",
"Indicates whether the date is the first day of the month.",
bool,
)
is_month_end = Properties._tslib_field_accessor(
"is_month_end", "Indicates whether the date is the last day of the month.", bool
)
is_quarter_start = Properties._tslib_field_accessor(
"is_quarter_start",
"Indicator for whether the date is the first day of a quarter.",
bool,
)
is_quarter_end = Properties._tslib_field_accessor(
"is_quarter_end",
"Indicator for whether the date is the last day of a quarter.",
bool,
)
is_year_start = Properties._tslib_field_accessor(
"is_year_start", "Indicate whether the date is the first day of a year.", bool
)
is_year_end = Properties._tslib_field_accessor(
"is_year_end", "Indicate whether the date is the last day of the year.", bool
)
is_leap_year = Properties._tslib_field_accessor(
"is_leap_year", "Boolean indicator if the date belongs to a leap year.", bool
)
class TimedeltaAccessor(Properties):
"""Access Timedelta fields for DataArrays with Timedelta-like dtypes.
Fields can be accessed through the `.dt` attribute for applicable DataArrays.
Examples
--------
>>> import pandas as pd
>>> import xarray as xr
>>> dates = pd.timedelta_range(start="1 day", freq="6H", periods=20)
>>> ts = xr.DataArray(dates, dims=("time"))
>>> ts
<xarray.DataArray (time: 20)>
array([ 86400000000000, 108000000000000, 129600000000000, 151200000000000,
172800000000000, 194400000000000, 216000000000000, 237600000000000,
259200000000000, 280800000000000, 302400000000000, 324000000000000,
345600000000000, 367200000000000, 388800000000000, 410400000000000,
432000000000000, 453600000000000, 475200000000000, 496800000000000],
dtype='timedelta64[ns]')
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt # doctest: +ELLIPSIS
<xarray.core.accessor_dt.TimedeltaAccessor object at 0x...>
>>> ts.dt.days
<xarray.DataArray 'days' (time: 20)>
array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt.microseconds
<xarray.DataArray 'microseconds' (time: 20)>
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt.seconds
<xarray.DataArray 'seconds' (time: 20)>
array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0,
21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600,
43200, 64800])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
"""
days = Properties._tslib_field_accessor(
"days", "Number of days for each element.", np.int64
)
seconds = Properties._tslib_field_accessor(
"seconds",
"Number of seconds (>= 0 and less than 1 day) for each element.",
np.int64,
)
microseconds = Properties._tslib_field_accessor(
"microseconds",
"Number of microseconds (>= 0 and less than 1 second) for each element.",
np.int64,
)
nanoseconds = Properties._tslib_field_accessor(
"nanoseconds",
"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.",
np.int64,
)
class CombinedDatetimelikeAccessor(DatetimeAccessor, TimedeltaAccessor):
def __new__(cls, obj):
        # CombinedDatetimelikeAccessor isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
if not _contains_datetime_like_objects(obj):
raise TypeError(
"'.dt' accessor only available for "
"DataArray with datetime64 timedelta64 dtype or "
"for arrays containing cftime datetime "
"objects."
)
if is_np_timedelta_like(obj.dtype):
return TimedeltaAccessor(obj)
else:
return DatetimeAccessor(obj)
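# Hedged note (added, not in the original source): xarray exposes this class as
# the ``.dt`` property on DataArray, so ``da.dt`` dispatches to DatetimeAccessor
# for datetime64/cftime data and to TimedeltaAccessor for timedelta64 data.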
|
import pytest
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.hyper_spaces import quniform
@pytest.fixture
def param_table():
params = ParamTable()
params.add(Param('ham', 'Parma Ham'))
return params
def test_get(param_table):
assert param_table['ham'] == 'Parma Ham'
def test_set(param_table):
new_param = Param('egg', 'Over Easy')
param_table.set('egg', new_param)
assert 'egg' in param_table.keys()
def test_keys(param_table):
assert 'ham' in param_table.keys()
def test_hyper_space(param_table):
new_param = Param(
name='my_param',
value=1,
hyper_space=quniform(low=1, high=5)
)
param_table.add(new_param)
hyper_space = param_table.hyper_space
assert hyper_space
|
from __future__ import print_function
from __future__ import with_statement
import sys
from .baretest import print_unittest_summary
from .core import XML_OUTPUT_FLAG
from .core import create_xml_runner
def unitrun(package, test_name, test, sysargs=None, coverage_packages=None):
"""
    Wrapper routine for running Python unittests with
    JUnit-compatible XML output. This is meant for unittests that do
    not need a running ROS graph (i.e. offline tests only).
This enables JUnit-compatible test reporting so that
test results can be reported to higher-level tools.
WARNING: unitrun() will trigger a sys.exit() on test failure in
order to properly exit with an error code. This routine is meant
to be used as a main() routine, not as a library.
@param package: name of ROS package that is running the test
@type package: str
@param test: a test case instance or a name resolving to a test case or suite
@type test: unittest.TestCase, or string
    @param coverage_packages: list of Python packages to compute coverage results for. Defaults to [package]
@type coverage_packages: [str]
@param sysargs: (optional) alternate sys.argv
@type sysargs: [str]
"""
if sysargs is None:
# lazy-init sys args
import sys
sysargs = sys.argv
import unittest
if coverage_packages is None:
coverage_packages = [package]
# parse sysargs
result_file = None
for arg in sysargs:
if arg.startswith(XML_OUTPUT_FLAG):
result_file = arg[len(XML_OUTPUT_FLAG):]
text_mode = '--text' in sysargs
coverage_mode = '--cov' in sysargs or '--covhtml' in sysargs
if coverage_mode:
start_coverage(coverage_packages)
    # create and run unittest suite with our xmlrunner wrapper
suite = None
if isinstance(test, str):
suite = unittest.TestLoader().loadTestsFromName(test)
else:
# some callers pass a TestCase type (instead of an instance)
suite = unittest.TestLoader().loadTestsFromTestCase(test)
if text_mode:
result = unittest.TextTestRunner(verbosity=2).run(suite)
else:
result = create_xml_runner(package, test_name, result_file).run(suite)
if coverage_mode:
cov_html_dir = 'covhtml' if '--covhtml' in sysargs else None
stop_coverage(coverage_packages, html=cov_html_dir)
# test over, summarize results and exit appropriately
print_unittest_summary(result)
if not result.wasSuccessful():
import sys
sys.exit(1)
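# Illustrative only (hedged, not part of the original module): unitrun() is
# typically invoked from a test script's main block, along the lines of
#
#     if __name__ == '__main__':
#         import rosunit
#         rosunit.unitrun('my_pkg', 'test_foo', TestFoo)
#
# where 'my_pkg' and TestFoo are placeholders for the real package name and
# unittest.TestCase class.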
# coverage instance
_cov = None
def start_coverage(packages):
global _cov
try:
import coverage
try:
_cov = coverage.coverage()
# load previous results as we need to accumulate
_cov.load()
_cov.start()
except coverage.CoverageException:
print("WARNING: you have an older version of python-coverage that is not support. Please update to the version provided by 'easy_install coverage'", file=sys.stderr)
except ImportError:
print("""WARNING: cannot import python-coverage, coverage tests will not run.
To install coverage, run 'easy_install coverage'""", file=sys.stderr)
def stop_coverage(packages, html=None):
"""
@param packages: list of packages to generate coverage reports for
@type packages: [str]
@param html: (optional) if not None, directory to generate html report to
@type html: str
"""
if _cov is None:
return
import os
import sys
try:
_cov.stop()
# accumulate results
_cov.save()
# - update our own .coverage-modules file list for
# coverage-html tool. The reason we read and rewrite instead
# of append is that this does a uniqueness check to keep the
# file from growing unbounded
if os.path.exists('.coverage-modules'):
with open('.coverage-modules', 'r') as f:
all_packages = set([x for x in f.read().split('\n') if x.strip()] + packages)
else:
all_packages = set(packages)
with open('.coverage-modules', 'w') as f:
f.write('\n'.join(all_packages)+'\n')
try:
# list of all modules for html report
all_mods = []
# iterate over packages to generate per-package console reports
for package in packages:
__import__(package)
m = [v for v in sys.modules.values() if v and v.__name__.startswith(package)]
all_mods.extend(m)
# generate overall report and per module analysis
_cov.report(m, show_missing=0)
for mod in m:
res = _cov.analysis(mod)
print('\n%s:\nMissing lines: %s' % (res[0], res[3]))
if html:
print('=' * 80 + '\ngenerating html coverage report to %s\n' % html + '=' * 80)
_cov.html_report(all_mods, directory=html)
except ImportError:
print("WARNING: cannot import '%s', will not generate coverage report" % package, file=sys.stderr)
except ImportError:
print("""WARNING: cannot import python-coverage, coverage tests will not run.
To install coverage, run 'easy_install coverage'""", file=sys.stderr)
|
from .const import DOMAIN as HUE_DOMAIN
class GenericHueDevice:
"""Representation of a Hue device."""
def __init__(self, sensor, name, bridge, primary_sensor=None):
"""Initialize the sensor."""
self.sensor = sensor
self._name = name
self._primary_sensor = primary_sensor
self.bridge = bridge
@property
def primary_sensor(self):
"""Return the primary sensor entity of the physical device."""
return self._primary_sensor or self.sensor
@property
def device_id(self):
"""Return the ID of the physical device this sensor is part of."""
return self.unique_id[:23]
@property
def unique_id(self):
"""Return the ID of this Hue sensor."""
return self.sensor.uniqueid
@property
def name(self):
"""Return a friendly name for the sensor."""
return self._name
@property
def swupdatestate(self):
"""Return detail of available software updates for this device."""
return self.primary_sensor.raw.get("swupdate", {}).get("state")
@property
def device_info(self):
"""Return the device info.
Links individual entities together in the hass device registry.
"""
return {
"identifiers": {(HUE_DOMAIN, self.device_id)},
"name": self.primary_sensor.name,
"manufacturer": self.primary_sensor.manufacturername,
"model": (self.primary_sensor.productname or self.primary_sensor.modelid),
"sw_version": self.primary_sensor.swversion,
"via_device": (HUE_DOMAIN, self.bridge.api.config.bridgeid),
}
|
import unittest
from mock import Mock, call, ANY
from trashcli.fstab import FakeFstab
from trashcli.put import GlobalTrashCan
from datetime import datetime
import os
class TestHomeFallback(unittest.TestCase):
def setUp(self):
self.reporter = Mock()
mount_points = ['/', 'sandbox/other_partition']
self.fs = Mock()
self.trashcan = GlobalTrashCan(
reporter = self.reporter,
getuid = lambda: 123,
volume_of = self.fake_volume_of(mount_points),
now = datetime.now,
environ = dict(),
fs = self.fs,
parent_path = os.path.dirname,
realpath = lambda x:x,
logger = Mock())
def test_use_of_top_trash_dir_when_sticky(self):
self.fs.mock_add_spec(['isdir', 'islink', 'has_sticky_bit',
'move', 'atomic_write',
'remove_file', 'ensure_dir'])
self.fs.isdir.return_value = True
self.fs.islink.return_value = False
self.fs.has_sticky_bit.return_value = True
self.trashcan.trash('sandbox/foo')
assert [
call.isdir('.Trash'),
call.islink('.Trash'),
call.has_sticky_bit('.Trash'),
call.ensure_dir('.Trash/123/info', 448),
call.atomic_write('.Trash/123/info/foo.trashinfo', ANY),
call.ensure_dir('.Trash/123/files', 448),
call.move('sandbox/foo', '.Trash/123/files/foo')
] == self.fs.mock_calls
def test_bug_will_use_top_trashdir_even_with_not_sticky(self):
self.fs.mock_add_spec(['isdir', 'islink', 'has_sticky_bit',
'move', 'atomic_write',
'remove_file', 'ensure_dir'])
self.fs.isdir.return_value = True
self.fs.islink.return_value = False
self.fs.has_sticky_bit.return_value = False
self.trashcan.trash('sandbox/foo')
assert [
call.isdir('.Trash'),
call.islink('.Trash'),
call.has_sticky_bit('.Trash'),
call.ensure_dir('.Trash-123/info', 448),
call.atomic_write('.Trash-123/info/foo.trashinfo', ANY),
call.ensure_dir('.Trash-123/files', 448),
call.move('sandbox/foo', '.Trash-123/files/foo')
] == self.fs.mock_calls, self.fs.mock_calls
def fake_volume_of(self, volumes):
fstab = FakeFstab()
for vol in volumes:
fstab.add_mount(vol)
return fstab.volume_of
|
from typing import List
from typing import Mapping
from typing import Optional
import service_configuration_lib
from paasta_tools.kubernetes_tools import sanitised_cr_name
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
class NrtsearchServiceDeploymentConfigDict(LongRunningServiceConfigDict, total=False):
replicas: int
class NrtsearchServiceDeploymentConfig(LongRunningServiceConfig):
config_dict: NrtsearchServiceDeploymentConfigDict
config_filename_prefix = "nrtsearchservice"
def __init__(
self,
service: str,
cluster: str,
instance: str,
config_dict: NrtsearchServiceDeploymentConfigDict,
branch_dict: Optional[BranchDictV2],
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
soa_dir=soa_dir,
config_dict=config_dict,
branch_dict=branch_dict,
)
def get_instances(self, with_limit: bool = True) -> int:
return self.config_dict.get("replicas", 1)
def validate(
self,
params: List[str] = [
"cpus",
"security",
"dependencies_reference",
"deploy_group",
],
) -> List[str]:
# Use InstanceConfig to validate shared config keys like cpus and mem
# TODO: add mem back to this list once we fix PAASTA-15582 and
# move to using the same units as flink/marathon etc.
error_msgs = super().validate(params=params)
if error_msgs:
name = self.get_instance()
return [f"{name}: {msg}" for msg in error_msgs]
else:
return []
def load_nrtsearchservice_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> NrtsearchServiceDeploymentConfig:
"""Read a service instance's configuration for Nrtsearch.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "nrtsearchservice", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = NrtsearchServiceDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return NrtsearchServiceDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
# TODO: read this from CRD in service configs
def cr_id(service: str, instance: str) -> Mapping[str, str]:
return dict(
group="yelp.com",
version="v1alpha1",
namespace="paasta-nrtsearchservices",
plural="nrtsearchservices",
name=sanitised_cr_name(service, instance),
)
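# Illustrative only (added, not in the original source): the mapping returned by
# cr_id() is the identifier expected by the Kubernetes custom-objects API, e.g.
#     cr_id("search", "main")
#     # -> {"group": "yelp.com", "version": "v1alpha1",
#     #     "namespace": "paasta-nrtsearchservices", "plural": "nrtsearchservices",
#     #     "name": sanitised_cr_name("search", "main")}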
|
import json
from gios import ApiError
from homeassistant import data_entry_flow
from homeassistant.components.gios import config_flow
from homeassistant.components.gios.const import CONF_STATION_ID
from homeassistant.const import CONF_NAME
from tests.async_mock import patch
from tests.common import load_fixture
from tests.components.gios import STATIONS
CONFIG = {
CONF_NAME: "Foo",
CONF_STATION_ID: 123,
}
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.GiosFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_invalid_station_id(hass):
"""Test that errors are shown when measuring station ID is invalid."""
with patch(
"homeassistant.components.gios.Gios._get_stations", return_value=STATIONS
):
flow = config_flow.GiosFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_user(
user_input={CONF_NAME: "Foo", CONF_STATION_ID: 0}
)
assert result["errors"] == {CONF_STATION_ID: "wrong_station_id"}
async def test_invalid_sensor_data(hass):
"""Test that errors are shown when sensor data is invalid."""
with patch(
"homeassistant.components.gios.Gios._get_stations", return_value=STATIONS
), patch(
"homeassistant.components.gios.Gios._get_station",
return_value=json.loads(load_fixture("gios/station.json")),
), patch(
"homeassistant.components.gios.Gios._get_sensor", return_value={}
):
flow = config_flow.GiosFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_user(user_input=CONFIG)
assert result["errors"] == {CONF_STATION_ID: "invalid_sensors_data"}
async def test_cannot_connect(hass):
"""Test that errors are shown when cannot connect to GIOS server."""
with patch(
"homeassistant.components.gios.Gios._async_get", side_effect=ApiError("error")
):
flow = config_flow.GiosFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_user(user_input=CONFIG)
assert result["errors"] == {"base": "cannot_connect"}
async def test_create_entry(hass):
"""Test that the user step works."""
with patch(
"homeassistant.components.gios.Gios._get_stations", return_value=STATIONS
), patch(
"homeassistant.components.gios.Gios._get_station",
return_value=json.loads(load_fixture("gios/station.json")),
), patch(
"homeassistant.components.gios.Gios._get_all_sensors",
return_value=json.loads(load_fixture("gios/sensors.json")),
), patch(
"homeassistant.components.gios.Gios._get_indexes",
return_value=json.loads(load_fixture("gios/indexes.json")),
):
flow = config_flow.GiosFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_user(user_input=CONFIG)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == CONFIG[CONF_STATION_ID]
assert result["data"][CONF_STATION_ID] == CONFIG[CONF_STATION_ID]
assert flow.context["unique_id"] == CONFIG[CONF_STATION_ID]
|
from django.conf import settings
from django.db.models import Max
from django.forms import models, fields, widgets
from django.utils.translation import gettext_lazy as _
from cms.wizards.forms import BaseFormMixin
from djangocms_text_ckeditor.fields import HTMLFormField
from shop.models.related import ProductPageModel
from shop.models.defaults.commodity import Commodity
class CommodityWizardForm(BaseFormMixin, models.ModelForm):
product_name = fields.CharField(label=_("Product Name"),
widget=widgets.TextInput(attrs={'size': 50}))
slug = fields.CharField(label=_("Slug"), widget=widgets.TextInput(attrs={'size': 50}))
caption = HTMLFormField(label=_("Caption"), required=False)
class Meta:
model = Commodity
fields = ('product_name', 'slug', 'caption', 'product_code',
'unit_price', 'active', 'show_breadcrumb', 'sample_image',)
@property
def media(self):
minimized = '' if settings.DEBUG else '.min'
media = super().media
css = {'all': ['admin/css/base.css', 'admin/css/forms.css']}
media.add_css(css)
media._js = [
'admin/js/core.js',
'admin/js/vendor/jquery/jquery{}.js'.format(minimized),
'admin/js/jquery.init.js',
'admin/js/urlify.js',
'admin/js/prepopulate{}.js'.format(minimized),
] + media._js
media.add_js([
'filer/js/libs/mediator.min.js',
'filer/js/libs/jquery.cookie.min.js',
'filer/js/libs/fileuploader.min.js',
'admin/js/admin/RelatedObjectLookups.js',
])
return media
def save(self, commit=True):
self.instance.product_name = self.cleaned_data['product_name']
self.instance.caption = self.cleaned_data['caption']
self.instance.slug = self.cleaned_data['slug']
max_order = Commodity.objects.aggregate(max=Max('order'))['max']
self.instance.order = max_order + 1 if max_order else 1
commodity = super().save(commit)
ProductPageModel.objects.create(product=commodity, page=self.page.get_public_object())
return commodity
|
import asyncio
from datetime import datetime, timedelta
import logging
from uuid import UUID
import pygatt # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.components.device_tracker.const import (
CONF_SCAN_INTERVAL,
CONF_TRACK_NEW,
SCAN_INTERVAL,
SOURCE_TYPE_BLUETOOTH_LE,
)
from homeassistant.components.device_tracker.legacy import (
YAML_DEVICES,
async_load_config,
)
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_utc_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
# Base UUID: 00000000-0000-1000-8000-00805F9B34FB
# Battery characteristic: 0x2a19 (https://www.bluetooth.com/specifications/gatt/characteristics/)
BATTERY_CHARACTERISTIC_UUID = UUID("00002a19-0000-1000-8000-00805f9b34fb")
CONF_TRACK_BATTERY = "track_battery"
CONF_TRACK_BATTERY_INTERVAL = "track_battery_interval"
DEFAULT_TRACK_BATTERY_INTERVAL = timedelta(days=1)
DATA_BLE = "BLE"
DATA_BLE_ADAPTER = "ADAPTER"
BLE_PREFIX = "BLE_"
MIN_SEEN_NEW = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_TRACK_BATTERY, default=False): cv.boolean,
vol.Optional(
CONF_TRACK_BATTERY_INTERVAL, default=DEFAULT_TRACK_BATTERY_INTERVAL
): cv.time_period,
}
)
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up the Bluetooth LE Scanner."""
new_devices = {}
hass.data.setdefault(DATA_BLE, {DATA_BLE_ADAPTER: None})
def handle_stop(event):
"""Try to shut down the bluetooth child process nicely."""
# These should never be unset at the point this runs, but just for
# safety's sake, use `get`.
adapter = hass.data.get(DATA_BLE, {}).get(DATA_BLE_ADAPTER)
if adapter is not None:
adapter.kill()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_stop)
if config[CONF_TRACK_BATTERY]:
battery_track_interval = config[CONF_TRACK_BATTERY_INTERVAL]
else:
battery_track_interval = timedelta(0)
def see_device(address, name, new_device=False, battery=None):
"""Mark a device as seen."""
if name is not None:
name = name.strip("\x00")
if new_device:
if address in new_devices:
new_devices[address]["seen"] += 1
if name:
new_devices[address]["name"] = name
else:
name = new_devices[address]["name"]
_LOGGER.debug("Seen %s %s times", address, new_devices[address]["seen"])
if new_devices[address]["seen"] < MIN_SEEN_NEW:
return
_LOGGER.debug("Adding %s to tracked devices", address)
devs_to_track.append(address)
if battery_track_interval > timedelta(0):
devs_track_battery[address] = dt_util.as_utc(
datetime.fromtimestamp(0)
)
else:
_LOGGER.debug("Seen %s for the first time", address)
new_devices[address] = {"seen": 1, "name": name}
return
see(
mac=BLE_PREFIX + address,
host_name=name,
source_type=SOURCE_TYPE_BLUETOOTH_LE,
battery=battery,
)
def discover_ble_devices():
"""Discover Bluetooth LE devices."""
_LOGGER.debug("Discovering Bluetooth LE devices")
try:
adapter = pygatt.GATTToolBackend()
hass.data[DATA_BLE][DATA_BLE_ADAPTER] = adapter
devs = adapter.scan()
devices = {x["address"]: x["name"] for x in devs}
_LOGGER.debug("Bluetooth LE devices discovered = %s", devices)
except (RuntimeError, pygatt.exceptions.BLEError) as error:
_LOGGER.error("Error during Bluetooth LE scan: %s", error)
return {}
return devices
yaml_path = hass.config.path(YAML_DEVICES)
devs_to_track = []
devs_donot_track = []
devs_track_battery = {}
# Load all known devices.
# We just need the devices so set consider_home and home range
# to 0
for device in asyncio.run_coroutine_threadsafe(
async_load_config(yaml_path, hass, 0), hass.loop
).result():
# check if device is a valid bluetooth device
if device.mac and device.mac[:4].upper() == BLE_PREFIX:
address = device.mac[4:]
if device.track:
_LOGGER.debug("Adding %s to BLE tracker", device.mac)
devs_to_track.append(address)
if battery_track_interval > timedelta(0):
devs_track_battery[address] = dt_util.as_utc(
datetime.fromtimestamp(0)
)
else:
_LOGGER.debug("Adding %s to BLE do not track", device.mac)
devs_donot_track.append(address)
# if track new devices is true discover new devices
# on every scan.
track_new = config.get(CONF_TRACK_NEW)
if not devs_to_track and not track_new:
_LOGGER.warning("No Bluetooth LE devices to track!")
return False
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
def update_ble(now):
"""Lookup Bluetooth LE devices and update status."""
devs = discover_ble_devices()
if devs_track_battery:
adapter = hass.data[DATA_BLE][DATA_BLE_ADAPTER]
for mac in devs_to_track:
if mac not in devs:
continue
if devs[mac] is None:
devs[mac] = mac
battery = None
if (
mac in devs_track_battery
and now > devs_track_battery[mac] + battery_track_interval
):
handle = None
try:
adapter.start(reset_on_start=True)
_LOGGER.debug("Reading battery for Bluetooth LE device %s", mac)
bt_device = adapter.connect(mac)
# Try to get the handle; it will raise a BLEError exception if not available
handle = bt_device.get_handle(BATTERY_CHARACTERISTIC_UUID)
battery = ord(bt_device.char_read(BATTERY_CHARACTERISTIC_UUID))
devs_track_battery[mac] = now
except pygatt.exceptions.NotificationTimeout:
_LOGGER.warning("Timeout when trying to get battery status")
except pygatt.exceptions.BLEError as err:
_LOGGER.warning("Could not read battery status: %s", err)
if handle is not None:
# If the device does not offer battery information, there is no point in asking again later on.
                        # Remove the device from the battery-tracked devices, so that their battery is not
                        # wasted trying to read information that is not available.
del devs_track_battery[mac]
finally:
adapter.stop()
see_device(mac, devs[mac], battery=battery)
if track_new:
for address in devs:
if address not in devs_to_track and address not in devs_donot_track:
_LOGGER.info("Discovered Bluetooth LE device %s", address)
see_device(address, devs[address], new_device=True)
track_point_in_utc_time(hass, update_ble, dt_util.utcnow() + interval)
update_ble(dt_util.utcnow())
return True
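# Illustrative only, not part of the original platform file: a hedged sketch of
# the YAML this PLATFORM_SCHEMA accepts. The platform name "bluetooth_le_tracker"
# is an assumption (it is not stated in this file).
#
#   device_tracker:
#     - platform: bluetooth_le_tracker
#       track_battery: true
#       track_battery_interval: "06:00:00"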
|
from flexx import event
class Test1(event.Component):
data = event.ListProp([], doc='An array property')
@event.action
def add(self, i):
self._mutate_data([i], 'insert', len(self.data))
class Test2(event.Component):
other = event.ComponentProp(None, settable=True)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.data = [] # just a local variable, not a property
@event.reaction('other.data')
def track_data(self, *events):
for ev in events:
if ev.mutation == 'set':
self.data[:] = ev.new_value
elif ev.mutation == 'insert':
self.data[ev.index:ev.index] = ev.objects
elif ev.mutation == 'remove':
self.data[ev.index:ev.index+ev.objects] = [] # objects is int here
elif ev.mutation == 'replace':
self.data[ev.index:ev.index+len(ev.objects)] = ev.objects
else:
raise NotImplementedError(ev.mutation)
# The above shows all the cases that one should handle to cover
# all possible array mutations. If you just want to keep an
# array in sync, you can just use:
# event.mutate_array(self.data, ev)
# which would work in JS and Python, on normal lists and ndarrays.
test1 = Test1()
test2 = Test2(other=test1)
test1.add(4)
test1.add(7)
test1.add(6)
print(test2.data) # Events have not been sent yet
event.loop.iter()
print(test2.data) # Now they are
|
from .exceptions import ConfigurationError, GrammarError, assert_config
from .utils import get_regexp_width, Serialize
from .parsers.grammar_analysis import GrammarAnalyzer
from .lexer import LexerThread, TraditionalLexer, ContextualLexer, Lexer, Token, TerminalDef
from .parsers import earley, xearley, cyk
from .parsers.lalr_parser import LALR_Parser
from .tree import Tree
from .common import LexerConf, ParserConf
try:
import regex
except ImportError:
regex = None
import re
###{standalone
def _wrap_lexer(lexer_class):
future_interface = getattr(lexer_class, '__future_interface__', False)
if future_interface:
return lexer_class
else:
class CustomLexerWrapper(Lexer):
def __init__(self, lexer_conf):
self.lexer = lexer_class(lexer_conf)
def lex(self, lexer_state, parser_state):
return self.lexer.lex(lexer_state.text)
return CustomLexerWrapper
class MakeParsingFrontend:
def __init__(self, parser_type, lexer_type):
self.parser_type = parser_type
self.lexer_type = lexer_type
def __call__(self, lexer_conf, parser_conf, options):
assert isinstance(lexer_conf, LexerConf)
assert isinstance(parser_conf, ParserConf)
parser_conf.parser_type = self.parser_type
lexer_conf.lexer_type = self.lexer_type
return ParsingFrontend(lexer_conf, parser_conf, options)
@classmethod
def deserialize(cls, data, memo, callbacks, options):
lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo)
parser_conf = ParserConf.deserialize(data['parser_conf'], memo)
parser = LALR_Parser.deserialize(data['parser'], memo, callbacks, options.debug)
parser_conf.callbacks = callbacks
terminals = [item for item in memo.values() if isinstance(item, TerminalDef)]
lexer_conf.callbacks = _get_lexer_callbacks(options.transformer, terminals)
lexer_conf.re_module = regex if options.regex else re
lexer_conf.use_bytes = options.use_bytes
lexer_conf.g_regex_flags = options.g_regex_flags
lexer_conf.skip_validation = True
lexer_conf.postlex = options.postlex
return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser)
class ParsingFrontend(Serialize):
__serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser', 'options'
def __init__(self, lexer_conf, parser_conf, options, parser=None):
self.parser_conf = parser_conf
self.lexer_conf = lexer_conf
self.options = options
# Set-up parser
if parser: # From cache
self.parser = parser
else:
create_parser = {
'lalr': create_lalr_parser,
'earley': create_earley_parser,
'cyk': CYK_FrontEnd,
}[parser_conf.parser_type]
self.parser = create_parser(lexer_conf, parser_conf, options)
# Set-up lexer
lexer_type = lexer_conf.lexer_type
self.skip_lexer = False
if lexer_type in ('dynamic', 'dynamic_complete'):
self.skip_lexer = True
return
try:
create_lexer = {
'standard': create_traditional_lexer,
'contextual': create_contextual_lexer,
}[lexer_type]
except KeyError:
assert issubclass(lexer_type, Lexer), lexer_type
self.lexer = _wrap_lexer(lexer_type)(lexer_conf)
else:
self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex)
if lexer_conf.postlex:
self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex)
def parse(self, text, start=None):
if start is None:
start = self.parser_conf.start
if len(start) > 1:
raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start)
        start, = start
if self.skip_lexer:
return self.parser.parse(text, start)
lexer_thread = LexerThread(self.lexer, text)
return self.parser.parse(lexer_thread, start)
def get_frontend(parser, lexer):
assert_config(parser, ('lalr', 'earley', 'cyk'))
if not isinstance(lexer, type): # not custom lexer?
expected = {
'lalr': ('standard', 'contextual'),
'earley': ('standard', 'dynamic', 'dynamic_complete'),
'cyk': ('standard', ),
}[parser]
assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser)
return MakeParsingFrontend(parser, lexer)
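# Hedged sketch (added, not in the original source) of how the factory above is
# driven, assuming lexer_conf, parser_conf and options objects are already built
# elsewhere by Lark:
#
#     make_frontend = get_frontend('lalr', 'contextual')   # -> MakeParsingFrontend
#     frontend = make_frontend(lexer_conf, parser_conf, options)
#     tree = frontend.parse("input text")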
def _get_lexer_callbacks(transformer, terminals):
result = {}
for terminal in terminals:
callback = getattr(transformer, terminal.name, None)
if callback is not None:
result[terminal.name] = callback
return result
class PostLexConnector:
def __init__(self, lexer, postlexer):
self.lexer = lexer
self.postlexer = postlexer
def make_lexer_state(self, text):
return self.lexer.make_lexer_state(text)
def lex(self, lexer_state, parser_state):
i = self.lexer.lex(lexer_state, parser_state)
return self.postlexer.process(i)
def create_traditional_lexer(lexer_conf, parser, postlex):
return TraditionalLexer(lexer_conf)
def create_contextual_lexer(lexer_conf, parser, postlex):
states = {idx:list(t.keys()) for idx, t in parser._parse_table.states.items()}
always_accept = postlex.always_accept if postlex else ()
return ContextualLexer(lexer_conf, states, always_accept=always_accept)
def create_lalr_parser(lexer_conf, parser_conf, options=None):
debug = options.debug if options else False
return LALR_Parser(parser_conf, debug=debug)
create_earley_parser = NotImplemented
CYK_FrontEnd = NotImplemented
###}
class EarleyRegexpMatcher:
def __init__(self, lexer_conf):
self.regexps = {}
for t in lexer_conf.terminals:
if t.priority != 1:
raise GrammarError("Dynamic Earley doesn't support weights on terminals", t, t.priority)
regexp = t.pattern.to_regexp()
try:
width = get_regexp_width(regexp)[0]
except ValueError:
raise GrammarError("Bad regexp in token %s: %s" % (t.name, regexp))
else:
if width == 0:
raise GrammarError("Dynamic Earley doesn't allow zero-width regexps", t)
if lexer_conf.use_bytes:
regexp = regexp.encode('utf-8')
self.regexps[t.name] = lexer_conf.re_module.compile(regexp, lexer_conf.g_regex_flags)
def match(self, term, text, index=0):
return self.regexps[term.name].match(text, index)
def create_earley_parser__dynamic(lexer_conf, parser_conf, options=None, **kw):
earley_matcher = EarleyRegexpMatcher(lexer_conf)
return xearley.Parser(parser_conf, earley_matcher.match, ignore=lexer_conf.ignore, **kw)
def _match_earley_basic(term, token):
return term.name == token.type
def create_earley_parser__basic(lexer_conf, parser_conf, options, **kw):
return earley.Parser(parser_conf, _match_earley_basic, **kw)
def create_earley_parser(lexer_conf, parser_conf, options):
resolve_ambiguity = options.ambiguity == 'resolve'
debug = options.debug if options else False
tree_class = options.tree_class or Tree if options.ambiguity != 'forest' else None
extra = {}
if lexer_conf.lexer_type == 'dynamic':
f = create_earley_parser__dynamic
elif lexer_conf.lexer_type == 'dynamic_complete':
        extra['complete_lex'] = True
f = create_earley_parser__dynamic
else:
f = create_earley_parser__basic
return f(lexer_conf, parser_conf, options, resolve_ambiguity=resolve_ambiguity, debug=debug, tree_class=tree_class, **extra)
class CYK_FrontEnd:
def __init__(self, lexer_conf, parser_conf, options=None):
self._analysis = GrammarAnalyzer(parser_conf)
self.parser = cyk.Parser(parser_conf.rules)
self.callbacks = parser_conf.callbacks
def parse(self, lexer_thread, start):
tokens = list(lexer_thread.lex(None))
tree = self.parser.parse(tokens, start)
return self._transform(tree)
def _transform(self, tree):
subtrees = list(tree.iter_subtrees())
for subtree in subtrees:
subtree.children = [self._apply_callback(c) if isinstance(c, Tree) else c for c in subtree.children]
return self._apply_callback(tree)
def _apply_callback(self, tree):
return self.callbacks[tree.rule](tree.children)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec
FLAGS = flags.FLAGS
PLACEMENT_GROUP_CLUSTER = 'cluster'
PLACEMENT_GROUP_SUPERCLUSTER = 'supercluster'
PLACEMENT_GROUP_SPREAD = 'spread'
PLACEMENT_GROUP_NONE = 'none'
PLACEMENT_GROUP_OPTIONS = frozenset([
PLACEMENT_GROUP_CLUSTER,
PLACEMENT_GROUP_SPREAD,
PLACEMENT_GROUP_NONE
])
# Default placement group style is specified by Cloud Specific Placement Group.
flags.DEFINE_enum(
'placement_group_style', None,
list(PLACEMENT_GROUP_OPTIONS) + [PLACEMENT_GROUP_SUPERCLUSTER],
'The vm placement group option to use. Default set by cloud.')
def GetPlacementGroupSpecClass(cloud):
"""Returns the PlacementGroupSpec class corresponding to 'cloud'."""
return spec.GetSpecClass(BasePlacementGroupSpec, CLOUD=cloud)
def GetPlacementGroupClass(cloud):
"""Returns the PlacementGroup class corresponding to 'cloud'."""
return resource.GetResourceClass(BasePlacementGroup,
CLOUD=cloud)
class BasePlacementGroupSpec(spec.BaseSpec):
"""Storing various data about a placement group.
Attributes:
    zone: The zone in which the placement group will launch.
"""
SPEC_TYPE = 'BasePlacementGroupSpec'
CLOUD = None
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(BasePlacementGroupSpec, cls)._ApplyFlags(config_values, flag_values)
if FLAGS.placement_group_style:
config_values['placement_group_style'] = FLAGS.placement_group_style
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Can be overridden by derived classes to add options or impose additional
requirements on existing options.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(BasePlacementGroupSpec, cls)._GetOptionDecoderConstructions()
result.update({'zone': (option_decoders.StringDecoder, {'none_ok': True})})
return result
class BasePlacementGroup(resource.BaseResource):
"""Base class for Placement Groups.
This class holds Placement Group methods and attributes relating to the
Placement Groups as a cloud
resource.
Attributes:
zone: The zone the Placement Group was launched in.
"""
RESOURCE_TYPE = 'BasePlacementGroup'
def __init__(self, placement_group_spec):
"""Initialize BasePlacementGroup class.
Args:
placement_group_spec: placement_group.BasePlacementGroupSpec object of the
placement group.
"""
super(BasePlacementGroup, self).__init__()
self.zone = placement_group_spec.zone
|
import asyncio
from pyinsteon.address import Address
from homeassistant.components import insteon
from homeassistant.components.insteon.const import (
CONF_CAT,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_X10,
DOMAIN,
PORT_HUB_V1,
PORT_HUB_V2,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .const import (
MOCK_ADDRESS,
MOCK_CAT,
MOCK_IMPORT_CONFIG_PLM,
MOCK_IMPORT_FULL_CONFIG_HUB_V1,
MOCK_IMPORT_FULL_CONFIG_HUB_V2,
MOCK_IMPORT_FULL_CONFIG_PLM,
MOCK_IMPORT_MINIMUM_HUB_V1,
MOCK_IMPORT_MINIMUM_HUB_V2,
MOCK_SUBCAT,
MOCK_USER_INPUT_PLM,
PATCH_CONNECTION,
)
from .mock_devices import MockDevices
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def mock_successful_connection(*args, **kwargs):
"""Return a successful connection."""
return True
async def mock_failed_connection(*args, **kwargs):
"""Return a failed connection."""
raise ConnectionError("Connection failed")
async def test_setup_entry(hass: HomeAssistantType):
"""Test setting up the entry."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM)
config_entry.add_to_hass(hass)
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "async_close") as mock_close, patch.object(
insteon, "devices", new=MockDevices()
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
{},
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
# pylint: disable=no-member
assert insteon.devices.async_save.call_count == 1
assert mock_close.called
async def test_import_plm(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a PLM."""
config = {}
config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_DEVICE] == MOCK_IMPORT_CONFIG_PLM[CONF_PORT]
assert CONF_PORT not in data
async def test_import_hub1(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a hub v1."""
config = {}
config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V1
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V1[CONF_HOST]
assert data[CONF_PORT] == PORT_HUB_V1
assert CONF_USERNAME not in data
assert CONF_PASSWORD not in data
async def test_import_hub2(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a hub v2."""
config = {}
config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V2
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V2[CONF_HOST]
assert data[CONF_PORT] == PORT_HUB_V2
assert data[CONF_USERNAME] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_USERNAME]
assert data[CONF_PASSWORD] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_PASSWORD]
async def test_import_options(hass: HomeAssistantType):
"""Test setting up the entry from YAML including options."""
config = {}
config[DOMAIN] = MOCK_IMPORT_FULL_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01) # Need to yield to async processes
# pylint: disable=no-member
assert insteon.devices.add_x10_device.call_count == 2
assert insteon.devices.set_id.call_count == 1
options = hass.config_entries.async_entries(DOMAIN)[0].options
assert len(options[CONF_OVERRIDE]) == 1
assert options[CONF_OVERRIDE][0][CONF_ADDRESS] == str(Address(MOCK_ADDRESS))
assert options[CONF_OVERRIDE][0][CONF_CAT] == MOCK_CAT
assert options[CONF_OVERRIDE][0][CONF_SUBCAT] == MOCK_SUBCAT
assert len(options[CONF_X10]) == 2
assert options[CONF_X10][0] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][0]
assert options[CONF_X10][1] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][1]
async def test_import_failed_connection(hass: HomeAssistantType):
"""Test a failed connection in import does not create a config entry."""
config = {}
config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_failed_connection
), patch.object(insteon, "async_close"), patch.object(
insteon, "devices", new=MockDevices(connected=False)
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
async def test_setup_entry_failed_connection(hass: HomeAssistantType, caplog):
"""Test setting up the entry with a failed connection."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM)
config_entry.add_to_hass(hass)
with patch.object(
insteon, "async_connect", new=mock_failed_connection
), patch.object(insteon, "devices", new=MockDevices(connected=False)):
assert await async_setup_component(
hass,
insteon.DOMAIN,
{},
)
assert "Could not connect to Insteon modem" in caplog.text
|