from __future__ import absolute_import
from pyspark.mllib.regression import LabeledPoint
import numpy as np
from ..mllib.adapter import to_vector, from_vector
try:
from elephas.java import java_classes
from elephas.java.ndarray import ndarray
except Exception:
print("WARNING")
from six.moves import zip
def to_java_rdd(jsc, features, labels, batch_size):
"""Convert numpy features and labels into a JavaRDD of
DL4J DataSet type.
    :param jsc: JavaSparkContext from pyjnius
    :param features: numpy array with features
    :param labels: numpy array with labels
    :param batch_size: batch size used to group samples into DataSet objects
    :return: JavaRDD<DataSet>
"""
data_sets = java_classes.ArrayList()
num_batches = int(len(features) / batch_size)
for i in range(num_batches):
xi = ndarray(features[:batch_size].copy())
yi = ndarray(labels[:batch_size].copy())
data_set = java_classes.DataSet(xi.array, yi.array)
data_sets.add(data_set)
features = features[batch_size:]
labels = labels[batch_size:]
return jsc.parallelize(data_sets)
def to_simple_rdd(sc, features, labels):
"""Convert numpy arrays of features and labels into
an RDD of pairs.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:return: Spark RDD with feature-label pairs
"""
pairs = [(x, y) for x, y in zip(features, labels)]
return sc.parallelize(pairs)
def to_labeled_point(sc, features, labels, categorical=False):
"""Convert numpy arrays of features and labels into
a LabeledPoint RDD for MLlib and ML integration.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:param categorical: boolean, whether labels are already one-hot encoded or not
:return: LabeledPoint RDD with features and labels
"""
labeled_points = []
for x, y in zip(features, labels):
if categorical:
lp = LabeledPoint(np.argmax(y), to_vector(x))
else:
lp = LabeledPoint(y, to_vector(x))
labeled_points.append(lp)
return sc.parallelize(labeled_points)
def from_labeled_point(rdd, categorical=False, nb_classes=None):
"""Convert a LabeledPoint RDD back to a pair of numpy arrays
:param rdd: LabeledPoint RDD
    :param categorical: boolean, whether labels should be one-hot encoded when returned
:param nb_classes: optional int, indicating the number of class labels
:return: pair of numpy arrays, features and labels
"""
features = np.asarray(
rdd.map(lambda lp: from_vector(lp.features)).collect())
labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')
if categorical:
if not nb_classes:
nb_classes = np.max(labels) + 1
temp = np.zeros((len(labels), nb_classes))
for i, label in enumerate(labels):
temp[i, label] = 1.
labels = temp
return features, labels
def encode_label(label, nb_classes):
"""One-hot encoding of a single label
:param label: class label (int or double without floating point digits)
:param nb_classes: int, number of total classes
:return: one-hot encoded vector
"""
encoded = np.zeros(nb_classes)
encoded[int(label)] = 1.
return encoded
def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None):
"""Convert a LabeledPoint RDD into an RDD of feature-label pairs
:param lp_rdd: LabeledPoint RDD of features and labels
    :param categorical: boolean, whether labels should be one-hot encoded when returned
:param nb_classes: int, number of total classes
:return: Spark RDD with feature-label pairs
"""
if categorical:
if not nb_classes:
labels = np.asarray(lp_rdd.map(
lambda lp: lp.label).collect(), dtype='int32')
nb_classes = np.max(labels) + 1
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features),
encode_label(lp.label, nb_classes)))
else:
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label))
return rdd
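# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hypothetical round trip through to_labeled_point/from_labeled_point.
# Assumes pyspark is installed and that this module is executed with its package
# context intact so the relative imports above resolve; the array shapes and
# class count below are made up.
if __name__ == "__main__":
    from pyspark import SparkContext

    sc = SparkContext(master="local[2]", appName="rdd_utils_demo")
    features = np.random.rand(100, 10)                # 100 samples, 10 features
    labels = np.eye(3)[np.random.randint(0, 3, 100)]  # one-hot labels, 3 classes
    lp_rdd = to_labeled_point(sc, features, labels, categorical=True)
    x, y = from_labeled_point(lp_rdd, categorical=True, nb_classes=3)
    assert x.shape == (100, 10) and y.shape == (100, 3)
    sc.stop()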
|
try:
import simplejson as json
except ImportError:
import json
def deserialize(cassette_string):
return json.loads(cassette_string)
def serialize(cassette_dict):
error_message = (
"Does this HTTP interaction contain binary data? "
"If so, use a different serializer (like the yaml serializer) "
"for this request?"
)
try:
return json.dumps(cassette_dict, indent=4) + "\n"
except UnicodeDecodeError as original: # py2
raise UnicodeDecodeError(
original.encoding,
b"Error serializing cassette to JSON",
original.start,
original.end,
original.args[-1] + error_message,
)
except TypeError: # py3
raise TypeError(error_message)
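# --- Usage sketch (added for illustration; not part of the original module) ---
# Round-tripping a toy cassette dict through serialize()/deserialize(). The
# cassette structure below is a made-up minimal example, not the full format
# the library records.
if __name__ == "__main__":
    cassette = {
        "http_interactions": [
            {
                "request": {"method": "GET", "uri": "http://example.com"},
                "response": {"status": {"code": 200}, "body": {"string": "ok"}},
            }
        ],
        "recorded_with": "demo",
    }
    text = serialize(cassette)
    assert deserialize(text) == cassette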
|
import logging
from typing import Dict, List, Optional
from pycfdns import CloudflareUpdater
from pycfdns.exceptions import (
CloudflareAuthenticationException,
CloudflareConnectionException,
CloudflareZoneException,
)
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.config_entries import CONN_CLASS_CLOUD_PUSH, ConfigFlow
from homeassistant.const import CONF_API_TOKEN, CONF_ZONE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CONF_RECORDS
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_API_TOKEN): str,
}
)
def _zone_schema(zones: Optional[List] = None):
"""Zone selection schema."""
zones_list = []
if zones is not None:
zones_list = zones
return vol.Schema({vol.Required(CONF_ZONE): vol.In(zones_list)})
def _records_schema(records: Optional[List] = None):
"""Zone records selection schema."""
records_dict = {}
if records:
records_dict = {name: name for name in records}
return vol.Schema({vol.Required(CONF_RECORDS): cv.multi_select(records_dict)})
async def validate_input(hass: HomeAssistant, data: Dict):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
zone = data.get(CONF_ZONE)
records = None
cfupdate = CloudflareUpdater(
async_get_clientsession(hass),
data[CONF_API_TOKEN],
zone,
[],
)
try:
zones = await cfupdate.get_zones()
if zone:
zone_id = await cfupdate.get_zone_id()
records = await cfupdate.get_zone_records(zone_id, "A")
except CloudflareConnectionException as error:
raise CannotConnect from error
except CloudflareAuthenticationException as error:
raise InvalidAuth from error
except CloudflareZoneException as error:
raise InvalidZone from error
return {"zones": zones, "records": records}
class CloudflareConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Cloudflare."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_CLOUD_PUSH
def __init__(self):
"""Initialize the Cloudflare config flow."""
self.cloudflare_config = {}
self.zones = None
self.records = None
async def async_step_user(self, user_input: Optional[Dict] = None):
"""Handle a flow initiated by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
assert self.hass
persistent_notification.async_dismiss(self.hass, "cloudflare_setup")
errors = {}
if user_input is not None:
info, errors = await self._async_validate_or_error(user_input)
if not errors:
self.cloudflare_config.update(user_input)
self.zones = info["zones"]
return await self.async_step_zone()
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_zone(self, user_input: Optional[Dict] = None):
"""Handle the picking the zone."""
errors = {}
if user_input is not None:
self.cloudflare_config.update(user_input)
info, errors = await self._async_validate_or_error(self.cloudflare_config)
if not errors:
await self.async_set_unique_id(user_input[CONF_ZONE])
self.records = info["records"]
return await self.async_step_records()
return self.async_show_form(
step_id="zone",
data_schema=_zone_schema(self.zones),
errors=errors,
)
async def async_step_records(self, user_input: Optional[Dict] = None):
"""Handle the picking the zone records."""
errors = {}
if user_input is not None:
self.cloudflare_config.update(user_input)
title = self.cloudflare_config[CONF_ZONE]
return self.async_create_entry(title=title, data=self.cloudflare_config)
return self.async_show_form(
step_id="records",
data_schema=_records_schema(self.records),
errors=errors,
)
async def _async_validate_or_error(self, config):
errors = {}
info = {}
try:
info = await validate_input(self.hass, config)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except InvalidZone:
errors["base"] = "invalid_zone"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return info, errors
class CannotConnect(HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(HomeAssistantError):
"""Error to indicate there is invalid auth."""
class InvalidZone(HomeAssistantError):
"""Error to indicate we cannot validate zone exists in account."""
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
LightEntity,
)
from .const import ATTR_DISCOVER_DEVICES
from .entity import HMDevice
SUPPORT_HOMEMATIC = SUPPORT_BRIGHTNESS
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Homematic light platform."""
if discovery_info is None:
return
devices = []
for conf in discovery_info[ATTR_DISCOVER_DEVICES]:
new_device = HMLight(conf)
devices.append(new_device)
add_entities(devices, True)
class HMLight(HMDevice, LightEntity):
"""Representation of a Homematic light."""
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
# Is dimmer?
if self._state == "LEVEL":
return int(self._hm_get_state() * 255)
return None
@property
def is_on(self):
"""Return true if light is on."""
try:
return self._hm_get_state() > 0
except TypeError:
return False
@property
def supported_features(self):
"""Flag supported features."""
features = SUPPORT_BRIGHTNESS
if "COLOR" in self._hmdevice.WRITENODE:
features |= SUPPORT_COLOR
if "PROGRAM" in self._hmdevice.WRITENODE:
features |= SUPPORT_EFFECT
if hasattr(self._hmdevice, "get_color_temp"):
features |= SUPPORT_COLOR_TEMP
return features
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
if not self.supported_features & SUPPORT_COLOR:
return None
hue, sat = self._hmdevice.get_hs_color(self._channel)
return hue * 360.0, sat * 100.0
@property
def color_temp(self):
"""Return the color temp in mireds [int]."""
if not self.supported_features & SUPPORT_COLOR_TEMP:
return None
hm_color_temp = self._hmdevice.get_color_temp(self._channel)
return self.max_mireds - (self.max_mireds - self.min_mireds) * hm_color_temp
@property
def effect_list(self):
"""Return the list of supported effects."""
if not self.supported_features & SUPPORT_EFFECT:
return None
return self._hmdevice.get_effect_list()
@property
def effect(self):
"""Return the current color change program of the light."""
if not self.supported_features & SUPPORT_EFFECT:
return None
return self._hmdevice.get_effect()
def turn_on(self, **kwargs):
"""Turn the light on and/or change color or color effect settings."""
if ATTR_TRANSITION in kwargs:
self._hmdevice.setValue("RAMP_TIME", kwargs[ATTR_TRANSITION])
if ATTR_BRIGHTNESS in kwargs and self._state == "LEVEL":
percent_bright = float(kwargs[ATTR_BRIGHTNESS]) / 255
self._hmdevice.set_level(percent_bright, self._channel)
elif (
ATTR_HS_COLOR not in kwargs
and ATTR_COLOR_TEMP not in kwargs
and ATTR_EFFECT not in kwargs
):
self._hmdevice.on(self._channel)
if ATTR_HS_COLOR in kwargs and self.supported_features & SUPPORT_COLOR:
self._hmdevice.set_hs_color(
hue=kwargs[ATTR_HS_COLOR][0] / 360.0,
saturation=kwargs[ATTR_HS_COLOR][1] / 100.0,
channel=self._channel,
)
if ATTR_COLOR_TEMP in kwargs:
hm_temp = (self.max_mireds - kwargs[ATTR_COLOR_TEMP]) / (
self.max_mireds - self.min_mireds
)
self._hmdevice.set_color_temp(hm_temp)
if ATTR_EFFECT in kwargs:
self._hmdevice.set_effect(kwargs[ATTR_EFFECT])
def turn_off(self, **kwargs):
"""Turn the light off."""
self._hmdevice.off(self._channel)
def _init_data_struct(self):
"""Generate a data dict (self._data) from the Homematic metadata."""
# Use LEVEL
self._state = "LEVEL"
self._data[self._state] = None
if self.supported_features & SUPPORT_COLOR:
self._data.update({"COLOR": None})
if self.supported_features & SUPPORT_EFFECT:
self._data.update({"PROGRAM": None})
|
import numpy as np
from tensornetwork.backends import backend_factory
from tensornetwork.backend_contextmanager import get_default_backend
from tensornetwork.backends.abstract_backend import AbstractBackend
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
# TODO (mganahl): this class is very similar to BaseMPS. The two could probably
# be merged.
class BaseMPO:
"""
Base class for MPOs.
"""
def __init__(self,
tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
name: Optional[Text] = None) -> None:
"""
Initialize a BaseMPO.
Args:
tensors: A list of `Tensor` objects.
backend: The name of the backend that should be used to perform
contractions.
name: A name for the MPO.
"""
if backend is None:
backend = get_default_backend()
if isinstance(backend, AbstractBackend):
self.backend = backend
else:
self.backend = backend_factory.get_backend(backend)
self.tensors = [self.backend.convert_to_tensor(t) for t in tensors]
if len(self.tensors) > 0:
if not all(
[self.tensors[0].dtype == tensor.dtype for tensor in self.tensors]):
raise TypeError('not all dtypes in BaseMPO.tensors are the same')
self.name = name
def __iter__(self):
return iter(self.tensors)
def __len__(self) -> int:
return len(self.tensors)
@property
def dtype(self) -> Type[np.number]:
if not all(
[self.tensors[0].dtype == tensor.dtype for tensor in self.tensors]):
raise TypeError('not all dtypes in BaseMPO.tensors are the same')
return self.tensors[0].dtype
@property
def bond_dimensions(self) -> List[int]:
"""Returns a vector of all bond dimensions.
The vector will have length `N+1`, where `N == num_sites`."""
return [self.tensors[0].shape[0]
] + [tensor.shape[1] for tensor in self.tensors]
class InfiniteMPO(BaseMPO):
"""
Base class for implementation of infinite MPOs. Users should implement
specific infinite MPOs by deriving from InfiniteMPO.
"""
def __init__(self,
tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
name: Optional[Text] = None) -> None:
"""
Initialize an infinite MPO object
Args:
tensors: The mpo tensors.
      backend: An optional backend. Defaults to the default backend
of TensorNetwork.
name: An optional name for the MPO.
"""
super().__init__(tensors=tensors, backend=backend, name=name)
if self.bond_dimensions[0] != self.bond_dimensions[-1]:
raise ValueError('left and right MPO ancillary dimension have to match')
def roll(self, num_sites) -> None:
tensors = [self.tensors[n] for n in range(num_sites, len(self.tensors))
] + [self.tensors[n] for n in range(num_sites)]
self.tensors = tensors
class FiniteMPO(BaseMPO):
"""
Base class for implementation of finite MPOs. Users should implement
specific finite MPOs by deriving from FiniteMPO
"""
def __init__(self,
tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
name: Optional[Text] = None) -> None:
"""
Initialize a finite MPO object
Args:
tensors: The mpo tensors.
      backend: An optional backend. Defaults to the default backend
of TensorNetwork.
name: An optional name for the MPO.
"""
super().__init__(tensors=tensors, backend=backend, name=name)
if (self.bond_dimensions[0] != 1) or (self.bond_dimensions[-1] != 1):
raise ValueError('left and right MPO ancillary dimensions have to be 1')
class FiniteXXZ(FiniteMPO):
"""
The Heisenberg Hamiltonian.
"""
def __init__(self,
Jz: np.ndarray,
Jxy: np.ndarray,
Bz: np.ndarray,
dtype: Type[np.number],
backend: Optional[Union[AbstractBackend, Text]] = None,
name: Text = 'XXZ_MPO') -> None:
"""
Returns the MPO of the finite XXZ model.
Args:
Jz: The Sz*Sz coupling strength between nearest neighbor lattice sites.
      Jxy: The (Sx*Sx + Sy*Sy) coupling strength between nearest neighbor
        lattice sites.
Bz: Magnetic field on each lattice site.
dtype: The dtype of the MPO.
backend: An optional backend.
name: A name for the MPO.
Returns:
FiniteXXZ: The mpo of the finite XXZ model.
"""
self.Jz = Jz
self.Jxy = Jxy
self.Bz = Bz
N = len(Bz)
mpo = []
temp = np.zeros((1, 5, 2, 2), dtype=dtype)
#BSz
temp[0, 0, 0, 0] = -0.5 * Bz[0]
temp[0, 0, 1, 1] = 0.5 * Bz[0]
#Sm
temp[0, 1, 0, 1] = Jxy[0] / 2.0 * 1.0
#Sp
temp[0, 2, 1, 0] = Jxy[0] / 2.0 * 1.0
#Sz
temp[0, 3, 0, 0] = Jz[0] * (-0.5)
temp[0, 3, 1, 1] = Jz[0] * 0.5
#11
temp[0, 4, 0, 0] = 1.0
temp[0, 4, 1, 1] = 1.0
mpo.append(temp)
for n in range(1, N - 1):
temp = np.zeros((5, 5, 2, 2), dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#Sp
temp[1, 0, 1, 0] = 1.0
#Sm
temp[2, 0, 0, 1] = 1.0
#Sz
temp[3, 0, 0, 0] = -0.5
temp[3, 0, 1, 1] = 0.5
#BSz
temp[4, 0, 0, 0] = -0.5 * Bz[n]
temp[4, 0, 1, 1] = 0.5 * Bz[n]
#Sm
temp[4, 1, 0, 1] = Jxy[n] / 2.0 * 1.0
#Sp
temp[4, 2, 1, 0] = Jxy[n] / 2.0 * 1.0
#Sz
temp[4, 3, 0, 0] = Jz[n] * (-0.5)
temp[4, 3, 1, 1] = Jz[n] * 0.5
#11
temp[4, 4, 0, 0] = 1.0
temp[4, 4, 1, 1] = 1.0
mpo.append(temp)
temp = np.zeros((5, 1, 2, 2), dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#Sp
temp[1, 0, 1, 0] = 1.0
#Sm
temp[2, 0, 0, 1] = 1.0
#Sz
temp[3, 0, 0, 0] = -0.5
temp[3, 0, 1, 1] = 0.5
#BSz
temp[4, 0, 0, 0] = -0.5 * Bz[-1]
temp[4, 0, 1, 1] = 0.5 * Bz[-1]
mpo.append(temp)
super().__init__(tensors=mpo, backend=backend, name=name)
class FiniteTFI(FiniteMPO):
"""
The famous transverse field Ising Hamiltonian.
The ground state energy of the infinite system at criticality is -4/pi.
Convention: sigma_z=diag([-1,1])
"""
def __init__(self,
Jx: np.ndarray,
Bz: np.ndarray,
dtype: Type[np.number],
backend: Optional[Union[AbstractBackend, Text]] = None,
name: Text = 'TFI_MPO') -> None:
"""
Returns the MPO of the finite TFI model.
Args:
Jx: The Sx*Sx coupling strength between nearest neighbor lattice sites.
Bz: Magnetic field on each lattice site.
dtype: The dtype of the MPO.
backend: An optional backend.
name: A name for the MPO.
Returns:
      FiniteTFI: The MPO of the finite TFI model.
"""
self.Jx = Jx.astype(dtype)
self.Bz = Bz.astype(dtype)
N = len(Bz)
sigma_x = np.array([[0, 1], [1, 0]]).astype(dtype)
sigma_z = np.diag([-1, 1]).astype(dtype)
mpo = []
temp = np.zeros(shape=[1, 3, 2, 2], dtype=dtype)
#Bsigma_z
temp[0, 0, :, :] = self.Bz[0] * sigma_z
#sigma_x
temp[0, 1, :, :] = self.Jx[0] * sigma_x
#11
temp[0, 2, 0, 0] = 1.0
temp[0, 2, 1, 1] = 1.0
mpo.append(temp)
for n in range(1, N - 1):
temp = np.zeros(shape=[3, 3, 2, 2], dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#sigma_x
temp[1, 0, :, :] = sigma_x
#Bsigma_z
temp[2, 0, :, :] = self.Bz[n] * sigma_z
#sigma_x
temp[2, 1, :, :] = self.Jx[n] * sigma_x
#11
temp[2, 2, 0, 0] = 1.0
temp[2, 2, 1, 1] = 1.0
mpo.append(temp)
temp = np.zeros([3, 1, 2, 2], dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#sigma_x
temp[1, 0, :, :] = sigma_x
#Bsigma_z
temp[2, 0, :, :] = self.Bz[-1] * sigma_z
mpo.append(temp)
super().__init__(tensors=mpo, backend=backend, name=name)
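# --- Usage sketch (added for illustration; not part of the original module) ---
# Building a small transverse-field Ising MPO with the numpy backend and
# inspecting it; assumes TensorNetwork's numpy backend is available. The chain
# length and couplings below are made up.
if __name__ == "__main__":
  N = 4
  Jx = np.ones(N - 1)
  Bz = 0.5 * np.ones(N)
  mpo = FiniteTFI(Jx=Jx, Bz=Bz, dtype=np.float64, backend='numpy')
  # Bond dimensions are 1 at the open ends and 3 in the bulk for this model.
  assert mpo.bond_dimensions == [1, 3, 3, 3, 1]
  assert len(mpo) == N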
|
import pytest
from qutebrowser.config import websettings
from qutebrowser.misc import objects
from qutebrowser.utils import usertypes
@pytest.mark.parametrize([
'user_agent', 'os_info', 'webkit_version',
'upstream_browser_key', 'upstream_browser_version', 'qt_key'
], [
(
# QtWebEngine, Linux
# (no differences other than Chrome version with older Qt Versions)
("Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"QtWebEngine/5.14.0 Chrome/77.0.3865.98 Safari/537.36"),
"X11; Linux x86_64",
"537.36",
"Chrome", "77.0.3865.98",
"QtWebEngine",
), (
# QtWebKit, Linux
("Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/602.1 (KHTML, like Gecko) "
"qutebrowser/1.8.3 "
"Version/10.0 Safari/602.1"),
"X11; Linux x86_64",
"602.1",
"Version", "10.0",
"Qt",
), (
# QtWebEngine, macOS
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"QtWebEngine/5.13.2 Chrome/73.0.3683.105 Safari/537.36"),
"Macintosh; Intel Mac OS X 10_12_6",
"537.36",
"Chrome", "73.0.3683.105",
"QtWebEngine",
), (
# QtWebEngine, Windows
("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"QtWebEngine/5.12.5 Chrome/69.0.3497.128 Safari/537.36"),
"Windows NT 10.0; Win64; x64",
"537.36",
"Chrome", "69.0.3497.128",
"QtWebEngine",
)
])
def test_parse_user_agent(user_agent, os_info, webkit_version,
upstream_browser_key, upstream_browser_version,
qt_key):
parsed = websettings.UserAgent.parse(user_agent)
assert parsed.os_info == os_info
assert parsed.webkit_version == webkit_version
assert parsed.upstream_browser_key == upstream_browser_key
assert parsed.upstream_browser_version == upstream_browser_version
assert parsed.qt_key == qt_key
def test_user_agent(monkeypatch, config_stub, qapp):
webenginesettings = pytest.importorskip(
"qutebrowser.browser.webengine.webenginesettings")
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)
webenginesettings.init_user_agent()
config_stub.val.content.headers.user_agent = 'test {qt_key}'
assert websettings.user_agent() == 'test QtWebEngine'
config_stub.val.content.headers.user_agent = 'test2 {qt_key}'
assert websettings.user_agent() == 'test2 QtWebEngine'
def test_config_init(request, monkeypatch, config_stub):
if request.config.webengine:
from qutebrowser.browser.webengine import webenginesettings
monkeypatch.setattr(webenginesettings, 'init', lambda: None)
else:
from qutebrowser.browser.webkit import webkitsettings
monkeypatch.setattr(webkitsettings, 'init', lambda: None)
websettings.init(args=None)
assert config_stub.dump_userconfig() == '<Default configuration>'
|
from django.utils.translation import gettext_noop as _
from weblate.utils.translation import pgettext_noop as pgettext
SELECTION_MANUAL = 0
SELECTION_ALL = 1
SELECTION_COMPONENT_LIST = 2
SELECTION_ALL_PUBLIC = 3
SELECTION_ALL_PROTECTED = 4
PERMISSIONS = (
# Translators: Permission name
("billing.view", _("View billing info")),
# Translators: Permission name
("change.download", _("Download changes")),
# Translators: Permission name
("component.edit", _("Edit component settings")),
# Translators: Permission name
("component.lock", _("Lock component, preventing translations")),
# Translators: Permission name
("comment.add", _("Post comment")),
# Translators: Permission name
("comment.delete", _("Delete comment")),
# Translators: Permission name
("glossary.add", _("Add glossary entry")),
# Translators: Permission name
("glossary.edit", _("Edit glossary entry")),
# Translators: Permission name
("glossary.delete", _("Delete glossary entry")),
# Translators: Permission name
("glossary.upload", _("Upload glossary entries")),
# Translators: Permission name
("machinery.view", _("Use automatic suggestions")),
# Translators: Permission name
("memory.edit", _("Edit translation memory")),
# Translators: Permission name
("memory.delete", _("Delete translation memory")),
# Translators: Permission name
("project.edit", _("Edit project settings")),
# Translators: Permission name
("project.permissions", _("Manage project access")),
# Translators: Permission name
("reports.view", _("Download reports")),
# Translators: Permission name
("screenshot.add", _("Add screenshot")),
# Translators: Permission name
("screenshot.edit", _("Edit screenshot")),
# Translators: Permission name
("screenshot.delete", _("Delete screenshot")),
# Translators: Permission name
("source.edit", _("Edit additional string info")),
# Translators: Permission name
("suggestion.accept", _("Accept suggestion")),
# Translators: Permission name
("suggestion.add", _("Add suggestion")),
# Translators: Permission name
("suggestion.delete", _("Delete suggestion")),
# Translators: Permission name
("suggestion.vote", _("Vote on suggestion")),
# Translators: Permission name
("translation.add", _("Add language for translation")),
# Translators: Permission name
("translation.auto", _("Perform automatic translation")),
# Translators: Permission name
("translation.delete", _("Delete existing translation")),
# Translators: Permission name
("translation.add_more", _("Add several languages for translation")),
# Translators: Permission name
("unit.add", _("Add new string")),
# Translators: Permission name
("unit.delete", _("Remove a string")),
# Translators: Permission name
("unit.check", _("Ignore failing check")),
# Translators: Permission name
("unit.edit", _("Edit strings")),
# Translators: Permission name
("unit.review", _("Review strings")),
# Translators: Permission name
("unit.override", _("Edit string when suggestions are enforced")),
# Translators: Permission name
("unit.template", _("Edit source strings")),
# Translators: Permission name
("upload.authorship", _("Define author of uploaded translation")),
# Translators: Permission name
("upload.overwrite", _("Overwrite existing strings with upload")),
# Translators: Permission name
("upload.perform", _("Upload translations")),
# Translators: Permission name
("vcs.access", _("Access the internal repository")),
# Translators: Permission name
("vcs.commit", _("Commit changes to the internal repository")),
# Translators: Permission name
("vcs.push", _("Push change from the internal repository")),
# Translators: Permission name
("vcs.reset", _("Reset changes in the internal repository")),
# Translators: Permission name
("vcs.view", _("View upstream repository location")),
# Translators: Permission name
("vcs.update", _("Update the internal repository")),
)
# Permissions which are not scoped per project
GLOBAL_PERMISSIONS = (
# Translators: Permission name
("management.use", _("Use management interface")),
# Translators: Permission name
("project.add", _("Add new projects")),
# Translators: Permission name
("language.add", _("Add language definitions")),
# Translators: Permission name
("language.edit", _("Manage language definitions")),
# Translators: Permission name
("group.edit", _("Manage groups")),
# Translators: Permission name
("user.edit", _("Manage users")),
# Translators: Permission name
("role.edit", _("Manage roles")),
# Translators: Permission name
("announcement.edit", _("Manage announcements")),
# Translators: Permission name
("memory.edit", _("Manage translation memory")),
# Translators: Permission name
("componentlist.edit", _("Manage component lists")),
)
GLOBAL_PERM_NAMES = {perm[0] for perm in GLOBAL_PERMISSIONS}
def filter_perms(prefix):
"""Filter permission based on prefix."""
return {perm[0] for perm in PERMISSIONS if perm[0].startswith(prefix)}
# Translator permissions
TRANSLATE_PERMS = {
"comment.add",
"suggestion.accept",
"suggestion.add",
"suggestion.vote",
"unit.check",
"unit.edit",
"upload.overwrite",
"upload.perform",
"machinery.view",
}
# Default set of roles
ROLES = (
(pgettext("Access control role", "Add suggestion"), {"suggestion.add"}),
(pgettext("Access control role", "Access repository"), {"vcs.access", "vcs.view"}),
(
pgettext("Access control role", "Power user"),
TRANSLATE_PERMS
| {
"translation.add",
"unit.template",
"suggestion.delete",
"vcs.access",
"vcs.view",
}
| filter_perms("glossary."),
),
(pgettext("Access control role", "Translate"), TRANSLATE_PERMS),
(
pgettext("Access control role", "Edit source"),
TRANSLATE_PERMS | {"unit.template", "source.edit"},
),
(pgettext("Access control role", "Manage languages"), filter_perms("translation.")),
(pgettext("Access control role", "Manage glossary"), filter_perms("glossary.")),
(
pgettext("Access control role", "Manage translation memory"),
filter_perms("memory."),
),
(
pgettext("Access control role", "Manage screenshots"),
filter_perms("screenshot."),
),
(
pgettext("Access control role", "Review strings"),
TRANSLATE_PERMS | {"unit.review", "unit.override"},
),
(pgettext("Access control role", "Manage repository"), filter_perms("vcs.")),
(pgettext("Access control role", "Administration"), [x[0] for x in PERMISSIONS]),
(pgettext("Access control role", "Billing"), filter_perms("billing.")),
)
# Default set of roles for groups
GROUPS = (
(
pgettext("Access control group", "Guests"),
("Add suggestion", "Access repository"),
SELECTION_ALL_PUBLIC,
),
(pgettext("Access control group", "Viewers"), (), SELECTION_ALL_PROTECTED),
(pgettext("Access control group", "Users"), ("Power user",), SELECTION_ALL_PUBLIC),
(pgettext("Access control group", "Reviewers"), ("Review strings",), SELECTION_ALL),
(pgettext("Access control group", "Managers"), ("Administration",), SELECTION_ALL),
)
# Per project group definitions
ACL_GROUPS = {
pgettext("Per project access control group", "Translate"): "Translate",
pgettext("Per project access control group", "Sources"): "Edit source",
pgettext("Per project access control group", "Languages"): "Manage languages",
pgettext("Per project access control group", "Glossary"): "Manage glossary",
pgettext("Per project access control group", "Memory"): "Manage translation memory",
pgettext("Per project access control group", "Screenshots"): "Manage screenshots",
pgettext("Per project access control group", "Review"): "Review strings",
pgettext("Per project access control group", "VCS"): "Manage repository",
pgettext("Per project access control group", "Administration"): "Administration",
pgettext("Per project access control group", "Billing"): "Billing",
}
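# --- Consistency sketch (added for illustration; not part of the original module) ---
# A quick self-check of how the constants above relate: every permission that a
# default role grants should be declared in PERMISSIONS. Assumes Django and
# Weblate are importable so the translation helpers at the top resolve.
if __name__ == "__main__":
    declared = {perm[0] for perm in PERMISSIONS}
    for role_name, role_perms in ROLES:
        missing = set(role_perms) - declared
        assert not missing, f"{role_name}: undeclared permissions {missing}"
    print(f"{len(ROLES)} roles reference only declared permissions")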
|
from datetime import timedelta
import time
from unittest import mock
import pytest
import zigpy.profiles.zha
import zigpy.zcl.clusters.general as general
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.const import STATE_OFF, STATE_UNAVAILABLE
import homeassistant.helpers.device_registry as ha_dev_reg
import homeassistant.util.dt as dt_util
from .common import async_enable_traffic, make_zcl_header
from tests.async_mock import patch
from tests.common import async_fire_time_changed
@pytest.fixture
def zigpy_device(zigpy_device_mock):
"""Device tracker zigpy device."""
def _dev(with_basic_channel: bool = True):
in_clusters = [general.OnOff.cluster_id]
if with_basic_channel:
in_clusters.append(general.Basic.cluster_id)
endpoints = {
3: {
"in_clusters": in_clusters,
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
return zigpy_device_mock(endpoints)
return _dev
@pytest.fixture
def zigpy_device_mains(zigpy_device_mock):
"""Device tracker zigpy device."""
def _dev(with_basic_channel: bool = True):
in_clusters = [general.OnOff.cluster_id]
if with_basic_channel:
in_clusters.append(general.Basic.cluster_id)
endpoints = {
3: {
"in_clusters": in_clusters,
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
return zigpy_device_mock(
endpoints, node_descriptor=b"\x02@\x84_\x11\x7fd\x00\x00,d\x00\x00"
)
return _dev
@pytest.fixture
def device_with_basic_channel(zigpy_device_mains):
"""Return a zha device with a basic channel present."""
return zigpy_device_mains(with_basic_channel=True)
@pytest.fixture
def device_without_basic_channel(zigpy_device):
"""Return a zha device with a basic channel present."""
return zigpy_device(with_basic_channel=False)
@pytest.fixture
async def ota_zha_device(zha_device_restored, zigpy_device_mock):
"""ZHA device with OTA cluster fixture."""
zigpy_dev = zigpy_device_mock(
{
1: {
"in_clusters": [general.Basic.cluster_id],
"out_clusters": [general.Ota.cluster_id],
"device_type": 0x1234,
}
},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
zha_device = await zha_device_restored(zigpy_dev)
return zha_device
def _send_time_changed(hass, seconds):
"""Send a time changed event."""
now = dt_util.utcnow() + timedelta(seconds=seconds)
async_fire_time_changed(hass, now)
@patch(
"homeassistant.components.zha.core.channels.general.BasicChannel.async_initialize",
new=mock.MagicMock(),
)
async def test_check_available_success(
hass, device_with_basic_channel, zha_device_restored
):
"""Check device availability success on 1st try."""
# pylint: disable=protected-access
zha_device = await zha_device_restored(device_with_basic_channel)
await async_enable_traffic(hass, [zha_device])
basic_ch = device_with_basic_channel.endpoints[3].basic
basic_ch.read_attributes.reset_mock()
device_with_basic_channel.last_seen = None
assert zha_device.available is True
_send_time_changed(hass, zha_core_device.CONSIDER_UNAVAILABLE_MAINS + 2)
await hass.async_block_till_done()
assert zha_device.available is False
assert basic_ch.read_attributes.await_count == 0
device_with_basic_channel.last_seen = (
time.time() - zha_core_device.CONSIDER_UNAVAILABLE_MAINS - 2
)
_seens = [time.time(), device_with_basic_channel.last_seen]
def _update_last_seen(*args, **kwargs):
device_with_basic_channel.last_seen = _seens.pop()
basic_ch.read_attributes.side_effect = _update_last_seen
# successfully ping zigpy device, but zha_device is not yet available
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert basic_ch.read_attributes.await_count == 1
assert basic_ch.read_attributes.await_args[0][0] == ["manufacturer"]
assert zha_device.available is False
# There was traffic from the device: pings, but not yet available
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert basic_ch.read_attributes.await_count == 2
assert basic_ch.read_attributes.await_args[0][0] == ["manufacturer"]
assert zha_device.available is False
# There was traffic from the device: don't try to ping, marked as available
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert basic_ch.read_attributes.await_count == 2
assert basic_ch.read_attributes.await_args[0][0] == ["manufacturer"]
assert zha_device.available is True
@patch(
"homeassistant.components.zha.core.channels.general.BasicChannel.async_initialize",
new=mock.MagicMock(),
)
async def test_check_available_unsuccessful(
hass, device_with_basic_channel, zha_device_restored
):
"""Check device availability all tries fail."""
# pylint: disable=protected-access
zha_device = await zha_device_restored(device_with_basic_channel)
await async_enable_traffic(hass, [zha_device])
basic_ch = device_with_basic_channel.endpoints[3].basic
assert zha_device.available is True
assert basic_ch.read_attributes.await_count == 0
device_with_basic_channel.last_seen = (
time.time() - zha_core_device.CONSIDER_UNAVAILABLE_MAINS - 2
)
    # unsuccessfully ping zigpy device, but zha_device is still available
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert basic_ch.read_attributes.await_count == 1
assert basic_ch.read_attributes.await_args[0][0] == ["manufacturer"]
assert zha_device.available is True
# still no traffic, but zha_device is still available
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert basic_ch.read_attributes.await_count == 2
assert basic_ch.read_attributes.await_args[0][0] == ["manufacturer"]
assert zha_device.available is True
    # not even trying to update, device is unavailable
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert basic_ch.read_attributes.await_count == 2
assert basic_ch.read_attributes.await_args[0][0] == ["manufacturer"]
assert zha_device.available is False
@patch(
"homeassistant.components.zha.core.channels.general.BasicChannel.async_initialize",
new=mock.MagicMock(),
)
async def test_check_available_no_basic_channel(
hass, device_without_basic_channel, zha_device_restored, caplog
):
"""Check device availability for a device without basic cluster."""
# pylint: disable=protected-access
zha_device = await zha_device_restored(device_without_basic_channel)
await async_enable_traffic(hass, [zha_device])
assert zha_device.available is True
device_without_basic_channel.last_seen = (
time.time() - zha_core_device.CONSIDER_UNAVAILABLE_BATTERY - 2
)
assert "does not have a mandatory basic cluster" not in caplog.text
_send_time_changed(hass, 91)
await hass.async_block_till_done()
assert zha_device.available is False
assert "does not have a mandatory basic cluster" in caplog.text
async def test_ota_sw_version(hass, ota_zha_device):
"""Test device entry gets sw_version updated via OTA channel."""
ota_ch = ota_zha_device.channels.pools[0].client_channels["1:0x0019"]
dev_registry = await ha_dev_reg.async_get_registry(hass)
entry = dev_registry.async_get(ota_zha_device.device_id)
assert entry.sw_version is None
cluster = ota_ch.cluster
hdr = make_zcl_header(1, global_command=False)
sw_version = 0x2345
cluster.handle_message(hdr, [1, 2, 3, sw_version, None])
await hass.async_block_till_done()
entry = dev_registry.async_get(ota_zha_device.device_id)
assert int(entry.sw_version, base=16) == sw_version
@pytest.mark.parametrize(
"device, last_seen_delta, is_available",
(
("zigpy_device", 0, True),
(
"zigpy_device",
zha_core_device.CONSIDER_UNAVAILABLE_MAINS + 2,
True,
),
(
"zigpy_device",
zha_core_device.CONSIDER_UNAVAILABLE_BATTERY - 2,
True,
),
(
"zigpy_device",
zha_core_device.CONSIDER_UNAVAILABLE_BATTERY + 2,
False,
),
("zigpy_device_mains", 0, True),
(
"zigpy_device_mains",
zha_core_device.CONSIDER_UNAVAILABLE_MAINS - 2,
True,
),
(
"zigpy_device_mains",
zha_core_device.CONSIDER_UNAVAILABLE_MAINS + 2,
False,
),
(
"zigpy_device_mains",
zha_core_device.CONSIDER_UNAVAILABLE_BATTERY - 2,
False,
),
(
"zigpy_device_mains",
zha_core_device.CONSIDER_UNAVAILABLE_BATTERY + 2,
False,
),
),
)
async def test_device_restore_availability(
hass, request, device, last_seen_delta, is_available, zha_device_restored
):
"""Test initial availability for restored devices."""
zigpy_device = request.getfixturevalue(device)()
zha_device = await zha_device_restored(
zigpy_device, last_seen=time.time() - last_seen_delta
)
entity_id = "switch.fakemanufacturer_fakemodel_e769900a_on_off"
await hass.async_block_till_done()
# ensure the switch entity was created
assert hass.states.get(entity_id).state is not None
assert zha_device.available is is_available
if is_available:
assert hass.states.get(entity_id).state == STATE_OFF
else:
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
|
import numpy as np
def _ecdf(x):
"""No frills empirical cdf used in fdrcorrection."""
nobs = len(x)
return np.arange(1, nobs + 1) / float(nobs)
def fdr_correction(pvals, alpha=0.05, method='indep'):
"""P-value correction with False Discovery Rate (FDR).
Correction for multiple comparison using FDR [1]_.
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests.
Parameters
----------
pvals : array_like
Set of p-values of the individual tests.
alpha : float
Error rate.
method : 'indep' | 'negcorr'
        If 'indep', implements Benjamini/Hochberg for independent or
        positively correlated tests; if 'negcorr', implements
        Benjamini/Yekutieli for general or negatively correlated tests.
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not.
pval_corrected : array
P-values adjusted for multiple hypothesis testing to limit FDR.
References
----------
.. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps
in functional neuroimaging using the false discovery rate.
Neuroimage. 2002 Apr;15(4):870-8.
"""
pvals = np.asarray(pvals)
shape_init = pvals.shape
pvals = pvals.ravel()
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
if method in ['i', 'indep', 'p', 'poscorr']:
ecdffactor = _ecdf(pvals_sorted)
elif method in ['n', 'negcorr']:
cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))
ecdffactor = _ecdf(pvals_sorted) / cm
else:
raise ValueError("Method should be 'indep' and 'negcorr'")
reject = pvals_sorted < (ecdffactor * alpha)
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
else:
rejectmax = 0
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
pvals_corrected[pvals_corrected > 1.0] = 1.0
pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)
reject = reject[sortrevind].reshape(shape_init)
return reject, pvals_corrected
def bonferroni_correction(pval, alpha=0.05):
"""P-value correction with Bonferroni method.
Parameters
----------
pval : array_like
Set of p-values of the individual tests.
alpha : float
Error rate.
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not.
pval_corrected : array
        P-values adjusted for multiple hypothesis testing using the Bonferroni
        procedure (controlling the family-wise error rate).
"""
pval = np.asarray(pval)
pval_corrected = pval * float(pval.size)
# p-values must not be larger than 1.
pval_corrected = pval_corrected.clip(max=1.)
reject = pval_corrected < alpha
return reject, pval_corrected
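# --- Usage sketch (added for illustration; not part of the original module) ---
# Comparing Benjamini/Hochberg FDR correction with Bonferroni on made-up
# p-values. As implemented above, BH rejects the sorted p-values up to the
# largest rank i with p_(i) < (i/m) * alpha, so it is never more conservative
# than Bonferroni's p * m < alpha.
if __name__ == "__main__":
    pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205])
    fdr_reject, fdr_pvals = fdr_correction(pvals, alpha=0.05, method='indep')
    bonf_reject, bonf_pvals = bonferroni_correction(pvals, alpha=0.05)
    print("BH rejections:        ", fdr_reject.astype(int))   # [1 1 0 0 0 0 0 0]
    print("Bonferroni rejections:", bonf_reject.astype(int))  # [1 0 0 0 0 0 0 0]
    assert fdr_reject.sum() >= bonf_reject.sum()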
|
from collections import defaultdict
from datetime import timedelta
from functools import partial
import ipaddress
import logging
import time
from typing import Any, Callable, Dict, List, Set, Tuple, cast
from urllib.parse import urlparse
import attr
from getmac import get_mac_address
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
from huawei_lte_api.Connection import Connection
from huawei_lte_api.exceptions import (
ResponseErrorLoginRequiredException,
ResponseErrorNotSupportedException,
)
from requests.exceptions import Timeout
from url_normalize import url_normalize
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.device_tracker.const import (
DOMAIN as DEVICE_TRACKER_DOMAIN,
)
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CALLBACK_TYPE, ServiceCall
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
discovery,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
ADMIN_SERVICES,
ALL_KEYS,
CONNECTION_TIMEOUT,
DEFAULT_DEVICE_NAME,
DEFAULT_NOTIFY_SERVICE_NAME,
DOMAIN,
KEY_DEVICE_BASIC_INFORMATION,
KEY_DEVICE_INFORMATION,
KEY_DEVICE_SIGNAL,
KEY_DIALUP_MOBILE_DATASWITCH,
KEY_MONITORING_CHECK_NOTIFICATIONS,
KEY_MONITORING_MONTH_STATISTICS,
KEY_MONITORING_STATUS,
KEY_MONITORING_TRAFFIC_STATISTICS,
KEY_NET_CURRENT_PLMN,
KEY_NET_NET_MODE,
KEY_SMS_SMS_COUNT,
KEY_WLAN_HOST_LIST,
KEY_WLAN_WIFI_FEATURE_SWITCH,
NOTIFY_SUPPRESS_TIMEOUT,
SERVICE_CLEAR_TRAFFIC_STATISTICS,
SERVICE_REBOOT,
SERVICE_RESUME_INTEGRATION,
SERVICE_SUSPEND_INTEGRATION,
UPDATE_OPTIONS_SIGNAL,
UPDATE_SIGNAL,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
NOTIFY_SCHEMA = vol.Any(
None,
vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_RECIPIENT): vol.Any(
None, vol.All(cv.ensure_list, [cv.string])
),
}
),
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_URL): cv.url,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(NOTIFY_DOMAIN): NOTIFY_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_URL): cv.url})
CONFIG_ENTRY_PLATFORMS = (
BINARY_SENSOR_DOMAIN,
DEVICE_TRACKER_DOMAIN,
SENSOR_DOMAIN,
SWITCH_DOMAIN,
)
@attr.s
class Router:
"""Class for router state."""
connection: Connection = attr.ib()
url: str = attr.ib()
mac: str = attr.ib()
signal_update: CALLBACK_TYPE = attr.ib()
data: Dict[str, Any] = attr.ib(init=False, factory=dict)
subscriptions: Dict[str, Set[str]] = attr.ib(
init=False,
factory=lambda: defaultdict(set, ((x, {"initial_scan"}) for x in ALL_KEYS)),
)
inflight_gets: Set[str] = attr.ib(init=False, factory=set)
unload_handlers: List[CALLBACK_TYPE] = attr.ib(init=False, factory=list)
client: Client
suspended = attr.ib(init=False, default=False)
notify_last_attempt: float = attr.ib(init=False, default=-1)
def __attrs_post_init__(self) -> None:
"""Set up internal state on init."""
self.client = Client(self.connection)
@property
def device_name(self) -> str:
"""Get router device name."""
for key, item in (
(KEY_DEVICE_BASIC_INFORMATION, "devicename"),
(KEY_DEVICE_INFORMATION, "DeviceName"),
):
try:
return cast(str, self.data[key][item])
except (KeyError, TypeError):
pass
return DEFAULT_DEVICE_NAME
@property
def device_identifiers(self) -> Set[Tuple[str, str]]:
"""Get router identifiers for device registry."""
try:
return {(DOMAIN, self.data[KEY_DEVICE_INFORMATION]["SerialNumber"])}
except (KeyError, TypeError):
return set()
@property
def device_connections(self) -> Set[Tuple[str, str]]:
"""Get router connections for device registry."""
return {(dr.CONNECTION_NETWORK_MAC, self.mac)} if self.mac else set()
def _get_data(self, key: str, func: Callable[[], Any]) -> None:
if not self.subscriptions.get(key):
return
if key in self.inflight_gets:
_LOGGER.debug("Skipping already inflight get for %s", key)
return
self.inflight_gets.add(key)
_LOGGER.debug("Getting %s for subscribers %s", key, self.subscriptions[key])
try:
self.data[key] = func()
except ResponseErrorNotSupportedException:
_LOGGER.info(
"%s not supported by device, excluding from future updates", key
)
self.subscriptions.pop(key)
except ResponseErrorLoginRequiredException:
if isinstance(self.connection, AuthorizedConnection):
_LOGGER.debug("Trying to authorize again...")
if self.connection.enforce_authorized_connection():
_LOGGER.debug(
"...success, %s will be updated by a future periodic run",
key,
)
else:
_LOGGER.debug("...failed")
return
_LOGGER.info(
"%s requires authorization, excluding from future updates", key
)
self.subscriptions.pop(key)
except Timeout:
grace_left = (
self.notify_last_attempt - time.monotonic() + NOTIFY_SUPPRESS_TIMEOUT
)
if grace_left > 0:
_LOGGER.debug(
"%s timed out, %.1fs notify timeout suppress grace remaining",
key,
grace_left,
exc_info=True,
)
else:
raise
finally:
self.inflight_gets.discard(key)
_LOGGER.debug("%s=%s", key, self.data.get(key))
def update(self) -> None:
"""Update router data."""
if self.suspended:
_LOGGER.debug("Integration suspended, not updating data")
return
self._get_data(KEY_DEVICE_INFORMATION, self.client.device.information)
if self.data.get(KEY_DEVICE_INFORMATION):
# Full information includes everything in basic
self.subscriptions.pop(KEY_DEVICE_BASIC_INFORMATION, None)
self._get_data(
KEY_DEVICE_BASIC_INFORMATION, self.client.device.basic_information
)
self._get_data(KEY_DEVICE_SIGNAL, self.client.device.signal)
self._get_data(
KEY_DIALUP_MOBILE_DATASWITCH, self.client.dial_up.mobile_dataswitch
)
self._get_data(
KEY_MONITORING_MONTH_STATISTICS, self.client.monitoring.month_statistics
)
self._get_data(
KEY_MONITORING_CHECK_NOTIFICATIONS,
self.client.monitoring.check_notifications,
)
self._get_data(KEY_MONITORING_STATUS, self.client.monitoring.status)
self._get_data(
KEY_MONITORING_TRAFFIC_STATISTICS, self.client.monitoring.traffic_statistics
)
self._get_data(KEY_NET_CURRENT_PLMN, self.client.net.current_plmn)
self._get_data(KEY_NET_NET_MODE, self.client.net.net_mode)
self._get_data(KEY_SMS_SMS_COUNT, self.client.sms.sms_count)
self._get_data(KEY_WLAN_HOST_LIST, self.client.wlan.host_list)
self._get_data(
KEY_WLAN_WIFI_FEATURE_SWITCH, self.client.wlan.wifi_feature_switch
)
self.signal_update()
def logout(self) -> None:
"""Log out router session."""
if not isinstance(self.connection, AuthorizedConnection):
return
try:
self.client.user.logout()
except ResponseErrorNotSupportedException:
_LOGGER.debug("Logout not supported by device", exc_info=True)
except ResponseErrorLoginRequiredException:
_LOGGER.debug("Logout not supported when not logged in", exc_info=True)
except Exception: # pylint: disable=broad-except
_LOGGER.warning("Logout error", exc_info=True)
def cleanup(self, *_: Any) -> None:
"""Clean up resources."""
self.subscriptions.clear()
for handler in self.unload_handlers:
handler()
self.unload_handlers.clear()
self.logout()
@attr.s
class HuaweiLteData:
"""Shared state."""
hass_config: dict = attr.ib()
# Our YAML config, keyed by router URL
config: Dict[str, Dict[str, Any]] = attr.ib()
routers: Dict[str, Router] = attr.ib(init=False, factory=dict)
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
"""Set up Huawei LTE component from config entry."""
url = config_entry.data[CONF_URL]
# Override settings from YAML config, but only if they're changed in it
# Old values are stored as *_from_yaml in the config entry
yaml_config = hass.data[DOMAIN].config.get(url)
if yaml_config:
# Config values
new_data = {}
for key in CONF_USERNAME, CONF_PASSWORD:
if key in yaml_config:
value = yaml_config[key]
if value != config_entry.data.get(f"{key}_from_yaml"):
new_data[f"{key}_from_yaml"] = value
new_data[key] = value
# Options
new_options = {}
yaml_recipient = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_RECIPIENT)
if yaml_recipient is not None and yaml_recipient != config_entry.options.get(
f"{CONF_RECIPIENT}_from_yaml"
):
new_options[f"{CONF_RECIPIENT}_from_yaml"] = yaml_recipient
new_options[CONF_RECIPIENT] = yaml_recipient
yaml_notify_name = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_NAME)
if (
yaml_notify_name is not None
and yaml_notify_name != config_entry.options.get(f"{CONF_NAME}_from_yaml")
):
new_options[f"{CONF_NAME}_from_yaml"] = yaml_notify_name
new_options[CONF_NAME] = yaml_notify_name
# Update entry if overrides were found
if new_data or new_options:
hass.config_entries.async_update_entry(
config_entry,
data={**config_entry.data, **new_data},
options={**config_entry.options, **new_options},
)
# Get MAC address for use in unique ids. Being able to use something
# from the API would be nice, but all of that seems to be available only
# through authenticated calls (e.g. device_information.SerialNumber), and
# we want this available and the same when unauthenticated too.
host = urlparse(url).hostname
try:
if ipaddress.ip_address(host).version == 6:
mode = "ip6"
else:
mode = "ip"
except ValueError:
mode = "hostname"
mac = await hass.async_add_executor_job(partial(get_mac_address, **{mode: host}))
def get_connection() -> Connection:
"""
Set up a connection.
Authorized one if username/pass specified (even if empty), unauthorized one otherwise.
"""
username = config_entry.data.get(CONF_USERNAME)
password = config_entry.data.get(CONF_PASSWORD)
if username or password:
connection: Connection = AuthorizedConnection(
url, username=username, password=password, timeout=CONNECTION_TIMEOUT
)
else:
connection = Connection(url, timeout=CONNECTION_TIMEOUT)
return connection
def signal_update() -> None:
"""Signal updates to data."""
dispatcher_send(hass, UPDATE_SIGNAL, url)
try:
connection = await hass.async_add_executor_job(get_connection)
except Timeout as ex:
raise ConfigEntryNotReady from ex
# Set up router and store reference to it
router = Router(connection, url, mac, signal_update)
hass.data[DOMAIN].routers[url] = router
# Do initial data update
await hass.async_add_executor_job(router.update)
# Clear all subscriptions, enabled entities will push back theirs
router.subscriptions.clear()
# Set up device registry
device_data = {}
sw_version = None
if router.data.get(KEY_DEVICE_INFORMATION):
device_info = router.data[KEY_DEVICE_INFORMATION]
sw_version = device_info.get("SoftwareVersion")
if device_info.get("DeviceName"):
device_data["model"] = device_info["DeviceName"]
if not sw_version and router.data.get(KEY_DEVICE_BASIC_INFORMATION):
sw_version = router.data[KEY_DEVICE_BASIC_INFORMATION].get("SoftwareVersion")
if sw_version:
device_data["sw_version"] = sw_version
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections=router.device_connections,
identifiers=router.device_identifiers,
name=router.device_name,
manufacturer="Huawei",
**device_data,
)
# Forward config entry setup to platforms
for domain in CONFIG_ENTRY_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, domain)
)
# Notify doesn't support config entry setup yet, load with discovery for now
await discovery.async_load_platform(
hass,
NOTIFY_DOMAIN,
DOMAIN,
{
CONF_URL: url,
CONF_NAME: config_entry.options.get(CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME),
CONF_RECIPIENT: config_entry.options.get(CONF_RECIPIENT),
},
hass.data[DOMAIN].hass_config,
)
# Add config entry options update listener
router.unload_handlers.append(
config_entry.add_update_listener(async_signal_options_update)
)
def _update_router(*_: Any) -> None:
"""
Update router data.
Separate passthrough function because lambdas don't work with track_time_interval.
"""
router.update()
# Set up periodic update
router.unload_handlers.append(
async_track_time_interval(hass, _update_router, SCAN_INTERVAL)
)
# Clean up at end
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, router.cleanup)
return True
async def async_unload_entry(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> bool:
"""Unload config entry."""
# Forward config entry unload to platforms
for domain in CONFIG_ENTRY_PLATFORMS:
await hass.config_entries.async_forward_entry_unload(config_entry, domain)
# Forget about the router and invoke its cleanup
router = hass.data[DOMAIN].routers.pop(config_entry.data[CONF_URL])
await hass.async_add_executor_job(router.cleanup)
return True
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up Huawei LTE component."""
# dicttoxml (used by huawei-lte-api) has uselessly verbose INFO level.
# https://github.com/quandyfactory/dicttoxml/issues/60
logging.getLogger("dicttoxml").setLevel(logging.WARNING)
# Arrange our YAML config to dict with normalized URLs as keys
domain_config: Dict[str, Dict[str, Any]] = {}
if DOMAIN not in hass.data:
hass.data[DOMAIN] = HuaweiLteData(hass_config=config, config=domain_config)
for router_config in config.get(DOMAIN, []):
domain_config[url_normalize(router_config.pop(CONF_URL))] = router_config
def service_handler(service: ServiceCall) -> None:
"""Apply a service."""
url = service.data.get(CONF_URL)
routers = hass.data[DOMAIN].routers
if url:
router = routers.get(url)
elif not routers:
_LOGGER.error("%s: no routers configured", service.service)
return
elif len(routers) == 1:
router = next(iter(routers.values()))
else:
_LOGGER.error(
"%s: more than one router configured, must specify one of URLs %s",
service.service,
sorted(routers),
)
return
if not router:
_LOGGER.error("%s: router %s unavailable", service.service, url)
return
if service.service == SERVICE_CLEAR_TRAFFIC_STATISTICS:
if router.suspended:
_LOGGER.debug("%s: ignored, integration suspended", service.service)
return
result = router.client.monitoring.set_clear_traffic()
_LOGGER.debug("%s: %s", service.service, result)
elif service.service == SERVICE_REBOOT:
if router.suspended:
_LOGGER.debug("%s: ignored, integration suspended", service.service)
return
result = router.client.device.reboot()
_LOGGER.debug("%s: %s", service.service, result)
elif service.service == SERVICE_RESUME_INTEGRATION:
# Login will be handled automatically on demand
router.suspended = False
_LOGGER.debug("%s: %s", service.service, "done")
elif service.service == SERVICE_SUSPEND_INTEGRATION:
router.logout()
router.suspended = True
_LOGGER.debug("%s: %s", service.service, "done")
else:
_LOGGER.error("%s: unsupported service", service.service)
for service in ADMIN_SERVICES:
hass.helpers.service.async_register_admin_service(
DOMAIN,
service,
service_handler,
schema=SERVICE_SCHEMA,
)
for url, router_config in domain_config.items():
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_URL: url,
CONF_USERNAME: router_config.get(CONF_USERNAME),
CONF_PASSWORD: router_config.get(CONF_PASSWORD),
},
)
)
return True
async def async_signal_options_update(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> None:
"""Handle config entry options update."""
async_dispatcher_send(hass, UPDATE_OPTIONS_SIGNAL, config_entry)
async def async_migrate_entry(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> bool:
"""Migrate config entry to new version."""
if config_entry.version == 1:
options = dict(config_entry.options)
recipient = options.get(CONF_RECIPIENT)
if isinstance(recipient, str):
options[CONF_RECIPIENT] = [x.strip() for x in recipient.split(",")]
config_entry.version = 2
hass.config_entries.async_update_entry(config_entry, options=options)
_LOGGER.info("Migrated config entry to version %d", config_entry.version)
return True
@attr.s
class HuaweiLteBaseEntity(Entity):
"""Huawei LTE entity base class."""
router: Router = attr.ib()
_available: bool = attr.ib(init=False, default=True)
_unsub_handlers: List[Callable] = attr.ib(init=False, factory=list)
@property
def _entity_name(self) -> str:
raise NotImplementedError
@property
def _device_unique_id(self) -> str:
"""Return unique ID for entity within a router."""
raise NotImplementedError
@property
def unique_id(self) -> str:
"""Return unique ID for entity."""
return f"{self.router.mac}-{self._device_unique_id}"
@property
def name(self) -> str:
"""Return entity name."""
return f"Huawei {self.router.device_name} {self._entity_name}"
@property
def available(self) -> bool:
"""Return whether the entity is available."""
return self._available
@property
def should_poll(self) -> bool:
"""Huawei LTE entities report their state without polling."""
return False
@property
def device_info(self) -> Dict[str, Any]:
"""Get info for matching with parent router."""
return {
"identifiers": self.router.device_identifiers,
"connections": self.router.device_connections,
}
async def async_update(self) -> None:
"""Update state."""
raise NotImplementedError
async def async_update_options(self, config_entry: ConfigEntry) -> None:
"""Update config entry options."""
async def async_added_to_hass(self) -> None:
"""Connect to update signals."""
assert self.hass is not None
self._unsub_handlers.append(
async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self._async_maybe_update)
)
self._unsub_handlers.append(
async_dispatcher_connect(
self.hass, UPDATE_OPTIONS_SIGNAL, self._async_maybe_update_options
)
)
async def _async_maybe_update(self, url: str) -> None:
"""Update state if the update signal comes from our router."""
if url == self.router.url:
self.async_schedule_update_ha_state(True)
async def _async_maybe_update_options(self, config_entry: ConfigEntry) -> None:
"""Update options if the update signal comes from our router."""
if config_entry.data[CONF_URL] == self.router.url:
await self.async_update_options(config_entry)
async def async_will_remove_from_hass(self) -> None:
"""Invoke unsubscription handlers."""
for unsub in self._unsub_handlers:
unsub()
self._unsub_handlers.clear()
|
import logging
from poolsense import PoolSense
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
class PoolSenseConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for PoolSense."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize PoolSense config flow."""
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(user_input[CONF_EMAIL])
self._abort_if_unique_id_configured()
_LOGGER.debug(
"Configuring user: %s - Password hidden", user_input[CONF_EMAIL]
)
poolsense = PoolSense(
aiohttp_client.async_get_clientsession(self.hass),
user_input[CONF_EMAIL],
user_input[CONF_PASSWORD],
)
api_key_valid = await poolsense.test_poolsense_credentials()
if not api_key_valid:
errors["base"] = "invalid_auth"
if not errors:
return self.async_create_entry(
title=user_input[CONF_EMAIL],
data={
CONF_EMAIL: user_input[CONF_EMAIL],
CONF_PASSWORD: user_input[CONF_PASSWORD],
},
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Required(CONF_EMAIL): str, vol.Required(CONF_PASSWORD): str}
),
errors=errors,
)
|
import asyncio
import os
import threading
import pytest
import voluptuous as vol
from homeassistant import config_entries, setup
import homeassistant.config as config_util
from homeassistant.const import EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import (
MockConfigEntry,
MockModule,
MockPlatform,
assert_setup_component,
get_test_config_dir,
get_test_home_assistant,
mock_entity_platform,
mock_integration,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def mock_handlers():
"""Mock config flows."""
class MockFlowHandler(config_entries.ConfigFlow):
"""Define a mock flow handler."""
VERSION = 1
with patch.dict(config_entries.HANDLERS, {"comp": MockFlowHandler}):
yield
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({"comp_conf": {"hello": str}}, required=True)
mock_integration(
self.hass, MockModule("comp_conf", config_schema=config_schema)
)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass, "comp_conf", {"comp_conf": None}
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {"comp_conf": {}})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass,
"comp_conf",
{"comp_conf": {"hello": "world", "invalid": "extra"}},
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(
self.hass, "comp_conf", {"comp_conf": {"hello": "world"}}
)
def test_validate_platform_config(self, caplog):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({})
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=platform_schema_base),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "not_existing", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "whatever", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": [{"platform": "whatever", "hello": "world"}]},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": None}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": {}}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
def test_validate_platform_config_2(self, caplog):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({"hello": "world"})
mock_integration(
self.hass,
MockModule(
"platform_conf",
platform_schema=platform_schema,
platform_schema_base=platform_schema_base,
),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema_base
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_3(self, caplog):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({"hello": str})
platform_schema = PLATFORM_SCHEMA.extend({"cheers": str, "hello": "world"})
mock_integration(
self.hass, MockModule("platform_conf", platform_schema=component_schema)
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=component_schema),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
"platform_conf": {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
"platform": "whatever",
"entity_namespace": "yummy",
}
},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert setup.setup_component(self.hass, "non_existing", {}) is False
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = Mock(return_value=True)
mock_integration(self.hass, MockModule("comp", setup=mock_setup))
assert setup.setup_component(self.hass, "comp", {})
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, "comp", {})
assert not mock_setup.called
@patch("homeassistant.util.package.install_package", return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
async def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
mock_integration(self.hass, MockModule("comp", async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, "comp", {})
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, "comp", {})
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ["maybe_existing"]
mock_integration(self.hass, MockModule("comp", dependencies=deps))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(self.hass, MockModule("comp2", dependencies=deps))
mock_integration(self.hass, MockModule("maybe_existing"))
assert setup.setup_component(self.hass, "comp2", {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
mock_integration(
self.hass, MockModule("comp", setup=lambda hass, config: False)
)
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception("fail!")
mock_integration(self.hass, MockModule("comp", setup=exception_setup))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get("comp_a", {}).get("valid", False):
return True
raise Exception(f"Config not passed in: {config}")
platform = MockPlatform()
mock_integration(self.hass, MockModule("comp_a", setup=config_check_setup))
mock_integration(
self.hass,
MockModule("platform_a", setup=config_check_setup, dependencies=["comp_a"]),
)
mock_entity_platform(self.hass, "switch.platform_a", platform)
setup.setup_component(
self.hass,
"switch",
{"comp_a": {"valid": True}, "switch": {"platform": "platform_a"}},
)
self.hass.block_till_done()
assert "comp_a" in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend(
{"valid": True}, extra=vol.PREVENT_EXTRA
)
mock_setup = Mock(spec_set=True)
mock_entity_platform(
self.hass,
"switch.platform_a",
MockPlatform(platform_schema=platform_schema, setup_platform=mock_setup),
)
with assert_setup_component(0, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "invalid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"switch",
{
"switch": {
"platform": "platform_a",
"valid": True,
"invalid_extra": True,
}
},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(1, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "valid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: None)
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule("disabled_component", setup=lambda hass, config: False),
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: True)
)
assert setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
def component1_setup(hass, config):
"""Set up mock component."""
discovery.discover(hass, "test_component2", {}, "test_component2", {})
discovery.discover(hass, "test_component3", {}, "test_component3", {})
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
mock_integration(
self.hass, MockModule("test_component1", setup=component1_setup)
)
mock_integration(
self.hass, MockModule("test_component2", setup=component_track_setup)
)
mock_integration(
self.hass, MockModule("test_component3", setup=component_track_setup)
)
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(self.hass, "test_component1", {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
async def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
mock_integration(hass, MockModule("test_component1"))
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
async def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
mock_integration(
hass, MockModule("test_component1", platform_schema=PLATFORM_SCHEMA)
)
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert len(mock_call.mock_calls) == 0
async def test_platform_error_slow_setup(hass, caplog):
"""Don't block startup more than SLOW_SETUP_MAX_WAIT."""
with patch.object(setup, "SLOW_SETUP_MAX_WAIT", 1):
called = []
async def async_setup(*args):
"""Tracking Setup."""
called.append(1)
await asyncio.sleep(2)
mock_integration(hass, MockModule("test_component1", async_setup=async_setup))
result = await setup.async_setup_component(hass, "test_component1", {})
assert len(called) == 1
assert not result
assert "test_component1 is taking longer than 1 seconds" in caplog.text
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add("test")
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Should be called right away
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
async def test_setup_import_blows_up(hass):
"""Test that we handle it correctly when importing integration blows up."""
with patch(
"homeassistant.loader.Integration.get_component", side_effect=ValueError
):
assert not await setup.async_setup_component(hass, "sun", {})
async def test_parallel_entry_setup(hass):
"""Test config entries are set up in parallel."""
MockConfigEntry(domain="comp", data={"value": 1}).add_to_hass(hass)
MockConfigEntry(domain="comp", data={"value": 2}).add_to_hass(hass)
calls = []
async def mock_async_setup_entry(hass, entry):
"""Mock setting up an entry."""
calls.append(entry.data["value"])
await asyncio.sleep(0)
calls.append(entry.data["value"])
return True
mock_integration(
hass,
MockModule(
"comp",
async_setup_entry=mock_async_setup_entry,
),
)
mock_entity_platform(hass, "config_flow.comp", None)
await setup.async_setup_component(hass, "comp", {})
assert calls == [1, 2, 1, 2]
async def test_integration_disabled(hass, caplog):
"""Test we can disable an integration."""
disabled_reason = "Dependency contains code that breaks Home Assistant"
mock_integration(
hass,
MockModule("test_component1", partial_manifest={"disabled": disabled_reason}),
)
result = await setup.async_setup_component(hass, "test_component1", {})
assert not result
assert disabled_reason in caplog.text
|
import pypck
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import (
CONF_CONNECTIONS,
CONF_DIMMABLE,
CONF_OUTPUT,
CONF_TRANSITION,
DATA_LCN,
OUTPUT_PORTS,
)
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN light platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
if config[CONF_OUTPUT] in OUTPUT_PORTS:
device = LcnOutputLight(config, address_connection)
else: # in RELAY_PORTS
device = LcnRelayLight(config, address_connection)
devices.append(device)
async_add_entities(devices)
class LcnOutputLight(LcnDevice, LightEntity):
"""Representation of a LCN light for output ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN light."""
super().__init__(config, address_connection)
self.output = pypck.lcn_defs.OutputPort[config[CONF_OUTPUT]]
self._transition = pypck.lcn_defs.time_to_ramp_value(config[CONF_TRANSITION])
self.dimmable = config[CONF_DIMMABLE]
self._brightness = 255
self._is_on = None
self._is_dimming_to_zero = False
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.output)
@property
def supported_features(self):
"""Flag supported features."""
features = SUPPORT_TRANSITION
if self.dimmable:
features |= SUPPORT_BRIGHTNESS
return features
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return True if entity is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._is_on = True
self._is_dimming_to_zero = False
if ATTR_BRIGHTNESS in kwargs:
percent = int(kwargs[ATTR_BRIGHTNESS] / 255.0 * 100)
else:
percent = 100
if ATTR_TRANSITION in kwargs:
transition = pypck.lcn_defs.time_to_ramp_value(
kwargs[ATTR_TRANSITION] * 1000
)
else:
transition = self._transition
self.address_connection.dim_output(self.output.value, percent, transition)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._is_on = False
if ATTR_TRANSITION in kwargs:
transition = pypck.lcn_defs.time_to_ramp_value(
kwargs[ATTR_TRANSITION] * 1000
)
else:
transition = self._transition
self._is_dimming_to_zero = bool(transition)
self.address_connection.dim_output(self.output.value, 0, transition)
self.async_write_ha_state()
def input_received(self, input_obj):
"""Set light state when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusOutput)
or input_obj.get_output_id() != self.output.value
):
return
self._brightness = int(input_obj.get_percent() / 100.0 * 255)
if self.brightness == 0:
self._is_dimming_to_zero = False
if not self._is_dimming_to_zero:
self._is_on = self.brightness > 0
self.async_write_ha_state()
class LcnRelayLight(LcnDevice, LightEntity):
"""Representation of a LCN light for relay ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN light."""
super().__init__(config, address_connection)
self.output = pypck.lcn_defs.RelayPort[config[CONF_OUTPUT]]
self._is_on = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.output)
@property
def is_on(self):
"""Return True if entity is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._is_on = True
states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8
states[self.output.value] = pypck.lcn_defs.RelayStateModifier.ON
self.address_connection.control_relays(states)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._is_on = False
states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8
states[self.output.value] = pypck.lcn_defs.RelayStateModifier.OFF
self.address_connection.control_relays(states)
self.async_write_ha_state()
def input_received(self, input_obj):
"""Set light state when LCN input object (command) is received."""
if not isinstance(input_obj, pypck.inputs.ModStatusRelays):
return
self._is_on = input_obj.get_state(self.output.value)
self.async_write_ha_state()
|
from django.db import models
from shop.models.defaults.commodity import Commodity
from shop.models.defaults.cart import Cart
from shop.models.defaults.cart_item import CartItem
from shop.models.defaults.order import Order
from shop.models.order import BaseOrderItem
from shop.models.defaults.delivery import Delivery
from shop.models.defaults.delivery_item import DeliveryItem
from shop.models.defaults.address import BillingAddress, ShippingAddress
from shop.models.defaults.customer import Customer
from shop.models.inventory import BaseInventory, AvailableProductMixin
__all__ = ['Commodity', 'Cart', 'CartItem', 'Order', 'OrderItem', 'Delivery', 'DeliveryItem',
'BillingAddress', 'ShippingAddress', 'Customer']
class OrderItem(BaseOrderItem):
quantity = models.PositiveIntegerField()
canceled = models.BooleanField(default=False)
class MyProduct(AvailableProductMixin, Commodity):
pass
class MyProductInventory(BaseInventory):
product = models.ForeignKey(
MyProduct,
on_delete=models.CASCADE,
related_name='inventory_set',
)
quantity = models.PositiveIntegerField(default=0)
|
import datetime
from unittest import mock
from aiohomekit.testing import FakeController
import pytest
import homeassistant.util.dt as dt_util
import tests.async_mock
from tests.components.light.conftest import mock_light_profiles # noqa
@pytest.fixture
def utcnow(request):
"""Freeze time at a known point."""
now = dt_util.utcnow()
start_dt = datetime.datetime(now.year + 1, 1, 1, 0, 0, 0)
with mock.patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
yield dt_utcnow
@pytest.fixture
def controller(hass):
"""Replace aiohomekit.Controller with an instance of aiohomekit.testing.FakeController."""
instance = FakeController()
with tests.async_mock.patch("aiohomekit.Controller", return_value=instance):
yield instance
|
from django.contrib.admin import SimpleListFilter
from django.db.models import Count
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext_lazy
from zinnia.models.author import Author
from zinnia.models.category import Category
class RelatedPublishedFilter(SimpleListFilter):
"""
Base filter for objects related to published entries.
"""
model = None
lookup_key = None
def lookups(self, request, model_admin):
"""
Return published objects with the number of entries.
"""
active_objects = self.model.published.all().annotate(
count_entries_published=Count('entries')).order_by(
'-count_entries_published', '-pk')
for active_object in active_objects:
yield (
str(active_object.pk), ngettext_lazy(
'%(item)s (%(count)i entry)',
'%(item)s (%(count)i entries)',
active_object.count_entries_published) % {
'item': smart_str(active_object),
'count': active_object.count_entries_published})
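# For illustration (hypothetical data): an Author with pk 3, shown as "John Doe"
# and having 5 published entries, would be yielded above roughly as
# ('3', 'John Doe (5 entries)').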
def queryset(self, request, queryset):
"""
Return the object's entries if a value is set.
"""
if self.value():
params = {self.lookup_key: self.value()}
return queryset.filter(**params)
class AuthorListFilter(RelatedPublishedFilter):
"""
List filter for EntryAdmin with published authors only.
"""
model = Author
lookup_key = 'authors__id'
title = _('published authors')
parameter_name = 'author'
class CategoryListFilter(RelatedPublishedFilter):
"""
List filter for EntryAdmin about categories
with published entries.
"""
model = Category
lookup_key = 'categories__id'
title = _('published categories')
parameter_name = 'category'
|
try:
from collections import defaultdict
except ImportError:
from kitchen.pycompat25.collections import defaultdict
import diamond.collector
from diamond.collector import str_to_bool
class UnboundCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(UnboundCollector, self).get_default_config_help()
config_help.update({
'bin': 'Path to unbound-control binary',
'histogram': 'Include histogram in collection',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UnboundCollector, self).get_default_config()
config.update({
'path': 'unbound',
'bin': self.find_binary('/usr/sbin/unbound-control'),
'histogram': True,
})
return config
def get_massaged_histogram(self, raw_histogram):
histogram = defaultdict(int)
for intv in sorted(raw_histogram.keys()):
if intv <= 0.001024:
# Let's compress <1ms into 1 data point
histogram['1ms'] += raw_histogram[intv]
elif intv < 1.0:
# Convert to ms and since we're using the upper limit
# divide by 2 for lower limit
intv_name = ''.join([str(int(intv / 0.001024 / 2)), 'ms+'])
histogram[intv_name] = raw_histogram[intv]
elif intv == 1.0:
histogram['512ms+'] = raw_histogram[intv]
elif 1.0 < intv <= 64.0:
# Convert upper limit into lower limit seconds
intv_name = ''.join([str(int(intv / 2)), 's+'])
histogram[intv_name] = raw_histogram[intv]
else:
# Compress everything >64s into 1 data point
histogram['64s+'] += raw_histogram[intv]
return histogram
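# Bucket-mapping sketch (hypothetical raw histogram; keys are upper interval
# limits in seconds as reported by unbound-control):
#     {0.000512: 3, 0.032768: 5, 2.0: 1}
# is massaged by the method above into
#     {'1ms': 3, '16ms+': 5, '1s+': 1}
# since 0.032768 / 0.001024 / 2 == 16 and 2.0 / 2 == 1.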
def collect(self):
stats_output = self.run_command([' stats'])
if stats_output is None:
return
stats_output = stats_output[0]
raw_histogram = {}
include_hist = str_to_bool(self.config['histogram'])
for line in stats_output.splitlines():
stat_name, stat_value = line.split('=')
if not stat_name.startswith('histogram'):
self.publish(stat_name, stat_value)
elif include_hist:
hist_intv = float(stat_name.split('.', 4)[4])
raw_histogram[hist_intv] = float(stat_value)
if include_hist:
histogram = self.get_massaged_histogram(raw_histogram)
for intv, value in histogram.iteritems():
self.publish('histogram.' + intv, value)
|
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
import pytz
from qstrader.signals.momentum import MomentumSignal
@pytest.mark.parametrize(
'start_dt,lookbacks,prices,expected',
[
(
pd.Timestamp('2019-01-01 14:30:00', tz=pytz.utc),
[6, 12],
[
99.34, 101.87, 98.32, 92.98, 103.87,
104.51, 97.62, 95.22, 96.09, 100.34,
105.14, 107.49, 90.23, 89.43, 87.68
],
[-0.08752211468415028, -0.10821806346623242]
)
]
)
def test_momentum_signal(start_dt, lookbacks, prices, expected):
"""
Checks that the momentum signal correctly calculates the
holding period return based momentum for various lookbacks.
"""
universe = Mock()
universe.get_assets.return_value = ['EQ:SPY']
mom = MomentumSignal(start_dt, universe, lookbacks)
for price_idx in range(len(prices)):
mom.append('EQ:SPY', prices[price_idx])
for i, lookback in enumerate(lookbacks):
assert np.isclose(mom('EQ:SPY', lookback), expected[i])
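# Sanity check of the expected values above, assuming MomentumSignal returns the
# plain holding-period return over each lookback window:
#     lookback 6:  87.68 / 96.09 - 1 ~= -0.08752211468415028
#     lookback 12: 87.68 / 98.32 - 1 ~= -0.10821806346623242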
|
import os
import fileinput
try:
unicode
except NameError:
unicode = str
def collapseuser(path):
"""Reverse of os.path.expanduser: return path relative to ~, if
such representation is meaningful. If path is not ~ or a
subdirectory, the absolute path will be returned.
"""
path = os.path.abspath(unicode(path))
home = os.path.expanduser("~")
if os.path.exists(os.path.expanduser("~/Pythonista.app")):
althome = os.path.dirname(os.path.realpath(os.path.expanduser("~/Pythonista.app")))
else:
althome = home
if path.startswith(home):
collapsed = os.path.relpath(path, home)
elif path.startswith(althome):
collapsed = os.path.relpath(path, althome)
else:
collapsed = path
return "~" if collapsed == "." else os.path.join("~", collapsed)
def get_lan_ip():
try:
from objc_util import ObjCClass
NSHost = ObjCClass('NSHost')
addresses = []
for address in NSHost.currentHost().addresses():
address = str(address)
if 48 <= ord(address[0]) <= 57 and address != '127.0.0.1':
addresses.append(address)
return ' '.join(addresses)
except ImportError:
return ''
def input_stream(files=()):
""" Handles input files similar to fileinput.
The advantage of this function is it recovers from errors if one
file is invalid and proceed with the next file
"""
fileinput.close()
try:
if not files:
for line in fileinput.input(files):
yield line, '', fileinput.filelineno()
else:
while files:
thefile = files.pop(0)
try:
for line in fileinput.input(thefile):
yield line, fileinput.filename(), fileinput.filelineno()
except IOError as e:
yield None, fileinput.filename(), e
finally:
fileinput.close()
def sizeof_fmt(num):
"""
Return a human-readable string describing the size of something.
:param num: the size in bytes (machine-readable form)
:type num: int
"""
for unit in ['B', 'KiB', 'MiB', 'GiB']:
if num < 1024:
return "%3.1f%s" % (num, unit)
num /= 1024.0
return "%3.1f%s" % (num, 'Ti')
|
import argparse
import sqlite3
import sys
import os
class Error(Exception):
"""Exception for errors in this module."""
def parse():
"""Parse command line arguments."""
description = ("This program is meant to extract browser history from your"
" previous browser and import them into qutebrowser.")
epilog = ("Databases:\n\n\tqutebrowser: Is named 'history.sqlite' and can "
"be found at your --basedir. In order to find where your "
"basedir is you can run ':open qute:version' inside qutebrowser."
"\n\n\tFirefox: Is named 'places.sqlite', and can be found at "
"your system's profile folder. Check this link for where it is "
"located: http://kb.mozillazine.org/Profile_folder"
"\n\n\tChrome: Is named 'History', and can be found at the "
"respective User Data Directory. Check this link for where it is"
"located: https://chromium.googlesource.com/chromium/src/+/"
"master/docs/user_data_dir.md\n\n"
"Example: hist_importer.py -b firefox -s /Firefox/Profile/"
"places.sqlite -d /qutebrowser/data/history.sqlite")
parser = argparse.ArgumentParser(
description=description, epilog=epilog,
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-b', '--browser', dest='browser', required=True,
type=str, help='Browsers: {firefox, chrome}')
parser.add_argument('-s', '--source', dest='source', required=True,
type=str, help='Source: Full path to the sqlite data'
'base file from the source browser.')
parser.add_argument('-d', '--dest', dest='dest', required=True, type=str,
help='\nDestination: Full path to the qutebrowser '
'sqlite database')
return parser.parse_args()
def open_db(data_base):
"""Open connection with database."""
if os.path.isfile(data_base):
return sqlite3.connect(data_base)
raise Error('The file {} does not exist.'.format(data_base))
def extract(source, query):
"""Get records from source database.
Args:
source: File path to the source database where we want to extract the
data from.
query: The query string to be executed in order to retrieve relevant
attributes as (datetime, url, time) from the source database according
to the browser chosen.
"""
try:
conn = open_db(source)
cursor = conn.cursor()
cursor.execute(query)
history = cursor.fetchall()
conn.close()
return history
except sqlite3.OperationalError as op_e:
raise Error('Could not perform queries on the source database: '
'{}'.format(op_e))
def clean(history):
"""Clean up records from source database.
Receives a list of records and sanitizes them so they can be properly
imported into qutebrowser. Sanitization requires adding a 4th
attribute 'redirect', which is filled with '0', and purging all
records that have a NULL/None datetime attribute.
Args:
history: List of records (datetime, url, title) from source database.
"""
# replace missing titles with an empty string
for index, record in enumerate(history):
if record[1] is None:
cleaned = list(record)
cleaned[1] = ''
history[index] = tuple(cleaned)
nulls = [record for record in history if None in record]
for null_record in nulls:
history.remove(null_record)
history = [list(record) for record in history]
for record in history:
record.append('0')
return history
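# Example (hypothetical records in (url, title, date) order, matching the queries
# in run() below):
#     clean([('http://a', None, 1000), ('http://b', 'B', None)])
#     -> [['http://a', '', 1000, '0']]
# The missing title becomes '', the record with a NULL date is dropped, and the
# 'redirect' column is appended.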
def insert_qb(history, dest):
"""Insert history into dest database.
Args:
history: List of records.
dest: File path to the destination database, where history will be
inserted.
"""
conn = open_db(dest)
cursor = conn.cursor()
cursor.executemany(
'INSERT INTO History (url,title,atime,redirect) VALUES (?,?,?,?)',
history
)
cursor.execute('DROP TABLE CompletionHistory')
conn.commit()
conn.close()
def run():
"""Main control flux of the script."""
args = parse()
browser = args.browser.lower()
source, dest = args.source, args.dest
query = {
'firefox': 'select url,title,last_visit_date/1000000 as date '
'from moz_places where url like "http%" or url '
'like "ftp%" or url like "file://%"',
'chrome': 'select url,title,last_visit_time/10000000 as date '
'from urls',
}
if browser not in query:
raise Error('Sorry, the selected browser: "{}" is not '
'supported.'.format(browser))
history = extract(source, query[browser])
history = clean(history)
insert_qb(history, dest)
def main():
try:
run()
except Error as e:
sys.exit(str(e))
if __name__ == "__main__":
main()
|
import logging
import numpy as np
import time
from collections import Counter
logger = logging.getLogger(__name__)
phout_columns = [
'time', 'tag', 'interval_real', 'connect_time', 'send_time', 'latency',
'receive_time', 'interval_event', 'size_out', 'size_in', 'net_code',
'proto_code'
]
phantom_config = {
"interval_real": ["total", "max", "min", "hist", "q", "len"],
"connect_time": ["total", "max", "min", "len"],
"send_time": ["total", "max", "min", "len"],
"latency": ["total", "max", "min", "len"],
"receive_time": ["total", "max", "min", "len"],
"interval_event": ["total", "max", "min", "len"],
"size_out": ["total", "max", "min", "len"],
"size_in": ["total", "max", "min", "len"],
"net_code": ["count"],
"proto_code": ["count"],
}
class Worker(object):
"""
Aggregate a Pandas DataFrame or a dict of numpy ndarrays.
"""
def __init__(self, config, verbose_histogram):
if verbose_histogram:
bins = np.linspace(0, 4990, 500) # 10µs accuracy
bins = np.append(bins,
np.linspace(5000, 9900, 50)) # 100µs accuracy
bins = np.append(bins,
np.linspace(10, 499, 490) * 1000) # 1ms accuracy
bins = np.append(bins,
np.linspace(500, 2995, 500) * 1000) # 5ms accuracy
bins = np.append(bins, np.linspace(3000, 9990, 700) * 1000) # 10ms accuracy
bins = np.append(bins, np.linspace(10000, 29950, 400) * 1000) # 50ms accuracy
bins = np.append(bins, np.linspace(30000, 119900, 900) * 1000) # 100ms accuracy
bins = np.append(bins, np.linspace(120, 300, 181) * 1000000) # 1s accuracy
else:
# yapf: disable
bins = np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 20, 30, 40, 50, 60, 70, 80, 90,
100, 150, 200, 250, 300, 350, 400, 450,
500, 600, 650, 700, 750, 800, 850, 900, 950,
1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500,
5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000, 11000,
12000, 13000, 14000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000,
55000, 60000,
]) * 1000
# yapf: enable
self.bins = bins
self.percentiles = np.array([50, 75, 80, 85, 90, 95, 98, 99, 100])
self.config = config
self.aggregators = {
"hist": self._histogram,
"q": self._quantiles,
"mean": self._mean,
"total": self._total,
"min": self._min,
"max": self._max,
"count": self._count,
"len": self._len,
}
def _histogram(self, series):
data, bins = np.histogram(series, bins=self.bins)
mask = data > 0
return {
"data": [e.item() for e in data[mask]],
"bins": [e.item() for e in bins[1:][mask]],
}
def _mean(self, series):
return series.mean().item()
def _total(self, series):
return series.sum().item()
def _max(self, series):
return series.max().item()
def _min(self, series):
return series.min().item()
def _count(self, series):
return {str(k): v for k, v in dict(Counter(series)).items()}
def _len(self, series):
return len(series)
def _quantiles(self, series):
return {
"q": list(self.percentiles),
"value": list(np.percentile(series, self.percentiles)),
}
def aggregate(self, data):
return {
key: {
aggregate: self.aggregators.get(aggregate)(data[key])
for aggregate in self.config[key]
}
for key in self.config
}
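# Minimal usage sketch (illustrative config; the column name follows phantom_config):
#     w = Worker({"interval_real": ["max", "len"]}, verbose_histogram=False)
#     w.aggregate({"interval_real": np.array([1000, 2000, 3000])})
#     -> {"interval_real": {"max": 3000, "len": 3}}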
class DataPoller(object):
def __init__(self, source, poll_period, max_wait=31):
"""
:param source: generator; should raise StopIteration at some point, otherwise the tank will hang
:param poll_period: sleep interval between polls, in seconds
"""
self.poll_period = poll_period or 0.01
self.source = source
self.wait_cntr_max = max_wait // self.poll_period or 1
self.wait_counter = 0
def __iter__(self):
for chunk in self.source:
if chunk is not None:
self.wait_counter = 0
yield chunk
elif self.wait_counter < self.wait_cntr_max:
self.wait_counter += 1
else:
logger.warning('Data poller has been receiving no data for {} seconds.\n'
'Closing data poller'.format(self.wait_cntr_max * self.poll_period))
break
time.sleep(self.poll_period)
def to_utc(ts):
# dst = daylight saving time
is_dst = time.daylight and time.localtime().tm_isdst > 0
offset = (time.altzone if is_dst else time.timezone)
return ts + offset
class Aggregator(object):
def __init__(self, source, config, verbose_histogram):
self.worker = Worker(config, verbose_histogram)
self.source = source
self.groupby = 'tag'
def __iter__(self):
for ts, chunk, rps in self.source:
by_tag = list(chunk.groupby([self.groupby]))
start_time = time.time()
result = {
"ts": ts,
"tagged":
{tag: self.worker.aggregate(data)
for tag, data in by_tag},
"overall": self.worker.aggregate(chunk),
"counted_rps": rps
}
logger.debug(
"Aggregation time: %.2fms", (time.time() - start_time) * 1000)
yield result
|
from .base_classes import Container, Command
from .labelref import Marker, Label
class Section(Container):
"""A class that represents a section."""
#: A section should normally start in its own paragraph
end_paragraph = True
#: Default prefix to use with Marker
marker_prefix = "sec"
#: Number the sections when the section element is compatible,
#: by changing the `~.Section` class default all
#: subclasses will also have the new default.
numbering = True
def __init__(self, title, numbering=None, *, label=True, **kwargs):
"""
Args
----
title: str
The section title.
numbering: bool
Add a number before the section title.
label: Label or bool or str
Can set a label manually or use a boolean to choose between
an automatic label and no label.
"""
self.title = title
if numbering is not None:
self.numbering = numbering
if isinstance(label, Label):
self.label = label
elif isinstance(label, str):
if ':' in label:
label = label.split(':', 1)
self.label = Label(Marker(label[1], label[0]))
else:
self.label = Label(Marker(label, self.marker_prefix))
elif label:
self.label = Label(Marker(title, self.marker_prefix))
else:
self.label = None
super().__init__(**kwargs)
def dumps(self):
"""Represent the section as a string in LaTeX syntax.
Returns
-------
str
"""
if not self.numbering:
num = '*'
else:
num = ''
string = Command(self.latex_name + num, self.title).dumps()
if self.label is not None:
string += '%\n' + self.label.dumps()
string += '%\n' + self.dumps_content()
return string
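# Rough illustration (assuming pylatex derives latex_name from the class name):
# Section('Introduction').dumps() yields roughly
#     \section{Introduction}%
#     \label{sec:Introduction}%
#     <section content>
# while passing numbering=False switches the command to \section*.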
class Part(Section):
"""A class that represents a part."""
marker_prefix = "part"
class Chapter(Section):
"""A class that represents a chapter."""
marker_prefix = "chap"
class Subsection(Section):
"""A class that represents a subsection."""
marker_prefix = "subsec"
class Subsubsection(Section):
"""A class that represents a subsubsection."""
marker_prefix = "ssubsec"
class Paragraph(Section):
"""A class that represents a paragraph."""
marker_prefix = "para"
class Subparagraph(Section):
"""A class that represents a subparagraph."""
marker_prefix = "subpara"
|
import argparse
import logging
import sys
from typing import Any
from typing import Mapping
from typing import Optional
from typing import Sequence
import yaml
from paasta_tools.cli.utils import LONG_RUNNING_INSTANCE_TYPE_HANDLERS
from paasta_tools.flink_tools import get_flink_ingress_url_root
from paasta_tools.kubernetes_tools import create_custom_resource
from paasta_tools.kubernetes_tools import CustomResourceDefinition
from paasta_tools.kubernetes_tools import ensure_namespace
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import KubeCustomResource
from paasta_tools.kubernetes_tools import KubeKind
from paasta_tools.kubernetes_tools import list_custom_resources
from paasta_tools.kubernetes_tools import load_custom_resource_definitions
from paasta_tools.kubernetes_tools import paasta_prefixed
from paasta_tools.kubernetes_tools import sanitise_kubernetes_name
from paasta_tools.kubernetes_tools import update_custom_resource
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_config_hash
from paasta_tools.utils import get_git_sha_from_dockerurl
from paasta_tools.utils import load_all_configs
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
class StdoutKubeClient:
"""Replace all destructive operations in Kubernetes APIs with
writing out YAML to stdout."""
class StdoutWrapper:
def __init__(self, target) -> None:
self.target = target
def __getattr__(self, attr):
if attr.startswith("create") or attr.startswith("replace"):
return self.yaml_dump
return getattr(self.target, attr)
def yaml_dump(self, **kwargs):
body = kwargs.get("body")
if not body:
return
ns = kwargs.get("namespace")
if ns:
if "metadata" not in body:
body["metadata"] = {}
body["metadata"]["namespace"] = ns
yaml.safe_dump(body, sys.stdout, indent=4, explicit_start=True)
def __init__(self, kube_client) -> None:
self.deployments = StdoutKubeClient.StdoutWrapper(kube_client.deployments)
self.core = StdoutKubeClient.StdoutWrapper(kube_client.core)
self.policy = StdoutKubeClient.StdoutWrapper(kube_client.policy)
self.apiextensions = StdoutKubeClient.StdoutWrapper(kube_client.apiextensions)
self.custom = StdoutKubeClient.StdoutWrapper(kube_client.custom)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Creates custom_resources.")
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
parser.add_argument(
"-s", "--service", default=None, help="Service to setup CRs for"
)
parser.add_argument(
"-i", "--instance", default=None, help="Service instance to setup CR for"
)
parser.add_argument(
"-D",
"--dry-run",
action="store_true",
default=False,
help="Output kubernetes configuration instead of applying it",
)
parser.add_argument(
"-c", "--cluster", default=None, help="Cluster to setup CRs for"
)
args = parser.parse_args()
return args
def main() -> None:
args = parse_args()
soa_dir = args.soa_dir
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
kube_client: Any = KubeClient()
if args.dry_run:
kube_client = StdoutKubeClient(kube_client)
system_paasta_config = load_system_paasta_config()
cluster = args.cluster or system_paasta_config.get_cluster()
custom_resource_definitions = load_custom_resource_definitions(system_paasta_config)
setup_kube_succeeded = setup_all_custom_resources(
kube_client=kube_client,
soa_dir=soa_dir,
cluster=cluster,
custom_resource_definitions=custom_resource_definitions,
service=args.service,
instance=args.instance,
)
sys.exit(0 if setup_kube_succeeded else 1)
def setup_all_custom_resources(
kube_client: KubeClient,
soa_dir: str,
cluster: str,
custom_resource_definitions: Sequence[CustomResourceDefinition],
service: str = None,
instance: str = None,
) -> bool:
cluster_crds = {
crd.spec.names.kind
for crd in kube_client.apiextensions.list_custom_resource_definition(
label_selector=paasta_prefixed("service")
).items
}
log.debug(f"CRDs found: {cluster_crds}")
results = []
for crd in custom_resource_definitions:
if crd.kube_kind.singular not in cluster_crds:
# TODO: kube_kind.singular seems to correspond to `crd.names.kind`
# and not `crd.names.singular`
log.warning(f"CRD {crd.kube_kind.singular} " f"not found in {cluster}")
continue
# by convention, entries where key begins with _ are used as templates
raw_config_dicts = load_all_configs(
cluster=cluster, file_prefix=crd.file_prefix, soa_dir=soa_dir
)
config_dicts = {}
for svc, raw_sdict in raw_config_dicts.items():
sdict = {inst: idict for inst, idict in raw_sdict.items() if inst[0] != "_"}
if sdict:
config_dicts[svc] = sdict
if not config_dicts:
continue
ensure_namespace(
kube_client=kube_client, namespace=f"paasta-{crd.kube_kind.plural}"
)
results.append(
setup_custom_resources(
kube_client=kube_client,
kind=crd.kube_kind,
crd=crd,
config_dicts=config_dicts,
version=crd.version,
group=crd.group,
cluster=cluster,
service=service,
instance=instance,
)
)
return any(results) if results else True
def setup_custom_resources(
kube_client: KubeClient,
kind: KubeKind,
version: str,
crd: CustomResourceDefinition,
config_dicts: Mapping[str, Mapping[str, Any]],
group: str,
cluster: str,
service: str = None,
instance: str = None,
) -> bool:
succeeded = True
if config_dicts:
crs = list_custom_resources(
kube_client=kube_client, kind=kind, version=version, group=group
)
for svc, config in config_dicts.items():
if service is not None and service != svc:
continue
if not reconcile_kubernetes_resource(
kube_client=kube_client,
service=svc,
instance=instance,
instance_configs=config,
kind=kind,
custom_resources=crs,
version=version,
group=group,
cluster=cluster,
crd=crd,
):
succeeded = False
return succeeded
def get_dashboard_base_url(kind: str, cluster: str) -> Optional[str]:
system_paasta_config = load_system_paasta_config()
dashboard_links = system_paasta_config.get_dashboard_links()
if kind.lower() == "flink":
flink_link = dashboard_links.get(cluster, {}).get("Flink")
if flink_link is None:
flink_link = get_flink_ingress_url_root(cluster)
if flink_link[-1:] != "/":
flink_link += "/"
return flink_link
return None
def format_custom_resource(
instance_config: Mapping[str, Any],
service: str,
instance: str,
cluster: str,
kind: str,
version: str,
group: str,
namespace: str,
git_sha: str,
) -> Mapping[str, Any]:
sanitised_service = sanitise_kubernetes_name(service)
sanitised_instance = sanitise_kubernetes_name(instance)
resource: Mapping[str, Any] = {
"apiVersion": f"{group}/{version}",
"kind": kind,
"metadata": {
"name": f"{sanitised_service}-{sanitised_instance}",
"namespace": namespace,
"labels": {
"yelp.com/paasta_service": service,
"yelp.com/paasta_instance": instance,
"yelp.com/paasta_cluster": cluster,
paasta_prefixed("service"): service,
paasta_prefixed("instance"): instance,
paasta_prefixed("cluster"): cluster,
},
"annotations": {},
},
"spec": instance_config,
}
url = get_dashboard_base_url(kind, cluster)
if url:
resource["metadata"]["annotations"][paasta_prefixed("dashboard_base_url")] = url
config_hash = get_config_hash(resource)
resource["metadata"]["annotations"]["yelp.com/desired_state"] = "running"
resource["metadata"]["annotations"][paasta_prefixed("desired_state")] = "running"
resource["metadata"]["labels"]["yelp.com/paasta_config_sha"] = config_hash
resource["metadata"]["labels"][paasta_prefixed("config_sha")] = config_hash
resource["metadata"]["labels"][paasta_prefixed("git_sha")] = git_sha
return resource
def reconcile_kubernetes_resource(
kube_client: KubeClient,
service: str,
instance_configs: Mapping[str, Any],
custom_resources: Sequence[KubeCustomResource],
kind: KubeKind,
version: str,
group: str,
crd: CustomResourceDefinition,
cluster: str,
instance: str = None,
) -> bool:
succeeded = True
config_handler = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[crd.file_prefix]
for inst, config in instance_configs.items():
if instance is not None and instance != inst:
continue
try:
soa_config = config_handler.loader(
service=service,
instance=inst,
cluster=cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
)
git_sha = get_git_sha_from_dockerurl(soa_config.get_docker_url(), long=True)
formatted_resource = format_custom_resource(
instance_config=config,
service=service,
instance=inst,
cluster=cluster,
kind=kind.singular,
version=version,
group=group,
namespace=f"paasta-{kind.plural}",
git_sha=git_sha,
)
desired_resource = KubeCustomResource(
service=service,
instance=inst,
config_sha=formatted_resource["metadata"]["labels"][
paasta_prefixed("config_sha")
],
git_sha=formatted_resource["metadata"]["labels"].get(
paasta_prefixed("git_sha")
),
kind=kind.singular,
name=formatted_resource["metadata"]["name"],
namespace=f"paasta-{kind.plural}",
)
if (service, inst, kind.singular) not in [
(c.service, c.instance, c.kind) for c in custom_resources
]:
log.info(f"{desired_resource} does not exist so creating")
create_custom_resource(
kube_client=kube_client,
version=version,
kind=kind,
formatted_resource=formatted_resource,
group=group,
)
elif desired_resource not in custom_resources:
sanitised_service = sanitise_kubernetes_name(service)
sanitised_instance = sanitise_kubernetes_name(inst)
log.info(f"{desired_resource} exists but config_sha doesn't match")
update_custom_resource(
kube_client=kube_client,
name=f"{sanitised_service}-{sanitised_instance}",
version=version,
kind=kind,
formatted_resource=formatted_resource,
group=group,
)
else:
log.info(f"{desired_resource} is up to date, no action taken")
except Exception as e:
log.error(str(e))
succeeded = False
return succeeded
if __name__ == "__main__":
main()
|
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_TYPE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
ATTR_MIN_VALUE = "min_value"
ATTR_MIN_ENTITY_ID = "min_entity_id"
ATTR_MAX_VALUE = "max_value"
ATTR_MAX_ENTITY_ID = "max_entity_id"
ATTR_COUNT_SENSORS = "count_sensors"
ATTR_MEAN = "mean"
ATTR_MEDIAN = "median"
ATTR_LAST = "last"
ATTR_LAST_ENTITY_ID = "last_entity_id"
ATTR_TO_PROPERTY = [
ATTR_COUNT_SENSORS,
ATTR_MAX_VALUE,
ATTR_MAX_ENTITY_ID,
ATTR_MEAN,
ATTR_MEDIAN,
ATTR_MIN_VALUE,
ATTR_MIN_ENTITY_ID,
ATTR_LAST,
ATTR_LAST_ENTITY_ID,
]
CONF_ENTITY_IDS = "entity_ids"
CONF_ROUND_DIGITS = "round_digits"
ICON = "mdi:calculator"
SENSOR_TYPES = {
ATTR_MIN_VALUE: "min",
ATTR_MAX_VALUE: "max",
ATTR_MEAN: "mean",
ATTR_MEDIAN: "median",
ATTR_LAST: "last",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_TYPE, default=SENSOR_TYPES[ATTR_MAX_VALUE]): vol.All(
cv.string, vol.In(SENSOR_TYPES.values())
),
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENTITY_IDS): cv.entity_ids,
vol.Optional(CONF_ROUND_DIGITS, default=2): vol.Coerce(int),
}
)
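# Illustrative configuration.yaml entry (entity ids are hypothetical; the platform
# key is assumed to match this integration's domain):
# sensor:
#   - platform: min_max
#     entity_ids:
#       - sensor.temperature_1
#       - sensor.temperature_2
#     type: median
#     round_digits: 1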
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the min/max/mean sensor."""
entity_ids = config.get(CONF_ENTITY_IDS)
name = config.get(CONF_NAME)
sensor_type = config.get(CONF_TYPE)
round_digits = config.get(CONF_ROUND_DIGITS)
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities([MinMaxSensor(entity_ids, name, sensor_type, round_digits)])
def calc_min(sensor_values):
"""Calculate min value, honoring unknown states."""
val = None
entity_id = None
for sensor_id, sensor_value in sensor_values:
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
if val is None or val > sensor_value:
entity_id, val = sensor_id, sensor_value
return entity_id, val
def calc_max(sensor_values):
"""Calculate max value, honoring unknown states."""
val = None
entity_id = None
for sensor_id, sensor_value in sensor_values:
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
if val is None or val < sensor_value:
entity_id, val = sensor_id, sensor_value
return entity_id, val
def calc_mean(sensor_values, round_digits):
"""Calculate mean value, honoring unknown states."""
result = []
for _, sensor_value in sensor_values:
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
result.append(sensor_value)
if len(result) == 0:
return None
return round(sum(result) / len(result), round_digits)
def calc_median(sensor_values, round_digits):
"""Calculate median value, honoring unknown states."""
result = []
for _, sensor_value in sensor_values:
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
result.append(sensor_value)
if len(result) == 0:
return None
result.sort()
if len(result) % 2 == 0:
median1 = result[len(result) // 2]
median2 = result[len(result) // 2 - 1]
median = (median1 + median2) / 2
else:
median = result[len(result) // 2]
return round(median, round_digits)
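# Worked example (hypothetical sensor values): with
#     sensor_values = [("sensor.a", 3.0), ("sensor.b", 1.0), ("sensor.c", 2.0),
#                      ("sensor.d", STATE_UNKNOWN)]
# calc_min returns ("sensor.b", 1.0), calc_max returns ("sensor.a", 3.0), and both
# calc_mean(sensor_values, 2) and calc_median(sensor_values, 2) return 2.0;
# the unknown state is ignored throughout.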
class MinMaxSensor(Entity):
"""Representation of a min/max sensor."""
def __init__(self, entity_ids, name, sensor_type, round_digits):
"""Initialize the min/max sensor."""
self._entity_ids = entity_ids
self._sensor_type = sensor_type
self._round_digits = round_digits
if name:
self._name = name
else:
self._name = f"{next(v for k, v in SENSOR_TYPES.items() if self._sensor_type == v)} sensor".capitalize()
self._unit_of_measurement = None
self._unit_of_measurement_mismatch = False
self.min_value = self.max_value = self.mean = self.last = self.median = None
self.min_entity_id = self.max_entity_id = self.last_entity_id = None
self.count_sensors = len(self._entity_ids)
self.states = {}
async def async_added_to_hass(self):
"""Handle added to Hass."""
self.async_on_remove(
async_track_state_change_event(
self.hass, self._entity_ids, self._async_min_max_sensor_state_listener
)
)
self._calc_values()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._unit_of_measurement_mismatch:
return None
return getattr(
self, next(k for k, v in SENSOR_TYPES.items() if self._sensor_type == v)
)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._unit_of_measurement_mismatch:
return "ERR"
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@callback
def _async_min_max_sensor_state_listener(self, event):
"""Handle the sensor state changes."""
new_state = event.data.get("new_state")
entity = event.data.get("entity_id")
if new_state.state is None or new_state.state in [
STATE_UNKNOWN,
STATE_UNAVAILABLE,
]:
self.states[entity] = STATE_UNKNOWN
self._calc_values()
self.async_write_ha_state()
return
if self._unit_of_measurement is None:
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
if self._unit_of_measurement != new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
):
_LOGGER.warning(
"Units of measurement do not match for entity %s", self.entity_id
)
self._unit_of_measurement_mismatch = True
try:
self.states[entity] = float(new_state.state)
self.last = float(new_state.state)
self.last_entity_id = entity
except ValueError:
_LOGGER.warning(
"Unable to store state. Only numerical states are supported"
)
self._calc_values()
self.async_write_ha_state()
@callback
def _calc_values(self):
"""Calculate the values."""
sensor_values = [
(entity_id, self.states[entity_id])
for entity_id in self._entity_ids
if entity_id in self.states
]
self.min_entity_id, self.min_value = calc_min(sensor_values)
self.max_entity_id, self.max_value = calc_max(sensor_values)
self.mean = calc_mean(sensor_values, self._round_digits)
self.median = calc_median(sensor_values, self._round_digits)
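# Configuration sketch (not part of the original file), derived from the
# PLATFORM_SCHEMA above; the platform key and entity ids are assumptions:
#
#   sensor:
#     - platform: min_max
#       name: "Average temperature"
#       type: mean
#       round_digits: 1
#       entity_ids:
#         - sensor.kitchen_temperature
#         - sensor.living_room_temperature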
|
import tempfile
import unittest
from absl import flags
import mock
from perfkitbenchmarker import beam_benchmark_helper
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
class BeamBenchmarkHelperTestCase(unittest.TestCase):
def setUp(self):
# TODO(ferneyhough): See exactly why this is needed and find a better way
# to do this. Unittests in PKB should not have to add this call manually.
FLAGS.mark_as_parsed()
super(BeamBenchmarkHelperTestCase, self).setUp()
def test_runner_option_override_use_override(self):
test_option_val = 'TestVal'
actual_options = []
beam_benchmark_helper.AddRunnerPipelineOption(actual_options, None,
test_option_val)
self.assertListEqual(['--runner=' + test_option_val], actual_options)
def test_runner_option_override_empty_override(self):
test_option_val = ''
actual_options = []
beam_benchmark_helper.AddRunnerPipelineOption(actual_options, None,
test_option_val)
self.assertListEqual([], actual_options)
def test_dataflow_runner_name_added(self):
test_option_val = 'dataflow'
actual_command = []
beam_benchmark_helper.AddRunnerArgument(actual_command, test_option_val)
self.assertListEqual(['-DintegrationTestRunner=' + test_option_val],
actual_command)
def test_direct_runner_name_added(self):
test_option_val = 'direct'
actual_command = []
beam_benchmark_helper.AddRunnerArgument(actual_command, test_option_val)
self.assertListEqual(['-DintegrationTestRunner=' + test_option_val],
actual_command)
def test_runner_name_empty(self):
test_option_val = ''
actual_command = []
beam_benchmark_helper.AddRunnerArgument(actual_command, test_option_val)
self.assertListEqual([], actual_command)
def test_extra_property_empty_property(self):
test_option_val = ''
actual_command = []
beam_benchmark_helper.AddExtraProperties(actual_command, test_option_val)
self.assertListEqual([], actual_command)
def test_extra_property_single_property(self):
test_option_val = '[key=value]'
actual_mvn_command = []
beam_benchmark_helper.AddExtraProperties(actual_mvn_command,
test_option_val)
self.assertListEqual(['-Dkey=value'], actual_mvn_command)
def test_extra_property_single_property_quoted(self):
test_option_val = '["key=value"]'
actual_mvn_command = []
beam_benchmark_helper.AddExtraProperties(actual_mvn_command,
test_option_val)
self.assertListEqual(['-Dkey=value'], actual_mvn_command)
def test_extra_property_multiple_properties(self):
test_option_val = '["key=value", "key2=value2"]'
actual_mvn_command = []
beam_benchmark_helper.AddExtraProperties(actual_mvn_command,
test_option_val)
self.assertListEqual(['-Dkey=value', '-Dkey2=value2'], actual_mvn_command)
def test_integrationPipelineOptions_rejection(self):
test_option_val = '["integrationTestPipelineOptions=..."]'
actual_mvn_command = []
with self.assertRaises(ValueError):
beam_benchmark_helper.AddExtraProperties(actual_mvn_command,
test_option_val)
def test_hdfs_filesystem_addition(self):
test_option_val = 'hdfs'
actual_command = []
beam_benchmark_helper.AddFilesystemArgument(actual_command, test_option_val)
self.assertListEqual(['-Dfilesystem=hdfs'], actual_command)
def test_empty_filesystem(self):
test_option_val = ''
actual_command = []
beam_benchmark_helper.AddFilesystemArgument(actual_command, test_option_val)
self.assertListEqual([], actual_command)
def test_add_task(self):
test_module_val = ':sdks:java:io'
test_task_val = 'tests'
actual_command = []
beam_benchmark_helper.AddTaskArgument(actual_command, test_task_val,
test_module_val)
self.assertListEqual([':sdks:java:io:tests'], actual_command)
def test_add_empty_task(self):
test_option_val = ''
actual_command = []
with self.assertRaises(ValueError):
beam_benchmark_helper.AddTaskArgument(actual_command, test_option_val,
test_option_val)
def test_initialize_beam_repo_beam_exists(self):
FLAGS.beam_location = tempfile.mkdtemp()
with mock.patch.object(beam_benchmark_helper, '_PrebuildBeam') as mock_prebuild, \
mock.patch.object(vm_util, 'GenTempDir'):
mock_spec = mock.MagicMock()
mock_spec.dpb_service.SERVICE_TYPE = dpb_service.DATAFLOW
beam_benchmark_helper.InitializeBeamRepo(mock_spec)
mock_prebuild.assert_called_once()
def test_initialize_beam_repo_beam_not_exists(self):
FLAGS.beam_location = None
with mock.patch.object(beam_benchmark_helper, '_PrebuildBeam') as mock_prebuild, \
mock.patch.object(vm_util, 'GenTempDir'), \
mock.patch.object(vm_util, 'GetTempDir'), \
mock.patch.object(vm_util, 'IssueCommand') as mock_run:
mock_spec = mock.MagicMock()
mock_spec.dpb_service.SERVICE_TYPE = dpb_service.DATAFLOW
beam_benchmark_helper.InitializeBeamRepo(mock_spec)
expected_cmd = ['git', 'clone', 'https://github.com/apache/beam.git']
mock_run.assert_called_once_with(expected_cmd,
cwd=vm_util.GetTempDir())
mock_prebuild.assert_called_once()
def test_beam_prebuild(self):
FLAGS.beam_prebuilt = False
FLAGS.beam_it_module = ':sdks:java'
FLAGS.beam_runner = 'dataflow'
FLAGS.beam_filesystem = 'hdfs'
FLAGS.beam_extra_properties = '[extra_key=extra_value]'
with mock.patch.object(beam_benchmark_helper, '_GetGradleCommand') as mock_gradle, \
mock.patch.object(beam_benchmark_helper, '_GetBeamDir'), \
mock.patch.object(vm_util, 'IssueCommand') as mock_run:
mock_gradle.return_value = 'gradlew'
beam_benchmark_helper._PrebuildBeam()
expected_cmd = [
'gradlew',
'--stacktrace',
'--info',
':sdks:java:clean',
':sdks:java:assemble',
'-DintegrationTestRunner=dataflow',
'-Dfilesystem=hdfs',
'-Dextra_key=extra_value'
]
mock_run.assert_called_once_with(expected_cmd,
cwd=beam_benchmark_helper._GetBeamDir(),
timeout=1500)
def test_build_python_gradle_command(self):
FLAGS.beam_python_attr = 'IT'
FLAGS.beam_it_module = ':sdks:python'
FLAGS.beam_runner = 'TestRunner'
FLAGS.beam_python_sdk_location = 'py/location.tar'
FLAGS.beam_sdk = beam_benchmark_helper.BEAM_PYTHON_SDK
with mock.patch.object(beam_benchmark_helper, '_GetGradleCommand') as mock_gradle, \
mock.patch.object(beam_benchmark_helper, '_GetBeamDir'), \
mock.patch.object(vm_util, 'ExecutableOnPath', return_value=True) as exec_check:
mock_gradle.return_value = 'gradlew'
mock_spec = mock.MagicMock()
mock_spec.service_type = dpb_service.DATAFLOW
actual_cmd, _ = beam_benchmark_helper.BuildBeamCommand(mock_spec,
'apache_beam.py',
['--args'])
expected_cmd = [
'gradlew',
':sdks:python:integrationTest',
'-Dtests=apache_beam.py',
'-Dattr=IT',
'-DpipelineOptions=--args "--runner=TestRunner" '
'"--sdk_location=py/location.tar"',
'--info',
'--scan',
]
self.assertListEqual(expected_cmd, actual_cmd)
exec_check.assert_called_once()
def test_build_java_gradle_command(self):
FLAGS.beam_it_module = ':sdks:java'
FLAGS.beam_runner = 'dataflow'
FLAGS.beam_filesystem = 'hdfs'
FLAGS.beam_extra_properties = '["extra_key=extra_value"]'
FLAGS.beam_sdk = beam_benchmark_helper.BEAM_JAVA_SDK
with mock.patch.object(beam_benchmark_helper, '_GetGradleCommand') as mock_gradle, \
mock.patch.object(beam_benchmark_helper, '_GetBeamDir'), \
mock.patch.object(vm_util, 'ExecutableOnPath', return_value=True) as exec_check:
mock_gradle.return_value = 'gradlew'
mock_spec = mock.MagicMock()
mock_spec.service_type = dpb_service.DATAFLOW
actual_cmd, _ = beam_benchmark_helper.BuildBeamCommand(
mock_spec, 'org.apache.beam.sdk.java', ['--args'])
expected_cmd = [
'gradlew',
':sdks:java:integrationTest',
'--tests=org.apache.beam.sdk.java',
'-DintegrationTestRunner=dataflow',
'-Dfilesystem=hdfs',
'-Dextra_key=extra_value',
'-DintegrationTestPipelineOptions=[--args,"--runner=TestDataflowRunner"]',
'--stacktrace',
'--info',
'--scan',
]
self.assertListEqual(expected_cmd, actual_cmd)
exec_check.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
import typing
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from keras.initializers import constant
class MatchingTensorLayer(Layer):
"""
Layer that captures the basic interactions between two tensors.
:param channels: Number of word interaction tensor channels
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
:param init_diag: Whether to initialize the diagonal elements
of the matrix.
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.MatchingTensorLayer(channels=4,
... normalize=True,
... init_diag=True)
>>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
>>> layer.build([[num_batch, left_len, num_dim],
... [num_batch, right_len, num_dim]])
"""
def __init__(self, channels: int = 4, normalize: bool = True,
init_diag: bool = True, **kwargs):
""":class:`MatchingTensorLayer` constructor."""
super().__init__(**kwargs)
self._channels = channels
self._normalize = normalize
self._init_diag = init_diag
self._shape1 = None
self._shape2 = None
def build(self, input_shape: list):
"""
Build the layer.
:param input_shape: the shapes of the input tensors,
for MatchingTensorLayer we need two input tensors.
"""
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingTensorLayer` layer should be called '
'on a list of 2 inputs.')
self._shape1 = input_shape[0]
self._shape2 = input_shape[1]
for idx in (0, 2):
if self._shape1[idx] != self._shape2[idx]:
raise ValueError(
'Incompatible dimensions: '
                    f'{self._shape1[idx]} != {self._shape2[idx]}. '
                    f'Layer shapes: {self._shape1}, {self._shape2}.'
)
if self._init_diag:
interaction_matrix = np.float32(
np.random.uniform(
-0.05, 0.05,
[self._channels, self._shape1[2], self._shape2[2]]
)
)
for channel_index in range(self._channels):
np.fill_diagonal(interaction_matrix[channel_index], 0.1)
self.interaction_matrix = self.add_weight(
name='interaction_matrix',
shape=(self._channels, self._shape1[2], self._shape2[2]),
initializer=constant(interaction_matrix),
trainable=True
)
else:
self.interaction_matrix = self.add_weight(
name='interaction_matrix',
shape=(self._channels, self._shape1[2], self._shape2[2]),
initializer='uniform',
trainable=True
)
super(MatchingTensorLayer, self).build(input_shape)
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of MatchingTensorLayer.
:param inputs: two input tensors.
"""
x1 = inputs[0]
x2 = inputs[1]
# Normalize x1 and x2
if self._normalize:
x1 = K.l2_normalize(x1, axis=2)
x2 = K.l2_normalize(x2, axis=2)
# b = batch size
# l = length of `x1`
# r = length of `x2`
# d, e = embedding size
# c = number of channels
# output = [b, c, l, r]
output = tf.einsum(
'bld,cde,bre->bclr',
x1, self.interaction_matrix, x2
)
return output
def compute_output_shape(self, input_shape: list) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors,
for MatchingTensorLayer we need two input tensors.
"""
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingTensorLayer` layer should be called '
'on a list of 2 inputs.')
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if len(shape1) != 3 or len(shape2) != 3:
raise ValueError('A `MatchingTensorLayer` layer should be called '
'on 2 inputs with 3 dimensions.')
if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
raise ValueError('A `MatchingTensorLayer` layer should be called '
'on 2 inputs with same 0,2 dimensions.')
output_shape = [shape1[0], self._channels, shape1[1], shape2[1]]
return tuple(output_shape)
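# Shape sketch (not part of the original module): the einsum 'bld,cde,bre->bclr'
# used in `call` maps inputs of shape [batch, left_len, dim] and
# [batch, right_len, dim] to an interaction tensor of shape
# [batch, channels, left_len, right_len]. A plain numpy check of that contraction:
def _example_matching_tensor_shapes():
    batch, left_len, right_len, dim, channels = 5, 3, 2, 10, 4
    x1 = np.random.uniform(size=(batch, left_len, dim)).astype('float32')
    x2 = np.random.uniform(size=(batch, right_len, dim)).astype('float32')
    weights = np.random.uniform(size=(channels, dim, dim)).astype('float32')
    output = np.einsum('bld,cde,bre->bclr', x1, weights, x2)
    assert output.shape == (batch, channels, left_len, right_len)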
|
import logging
import threading
import time
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
DOMAIN = "raspihats"
CONF_I2C_HATS = "i2c_hats"
CONF_BOARD = "board"
CONF_CHANNELS = "channels"
CONF_INDEX = "index"
CONF_INVERT_LOGIC = "invert_logic"
CONF_INITIAL_STATE = "initial_state"
I2C_HAT_NAMES = [
"Di16",
"Rly10",
"Di6Rly6",
"DI16ac",
"DQ10rly",
"DQ16oc",
"DI6acDQ6rly",
]
I2C_HATS_MANAGER = "I2CH_MNG"
def setup(hass, config):
"""Set up the raspihats component."""
hass.data[I2C_HATS_MANAGER] = I2CHatsManager()
def start_i2c_hats_keep_alive(event):
"""Start I2C-HATs keep alive."""
hass.data[I2C_HATS_MANAGER].start_keep_alive()
def stop_i2c_hats_keep_alive(event):
"""Stop I2C-HATs keep alive."""
hass.data[I2C_HATS_MANAGER].stop_keep_alive()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_i2c_hats_keep_alive)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_i2c_hats_keep_alive)
return True
def log_message(source, *parts):
"""Build log message."""
message = source.__class__.__name__
for part in parts:
message += f": {part!s}"
return message
class I2CHatsException(Exception):
"""I2C-HATs exception."""
class I2CHatsDIScanner:
"""Scan Digital Inputs and fire callbacks."""
_DIGITAL_INPUTS = "di"
_OLD_VALUE = "old_value"
_CALLBACKS = "callbacks"
def setup(self, i2c_hat):
"""Set up the I2C-HAT instance for digital inputs scanner."""
if hasattr(i2c_hat, self._DIGITAL_INPUTS):
digital_inputs = getattr(i2c_hat, self._DIGITAL_INPUTS)
old_value = None
# Add old value attribute
setattr(digital_inputs, self._OLD_VALUE, old_value)
# Add callbacks dict attribute {channel: callback}
setattr(digital_inputs, self._CALLBACKS, {})
def register_callback(self, i2c_hat, channel, callback):
"""Register edge callback."""
if hasattr(i2c_hat, self._DIGITAL_INPUTS):
digital_inputs = getattr(i2c_hat, self._DIGITAL_INPUTS)
callbacks = getattr(digital_inputs, self._CALLBACKS)
callbacks[channel] = callback
setattr(digital_inputs, self._CALLBACKS, callbacks)
def scan(self, i2c_hat):
"""Scan I2C-HATs digital inputs and fire callbacks."""
if hasattr(i2c_hat, self._DIGITAL_INPUTS):
digital_inputs = getattr(i2c_hat, self._DIGITAL_INPUTS)
callbacks = getattr(digital_inputs, self._CALLBACKS)
old_value = getattr(digital_inputs, self._OLD_VALUE)
value = digital_inputs.value # i2c data transfer
if old_value is not None and value != old_value:
for channel in range(0, len(digital_inputs.channels)):
state = (value >> channel) & 0x01
old_state = (old_value >> channel) & 0x01
if state != old_state:
callback = callbacks.get(channel)
if callback is not None:
callback(state)
setattr(digital_inputs, self._OLD_VALUE, value)
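# Illustrative sketch (not part of the original component): how the scanner
# above turns the packed digital-input word into per-channel edges. With an
# old value of 0b0101 and a new value of 0b0110, channel 0 falls (1 -> 0) and
# channel 1 rises (0 -> 1); channels 2 and 3 are unchanged.
def _example_edge_detection(old_value=0b0101, value=0b0110, channel_count=4):
    edges = {}
    for channel in range(channel_count):
        state = (value >> channel) & 0x01
        old_state = (old_value >> channel) & 0x01
        if state != old_state:
            edges[channel] = state
    return edges  # {0: 0, 1: 1} for the defaults above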
class I2CHatsManager(threading.Thread):
"""Manages all I2C-HATs instances."""
_EXCEPTION = "exception"
_CALLBACKS = "callbacks"
def __init__(self):
"""Init I2C-HATs Manager."""
threading.Thread.__init__(self)
self._lock = threading.Lock()
self._i2c_hats = {}
self._run = False
self._di_scanner = I2CHatsDIScanner()
def register_board(self, board, address):
"""Register I2C-HAT."""
with self._lock:
i2c_hat = self._i2c_hats.get(address)
if i2c_hat is None:
# This is a Pi module and can't be installed in CI without
# breaking the build.
# pylint: disable=import-outside-toplevel,import-error
import raspihats.i2c_hats as module
constructor = getattr(module, board)
i2c_hat = constructor(address)
setattr(i2c_hat, self._CALLBACKS, {})
# Setting exception attribute will trigger online callbacks
# when keep alive thread starts.
setattr(i2c_hat, self._EXCEPTION, None)
self._di_scanner.setup(i2c_hat)
self._i2c_hats[address] = i2c_hat
status_word = i2c_hat.status # read status_word to reset bits
_LOGGER.info(log_message(self, i2c_hat, "registered", status_word))
def run(self):
"""Keep alive for I2C-HATs."""
# This is a Pi module and can't be installed in CI without
# breaking the build.
# pylint: disable=import-outside-toplevel,import-error
from raspihats.i2c_hats import ResponseException
_LOGGER.info(log_message(self, "starting"))
while self._run:
with self._lock:
for i2c_hat in list(self._i2c_hats.values()):
try:
self._di_scanner.scan(i2c_hat)
self._read_status(i2c_hat)
if hasattr(i2c_hat, self._EXCEPTION):
if getattr(i2c_hat, self._EXCEPTION) is not None:
_LOGGER.warning(
log_message(self, i2c_hat, "online again")
)
delattr(i2c_hat, self._EXCEPTION)
# trigger online callbacks
callbacks = getattr(i2c_hat, self._CALLBACKS)
for callback in list(callbacks.values()):
callback()
except ResponseException as ex:
if not hasattr(i2c_hat, self._EXCEPTION):
_LOGGER.error(log_message(self, i2c_hat, ex))
setattr(i2c_hat, self._EXCEPTION, ex)
time.sleep(0.05)
_LOGGER.info(log_message(self, "exiting"))
def _read_status(self, i2c_hat):
"""Read I2C-HATs status."""
status_word = i2c_hat.status
if status_word.value != 0x00:
_LOGGER.error(log_message(self, i2c_hat, status_word))
def start_keep_alive(self):
"""Start keep alive mechanism."""
self._run = True
threading.Thread.start(self)
def stop_keep_alive(self):
"""Stop keep alive mechanism."""
self._run = False
self.join()
def register_di_callback(self, address, channel, callback):
"""Register I2C-HAT digital input edge callback."""
with self._lock:
i2c_hat = self._i2c_hats[address]
self._di_scanner.register_callback(i2c_hat, channel, callback)
def register_online_callback(self, address, channel, callback):
"""Register I2C-HAT online callback."""
with self._lock:
i2c_hat = self._i2c_hats[address]
callbacks = getattr(i2c_hat, self._CALLBACKS)
callbacks[channel] = callback
setattr(i2c_hat, self._CALLBACKS, callbacks)
def read_di(self, address, channel):
"""Read a value from a I2C-HAT digital input."""
# This is a Pi module and can't be installed in CI without
# breaking the build.
# pylint: disable=import-outside-toplevel,import-error
from raspihats.i2c_hats import ResponseException
with self._lock:
i2c_hat = self._i2c_hats[address]
try:
value = i2c_hat.di.value
return (value >> channel) & 0x01
except ResponseException as ex:
raise I2CHatsException(str(ex)) from ex
def write_dq(self, address, channel, value):
"""Write a value to a I2C-HAT digital output."""
# This is a Pi module and can't be installed in CI without
# breaking the build.
# pylint: disable=import-outside-toplevel,import-error
from raspihats.i2c_hats import ResponseException
with self._lock:
i2c_hat = self._i2c_hats[address]
try:
i2c_hat.dq.channels[channel] = value
except ResponseException as ex:
raise I2CHatsException(str(ex)) from ex
def read_dq(self, address, channel):
"""Read a value from a I2C-HAT digital output."""
# This is a Pi module and can't be installed in CI without
# breaking the build.
# pylint: disable=import-outside-toplevel,import-error
from raspihats.i2c_hats import ResponseException
with self._lock:
i2c_hat = self._i2c_hats[address]
try:
return i2c_hat.dq.channels[channel]
except ResponseException as ex:
raise I2CHatsException(str(ex)) from ex
|
import argparse
import logging
import sys
import traceback
from contextlib import contextmanager
from typing import Generator
from kubernetes.client import V1Deployment
from kubernetes.client import V1StatefulSet
from paasta_tools.kubernetes.application.tools import Application # type: ignore
from paasta_tools.kubernetes.application.tools import (
list_namespaced_applications,
) # type: ignore
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.utils import _log
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
APPLICATION_TYPES = [V1StatefulSet, V1Deployment]
class DontKillEverythingError(Exception):
pass
@contextmanager
def alert_state_change(application: Application, soa_dir: str) -> Generator:
service = application.kube_deployment.service
instance = application.kube_deployment.instance
cluster = load_system_paasta_config().get_cluster()
try:
yield
log_line = (
"Deleted stale Kubernetes apps that looks lost: %s"
% application.item.metadata.name
)
_log(
service=service,
component="deploy",
level="event",
cluster=cluster,
instance=instance,
line=log_line,
)
except Exception:
loglines = ["Exception raised during cleanup of service %s:" % application]
loglines.extend(traceback.format_exc().rstrip().split("\n"))
for logline in loglines:
_log(
service=service,
component="deploy",
level="debug",
cluster=cluster,
instance=instance,
line=logline,
)
raise
def cleanup_unused_apps(
soa_dir: str, kill_threshold: float = 0.5, force: bool = False
) -> None:
"""Clean up old or invalid jobs/apps from kubernetes. Retrieves
both a list of apps currently in kubernetes and a list of valid
app ids in order to determine what to kill.
:param soa_dir: The SOA config directory to read from
:param kill_threshold: The decimal fraction of apps we think is
sane to kill when this job runs.
:param force: Force the cleanup if we are above the kill_threshold"""
log.info("Creating KubeClient")
kube_client = KubeClient()
log.info("Loading running Kubernetes apps")
applications = list_namespaced_applications(
kube_client, "paasta", APPLICATION_TYPES
)
log.info("Retrieving valid apps from yelpsoa_configs")
valid_services = set(
get_services_for_cluster(instance_type="kubernetes", soa_dir=soa_dir)
)
log.info("Determining apps to be killed")
applications_to_kill = [
        application
        for application in applications
        if (application.kube_deployment.service, application.kube_deployment.instance)
not in valid_services
]
log.debug("Running apps: %s" % applications)
log.debug("Valid apps: %s" % valid_services)
log.debug("Terminating: %s" % applications_to_kill)
if applications_to_kill:
above_kill_threshold = float(len(applications_to_kill)) / float(
len(applications)
) > float(kill_threshold)
if above_kill_threshold and not force:
log.critical(
"Paasta was about to kill more than %s of the running services, this "
"is probably a BAD mistake!, run again with --force if you "
"really need to destroy everything" % kill_threshold
)
raise DontKillEverythingError
    for application in applications_to_kill:
        with alert_state_change(application, soa_dir):
            application.deep_delete(kube_client)
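# Threshold sketch (illustrative, not part of the original script): the guard
# above compares the fraction of apps selected for deletion against
# kill_threshold. With 10 running apps, 6 of them stale, 6 / 10 = 0.6 exceeds
# the default threshold of 0.5, so DontKillEverythingError is raised unless
# --force is passed.
def _example_above_kill_threshold(running=10, stale=6, kill_threshold=0.5):
    return float(stale) / float(running) > float(kill_threshold)  # True for the defaults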
def parse_args(argv):
parser = argparse.ArgumentParser(description="Cleans up stale kubernetes jobs.")
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
"-t",
"--kill-threshold",
dest="kill_threshold",
default=0.5,
help="The decimal fraction of apps we think is "
"sane to kill when this job runs",
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
parser.add_argument(
"-f",
"--force",
action="store_true",
dest="force",
default=False,
help="Force the cleanup if we are above the " "kill_threshold",
)
return parser.parse_args(argv)
def main(argv=None) -> None:
args = parse_args(argv)
soa_dir = args.soa_dir
kill_threshold = args.kill_threshold
force = args.force
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
try:
cleanup_unused_apps(soa_dir, kill_threshold=kill_threshold, force=force)
except DontKillEverythingError:
sys.exit(1)
if __name__ == "__main__":
main()
|
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytz
from qstrader.broker.portfolio.position_handler import PositionHandler
from qstrader.broker.transaction.transaction import Transaction
def test_transact_position_new_position():
"""
Tests the 'transact_position' method for a transaction
with a brand new asset and checks that all objects are
set correctly.
"""
# Create the PositionHandler, Transaction and
# carry out a transaction
ph = PositionHandler()
asset = 'EQ:AMZN'
transaction = Transaction(
asset,
quantity=100,
dt=pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC),
price=960.0,
order_id=123,
commission=26.83
)
ph.transact_position(transaction)
# Check that the position object is set correctly
pos = ph.positions[asset]
assert pos.buy_quantity == 100
assert pos.sell_quantity == 0
assert pos.net_quantity == 100
assert pos.direction == 1
assert pos.avg_price == 960.2683000000001
def test_transact_position_current_position():
"""
Tests the 'transact_position' method for a transaction
with a current asset and checks that all objects are
set correctly.
"""
# Create the PositionHandler, Transaction and
# carry out a transaction
ph = PositionHandler()
asset = 'EQ:AMZN'
dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)
transaction_long = Transaction(
asset,
quantity=100,
dt=dt,
price=960.0,
order_id=123,
commission=26.83
)
ph.transact_position(transaction_long)
transaction_long_again = Transaction(
asset,
quantity=200,
dt=new_dt,
price=990.0,
order_id=234,
commission=18.53
)
ph.transact_position(transaction_long_again)
# Check that the position object is set correctly
pos = ph.positions[asset]
assert pos.buy_quantity == 300
assert pos.sell_quantity == 0
assert pos.net_quantity == 300
assert pos.direction == 1
assert np.isclose(pos.avg_price, 980.1512)
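# Worked numbers for the assertions above (illustrative only, assuming the
# position folds commissions into its cost basis):
#   first fill : (100 * 960.0 + 26.83) / 100 = 960.2683
#   second fill: (100 * 960.0 + 26.83 + 200 * 990.0 + 18.53) / 300
#              = 294045.36 / 300 = 980.1512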
def test_transact_position_quantity_zero():
"""
Tests the 'transact_position' method for a transaction
with net zero quantity after the transaction to ensure
deletion of the position.
"""
# Create the PositionHandler, Transaction and
# carry out a transaction
ph = PositionHandler()
asset = 'EQ:AMZN'
dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)
transaction_long = Transaction(
asset,
quantity=100,
dt=dt,
price=960.0,
order_id=123, commission=26.83
)
ph.transact_position(transaction_long)
transaction_close = Transaction(
asset,
quantity=-100,
dt=new_dt,
price=980.0,
order_id=234,
commission=18.53
)
ph.transact_position(transaction_close)
# Go long and then close, then check that the
# positions OrderedDict is empty
assert ph.positions == OrderedDict()
def test_total_values_for_no_transactions():
"""
Tests 'total_market_value', 'total_unrealised_pnl',
'total_realised_pnl' and 'total_pnl' for the case
of no transactions being carried out.
"""
ph = PositionHandler()
assert ph.total_market_value() == 0.0
assert ph.total_unrealised_pnl() == 0.0
assert ph.total_realised_pnl() == 0.0
assert ph.total_pnl() == 0.0
def test_total_values_for_two_separate_transactions():
"""
Tests 'total_market_value', 'total_unrealised_pnl',
'total_realised_pnl' and 'total_pnl' for single
transactions in two separate assets.
"""
ph = PositionHandler()
# Asset 1
asset1 = 'EQ:AMZN'
dt1 = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
trans_pos_1 = Transaction(
asset1,
quantity=75,
dt=dt1,
price=483.45,
order_id=1,
commission=15.97
)
ph.transact_position(trans_pos_1)
# Asset 2
asset2 = 'EQ:MSFT'
dt2 = pd.Timestamp('2015-05-07 15:00:00', tz=pytz.UTC)
trans_pos_2 = Transaction(
asset2,
quantity=250,
dt=dt2,
price=142.58,
order_id=2,
commission=8.35
)
ph.transact_position(trans_pos_2)
# Check all total values
assert ph.total_market_value() == 71903.75
assert np.isclose(ph.total_unrealised_pnl(), -24.31999999999971)
assert ph.total_realised_pnl() == 0.0
assert np.isclose(ph.total_pnl(), -24.31999999999971)
|
import os
import sys
import shutil
import hashlib
import warnings
import subprocess
import importlib.util
from docutils.parsers.rst import Directive
from docutils import nodes
from flexx import app
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(THIS_DIR))
HTML_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '_build', 'html'))
SIMPLE_CODE_T1 = """
from flexx import app, ui
class App(ui.Widget):
def init(self):
""" # mind the indentation
SIMPLE_CODE_T2 = """
from flexx import flx
class App(flx.Widget):
def init(self):
""" # mind the indentation
all_examples = []
class uiexample(nodes.raw): pass
def create_ui_example(filename, to_root, height=300, source=None):
""" Given a filename, export the containing app to HTML, return
generated HTML. Needs to be done via filename, not direct code, so
that PScript can obtain source.
"""
code = open(filename, 'rb').read().decode()
fname = os.path.split(filename)[1]
filename_parts = 'examples', fname[:-3] + '.html'
filename_abs = os.path.join(HTML_DIR, *filename_parts)
filename_rel = to_root + '/' + '/'.join(filename_parts)
# Import - mod_name must be unique, because JS modules match Py modules
try:
mod_name = "app_" + fname[:-3]
if sys.version_info >= (3, 5):
spec = importlib.util.spec_from_file_location(mod_name, filename)
m = importlib.util.module_from_spec(spec)
sys.modules[mod_name] = m # Flexx needs to be able to access the module
spec.loader.exec_module(m)
else: # http://stackoverflow.com/a/67692/2271927
from importlib.machinery import SourceFileLoader
m = SourceFileLoader(mod_name, filename).load_module()
sys.modules[mod_name] = m
except Exception as err:
err_text = str(err)
msg = 'Example not generated. <pre>%s</pre>' % err_text
if os.environ.get('READTHEDOCS', False):
            msg = 'This example is not built on read-the-docs. <pre>%s</pre>' % err_text
open(filename_abs, 'wt', encoding='utf-8').write(msg)
warnings.warn('Could not import ui example in %s: %s' % (source or filename, err_text))
return get_html(filename_rel, 60)
# Get class name
line1 = code.splitlines()[0]
class_name = None
if 'class App(' in code:
class_name = 'App'
elif 'class MyApp' in code:
class_name = 'MyApp'
elif 'class Example' in code:
class_name = 'Example'
elif line1.startswith('# doc-export:'):
class_name = line1.split(':', 1)[1].strip()
#
if class_name:
assert class_name.isidentifier()
else:
msg = 'Could not determine app widget class in:<pre>%s</pre>' % code
warnings.warn(msg)
open(filename_abs, 'wt', encoding='utf-8').write(msg)
return get_html(filename_rel, height)
# Export
try:
app.export(m.__dict__[class_name], filename_abs, link=2, overwrite=False)
except Exception as err:
err_text = str(err)
msg = 'Example not generated. <pre>%s</pre>' % err_text
open(filename_abs, 'wt', encoding='utf-8').write(msg.replace('\\n', '<br />'))
print('==========\n%s\n==========' % code)
print('ERROR: Could not export ui example: %s in %s\nSee code above.' % (err_text, fname))
raise err
all_examples.append((class_name, mod_name, filename_parts[-1]))
return get_html(filename_rel, height)
def get_html(filename_rel, height):
""" Get the html to embed the given page into another page using an iframe.
"""
# Styles
astyle = 'font-size:small; float:right;'
dstyle = ('width: 500px; height: %ipx; align: center; resize:both; overflow: hidden; '
'box-shadow: 5px 5px 5px #777; padding: 4px;')
istyle = 'width: 100%; height: 100%; border: 2px solid #094;'
# Show app in iframe, wrapped in a resizable div
html = ''
html += "<a target='new' href='%s' style='%s'>open in new tab</a>" % (filename_rel, astyle)
html += "<div style='%s'>" % dstyle % height
html += "<iframe src='%s' style='%s'>iframe not supported</iframe>" % (filename_rel, istyle)
html += "</div>"
return html
def visit_uiexample_html(self, node):
global should_export_flexx_deps
# Fix for rtd
if not hasattr(node, 'code'):
return
# Get code
code = ori_code = node.code.strip() + '\n'
# Is this a simple example?
if 'import' not in code:
if 'flx.' in code:
code = SIMPLE_CODE_T2 + '\n '.join([line for line in code.splitlines()])
else:
code = SIMPLE_CODE_T1 + '\n '.join([line for line in code.splitlines()])
# Get id and filename
this_id = hashlib.md5(code.encode('utf-8')).hexdigest()
fname = 'example%s.html' % this_id
filename_py = os.path.join(HTML_DIR, 'examples', 'example%s.py' % this_id)
# Write Python file
with open(filename_py, 'wb') as f:
f.write(code.encode())
# Get html file
html = create_ui_example(filename_py, '..', node.height, source=node.source)
self.body.append(html + '<br />')
def depart_uiexample_html(self, node):
pass
class UIExampleDirective(Directive):
has_content = True
def run(self):
        # Get code and extract height
code = '\n'.join(self.content)
try:
height = int(self.content[0])
except Exception:
height = 300
else:
code = code.split('\n', 1)[1].strip()
# Code block
literal = nodes.literal_block(code, code)
literal['language'] = 'python'
literal['linenos'] = False
# iframe
iframe = uiexample('')
iframe.code = code
iframe.height = height
        return [literal, iframe]
def setup(Sphynx):
Sphynx.add_node(uiexample, html=(visit_uiexample_html, depart_uiexample_html))
Sphynx.add_directive('uiexample', UIExampleDirective)
Sphynx.connect('build-finished', finish)
examples_dir = os.path.join(HTML_DIR, 'examples')
if os.path.isdir(examples_dir):
shutil.rmtree(examples_dir) # because we export with overwrite==False
os.makedirs(examples_dir)
def finish(Sphynx, *args):
# Write overview page that contains *all* examples
parts = []
for class_name, mod_name, fname in all_examples:
parts.append('<br /><h3>%s in %s</h3>' % (class_name, mod_name))
parts.append(get_html('examples/' + fname, 300))
parts.insert(0, '<!DOCTYPE html><html><body>This page may take a while to load ... <br />')
parts.append('</body></html>')
code = '\n'.join(parts)
with open(os.path.join(HTML_DIR, 'examples', 'all_examples.html'), 'wb') as file:
file.write(code.encode())
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.http.response import HttpResponseServerError
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from weblate.lang.models import Language
from weblate.trans.forms import ContextForm, MatrixLanguageForm
from weblate.trans.models import Unit
from weblate.trans.util import redirect_next, render
from weblate.utils import messages
from weblate.utils.views import get_component, show_form_errors
@require_POST
@login_required
def edit_context(request, pk):
unit = get_object_or_404(Unit, pk=pk)
if not unit.is_source:
raise Http404("Non source unit!")
if not request.user.has_perm("source.edit", unit.translation.component):
raise PermissionDenied()
form = ContextForm(request.POST, instance=unit, user=request.user)
if form.is_valid():
form.save()
else:
messages.error(request, _("Failed to change a context!"))
show_form_errors(request, form)
return redirect_next(request.POST.get("next"), unit.get_absolute_url())
@login_required
def matrix(request, project, component):
"""Matrix view of all strings."""
obj = get_component(request, project, component)
show = False
languages = None
language_codes = None
if "lang" in request.GET:
form = MatrixLanguageForm(obj, request.GET)
show = form.is_valid()
else:
form = MatrixLanguageForm(obj)
if show:
languages = Language.objects.filter(code__in=form.cleaned_data["lang"]).order()
language_codes = ",".join(languages.values_list("code", flat=True))
return render(
request,
"matrix.html",
{
"object": obj,
"project": obj.project,
"languages": languages,
"language_codes": language_codes,
"languages_form": form,
},
)
@login_required
def matrix_load(request, project, component):
"""Backend for matrix view of all strings."""
obj = get_component(request, project, component)
try:
offset = int(request.GET.get("offset", ""))
except ValueError:
return HttpResponseServerError("Missing offset")
language_codes = request.GET.get("lang")
if not language_codes or offset is None:
return HttpResponseServerError("Missing lang")
# Can not use filter to keep ordering
translations = [
get_object_or_404(obj.translation_set, language__code=lang)
for lang in language_codes.split(",")
]
data = []
source_units = obj.source_translation.unit_set.order()[offset : offset + 20]
source_ids = [unit.pk for unit in source_units]
translated_units = [
{
unit.source_unit_id: unit
for unit in translation.unit_set.order().filter(source_unit__in=source_ids)
}
for translation in translations
]
for unit in source_units:
units = []
# Avoid need to fetch source unit again
unit.source_unit = unit
for translation in translated_units:
if unit.pk in translation:
# Avoid need to fetch source unit again
translation[unit.pk].source_unit = unit
units.append(translation[unit.pk])
else:
units.append(None)
data.append((unit, units))
return render(
request,
"matrix-table.html",
{
"object": obj,
"data": data,
"last": translations[0].unit_set.count() <= offset + 20,
},
)
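# Pagination note (illustrative, not part of the original view): matrix_load
# reads source strings in pages of 20 starting at ?offset=. With, say, 45
# source strings, offsets 0, 20 and 40 are requested in turn, and "last" is
# set once the first translation's unit count fits within offset + 20
# (45 <= 40 + 20 on the third page).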
|
import csv
import datetime
import numbers
import os
import pkg_resources
import numpy as np
import pandas as pd
import scipy
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils import column_or_1d
from auto_ml._version import __version__ as auto_ml_version
def is_linear_model(model_names):
linear_models = set(['RANSACRegressor', 'LinearRegression', 'Ridge', 'Lasso', 'ElasticNet', 'LassoLars', 'OrthogonalMatchingPursuit', 'BayesianRidge', 'ARDRegression', 'SGDRegressor', 'PassiveAggressiveRegressor', 'LogisticRegression', 'RidgeClassifier', 'SGDClassifier', 'Perceptron', 'PassiveAggressiveClassifier'])
if len(linear_models & (set(model_names))) > 0:
return True
else:
return False
def write_gs_param_results_to_file(trained_gs, most_recent_filename):
timestamp_time = datetime.datetime.now()
write_most_recent_gs_result_to_file(trained_gs, most_recent_filename, timestamp_time)
grid_scores = trained_gs.grid_scores_
scorer = trained_gs.scorer_
best_score = trained_gs.best_score_
file_name = 'pipeline_grid_search_results.csv'
write_header = False
if not os.path.isfile(file_name):
write_header = True
with open(file_name, 'a') as results_file:
writer = csv.writer(results_file, dialect='excel')
if write_header:
writer.writerow(['timestamp', 'scorer', 'best_score', 'all_grid_scores'])
writer.writerow([timestamp_time, scorer, best_score, grid_scores])
def write_most_recent_gs_result_to_file(trained_gs, most_recent_filename, timestamp):
timestamp_time = timestamp
grid_scores = trained_gs.grid_scores_
scorer = trained_gs.scorer_
best_score = trained_gs.best_score_
file_name = most_recent_filename
write_header = False
make_header = False
if not os.path.isfile(most_recent_filename):
header_row = ['timestamp', 'scorer', 'best_score', 'cv_mean', 'cv_all']
write_header = True
make_header = True
rows_to_write = []
for score in grid_scores:
row = [timestamp_time, scorer, best_score, score[1], score[2]]
for k, v in score[0].items():
if make_header:
header_row.append(k)
row.append(v)
rows_to_write.append(row)
make_header = False
with open(file_name, 'a') as results_file:
writer = csv.writer(results_file, dialect='excel')
if write_header:
writer.writerow(header_row)
for row in rows_to_write:
writer.writerow(row)
def safely_drop_columns(df, cols_to_drop):
safe_cols_to_drop = []
for col in cols_to_drop:
if col in df.columns:
safe_cols_to_drop.append(col)
df.drop(safe_cols_to_drop, axis=1, inplace=True)
return df
def drop_duplicate_columns(df):
count_cols_to_drop = 0
cols = list(df.columns)
for idx, item in enumerate(df.columns):
if item in df.columns[:idx]:
print('#####################################################')
print('We found a duplicate column, and will be removing it')
print('If you intended to send in two different pieces of information, please make sure they have different column names')
print('Here is the duplicate column:')
print(item)
print('#####################################################')
cols[idx] = 'DROPME'
count_cols_to_drop += 1
if count_cols_to_drop > 0:
df.columns = cols
df.drop('DROPME', axis=1, inplace=True)
return df
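# Illustrative sketch (not part of the original module): duplicate-column
# handling on a tiny frame; the column names are hypothetical.
def _example_drop_duplicate_columns():
    df = pd.DataFrame([[1, 2, 3]], columns=['age', 'age', 'income'])
    cleaned = drop_duplicate_columns(df)
    return list(cleaned.columns)  # ['age', 'income']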
def get_boston_dataset():
boston = load_boston()
df_boston = pd.DataFrame(boston.data)
df_boston.columns = boston.feature_names
df_boston['MEDV'] = boston['target']
df_boston_train, df_boston_test = train_test_split(df_boston, test_size=0.2, random_state=42)
return df_boston_train, df_boston_test
bad_vals_as_strings = set([str(float('nan')), str(float('inf')), str(float('-inf')), 'None', 'none', 'NaN', 'NAN', 'nan', 'NULL', 'null', '', 'inf', '-inf'])
def delete_rows_csr(mat, indices):
"""
    Remove the rows denoted by ``indices`` from the CSR sparse matrix ``mat``.
"""
if not isinstance(mat, scipy.sparse.csr_matrix):
raise ValueError("works only for CSR format -- use .tocsr() first")
indices = list(indices)
mask = np.ones(mat.shape[0], dtype=bool)
mask[indices] = False
return mat[mask]
def drop_missing_y_vals(df, y, output_column=None):
y = list(y)
indices_to_drop = []
indices_to_keep = []
for idx, val in enumerate(y):
if not isinstance(val, str):
if isinstance(val, numbers.Number) or val is None or isinstance(val, np.generic):
val = str(val)
else:
val = val.encode('utf-8').decode('utf-8')
if val in bad_vals_as_strings:
indices_to_drop.append(idx)
if len(indices_to_drop) > 0:
set_of_indices_to_drop = set(indices_to_drop)
print('We encountered a number of missing values for this output column')
if output_column is not None:
print(output_column)
print('And here is the number of missing (nan, None, etc.) values for this column:')
print(len(indices_to_drop))
print('Here are some example missing values')
for idx, df_idx in enumerate(indices_to_drop):
if idx >= 5:
break
print(y[df_idx])
print('We will remove these values, and continue with training on the cleaned dataset')
support_mask = [True if idx not in set_of_indices_to_drop else False for idx in range(df.shape[0]) ]
if isinstance(df, pd.DataFrame):
df.drop(df.index[indices_to_drop], axis=0, inplace=True)
# df = df.loc[support_mask,]
elif scipy.sparse.issparse(df):
df = delete_rows_csr(df, indices_to_drop)
elif isinstance(df, np.ndarray):
df = np.delete(df, indices_to_drop, axis=0)
y = [val for idx, val in enumerate(y) if idx not in set_of_indices_to_drop]
return df, y
class CustomLabelEncoder():
def __init__(self):
self.label_map = {}
def fit(self, list_of_labels):
if not isinstance(list_of_labels, pd.Series):
list_of_labels = pd.Series(list_of_labels)
unique_labels = list_of_labels.unique()
try:
unique_labels = sorted(unique_labels)
except TypeError:
unique_labels = unique_labels
for idx, val in enumerate(unique_labels):
self.label_map[val] = idx
return self
def transform(self, in_vals):
return_vals = []
for val in in_vals:
if not isinstance(val, str):
if isinstance(val, float) or isinstance(val, int) or val is None or isinstance(val, np.generic):
val = str(val)
else:
val = val.encode('utf-8').decode('utf-8')
if val not in self.label_map:
self.label_map[val] = len(self.label_map.keys())
return_vals.append(self.label_map[val])
if len(in_vals) == 1:
return return_vals[0]
else:
return return_vals
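# Illustrative sketch (not part of the original module): CustomLabelEncoder
# assigns indices in sorted order at fit time and appends unseen labels at
# transform time. The label values are hypothetical.
def _example_custom_label_encoder():
    encoder = CustomLabelEncoder()
    encoder.fit(['dog', 'cat', 'cat', 'bird'])     # label_map: {'bird': 0, 'cat': 1, 'dog': 2}
    known = encoder.transform(['cat', 'dog'])      # [1, 2]
    with_new = encoder.transform(['cat', 'fish'])  # [1, 3] - 'fish' is added on the fly
    return known, with_new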
class ExtendedLabelEncoder(LabelEncoder):
def __init__(self):
super(self.__class__, self).__init__()
def transform(self, y):
y = column_or_1d(y, warn=True)
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
self.classes_ = np.hstack((self.classes_, diff))
return np.searchsorted(self.classes_, y)[0]
def get_versions():
libraries_to_check = ['dill', 'h5py', 'keras', 'lightgbm', 'numpy', 'pandas', 'pathos', 'python', 'scikit-learn', 'scipy', 'sklearn-deap2', 'tabulate', 'tensorflow', 'xgboost']
versions = {
'auto_ml': auto_ml_version
}
for lib in libraries_to_check:
try:
versions[lib] = pkg_resources.get_distribution(lib).version
except:
pass
return versions
class ExtendedPipeline(Pipeline):
def __init__(self, steps, keep_cat_features=False, name=None, training_features=None):
super(self.__class__, self).__init__(steps)
self.keep_cat_features = keep_cat_features
self.__versions__ = get_versions()
self.name = name
self.feature_importances_ = None
self.training_features = training_features
@if_delegate_has_method(delegate='_final_estimator')
def predict_uncertainty(self, X):
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_uncertainty(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def score_uncertainty(self, X):
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score_uncertainty(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform_only(self, X):
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].transform_only(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_intervals(self, X, return_type=None):
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_intervals(Xt, return_type=return_type)
def clean_params(params):
cleaned_params = {}
for k, v in params.items():
if k[:7] == 'model__':
cleaned_params[k[7:]] = v
return cleaned_params
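# Illustrative sketch (not part of the original module): clean_params strips
# the pipeline's 'model__' prefix so the values can be handed straight to the
# underlying estimator; other keys are dropped. Parameter names here are
# hypothetical.
def _example_clean_params():
    params = {'model__n_estimators': 200, 'model__max_depth': 4, 'scaler__copy': True}
    return clean_params(params)  # {'n_estimators': 200, 'max_depth': 4}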
|
import logging
from emulated_roku import EmulatedRokuCommandHandler, EmulatedRokuServer
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import CoreState, EventOrigin
LOGGER = logging.getLogger(__package__)
EVENT_ROKU_COMMAND = "roku_command"
ATTR_COMMAND_TYPE = "type"
ATTR_SOURCE_NAME = "source_name"
ATTR_KEY = "key"
ATTR_APP_ID = "app_id"
ROKU_COMMAND_KEYDOWN = "keydown"
ROKU_COMMAND_KEYUP = "keyup"
ROKU_COMMAND_KEYPRESS = "keypress"
ROKU_COMMAND_LAUNCH = "launch"
class EmulatedRoku:
"""Manages an emulated_roku server."""
def __init__(
self,
hass,
name,
host_ip,
listen_port,
advertise_ip,
advertise_port,
upnp_bind_multicast,
):
"""Initialize the properties."""
self.hass = hass
self.roku_usn = name
self.host_ip = host_ip
self.listen_port = listen_port
self.advertise_port = advertise_port
self.advertise_ip = advertise_ip
self.bind_multicast = upnp_bind_multicast
self._api_server = None
self._unsub_start_listener = None
self._unsub_stop_listener = None
async def setup(self):
"""Start the emulated_roku server."""
class EventCommandHandler(EmulatedRokuCommandHandler):
"""emulated_roku command handler to turn commands into events."""
def __init__(self, hass):
self.hass = hass
def on_keydown(self, roku_usn, key):
"""Handle keydown event."""
self.hass.bus.async_fire(
EVENT_ROKU_COMMAND,
{
ATTR_SOURCE_NAME: roku_usn,
ATTR_COMMAND_TYPE: ROKU_COMMAND_KEYDOWN,
ATTR_KEY: key,
},
EventOrigin.local,
)
def on_keyup(self, roku_usn, key):
"""Handle keyup event."""
self.hass.bus.async_fire(
EVENT_ROKU_COMMAND,
{
ATTR_SOURCE_NAME: roku_usn,
ATTR_COMMAND_TYPE: ROKU_COMMAND_KEYUP,
ATTR_KEY: key,
},
EventOrigin.local,
)
def on_keypress(self, roku_usn, key):
"""Handle keypress event."""
self.hass.bus.async_fire(
EVENT_ROKU_COMMAND,
{
ATTR_SOURCE_NAME: roku_usn,
ATTR_COMMAND_TYPE: ROKU_COMMAND_KEYPRESS,
ATTR_KEY: key,
},
EventOrigin.local,
)
def launch(self, roku_usn, app_id):
"""Handle launch event."""
self.hass.bus.async_fire(
EVENT_ROKU_COMMAND,
{
ATTR_SOURCE_NAME: roku_usn,
ATTR_COMMAND_TYPE: ROKU_COMMAND_LAUNCH,
ATTR_APP_ID: app_id,
},
EventOrigin.local,
)
LOGGER.debug(
"Initializing emulated_roku %s on %s:%s",
self.roku_usn,
self.host_ip,
self.listen_port,
)
handler = EventCommandHandler(self.hass)
self._api_server = EmulatedRokuServer(
self.hass.loop,
handler,
self.roku_usn,
self.host_ip,
self.listen_port,
advertise_ip=self.advertise_ip,
advertise_port=self.advertise_port,
bind_multicast=self.bind_multicast,
)
async def emulated_roku_stop(event):
"""Wrap the call to emulated_roku.close."""
LOGGER.debug("Stopping emulated_roku %s", self.roku_usn)
self._unsub_stop_listener = None
await self._api_server.close()
async def emulated_roku_start(event):
"""Wrap the call to emulated_roku.start."""
try:
LOGGER.debug("Starting emulated_roku %s", self.roku_usn)
self._unsub_start_listener = None
await self._api_server.start()
except OSError:
LOGGER.exception(
"Failed to start Emulated Roku %s on %s:%s",
self.roku_usn,
self.host_ip,
self.listen_port,
)
# clean up inconsistent state on errors
await emulated_roku_stop(None)
else:
self._unsub_stop_listener = self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, emulated_roku_stop
)
# start immediately if already running
if self.hass.state == CoreState.running:
await emulated_roku_start(None)
else:
self._unsub_start_listener = self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, emulated_roku_start
)
return True
async def unload(self):
"""Unload the emulated_roku server."""
LOGGER.debug("Unloading emulated_roku %s", self.roku_usn)
if self._unsub_start_listener:
self._unsub_start_listener()
self._unsub_start_listener = None
if self._unsub_stop_listener:
self._unsub_stop_listener()
self._unsub_stop_listener = None
await self._api_server.close()
return True
|
import logging
import sys
import os.path
from gensim.corpora import dmlcorpus, MmCorpus
from gensim.similarities import MatrixSimilarity, SparseMatrixSimilarity
import gensim_build
# set to True to do everything EXCEPT actually writing out similar.xml files to disk.
# similar.xml files are NOT written if DRY_RUN is true.
DRY_RUN = False
# how many 'most similar' documents to store in each similar.xml?
MIN_SCORE = 0.0 # prune based on similarity score (all below MIN_SCORE are ignored)
MAX_SIMILAR = 10 # prune based on rank (at most MAX_SIMILAR are stored). set to 0 to store all of them (no limit).
# if there are no similar articles (after the pruning), do we still want to generate similar.xml?
SAVE_EMPTY = True
# xml template for similar articles
ARTICLE = """
<article weight="%(score)f">
<authors>
<author>%(author)s</author>
</authors>
<title>%(title)s</title>
<suffix>%(suffix)s</suffix>
<links>
<link source="%(source)s" id="%(intId)s" path="%(pathId)s"/>
</links>
</article>"""
# template for the whole similar.xml file (will be filled with multiple ARTICLE instances)
SIMILAR = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<related>%s
</related>
"""
def generateSimilar(corpus, index, method):
for docNo, topSims in enumerate(index): # for each document
# store similarities to the following file
outfile = os.path.join(corpus.articleDir(docNo), 'similar_%s.xml' % method)
articles = [] # collect similars in this list
for docNo2, score in topSims: # for each most similar article
if score > MIN_SCORE and docNo != docNo2:
source, (intId, pathId) = corpus.documents[docNo2]
meta = corpus.getMeta(docNo2)
suffix, author, title = '', meta.get('author', ''), meta.get('title', '')
articles.append(ARTICLE % locals()) # add the similar article to output
if len(articles) >= MAX_SIMILAR:
break
# now `articles` holds multiple strings in similar_*.xml format
if SAVE_EMPTY or articles:
output = ''.join(articles) # concat all similars to one string
if not DRY_RUN: # only open output files for writing if DRY_RUN is false
logging.info("generating %s (%i similars)", outfile, len(articles))
outfile = open(outfile, 'w')
outfile.write(SIMILAR % output) # add xml headers and print to file
outfile.close()
else:
logging.info("would be generating %s (%i similars):%s\n", outfile, len(articles), output)
else:
logging.debug("skipping %s (no similar found)", outfile)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logging.info("running %s", ' '.join(sys.argv))
program = os.path.basename(sys.argv[0])
# check and process input arguments
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
language = sys.argv[1]
method = sys.argv[2].strip().lower()
logging.info("loading corpus mappings")
config = dmlcorpus.DmlConfig('%s_%s' % (gensim_build.PREFIX, language),
resultDir=gensim_build.RESULT_DIR, acceptLangs=[language])
logging.info("loading word id mapping from %s", config.resultFile('wordids.txt'))
id2word = dmlcorpus.DmlCorpus.loadDictionary(config.resultFile('wordids.txt'))
logging.info("loaded %i word ids", len(id2word))
corpus = dmlcorpus.DmlCorpus.load(config.resultFile('.pkl'))
input = MmCorpus(config.resultFile('_%s.mm' % method))
assert len(input) == len(corpus), \
"corpus size mismatch (%i vs %i): run ./gensim_genmodel.py again" % (len(input), len(corpus))
# initialize structure for similarity queries
if method == 'lsi' or method == 'rp': # for these methods, use dense vectors
index = MatrixSimilarity(input, num_best=MAX_SIMILAR + 1, num_features=input.numTerms)
else:
index = SparseMatrixSimilarity(input, num_best=MAX_SIMILAR + 1)
index.normalize = False
generateSimilar(corpus, index, method)
logging.info("finished running %s", program)
|
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
# We just want to make sure these run, not necessarily make sure that they're super accurate (which takes more time, and is dataset dependent)
df_boston_train = df_boston_train.sample(frac=0.5)
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
# the random seed gets a score of -3.21 on python 3.5
# There's a ton of noise here, due to small sample sizes
lower_bound = -3.4
if model_name == 'DeepLearningRegressor':
lower_bound = -24
if model_name == 'LGBMRegressor':
lower_bound = -16
if model_name == 'GradientBoostingRegressor':
lower_bound = -5.1
if model_name == 'CatBoostRegressor':
lower_bound = -4.5
if model_name == 'XGBRegressor':
lower_bound = -4.8
assert lower_bound < test_score < -2.75
def getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
    except OSError:
        # Only deep learning models write a separate Keras file; ignore if it is absent.
        pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('predictions[0]')
print(predictions[0])
print('type(predictions)')
print(type(predictions))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -2.9
if model_name == 'DeepLearningRegressor':
lower_bound = -7.8
if model_name == 'LGBMRegressor':
lower_bound = -4.95
if model_name == 'XGBRegressor':
lower_bound = -3.4
if model_name == 'CatBoostRegressor':
lower_bound = -3.7
assert lower_bound < first_score < -2.7
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
    assert 0.1 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.7
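# Illustration only (added, not used by the tests above): the timing block in
# getting_single_predictions_regression reasons in per-prediction latency
# (roughly 1 ms each on the author's laptop, with headroom for slow CI boxes).
# The hypothetical helper below just makes that arithmetic explicit.
def _example_per_prediction_latency_ms(total_seconds, n_predictions=1000):
    """Convert a bulk timing measurement into milliseconds per prediction."""
    return total_seconds / n_predictions * 1000.0
# e.g. _example_per_prediction_latency_ms(0.8) -> 0.8 ms per prediction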
|
import urllib2
import json
import diamond.collector
class EventstoreProjectionsCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(
EventstoreProjectionsCollector, self).get_default_config_help(
)
config_help.update({
'path': "name of the metric in the metricpath",
'protocol': 'protocol used to connect to eventstore',
'hostname': 'hostname of the eventstore instance',
'route': 'route in eventstore for projections',
'port': 'tcp port where eventstore is listening',
'headers': 'Header variable if needed',
'replace_dollarsign':
'A value to replace a dollar sign ($) in projection names by',
'debug': 'Enable or disable debug mode',
})
return config_help
def get_default_config(self):
default_config = super(
EventstoreProjectionsCollector, self).get_default_config(
)
default_config.update({
'path': "eventstore",
'protocol': 'http://',
'hostname': 'localhost',
'route': '/projections/all-non-transient',
'port': 2113,
'headers': {'User-Agent': 'Diamond Eventstore metrics collector'},
'replace_dollarsign': '_',
'debug': False,
})
return default_config
def _json_to_flat_metrics(self, prefix, data):
for key, value in data.items():
if isinstance(value, dict):
for k, v in self._json_to_flat_metrics(
"%s.%s" % (prefix, key), value):
yield k, v
elif isinstance(value, basestring):
if value == "Running":
value = 1
yield ("%s.%s" % (prefix, key), value)
elif value == "Stopped":
value = 0
yield ("%s.%s" % (prefix, key), value)
else:
if self.config['debug']:
self.log.debug("ignoring string value = %s", value)
continue
else:
try:
int(value)
except ValueError:
self.log.debug("cast to int failed, value = %s", value)
finally:
yield ("%s.%s" % (prefix, key), value)
def collect(self):
eventstore_host = "%s%s:%s%s" % (
self.config['protocol'],
self.config['hostname'],
self.config['port'],
self.config['route']
)
req = urllib2.Request(eventstore_host, headers=self.config['headers'])
req.add_header('Content-type', 'application/json')
try:
resp = urllib2.urlopen(req)
except urllib2.URLError as e:
self.log.error("Can't open url %s. %s", eventstore_host, e)
else:
content = resp.read()
try:
json_dict = json.loads(content)
projections = json_dict['projections']
data = {}
for projection in projections:
if self.config['replace_dollarsign']:
name = projection["name"].replace(
'$',
self.config['replace_dollarsign']
)
else:
name = projection["name"]
data[name] = projection
except ValueError as e:
self.log.error("failed parsing JSON Object \
from %s. %s", eventstore_host, e)
else:
for metric_name, metric_value in self._json_to_flat_metrics(
"projections", data):
self.publish(metric_name, metric_value)
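# Illustration only (added, not part of the collector): _json_to_flat_metrics
# flattens the nested projections payload into dotted metric names and maps the
# status strings "Running"/"Stopped" to 1/0. A rough standalone rendition of
# the same idea (it omits the debug logging and the int-cast check):
def _example_flatten(prefix, data):
    for key, value in data.items():
        name = "%s.%s" % (prefix, key)
        if isinstance(value, dict):
            for pair in _example_flatten(name, value):
                yield pair
        elif value == "Running":
            yield (name, 1)
        elif value == "Stopped":
            yield (name, 0)
        elif not isinstance(value, basestring):
            # numbers pass through untouched; other strings are dropped
            yield (name, value)
# e.g. dict(_example_flatten("projections", {"p1": {"status": "Running", "progress": 100.0}}))
#      -> {"projections.p1.status": 1, "projections.p1.progress": 100.0}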
|
import os
import pandas as pd
import pytz
from qstrader.alpha_model.fixed_signals import FixedSignalsAlphaModel
from qstrader.asset.universe.static import StaticUniverse
from qstrader.trading.backtest import BacktestTradingSession
def test_backtest_sixty_forty(etf_filepath):
"""
Ensures that a full end-to-end weekly rebalanced backtested
trading session with fixed proportion weights produces the
correct rebalance orders as well as correctly calculated
market values after a single month's worth of daily
backtesting.
"""
os.environ['QSTRADER_CSV_DATA_DIR'] = etf_filepath
assets = ['EQ:ABC', 'EQ:DEF']
universe = StaticUniverse(assets)
signal_weights = {'EQ:ABC': 0.6, 'EQ:DEF': 0.4}
alpha_model = FixedSignalsAlphaModel(signal_weights)
start_dt = pd.Timestamp('2019-01-01 00:00:00', tz=pytz.UTC)
end_dt = pd.Timestamp('2019-01-31 23:59:00', tz=pytz.UTC)
backtest = BacktestTradingSession(
start_dt,
end_dt,
universe,
alpha_model,
portfolio_id='000001',
rebalance='weekly',
rebalance_weekday='WED',
long_only=True,
cash_buffer_percentage=0.05
)
backtest.run(results=False)
portfolio = backtest.broker.portfolios['000001']
portfolio_dict = portfolio.portfolio_to_dict()
expected_dict = {
'EQ:ABC': {
'unrealised_pnl': -31121.26203538094,
'realised_pnl': 0.0,
'total_pnl': -31121.26203538094,
'market_value': 561680.8382534103,
'quantity': 4674
},
'EQ:DEF': {
'unrealised_pnl': 18047.831359406424,
'realised_pnl': 613.3956570402925,
'total_pnl': 18661.227016446715,
'market_value': 376203.80367208034,
'quantity': 1431.0
}
}
history_df = portfolio.history_to_df().reset_index()
expected_df = pd.read_csv(os.path.join(etf_filepath, 'sixty_forty_history.dat'))
pd.testing.assert_frame_equal(history_df, expected_df)
assert portfolio_dict == expected_dict
def test_backtest_long_short_leveraged(etf_filepath):
"""
Ensures that a full end-to-end daily rebalanced backtested
trading session of a leveraged long short portfolio with
fixed proportion weights produces the correct rebalance
orders as well as correctly calculated market values after
a single month's worth of daily backtesting.
"""
os.environ['QSTRADER_CSV_DATA_DIR'] = etf_filepath
assets = ['EQ:ABC', 'EQ:DEF']
universe = StaticUniverse(assets)
signal_weights = {'EQ:ABC': 1.0, 'EQ:DEF': -0.7}
alpha_model = FixedSignalsAlphaModel(signal_weights)
start_dt = pd.Timestamp('2019-01-01 00:00:00', tz=pytz.UTC)
end_dt = pd.Timestamp('2019-01-31 23:59:00', tz=pytz.UTC)
backtest = BacktestTradingSession(
start_dt,
end_dt,
universe,
alpha_model,
portfolio_id='000001',
rebalance='daily',
long_only=False,
gross_leverage=2.0
)
backtest.run(results=False)
portfolio = backtest.broker.portfolios['000001']
portfolio_dict = portfolio.portfolio_to_dict()
expected_dict = {
'EQ:ABC': {
'unrealised_pnl': -48302.832839363175,
'realised_pnl': -3930.9847615026706,
'total_pnl': -52233.81760086585,
'market_value': 1055344.698660986,
'quantity': 8782.0
},
'EQ:DEF': {
'unrealised_pnl': -42274.737165376326,
'realised_pnl': -9972.897320721153,
'total_pnl': -52247.63448609748,
'market_value': -742417.5692312752,
'quantity': -2824.0
}
}
history_df = portfolio.history_to_df().reset_index()
expected_df = pd.read_csv(os.path.join(etf_filepath, 'long_short_history.dat'))
pd.testing.assert_frame_equal(history_df, expected_df)
assert portfolio_dict == expected_dict
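# Illustration only (added, not part of the test suite and not qstrader's
# actual order-sizing code): a rough intuition for how the fixed signal weights
# above translate into target notional exposure per asset, given total equity,
# an optional cash buffer (60/40 test) or gross leverage (long/short test).
def _example_target_notional(total_equity, weights, cash_buffer_percentage=0.0, gross_leverage=1.0):
    investable = total_equity * (1.0 - cash_buffer_percentage) * gross_leverage
    return {asset: investable * weight for asset, weight in weights.items()}
# e.g. _example_target_notional(1e6, {'EQ:ABC': 0.6, 'EQ:DEF': 0.4}, cash_buffer_percentage=0.05)
#      -> {'EQ:ABC': 570000.0, 'EQ:DEF': 380000.0}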
|
import broadlink.exceptions as blke
from homeassistant.components.broadlink.const import DOMAIN
from homeassistant.components.broadlink.device import get_domains
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import get_device
from tests.async_mock import patch
from tests.common import mock_device_registry, mock_registry
async def test_device_setup(hass):
"""Test a successful setup."""
device = get_device("Office")
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass)
assert mock_entry.state == ENTRY_STATE_LOADED
assert mock_api.auth.call_count == 1
assert mock_api.get_fwversion.call_count == 1
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
assert mock_init.call_count == 0
async def test_device_setup_authentication_error(hass):
"""Test we handle an authentication error."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_ERROR
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 1
assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth"
assert mock_init.mock_calls[0][2]["data"] == {
"name": device.name,
**device.get_entry_data(),
}
async def test_device_setup_network_timeout(hass):
"""Test we handle a network timeout."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.NetworkTimeoutError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_os_error(hass):
"""Test we handle an OS error."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_broadlink_exception(hass):
"""Test we handle a Broadlink exception."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.BroadlinkException()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_ERROR
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_update_network_timeout(hass):
"""Test we handle a network timeout in the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.NetworkTimeoutError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_update_authorization_error(hass):
"""Test we handle an authorization error in the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = (blke.AuthorizationError(), None)
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_LOADED
assert mock_api.auth.call_count == 2
assert mock_api.check_sensors.call_count == 2
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
assert mock_init.call_count == 0
async def test_device_setup_update_authentication_error(hass):
"""Test we handle an authentication error in the update step."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.AuthorizationError()
mock_api.auth.side_effect = (None, blke.AuthenticationError())
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 2
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 1
assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth"
assert mock_init.mock_calls[0][2]["data"] == {
"name": device.name,
**device.get_entry_data(),
}
async def test_device_setup_update_broadlink_exception(hass):
"""Test we handle a Broadlink exception in the update step."""
device = get_device("Garage")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.BroadlinkException()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_get_fwversion_broadlink_exception(hass):
"""Test we load the device even if we cannot read the firmware version."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.get_fwversion.side_effect = blke.BroadlinkException()
with patch.object(hass.config_entries, "async_forward_entry_setup") as mock_forward:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_setup_get_fwversion_os_error(hass):
"""Test we load the device even if we cannot read the firmware version."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.get_fwversion.side_effect = OSError()
with patch.object(hass.config_entries, "async_forward_entry_setup") as mock_forward:
_, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_setup_registry(hass):
"""Test we register the device and the entries correctly."""
device = get_device("Office")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
_, mock_entry = await device.setup_entry(hass)
await hass.async_block_till_done()
assert len(device_registry.devices) == 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
assert device_entry.identifiers == {(DOMAIN, device.mac)}
assert device_entry.name == device.name
assert device_entry.model == device.model
assert device_entry.manufacturer == device.manufacturer
assert device_entry.sw_version == device.fwversion
for entry in async_entries_for_device(entity_registry, device_entry.id):
assert entry.original_name.startswith(device.name)
async def test_device_unload_works(hass):
"""Test we unload the device."""
device = get_device("Office")
with patch.object(hass.config_entries, "async_forward_entry_setup"):
mock_api, mock_entry = await device.setup_entry(hass)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == ENTRY_STATE_NOT_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_unload_authentication_error(hass):
"""Test we unload a device that failed the authentication step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
with patch.object(hass.config_entries, "async_forward_entry_setup"), patch.object(
hass.config_entries.flow, "async_init"
):
_, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == ENTRY_STATE_NOT_LOADED
assert mock_forward.call_count == 0
async def test_device_unload_update_failed(hass):
"""Test we unload a device that failed the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.NetworkTimeoutError()
with patch.object(hass.config_entries, "async_forward_entry_setup"):
_, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == ENTRY_STATE_NOT_LOADED
assert mock_forward.call_count == 0
async def test_device_update_listener(hass):
"""Test we update device and entity registry when the entry is renamed."""
device = get_device("Office")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass)
await hass.async_block_till_done()
with patch(
"homeassistant.components.broadlink.device.blk.gendevice", return_value=mock_api
):
hass.config_entries.async_update_entry(mock_entry, title="New Name")
await hass.async_block_till_done()
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
assert device_entry.name == "New Name"
for entry in async_entries_for_device(entity_registry, device_entry.id):
assert entry.original_name.startswith("New Name")
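# Orientation only (added, not imported from the integration): the setup tests
# above assert the following mapping from errors raised during the
# authentication step to config entry states; errors raised later, in the
# update step, generally lead to a setup retry instead.
_EXAMPLE_AUTH_STEP_OUTCOMES = {
    blke.AuthenticationError: ENTRY_STATE_SETUP_ERROR,  # also starts a reauth flow
    blke.NetworkTimeoutError: ENTRY_STATE_SETUP_RETRY,
    OSError: ENTRY_STATE_SETUP_RETRY,
    blke.BroadlinkException: ENTRY_STATE_SETUP_ERROR,
}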
|
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_DISPLAY_OPTIONS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
TIME_STR_FORMAT = "%H:%M"
OPTION_TYPES = {
"time": "Time",
"date": "Date",
"date_time": "Date & Time",
"date_time_utc": "Date & Time (UTC)",
"date_time_iso": "Date & Time (ISO)",
"time_date": "Time & Date",
"beat": "Internet Time",
"time_utc": "Time (UTC)",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DISPLAY_OPTIONS, default=["time"]): vol.All(
cv.ensure_list, [vol.In(OPTION_TYPES)]
)
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Time and Date sensor."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return False
async_add_entities(
[TimeDateSensor(hass, variable) for variable in config[CONF_DISPLAY_OPTIONS]]
)
class TimeDateSensor(Entity):
"""Implementation of a Time and Date sensor."""
def __init__(self, hass, option_type):
"""Initialize the sensor."""
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
self.hass = hass
self.unsub = None
self._update_internal_state(dt_util.utcnow())
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if "date" in self.type and "time" in self.type:
return "mdi:calendar-clock"
if "date" in self.type:
return "mdi:calendar"
return "mdi:clock"
async def async_added_to_hass(self) -> None:
"""Set up next update."""
self.unsub = async_track_point_in_utc_time(
self.hass, self.point_in_time_listener, self.get_next_interval()
)
async def async_will_remove_from_hass(self) -> None:
"""Cancel next update."""
if self.unsub:
self.unsub()
self.unsub = None
def get_next_interval(self, now=None):
"""Compute next time an update should occur."""
if now is None:
now = dt_util.utcnow()
if self.type == "date":
now = dt_util.start_of_local_day(dt_util.as_local(now))
return now + timedelta(seconds=86400)
if self.type == "beat":
interval = 86.4
else:
interval = 60
timestamp = int(dt_util.as_timestamp(now))
delta = interval - (timestamp % interval)
return now + timedelta(seconds=delta)
def _update_internal_state(self, time_date):
time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
time_utc = time_date.strftime(TIME_STR_FORMAT)
date = dt_util.as_local(time_date).date().isoformat()
date_utc = time_date.date().isoformat()
# Calculate Swatch Internet Time.
time_bmt = time_date + timedelta(hours=1)
delta = timedelta(
hours=time_bmt.hour,
minutes=time_bmt.minute,
seconds=time_bmt.second,
microseconds=time_bmt.microsecond,
)
beat = int((delta.seconds + delta.microseconds / 1000000.0) / 86.4)
if self.type == "time":
self._state = time
elif self.type == "date":
self._state = date
elif self.type == "date_time":
self._state = f"{date}, {time}"
elif self.type == "date_time_utc":
self._state = f"{date_utc}, {time_utc}"
elif self.type == "time_date":
self._state = f"{time}, {date}"
elif self.type == "time_utc":
self._state = time_utc
elif self.type == "beat":
self._state = f"@{beat:03d}"
elif self.type == "date_time_iso":
self._state = dt_util.parse_datetime(f"{date} {time}").isoformat()
@callback
def point_in_time_listener(self, time_date):
"""Get the latest data and update state."""
self._update_internal_state(time_date)
self.async_write_ha_state()
self.unsub = async_track_point_in_utc_time(
self.hass, self.point_in_time_listener, self.get_next_interval()
)
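# Illustration only (added): the "beat" computed in _update_internal_state is
# Swatch Internet Time, i.e. the day divided into 1000 beats of 86.4 seconds,
# measured from midnight Biel Mean Time (UTC+1, no DST). A standalone
# rendition of the same arithmetic:
def _example_swatch_beat(utc_dt):
    """Return the Swatch Internet Time beat (0-999) for a naive UTC datetime."""
    bmt = utc_dt + timedelta(hours=1)
    seconds = bmt.hour * 3600 + bmt.minute * 60 + bmt.second + bmt.microsecond / 1e6
    return int(seconds / 86.4)
# e.g. 11:00 UTC is 12:00 BMT, i.e. 43200 s into the day -> beat 500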
|
import subprocess
from io import BytesIO
import os
from zipfile import ZipFile
import pytest
import requests
from babelfish import Country
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from subliminal import Episode, Movie
from subliminal.cache import region
@pytest.fixture(autouse=True, scope='session')
def configure_region():
region.configure('dogpile.cache.null')
region.configure = Mock()
@pytest.fixture
def movies():
return {'man_of_steel':
Movie(os.path.join('Man of Steel (2013)', 'man.of.steel.2013.720p.bluray.x264-felony.mkv'), 'Man of Steel',
source='Blu-ray', release_group='felony', resolution='720p', video_codec='H.264', audio_codec='DTS',
imdb_id='tt0770828', size=7033732714, year=2013,
hashes={'napiprojekt': '6303e7ee6a835e9fcede9fb2fb00cb36',
'opensubtitles': '5b8f8f4e41ccb21e',
'shooter': '314f454ab464775498ae6f1f5ad813a9;fdaa8b702d8936feba2122e93ba5c44f;'
'0a6935e3436aa7db5597ef67a2c494e3;4d269733f36ddd49f71e92732a462fe5',
'thesubdb': 'ad32876133355929d814457537e12dc2'}),
'enders_game':
Movie('enders.game.2013.720p.bluray.x264-sparks.mkv', 'Ender\'s Game',
source='Blu-ray', release_group='sparks', resolution='720p', video_codec='H.264', year=2013),
'café_society':
Movie(u'Café Society.1080p.avc1.RARBG.mp4', u'Café Society', year=2016),
'interstellar':
Movie('Interstellar.2014.2014.1080p.BluRay.x264.YIFY.rar', 'Interstellar',
source='Blu-ray', release_group='YIFY', resolution='1080p', video_codec='H.264', year=2014),
'jack_reacher_never_go_back':
Movie(os.path.join('Jack Reacher- Never Go Back (2016)',
'Jack.Reacher.Never.Go.Back.2016.1080p.WEBDL.AC3.x264-FGT.mkv'),
'Jack Reacher: Never Go Back',
source='Web', release_group='FGT', resolution='1080p', video_codec='H.264',
audio_codec='Dolby Digital', imdb_id='tt3393786', year=2016)
}
@pytest.fixture
def episodes():
return {'bbt_s07e05':
Episode(os.path.join('The Big Bang Theory', 'Season 07',
'The.Big.Bang.Theory.S07E05.720p.HDTV.X264-DIMENSION.mkv'),
'The Big Bang Theory', 7, 5, title='The Workplace Proximity', year=2007, tvdb_id=4668379,
series_tvdb_id=80379, series_imdb_id='tt0898266', source='HDTV', release_group='DIMENSION',
resolution='720p', video_codec='H.264', audio_codec='Dolby Digital',
imdb_id='tt3229392', size=501910737,
hashes={'napiprojekt': '6303e7ee6a835e9fcede9fb2fb00cb36',
'opensubtitles': '6878b3ef7c1bd19e',
'shooter': 'c13e0e5243c56d280064d344676fff94;cd4184d1c0c623735f6db90841ce15fc;'
'3faefd72f92b63f2504269b4f484a377;8c68d1ef873afb8ba0cc9f97cbac41c1',
'thesubdb': '9dbbfb7ba81c9a6237237dae8589fccc'}),
'got_s03e10':
Episode(os.path.join('Game of Thrones', 'Season 03',
'Game.of.Thrones.S03E10.Mhysa.720p.WEB-DL.DD5.1.H.264-NTb.mkv'),
'Game of Thrones', 3, 10, title='Mhysa', tvdb_id=4517466, series_tvdb_id=121361,
series_imdb_id='tt0944947', source='Web', release_group='NTb', resolution='720p',
video_codec='H.264', audio_codec='Dolby Digital', imdb_id='tt2178796', size=2142810931,
hashes={'napiprojekt': '6303e7ee6a835e9fcede9fb2fb00cb36',
'opensubtitles': 'b850baa096976c22',
'shooter': 'b02d992c04ad74b31c252bd5a097a036;ef1b32f873b2acf8f166fc266bdf011a;'
'82ce34a3bcee0c66ed3b26d900d31cca;78113770551f3efd1e2d4ec45898c59c',
'thesubdb': 'b1f899c77f4c960b84b8dbf840d4e42d'}),
'dallas_s01e03':
Episode('Dallas.S01E03.mkv', 'Dallas', 1, 3, title='Spy in the House', year=1978, tvdb_id=228224,
series_tvdb_id=77092, series_imdb_id='tt0077000'),
'dallas_2012_s01e03':
Episode('Dallas.2012.S01E03.mkv', 'Dallas', 1, 3, title='The Price You Pay', year=2012,
original_series=False, tvdb_id=4199511, series_tvdb_id=242521, series_imdb_id='tt1723760',
imdb_id='tt2205526'),
'marvels_agents_of_shield_s02e06':
Episode('Marvels.Agents.of.S.H.I.E.L.D.S02E06.720p.HDTV.x264-KILLERS.mkv',
'Marvel\'s Agents of S.H.I.E.L.D.', 2, 6, year=2013, source='HDTV', release_group='KILLERS',
resolution='720p', video_codec='H.264'),
'csi_cyber_s02e03':
Episode('CSI.Cyber.S02E03.hdtv-lol.mp4', 'CSI: Cyber', 2, 3, source='HDTV', release_group='lol'),
'the_x_files_s10e02':
Episode('The.X-Files.S10E02.HDTV.x264-KILLERS.mp4', 'The X-Files', 10, 2, source='HDTV',
release_group='KILLERS', video_codec='H.264'),
'colony_s01e09':
Episode('Colony.S01E09.720p.HDTV.x264-KILLERS.mkv', 'Colony', 1, 9, title='Zero Day', year=2016,
tvdb_id=5463229, series_tvdb_id=284210, series_imdb_id='tt4209256', source='HDTV',
release_group='KILLERS', resolution='720p', video_codec='H.264', imdb_id='tt4926022'),
'the_jinx_e05':
Episode('The.Jinx-The.Life.and.Deaths.of.Robert.Durst.E05.BDRip.x264-ROVERS.mkv',
'The Jinx: The Life and Deaths of Robert Durst', 1, 5, year=2015, original_series=True,
source='Blu-ray', release_group='ROVERS', video_codec='H.264'),
'the_100_s03e09':
Episode('The.100.S03E09.720p.HDTV.x264-AVS.mkv', 'The 100', 3, 9, title='Stealing Fire', year=2014,
tvdb_id=5544536, series_tvdb_id=268592, series_imdb_id='tt2661044', source='HDTV',
release_group='AVS', resolution='720p', video_codec='H.264', imdb_id='tt4799896'),
'the fall':
Episode('the_fall.3x01.720p_hdtv_x264-fov.mkv', 'The Fall', 3, 1, title='The Fall', year=2013,
tvdb_id=5749493, series_tvdb_id=258107, series_imdb_id='tt2294189', source='HDTV',
release_group='fov', resolution='720p', video_codec='H.264', imdb_id='tt4516230'),
'csi_s15e18':
Episode('CSI.S15E18.720p.HDTV.X264.DIMENSION.mkv', 'CSI: Crime Scene Investigation', 15, 18,
title='The End Game', year=2000, tvdb_id=5104359, series_tvdb_id=72546, series_imdb_id='tt0247082',
source='HDTV', release_group='DIMENSION', resolution='720p', video_codec='H.264',
imdb_id='tt4145952'),
'turn_s04e03':
Episode('Turn.S04E03.720p.HDTV.x264-AVS.mkv', "TURN: Washington's Spies", 4, 3,
title='Blood for Blood', year=2014, tvdb_id=6124360, series_tvdb_id=272135,
series_imdb_id='tt2543328',
source='HDTV', release_group='AVS', resolution='720p', video_codec='H.264',
imdb_id='tt6137686', alternative_series=['Turn']),
'turn_s03e01':
Episode('Turn.S03E01.720p.HDTV.x264-AVS.mkv', "TURN: Washington's Spies", 3, 1,
title='Valediction', year=2014, tvdb_id=5471384, series_tvdb_id=272135,
series_imdb_id='tt2543328',
source='HDTV', release_group='AVS', resolution='720p', video_codec='H.264',
imdb_id='tt4909774', alternative_series=['Turn']),
'marvels_jessica_jones_s01e13':
Episode('Marvels.Jessica.Jones.S01E13.720p.WEBRip.x264-2HD', "Marvels Jessica Jones", 1, 13,
title='AKA Smile', year=2015, tvdb_id=5311273, series_tvdb_id=284190,
series_imdb_id='tt2357547',
source='Web', release_group='2HD', resolution='720p', video_codec='H.264',
imdb_id='tt4162096', alternative_series=['Jessica Jones']),
'fear_walking_dead_s03e10':
Episode('Fear.the.Walking.Dead.S03E10.1080p.WEB-DL.DD5.1.H264-RARBG', 'Fear the Walking Dead', 3, 10,
resolution='1080p', source='Web', video_codec='H.264', release_group='RARBG'),
'the_end_of_the_fucking_world':
Episode('the.end.of.the.fucking.world.s01e04.720p.web.x264-skgtv.mkv', 'The End of the Fucking World', 1, 4,
resolution='720p', source='Web', video_codec='H.264', release_group='skgtv',
alternative_series=['The end of the f***ing world']),
'Marvels.Agents.of.S.H.I.E.L.D.S05E01-E02':
Episode('Marvels.Agents.of.S.H.I.E.L.D.S05E01-E02.720p.HDTV.x264-AVS', 'Marvels.Agents.of.S.H.I.E.L.D', 5,
1, resolution='720p', source='HDTV', video_codec='H.264', release_group='AVS'),
'alex_inc_s01e04':
Episode('Alex.Inc.S01E04.HDTV.x264-SVA.mkv', 'Alex, Inc.', 1, 4, source='HDTV', video_codec='H.264',
release_group='SVA', year=2018, title='The Nanny', series_imdb_id='tt6466948', tvdb_id=6627151,
series_tvdb_id=328635),
'shameless_us_s08e01':
Episode('Shameless.US.s08e01.web.h264-convoy', 'Shameless', 8, 1, source='Web', video_codec='H.264',
country=Country('US'), original_series=False, release_group='convoy', year=2011,
alternative_series=['Shameless US'], title='We Become What We... Frank!',
series_imdb_id='tt1586680', series_tvdb_id=161511, imdb_id='tt6347410', tvdb_id=6227949),
'house_of_cards_us_s06e01':
Episode('house.of.cards.us.s06e01.720p.web-dl.x264', 'House of Cards', 6, 1, source='Web',
video_codec='H.264', country=Country('US'), year=2013, original_series=False,
alternative_series=['House of Cards (2013)'], title='Chapter 66', series_imdb_id='tt1856010',
series_tvdb_id=262980, imdb_id='tt7538918', tvdb_id=6553109),
'walking_dead_s08e07':
Episode('The Walking Dead - 08x07 - Time for After.AMZN.WEB-DL-CasStudio.mkv', 'The Walking Dead',
8, 7, source='Web', streaming_service='Amazon Prime', release_group='CasStudio')
}
@pytest.fixture(scope='session')
def mkv():
data_path = os.path.join('tests', 'data', 'mkv')
# download matroska test suite
if not os.path.exists(data_path) or len(os.listdir(data_path)) != 8:
r = requests.get('http://downloads.sourceforge.net/project/matroska/test_files/matroska_test_w1_1.zip')
with ZipFile(BytesIO(r.content), 'r') as f:
f.extractall(data_path, [m for m in f.namelist() if os.path.splitext(m)[1] == '.mkv'])
# populate a dict with mkv files
files = {}
for path in os.listdir(data_path):
name, _ = os.path.splitext(path)
files[name] = os.path.join(data_path, path)
return files
@pytest.fixture(scope='session')
def rar(mkv):
data_path = os.path.join('tests', 'data', 'rar')
if not os.path.exists(data_path):
os.makedirs(data_path)
downloaded_files = {
'pwd-protected': 'https://github.com/markokr/rarfile/blob/master/test/files/rar5-psw.rar?raw=true',
'simple': 'https://github.com/markokr/rarfile/blob/master/test/files/rar5-quick-open.rar?raw=true'
}
generated_files = {
'video': [mkv['test1']],
'videos': [mkv['test3'], mkv['test4'], mkv['test5']],
}
files = {}
for filename, download_url in downloaded_files.items():
files[filename] = os.path.join(data_path, filename) + '.rar'
if not os.path.exists(files[filename]):
r = requests.get(download_url)
with open(files[filename], 'wb') as f:
f.write(r.content)
for filename, videos in generated_files.items():
files[filename] = os.path.join(data_path, filename) + '.rar'
if not os.path.exists(files[filename]):
subprocess.call(['rar', 'a', files[filename]] + videos)
return files
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from nfsd import NfsdCollector
##########################################################################
class TestNfsdCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NfsdCollector', {
'interval': 1
})
self.collector = NfsdCollector(config, None)
def test_import(self):
self.assertTrue(NfsdCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_stat(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/rpc/nfsd')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
NfsdCollector.PROC = self.getFixturePath('proc_nfsd_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
NfsdCollector.PROC = self.getFixturePath('proc_nfsd_2')
self.collector.collect()
metrics = {
'.input_output.bytes-read': 3139369493.0,
'.input_output.bytes-written': 15691669.0,
'.net.cnt': 14564086.0,
'.net.tcpcnt': 14562696.0,
'.net.tcpconn': 30773.0,
'.read-ahead.10-pct': 8751152.0,
'.read-ahead.cache-size': 32.0,
'.read-ahead.not-found': 18612.0,
'.reply_cache.misses': 71080.0,
'.reply_cache.nocache': 14491982.0,
'.rpc.cnt': 14563007.0,
'.threads.10-20-pct': 22163.0,
'.threads.100-pct': 22111.0,
'.threads.20-30-pct': 8448.0,
'.threads.30-40-pct': 1642.0,
'.threads.50-60-pct': 5072.0,
'.threads.60-70-pct': 1210.0,
'.threads.70-80-pct': 3889.0,
'.threads.80-90-pct': 2654.0,
'.threads.fullcnt': 1324492.0,
'.threads.threads': 8.0,
'.v2.unknown': 18.0,
'.v3.access': 136921.0,
'.v3.commit': 635.0,
'.v3.create': 1655.0,
'.v3.fsinfo': 11.0,
'.v3.fsstat': 34450.0,
'.v3.getattr': 724974.0,
'.v3.lookup': 213165.0,
'.v3.null': 8.0,
'.v3.read': 8761683.0,
'.v3.readdir': 11295.0,
'.v3.readdirplus': 132298.0,
'.v3.remove': 1488.0,
'.v3.unknown': 22.0,
'.v3.write': 67937.0,
'.v4.compound': 4476320.0,
'.v4.null': 18.0,
'.v4.ops.access': 2083822.0,
'.v4.ops.close': 34801.0,
'.v4.ops.commit': 3955.0,
'.v4.ops.getattr': 2302848.0,
'.v4.ops.getfh': 51791.0,
'.v4.ops.lookup': 68501.0,
'.v4.ops.open': 34847.0,
'.v4.ops.open_conf': 29002.0,
'.v4.ops.putfh': 4435270.0,
'.v4.ops.putrootfh': 6237.0,
'.v4.ops.read': 8030.0,
'.v4.ops.readdir': 272.0,
'.v4.ops.remove': 7802.0,
'.v4.ops.renew': 28594.0,
'.v4.ops.restorefh': 34839.0,
'.v4.ops.savefh': 34847.0,
'.v4.ops.setattr': 7870.0,
'.v4.ops.setcltid': 6226.0,
'.v4.ops.setcltidconf': 6227.0,
'.v4.ops.unknown': 40.0,
'.v4.ops.write': 76562.0,
'.v4.unknown': 2.0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from django.db import models
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy
from weblate.utils.colors import COLOR_CHOICES
class Label(models.Model):
project = models.ForeignKey("Project", on_delete=models.deletion.CASCADE)
name = models.CharField(verbose_name=gettext_lazy("Label name"), max_length=190)
color = models.CharField(
verbose_name=gettext_lazy("Color"),
max_length=30,
choices=COLOR_CHOICES,
blank=False,
default=None,
)
class Meta:
app_label = "trans"
unique_together = ("project", "name")
verbose_name = "label"
        verbose_name_plural = "labels"
def __str__(self):
return mark_safe(
'<span class="label label-{}">{}</span>'.format(
self.color, escape(self.name)
)
)
|
from datetime import timedelta
import logging
from pybotvac.exceptions import NeatoRobotException
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.entity import ToggleEntity
from .const import NEATO_DOMAIN, NEATO_LOGIN, NEATO_ROBOTS, SCAN_INTERVAL_MINUTES
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES)
SWITCH_TYPE_SCHEDULE = "schedule"
SWITCH_TYPES = {SWITCH_TYPE_SCHEDULE: ["Schedule"]}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Neato switch with config entry."""
dev = []
neato = hass.data.get(NEATO_LOGIN)
for robot in hass.data[NEATO_ROBOTS]:
for type_name in SWITCH_TYPES:
dev.append(NeatoConnectedSwitch(neato, robot, type_name))
if not dev:
return
_LOGGER.debug("Adding switches %s", dev)
async_add_entities(dev, True)
class NeatoConnectedSwitch(ToggleEntity):
"""Neato Connected Switches."""
def __init__(self, neato, robot, switch_type):
"""Initialize the Neato Connected switches."""
self.type = switch_type
self.robot = robot
self._available = neato.logged_in if neato is not None else False
self._robot_name = f"{self.robot.name} {SWITCH_TYPES[self.type][0]}"
self._state = None
self._schedule_state = None
self._clean_state = None
self._robot_serial = self.robot.serial
def update(self):
"""Update the states of Neato switches."""
_LOGGER.debug("Running Neato switch update for '%s'", self.entity_id)
try:
self._state = self.robot.state
except NeatoRobotException as ex:
if self._available: # Print only once when available
_LOGGER.error(
"Neato switch connection error for '%s': %s", self.entity_id, ex
)
self._state = None
self._available = False
return
self._available = True
_LOGGER.debug("self._state=%s", self._state)
if self.type == SWITCH_TYPE_SCHEDULE:
_LOGGER.debug("State: %s", self._state)
if self._state["details"]["isScheduleEnabled"]:
self._schedule_state = STATE_ON
else:
self._schedule_state = STATE_OFF
_LOGGER.debug(
"Schedule state for '%s': %s", self.entity_id, self._schedule_state
)
@property
def name(self):
"""Return the name of the switch."""
return self._robot_name
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def unique_id(self):
"""Return a unique ID."""
return self._robot_serial
@property
def is_on(self):
"""Return true if switch is on."""
if self.type == SWITCH_TYPE_SCHEDULE:
if self._schedule_state == STATE_ON:
return True
return False
@property
def device_info(self):
"""Device info for neato robot."""
return {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}}
def turn_on(self, **kwargs):
"""Turn the switch on."""
if self.type == SWITCH_TYPE_SCHEDULE:
try:
self.robot.enable_schedule()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato switch connection error '%s': %s", self.entity_id, ex
)
def turn_off(self, **kwargs):
"""Turn the switch off."""
if self.type == SWITCH_TYPE_SCHEDULE:
try:
self.robot.disable_schedule()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato switch connection error '%s': %s", self.entity_id, ex
)
|
import sys
import pathlib
import json
MATCHERS = {
# scripts/dev/ci/run.sh:41:39: error: Double quote array expansions to
# avoid re-splitting elements. [SC2068]
"shellcheck": [
{
"pattern": [
{
"regexp": r"^(.+):(\d+):(\d+):\s(note|warning|error):\s(.*)\s\[(SC\d+)\]$",
"file": 1,
"line": 2,
"column": 3,
"severity": 4,
"message": 5,
"code": 6,
},
],
},
],
"yamllint": [
{
"pattern": [
{
"regexp": r"^\033\[4m([^\033]+)\033\[0m$",
"file": 1,
},
{
"regexp": r"^ \033\[2m(\d+):(\d+)\033\[0m \033\[3[13]m([^\033]+)\033\[0m +([^\033]*)\033\[2m\(([^)]+)\)\033\[0m$",
"line": 1,
"column": 2,
"severity": 3,
"message": 4,
"code": 5,
"loop": True,
},
],
},
],
# filename.py:313: unused function 'i_am_never_used' (60% confidence)
"vulture": [
{
"severity": "warning",
"pattern": [
{
"regexp": r"^([^:]+):(\d+): ([^(]+ \(\d+% confidence\))$",
"file": 1,
"line": 2,
"message": 3,
}
]
},
],
# filename.py:1:1: D100 Missing docstring in public module
"flake8": [
{
# "undefined name" is FXXX (i.e. not an error), but e.g. multiple
# spaces before an operator is EXXX (i.e. an error) - that makes little
# sense, so let's just treat everything as a warning instead.
"severity": "warning",
"pattern": [
{
"regexp": r"^(\033\[0m)?([^:]+):(\d+):(\d+): ([A-Z]\d{3}) (.*)$",
"file": 2,
"line": 3,
"column": 4,
"code": 5,
"message": 6,
},
],
},
],
# filename.py:80: error: Name 'foo' is not defined [name-defined]
"mypy": [
{
"pattern": [
{
"regexp": r"^(\033\[0m)?([^:]+):(\d+): ([^:]+): (.*) \[(.*)\]$",
"file": 2,
"line": 3,
"severity": 4,
"message": 5,
"code": 6,
},
],
},
],
# For some reason, ANSI color escape codes end up as part of the message
# GitHub gets with colored pylint output - so we have those escape codes
# (e.g. "\033[35m...\033[0m") as part of the regex patterns...
"pylint": [
{
# filename.py:80:10: E0602: Undefined variable 'foo' (undefined-variable)
"severity": "error",
"pattern": [
{
"regexp": r"^([^:]+):(\d+):(\d+): (E\d+): \033\[[\d;]+m([^\033]+).*$",
"file": 1,
"line": 2,
"column": 3,
"code": 4,
"message": 5,
},
],
},
{
# filename.py:78:14: W0613: Unused argument 'unused' (unused-argument)
"severity": "warning",
"pattern": [
{
"regexp": r"^([^:]+):(\d+):(\d+): ([A-DF-Z]\d+): \033\[[\d;]+m([^\033]+).*$",
"file": 1,
"line": 2,
"column": 3,
"code": 4,
"message": 5,
},
],
},
],
"tests": [
{
# pytest test summary output
"severity": "error",
"pattern": [
{
"regexp": r'^=+ short test summary info =+$',
},
{
"regexp": r"^((ERROR|FAILED) .*)",
"message": 1,
"loop": True,
}
],
},
{
# pytest error lines
# E end2end.fixtures.testprocess.WaitForTimeout: Timed out
# after 15000ms waiting for [...]
"severity": "error",
"pattern": [
{
"regexp": r'^\033\[1m\033\[31mE ([a-zA-Z0-9.]+: [^\033]*)\033\[0m$',
"message": 1,
},
],
},
],
"misc": [
{
"severity": "error",
"pattern": [
{
"regexp": r'^([^:]+):(\d+): (Found .*)',
"file": 1,
"line": 2,
"message": 3,
}
]
}
]
}
def add_matcher(output_dir, owner, data):
data['owner'] = owner
out_data = {'problemMatcher': [data]}
output_file = output_dir / '{}.json'.format(owner)
with output_file.open('w', encoding='utf-8') as f:
json.dump(out_data, f)
print("::add-matcher::{}".format(output_file))
def main(testenv, tempdir):
if testenv.startswith('py3'):
testenv = 'tests'
if testenv not in MATCHERS:
return
output_dir = pathlib.Path(tempdir)
for idx, data in enumerate(MATCHERS[testenv]):
owner = '{}-{}'.format(testenv, idx)
add_matcher(output_dir=output_dir, owner=owner, data=data)
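# Illustration only (added, hypothetical helper): add_matcher above writes one
# JSON file per matcher and registers it via the ::add-matcher:: workflow
# command. This mirrors the payload it would emit for the first flake8 matcher,
# minus the file I/O.
def _example_matcher_payload(owner='flake8-0'):
    data = dict(MATCHERS['flake8'][0])
    data['owner'] = owner
    return {'problemMatcher': [data]}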
if __name__ == '__main__':
sys.exit(main(*sys.argv[1:]))
|
import logging
from bimmer_connected.state import ChargingState, LockState
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION, LENGTH_KILOMETERS
from . import DOMAIN as BMW_DOMAIN
from .const import ATTRIBUTION
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"lids": ["Doors", DEVICE_CLASS_OPENING, "mdi:car-door-lock"],
"windows": ["Windows", DEVICE_CLASS_OPENING, "mdi:car-door"],
"door_lock_state": ["Door lock state", "lock", "mdi:car-key"],
"lights_parking": ["Parking lights", "light", "mdi:car-parking-lights"],
"condition_based_services": [
"Condition based services",
DEVICE_CLASS_PROBLEM,
"mdi:wrench",
],
"check_control_messages": [
"Control messages",
DEVICE_CLASS_PROBLEM,
"mdi:car-tire-alert",
],
}
SENSOR_TYPES_ELEC = {
"charging_status": ["Charging status", "power", "mdi:ev-station"],
"connection_status": ["Connection status", DEVICE_CLASS_PLUG, "mdi:car-electric"],
}
SENSOR_TYPES_ELEC.update(SENSOR_TYPES)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BMW sensors."""
accounts = hass.data[BMW_DOMAIN]
_LOGGER.debug("Found BMW accounts: %s", ", ".join([a.name for a in accounts]))
devices = []
for account in accounts:
for vehicle in account.account.vehicles:
if vehicle.has_hv_battery:
_LOGGER.debug("BMW with a high voltage battery")
for key, value in sorted(SENSOR_TYPES_ELEC.items()):
if key in vehicle.available_attributes:
device = BMWConnectedDriveSensor(
account, vehicle, key, value[0], value[1], value[2]
)
devices.append(device)
elif vehicle.has_internal_combustion_engine:
_LOGGER.debug("BMW with an internal combustion engine")
for key, value in sorted(SENSOR_TYPES.items()):
if key in vehicle.available_attributes:
device = BMWConnectedDriveSensor(
account, vehicle, key, value[0], value[1], value[2]
)
devices.append(device)
add_entities(devices, True)
class BMWConnectedDriveSensor(BinarySensorEntity):
"""Representation of a BMW vehicle binary sensor."""
def __init__(
self, account, vehicle, attribute: str, sensor_name, device_class, icon
):
"""Initialize sensor."""
self._account = account
self._vehicle = vehicle
self._attribute = attribute
self._name = f"{self._vehicle.name} {self._attribute}"
self._unique_id = f"{self._vehicle.vin}-{self._attribute}"
self._sensor_name = sensor_name
self._device_class = device_class
self._icon = icon
self._state = None
@property
def should_poll(self) -> bool:
"""Return False.
Data update is triggered from BMWConnectedDriveEntity.
"""
return False
@property
def unique_id(self):
"""Return the unique ID of the binary sensor."""
return self._unique_id
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self):
"""Return the class of the binary sensor."""
return self._device_class
@property
def is_on(self):
"""Return the state of the binary sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of the binary sensor."""
vehicle_state = self._vehicle.state
result = {
"car": self._vehicle.name,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
if self._attribute == "lids":
for lid in vehicle_state.lids:
result[lid.name] = lid.state.value
elif self._attribute == "windows":
for window in vehicle_state.windows:
result[window.name] = window.state.value
elif self._attribute == "door_lock_state":
result["door_lock_state"] = vehicle_state.door_lock_state.value
result["last_update_reason"] = vehicle_state.last_update_reason
elif self._attribute == "lights_parking":
result["lights_parking"] = vehicle_state.parking_lights.value
elif self._attribute == "condition_based_services":
for report in vehicle_state.condition_based_services:
result.update(self._format_cbs_report(report))
elif self._attribute == "check_control_messages":
check_control_messages = vehicle_state.check_control_messages
has_check_control_messages = vehicle_state.has_check_control_messages
if has_check_control_messages:
cbs_list = []
for message in check_control_messages:
cbs_list.append(message["ccmDescriptionShort"])
result["check_control_messages"] = cbs_list
else:
result["check_control_messages"] = "OK"
elif self._attribute == "charging_status":
result["charging_status"] = vehicle_state.charging_status.value
result["last_charging_end_result"] = vehicle_state.last_charging_end_result
elif self._attribute == "connection_status":
result["connection_status"] = vehicle_state.connection_status
return sorted(result.items())
def update(self):
"""Read new state data from the library."""
vehicle_state = self._vehicle.state
# device class opening: On means open, Off means closed
if self._attribute == "lids":
_LOGGER.debug("Status of lid: %s", vehicle_state.all_lids_closed)
self._state = not vehicle_state.all_lids_closed
if self._attribute == "windows":
self._state = not vehicle_state.all_windows_closed
# device class lock: On means unlocked, Off means locked
if self._attribute == "door_lock_state":
# Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
self._state = vehicle_state.door_lock_state not in [
LockState.LOCKED,
LockState.SECURED,
]
# device class light: On means light detected, Off means no light
if self._attribute == "lights_parking":
self._state = vehicle_state.are_parking_lights_on
# device class problem: On means problem detected, Off means no problem
if self._attribute == "condition_based_services":
self._state = not vehicle_state.are_all_cbs_ok
if self._attribute == "check_control_messages":
self._state = vehicle_state.has_check_control_messages
# device class power: On means power detected, Off means no power
if self._attribute == "charging_status":
self._state = vehicle_state.charging_status in [ChargingState.CHARGING]
# device class plug: On means device is plugged in,
# Off means device is unplugged
if self._attribute == "connection_status":
self._state = vehicle_state.connection_status == "CONNECTED"
def _format_cbs_report(self, report):
result = {}
service_type = report.service_type.lower().replace("_", " ")
result[f"{service_type} status"] = report.state.value
if report.due_date is not None:
result[f"{service_type} date"] = report.due_date.strftime("%Y-%m-%d")
if report.due_distance is not None:
distance = round(
self.hass.config.units.length(report.due_distance, LENGTH_KILOMETERS)
)
result[
f"{service_type} distance"
] = f"{distance} {self.hass.config.units.length_unit}"
return result
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
|
from datetime import timedelta
import logging
from random import randrange
import metno
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
EVENT_CORE_CONFIG_UPDATE,
LENGTH_FEET,
LENGTH_METERS,
)
from homeassistant.core import Config, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util.distance import convert as convert_distance
import homeassistant.util.dt as dt_util
from .const import CONF_TRACK_HOME, DOMAIN
URL = "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/2.0/complete"
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Set up configured Met."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass, config_entry):
"""Set up Met as config entry."""
coordinator = MetDataUpdateCoordinator(hass, config_entry)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
if config_entry.data.get(CONF_TRACK_HOME, False):
coordinator.track_home()
hass.data[DOMAIN][config_entry.entry_id] = coordinator
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "weather")
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await hass.config_entries.async_forward_entry_unload(config_entry, "weather")
hass.data[DOMAIN][config_entry.entry_id].untrack_home()
hass.data[DOMAIN].pop(config_entry.entry_id)
return True
class MetDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Met data."""
def __init__(self, hass, config_entry):
"""Initialize global Met data updater."""
self._unsub_track_home = None
self.weather = MetWeatherData(
hass, config_entry.data, hass.config.units.is_metric
)
self.weather.init_data()
update_interval = timedelta(minutes=randrange(55, 65))
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
async def _async_update_data(self):
"""Fetch data from Met."""
try:
return await self.weather.fetch_data()
except Exception as err:
raise UpdateFailed(f"Update failed: {err}") from err
def track_home(self):
"""Start tracking changes to HA home setting."""
if self._unsub_track_home:
return
async def _async_update_weather_data(_event=None):
"""Update weather data."""
self.weather.init_data()
await self.async_refresh()
self._unsub_track_home = self.hass.bus.async_listen(
EVENT_CORE_CONFIG_UPDATE, _async_update_weather_data
)
def untrack_home(self):
"""Stop tracking changes to HA home setting."""
if self._unsub_track_home:
self._unsub_track_home()
self._unsub_track_home = None
class MetWeatherData:
"""Keep data for Met.no weather entities."""
def __init__(self, hass, config, is_metric):
"""Initialise the weather entity data."""
self.hass = hass
self._config = config
self._is_metric = is_metric
self._weather_data = None
self.current_weather_data = {}
self.daily_forecast = None
self.hourly_forecast = None
def init_data(self):
"""Weather data inialization - get the coordinates."""
if self._config.get(CONF_TRACK_HOME, False):
latitude = self.hass.config.latitude
longitude = self.hass.config.longitude
elevation = self.hass.config.elevation
else:
latitude = self._config[CONF_LATITUDE]
longitude = self._config[CONF_LONGITUDE]
elevation = self._config[CONF_ELEVATION]
if not self._is_metric:
elevation = int(
round(convert_distance(elevation, LENGTH_FEET, LENGTH_METERS))
)
coordinates = {
"lat": str(latitude),
"lon": str(longitude),
"msl": str(elevation),
}
self._weather_data = metno.MetWeatherData(
coordinates, async_get_clientsession(self.hass), api_url=URL
)
async def fetch_data(self):
"""Fetch data from API - (current weather and forecast)."""
await self._weather_data.fetching_data()
self.current_weather_data = self._weather_data.get_current_weather()
time_zone = dt_util.DEFAULT_TIME_ZONE
self.daily_forecast = self._weather_data.get_forecast(time_zone, False)
self.hourly_forecast = self._weather_data.get_forecast(time_zone, True)
return self
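# Illustration only (added): on imperial-unit installations the configured
# elevation is stored in feet, while the met.no API expects metres above sea
# level ("msl"), so init_data rounds the converted value. A standalone sketch:
def _example_elevation_to_msl(elevation_ft):
    """Feet-to-metres rounding as used in MetWeatherData.init_data."""
    return int(round(convert_distance(elevation_ft, LENGTH_FEET, LENGTH_METERS)))
# e.g. _example_elevation_to_msl(1000) -> 305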
|
import sys
if sys.version_info < (3, 7):
from typing import _ForwardRef as ForwardRef
from typing import GenericMeta as _GenericAlias
from typing import _Union, Union
def get_type_args(tp):
if isinstance(tp, (_GenericAlias, _Union)):
return tp.__args__
return ()
def get_type_origin(tp):
if isinstance(tp, _GenericAlias):
return tp.__extra__
if isinstance(tp, _Union):
return Union
return None
elif sys.version_info < (3, 8):
from typing import ForwardRef, _GenericAlias # type: ignore
def get_type_args(tp):
if isinstance(tp, _GenericAlias):
return tp.__args__
return ()
def get_type_origin(tp):
if isinstance(tp, _GenericAlias):
return tp.__origin__
return None
else:
from typing import ForwardRef, _GenericAlias
from typing import get_args as get_type_args
from typing import get_origin as get_type_origin
# Export the helpers under their local names; on Python >= 3.8 the functions
# are imported under an alias, so their __name__ ("get_args"/"get_origin")
# would not match the names this module actually defines.
__all__ = (
    "ForwardRef",
    "_GenericAlias",
    "get_type_args",
    "get_type_origin",
)
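# Usage sketch (added, hypothetical helper): whichever branch above was taken,
# the two helpers behave like typing.get_args / typing.get_origin, which makes
# version-independent introspection such as "unwrap Optional[T]" possible.
def _example_optional_inner_type(tp):
    """Return T for Optional[T], else None; relies only on the shims above."""
    from typing import Union
    if get_type_origin(tp) is Union:
        args = [a for a in get_type_args(tp) if a is not type(None)]
        if len(args) == 1:
            return args[0]
    return None
# e.g. _example_optional_inner_type(Optional[int]) -> int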
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import io
import string
from absl.flags import _helpers
import six
def _is_integer_type(instance):
"""Returns True if instance is an integer, and not a bool."""
return (isinstance(instance, six.integer_types) and
not isinstance(instance, bool))
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(cls, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for cls with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
*args: Positional initializer arguments.
**kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(cls, *args, **kwargs)
else:
instances = cls._instances
key = (cls,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(cls, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(cls, *args)
# NOTE about Genericity and Metaclass of ArgumentParser.
# (1) In the .py source (this file)
# - is not declared as Generic
# - has _ArgumentParserCache as a metaclass
# (2) In the .pyi source (type stub)
# - is declared as Generic
# - doesn't have a metaclass
# The reason we need this is due to Generic having a different metaclass
# (for python versions <= 3.7) and a class can have only one metaclass.
#
# * Lack of metaclass in .pyi is not a deal breaker, since the metaclass
# doesn't affect any type information. Also type checkers can check the type
# parameters.
# * However, not declaring ArgumentParser as Generic in the source affects
# runtime annotation processing. In particular this means, subclasses should
# inherit from `ArgumentParser` and not `ArgumentParser[SomeType]`.
# The corresponding DEFINE_someType method (the public API) can be annotated
# to return FlagHolder[SomeType].
class ArgumentParser(six.with_metaclass(_ArgumentParserCache, object)):
"""Base class used to parse and convert arguments.
The parse() method checks to make sure that the string argument is a
  legal value and converts it to a native type. If the value cannot be
  converted, it should throw a 'ValueError' exception with a human-readable
  explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
Argument parser classes must be stateless, since instances are cached
and shared between flags. Initializer arguments are allowed, but all
member variables must be derived from initializer arguments only.
"""
syntactic_help = ''
def parse(self, argument):
"""Parses the string argument and returns the native value.
By default it returns its argument unmodified.
Args:
argument: string argument passed in the commandline.
Raises:
ValueError: Raised when it fails to parse the argument.
TypeError: Raised when the argument has the wrong type.
Returns:
The parsed value in native type.
"""
if not isinstance(argument, six.string_types):
raise TypeError('flag value must be a string, found "{}"'.format(
type(argument)))
return argument
def flag_type(self):
"""Returns a string representing the type of the flag."""
return 'string'
def _custom_xml_dom_elements(self, doc):
"""Returns a list of minidom.Element to add additional flag information.
Args:
doc: minidom.Document, the DOM document it should create nodes from.
"""
del doc # Unused.
return []
class ArgumentSerializer(object):
"""Base class for generating string representations of a flag value."""
def serialize(self, value):
"""Returns a serialized string of the value."""
return _helpers.str_or_unicode(value)
class NumericParser(ArgumentParser):
"""Parser of numeric values.
  Parsed value may be bounded by given upper and lower bounds.
"""
def is_outside_bounds(self, val):
"""Returns whether the value is outside the bounds or not."""
return ((self.lower_bound is not None and val < self.lower_bound) or
(self.upper_bound is not None and val > self.upper_bound))
def parse(self, argument):
"""See base class."""
val = self.convert(argument)
if self.is_outside_bounds(val):
raise ValueError('%s is not %s' % (val, self.syntactic_help))
return val
def _custom_xml_dom_elements(self, doc):
elements = []
if self.lower_bound is not None:
elements.append(_helpers.create_xml_dom_element(
doc, 'lower_bound', self.lower_bound))
if self.upper_bound is not None:
elements.append(_helpers.create_xml_dom_element(
doc, 'upper_bound', self.upper_bound))
return elements
def convert(self, argument):
"""Returns the correct numeric value of argument.
    Subclass must implement this method, and raise TypeError if argument is
    not a string or does not have the right numeric type.
Args:
argument: string argument passed in the commandline, or the numeric type.
Raises:
TypeError: Raised when argument is not a string or the right numeric type.
ValueError: Raised when failed to convert argument to the numeric value.
"""
raise NotImplementedError
class FloatParser(NumericParser):
"""Parser of floating point values.
  Parsed value may be bounded by given upper and lower bounds.
"""
number_article = 'a'
number_name = 'number'
syntactic_help = ' '.join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument):
"""Returns the float value of argument."""
if (_is_integer_type(argument) or isinstance(argument, float) or
isinstance(argument, six.string_types)):
return float(argument)
else:
raise TypeError(
'Expect argument to be a string, int, or float, found {}'.format(
type(argument)))
def flag_type(self):
"""See base class."""
return 'float'
class IntegerParser(NumericParser):
"""Parser of an integer value.
  Parsed value may be bounded by given upper and lower bounds.
"""
number_article = 'an'
number_name = 'integer'
syntactic_help = ' '.join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(IntegerParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = 'a positive %s' % self.number_name
elif upper_bound == -1:
sh = 'a negative %s' % self.number_name
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument):
"""Returns the int value of argument."""
if _is_integer_type(argument):
return argument
elif isinstance(argument, six.string_types):
base = 10
if len(argument) > 2 and argument[0] == '0':
if argument[1] == 'o':
base = 8
elif argument[1] == 'x':
base = 16
return int(argument, base)
else:
raise TypeError('Expect argument to be a string or int, found {}'.format(
type(argument)))
def flag_type(self):
"""See base class."""
return 'int'
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def parse(self, argument):
"""See base class."""
if isinstance(argument, six.string_types):
if argument.lower() in ('true', 't', '1'):
return True
elif argument.lower() in ('false', 'f', '0'):
return False
else:
raise ValueError('Non-boolean argument to boolean flag', argument)
elif isinstance(argument, six.integer_types):
# Only allow bool or integer 0, 1.
# Note that float 1.0 == True, 0.0 == False.
bool_value = bool(argument)
if argument == bool_value:
return bool_value
else:
raise ValueError('Non-boolean argument to boolean flag', argument)
raise TypeError('Non-boolean argument to boolean flag', argument)
def flag_type(self):
"""See base class."""
return 'bool'
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set)."""
def __init__(self, enum_values, case_sensitive=True):
"""Initializes EnumParser.
Args:
enum_values: [str], a non-empty list of string values in the enum.
case_sensitive: bool, whether or not the enum is to be case-sensitive.
Raises:
ValueError: When enum_values is empty.
"""
if not enum_values:
raise ValueError(
'enum_values cannot be empty, found "{}"'.format(enum_values))
super(EnumParser, self).__init__()
self.enum_values = enum_values
self.case_sensitive = case_sensitive
def parse(self, argument):
"""Determines validity of argument and returns the correct element of enum.
Args:
argument: str, the supplied flag value.
Returns:
The first matching element from enum_values.
Raises:
ValueError: Raised when argument didn't match anything in enum.
"""
if self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0]
def flag_type(self):
"""See base class."""
return 'string enum'
class EnumClassParser(ArgumentParser):
"""Parser of an Enum class member."""
def __init__(self, enum_class, case_sensitive=True):
"""Initializes EnumParser.
Args:
enum_class: class, the Enum class with all possible flag values.
case_sensitive: bool, whether or not the enum is to be case-sensitive. If
False, all member names must be unique when case is ignored.
Raises:
TypeError: When enum_class is not a subclass of Enum.
ValueError: When enum_class is empty.
"""
# Users must have an Enum class defined before using EnumClass flag.
# Therefore this dependency is guaranteed.
import enum
if not issubclass(enum_class, enum.Enum):
raise TypeError('{} is not a subclass of Enum.'.format(enum_class))
if not enum_class.__members__:
raise ValueError('enum_class cannot be empty, but "{}" is empty.'
.format(enum_class))
if not case_sensitive:
members = collections.Counter(
name.lower() for name in enum_class.__members__)
duplicate_keys = {
member for member, count in members.items() if count > 1
}
if duplicate_keys:
raise ValueError(
'Duplicate enum values for {} using case_sensitive=False'.format(
duplicate_keys))
super(EnumClassParser, self).__init__()
self.enum_class = enum_class
self._case_sensitive = case_sensitive
if case_sensitive:
self._member_names = tuple(enum_class.__members__)
else:
self._member_names = tuple(
name.lower() for name in enum_class.__members__)
@property
def member_names(self):
"""The accepted enum names, in lowercase if not case sensitive."""
return self._member_names
def parse(self, argument):
"""Determines validity of argument and returns the correct element of enum.
Args:
argument: str or Enum class member, the supplied flag value.
Returns:
The first matching Enum class member in Enum class.
Raises:
ValueError: Raised when argument didn't match anything in enum.
"""
if isinstance(argument, self.enum_class):
return argument
elif not isinstance(argument, six.string_types):
raise ValueError(
'{} is not an enum member or a name of a member in {}'.format(
argument, self.enum_class))
key = EnumParser(
self._member_names, case_sensitive=self._case_sensitive).parse(argument)
if self._case_sensitive:
return self.enum_class[key]
else:
      # If EnumParser.parse() returns a value, we're guaranteed to find it
      # as a member of the class.
return next(value for name, value in self.enum_class.__members__.items()
if name.lower() == key.lower())
def flag_type(self):
"""See base class."""
return 'enum class'
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def serialize(self, value):
"""See base class."""
return self.list_sep.join([_helpers.str_or_unicode(x) for x in value])
class EnumClassListSerializer(ListSerializer):
"""A serializer for MultiEnumClass flags.
This serializer simply joins the output of `EnumClassSerializer` using a
  provided separator.
"""
def __init__(self, list_sep, **kwargs):
"""Initializes EnumClassListSerializer.
Args:
list_sep: String to be used as a separator when serializing
**kwargs: Keyword arguments to the `EnumClassSerializer` used to serialize
individual values.
"""
super(EnumClassListSerializer, self).__init__(list_sep)
self._element_serializer = EnumClassSerializer(**kwargs)
def serialize(self, value):
"""See base class."""
if isinstance(value, list):
return self.list_sep.join(
self._element_serializer.serialize(x) for x in value)
else:
return self._element_serializer.serialize(value)
class CsvListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def serialize(self, value):
"""Serializes a list as a CSV string or unicode."""
if six.PY2:
# In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.
output = io.BytesIO()
csv.writer(output).writerow([unicode(x).encode('utf-8') for x in value])
serialized_value = output.getvalue().decode('utf-8').strip()
else:
# In Python3 csv.writer expects a text stream.
output = io.StringIO()
csv.writer(output).writerow([str(x) for x in value])
serialized_value = output.getvalue().strip()
    # We need the returned value to be pure ASCII or unicode so that
    # when the XML help is generated it is usefully encodable.
return _helpers.str_or_unicode(serialized_value)
class EnumClassSerializer(ArgumentSerializer):
"""Class for generating string representations of an enum class flag value."""
def __init__(self, lowercase):
"""Initializes EnumClassSerializer.
Args:
lowercase: If True, enum member names are lowercased during serialization.
"""
self._lowercase = lowercase
def serialize(self, value):
"""Returns a serialized string of the Enum class value."""
as_string = _helpers.str_or_unicode(value.name)
return as_string.lower() if self._lowercase else as_string
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
super(BaseListParser, self).__init__()
self._token = token
self._name = name
self.syntactic_help = 'a %s separated list' % self._name
def parse(self, argument):
"""See base class."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
return [s.strip() for s in argument.split(self._token)]
def flag_type(self):
"""See base class."""
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
super(ListParser, self).__init__(',', 'comma')
def parse(self, argument):
"""Parses argument as comma-separated list of strings."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
try:
return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
except csv.Error as e:
# Provide a helpful report for case like
# --listflag="$(printf 'hello,\nworld')"
# IOW, list flag values containing naked newlines. This error
# was previously "reported" by allowing csv.Error to
# propagate.
raise ValueError('Unable to parse the value %r as a %s: %s'
% (argument, self.flag_type(), e))
def _custom_xml_dom_elements(self, doc):
elements = super(ListParser, self)._custom_xml_dom_elements(doc)
elements.append(_helpers.create_xml_dom_element(
doc, 'list_separator', repr(',')))
return elements
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self, comma_compat=False):
"""Initializer.
Args:
comma_compat: bool, whether to support comma as an additional separator.
If False then only whitespace is supported. This is intended only for
backwards compatibility with flags that used to be comma-separated.
"""
self._comma_compat = comma_compat
name = 'whitespace or comma' if self._comma_compat else 'whitespace'
super(WhitespaceSeparatedListParser, self).__init__(None, name)
def parse(self, argument):
"""Parses argument as whitespace-separated list of strings.
It also parses argument as comma-separated list of strings if requested.
Args:
argument: string argument passed in the commandline.
Returns:
[str], the parsed flag value.
"""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
if self._comma_compat:
argument = argument.replace(',', ' ')
return argument.split()
def _custom_xml_dom_elements(self, doc):
elements = super(WhitespaceSeparatedListParser, self
)._custom_xml_dom_elements(doc)
separators = list(string.whitespace)
if self._comma_compat:
separators.append(',')
separators.sort()
for sep_char in separators:
elements.append(_helpers.create_xml_dom_element(
doc, 'list_separator', repr(sep_char)))
return elements
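# Hedged usage sketch (added for illustration; not part of absl): parsers
# constructed with the same hashable positional arguments are shared through
# _ArgumentParserCache, and IntegerParser accepts 0o/0x prefixed strings.
if __name__ == '__main__':
  int_parser_a = IntegerParser(0)      # lower_bound passed positionally
  int_parser_b = IntegerParser(0)
  assert int_parser_a is int_parser_b  # same cached instance
  assert int_parser_a.parse('0x10') == 16
  assert int_parser_a.parse('0o20') == 16
  assert ListParser().parse('a, b,c') == ['a', 'b', 'c']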
|
import logging
from urllib.parse import urljoin
import requests
import yaml
from paasta_tools.utils import get_user_agent
log = logging.getLogger(__name__)
class TronRequestError(Exception):
pass
class TronClient:
"""
Client for interacting with a Tron master.
"""
def __init__(self, url):
self.master_url = url
def _request(self, method, url, data):
headers = {"User-Agent": get_user_agent()}
kwargs = {"url": urljoin(self.master_url, url), "headers": headers}
if method == "GET":
kwargs["params"] = data
response = requests.get(**kwargs)
elif method == "POST":
kwargs["data"] = data
response = requests.post(**kwargs)
else:
raise ValueError(f"Unrecognized method: {method}")
return self._get_response_or_error(response)
def _get_response_or_error(self, response):
try:
result = response.json()
if "error" in result:
raise TronRequestError(result["error"])
return result
except ValueError: # Not JSON
if not response.ok:
raise TronRequestError(
"Status code {status_code} for {url}: {reason}".format(
status_code=response.status_code,
url=response.url,
reason=response.reason,
)
)
return response.text
def _get(self, url, data=None):
return self._request("GET", url, data)
def _post(self, url, data=None):
return self._request("POST", url, data)
def update_namespace(self, namespace, new_config, skip_if_unchanged=True):
"""Updates the configuration for a namespace.
:param namespace: str
:param new_config: str, should be valid YAML.
:param skip_if_unchanged: boolean. If False, will send the update
even if the current config matches the new config.
"""
current_config = self._get("/api/config", {"name": namespace, "no_header": 1})
if skip_if_unchanged:
if yaml.safe_load(new_config) == yaml.safe_load(current_config["config"]):
log.debug("No change in config, skipping update.")
return
return self._post(
"/api/config",
data={
"name": namespace,
"config": new_config,
"hash": current_config["hash"],
"check": 0,
},
)
def list_namespaces(self):
"""Gets the namespaces that are currently configured."""
response = self._get("/api")
return response.get("namespaces", [])
def get_job_content(self, job: str) -> dict:
return self._get(f"/api/jobs/{job}/")
def get_latest_job_run_id(self, job_content: dict) -> str:
job_runs = sorted(
job_content.get("runs", []),
key=lambda k: (k["state"] != "scheduled", k["run_num"]),
reverse=True,
)
if not job_runs:
return None
return job_runs[0]["run_num"]
def get_action_run(self, job: str, action: str, run_id: str) -> dict:
return self._get(
f"/api/jobs/{job}/{run_id}/{action}?include_stderr=1&include_stdout=1&num_lines=10"
)
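# Hedged usage sketch (added for illustration; the master URL, namespace and
# job name below are hypothetical): reading namespaces, pushing a config, and
# looking up the most recent run of a job.
if __name__ == "__main__":
    client = TronClient("http://tron-master.example.com:8089")
    print(client.list_namespaces())
    client.update_namespace("example_namespace", "jobs: {}\n")
    job_content = client.get_job_content("example_namespace.example_job")
    print(client.get_latest_job_run_id(job_content))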
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import threading
import uuid
from absl import flags
from perfkitbenchmarker import network
from perfkitbenchmarker import providers
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.alicloud import util
from six.moves import range
FLAGS = flags.FLAGS
MAX_NAME_LENGTH = 128
class AliVpc(resource.BaseResource):
"""An object representing an AliCloud VPC."""
def __init__(self, name, region):
super(AliVpc, self).__init__()
self.region = region
self.id = None
self.name = name
def _Create(self):
"""Creates the VPC."""
create_cmd = util.ALI_PREFIX + [
'ecs',
'CreateVpc',
'--VpcName %s' % self.name,
'--RegionId %s' % self.region,
'--CidrBlock 10.0.0.0/16']
create_cmd = util.GetEncodedCmd(create_cmd)
stdout, _, _ = vm_util.IssueCommand(create_cmd, raise_on_failure=False)
response = json.loads(stdout)
self.id = response['VpcId']
def _Exists(self):
"""Returns true if the VPC exists."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeVpcs',
'--RegionId %s' % self.region,
'--VpcId %s' % self.id]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
vpcs = response['Vpcs']['Vpc']
assert len(vpcs) < 2, 'Too many VPCs.'
return len(vpcs) > 0
@vm_util.Retry(poll_interval=5, max_retries=30, log_errors=False)
def _WaitForVpcStatus(self, status_list):
"""Waits until VPC's status is in status_list"""
logging.info('Waits until the status of VPC is in status_list: %s',
status_list)
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeVpcs',
'--RegionId %s' % self.region,
'--VpcId %s' % self.id]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
vpcs = response['Vpcs']['Vpc']
assert len(vpcs) == 1
vpc_status = response['Vpcs']['Vpc'][0]['Status']
assert vpc_status in status_list
def _Delete(self):
"""Delete's the VPC."""
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteVpc',
'--RegionId %s' % self.region,
'--VpcId %s' % self.id]
delete_cmd = util.GetEncodedCmd(delete_cmd)
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
class AliVSwitch(resource.BaseResource):
"""An object representing an AliCloud VSwitch."""
def __init__(self, name, zone, vpc_id):
super(AliVSwitch, self).__init__()
self.region = util.GetRegionByZone(zone)
self.id = None
self.vpc_id = vpc_id
self.zone = zone
self.name = name
def _Create(self):
"""Creates the VSwitch."""
create_cmd = util.ALI_PREFIX + [
'ecs',
'CreateVSwitch',
'--VSwitchName %s' % self.name,
'--ZoneId %s' % self.zone,
'--RegionId %s' % self.region,
'--CidrBlock 10.0.0.0/24',
'--VpcId %s' % self.vpc_id,
]
create_cmd = util.GetEncodedCmd(create_cmd)
stdout, _, _ = vm_util.IssueCommand(create_cmd, raise_on_failure=False)
response = json.loads(stdout)
self.id = response['VSwitchId']
def _Delete(self):
"""Deletes the VSwitch."""
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteVSwitch',
'--RegionId %s' % self.region,
'--VSwitchId %s' % self.id]
delete_cmd = util.GetEncodedCmd(delete_cmd)
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _Exists(self):
"""Returns true if the VSwitch exists."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeVSwitches',
'--RegionId %s' % self.region,
'--VpcId %s' % self.vpc_id,
'--ZoneId %s' % self.zone]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
vswitches = response['VSwitches']['VSwitch']
assert len(vswitches) < 2, 'Too many VSwitches.'
return len(vswitches) > 0
class AliSecurityGroup(resource.BaseResource):
"""Object representing an AliCloud Security Group."""
def __init__(self, name, region, use_vpc=True, vpc_id=None):
super(AliSecurityGroup, self).__init__()
self.name = name
self.region = region
self.use_vpc = use_vpc
self.vpc_id = vpc_id
def _Create(self):
"""Creates the security group."""
create_cmd = util.ALI_PREFIX + [
'ecs',
'CreateSecurityGroup',
'--SecurityGroupName %s' % self.name,
'--RegionId %s' % self.region]
if self.use_vpc:
create_cmd.append('--VpcId %s' % self.vpc_id)
create_cmd = util.GetEncodedCmd(create_cmd)
stdout, _ = vm_util.IssueRetryableCommand(create_cmd)
self.group_id = json.loads(stdout)['SecurityGroupId']
def _Delete(self):
"""Deletes the security group."""
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteSecurityGroup',
'--RegionId %s' % self.region,
'--SecurityGroupId %s' % self.group_id]
delete_cmd = util.GetEncodedCmd(delete_cmd)
vm_util.IssueRetryableCommand(delete_cmd)
def _Exists(self):
"""Returns true if the security group exists."""
show_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeSecurityGroups',
'--RegionId %s' % self.region,
'--SecurityGroupId %s' % self.group_id]
show_cmd = util.GetEncodedCmd(show_cmd)
stdout, _ = vm_util.IssueRetryableCommand(show_cmd)
response = json.loads(stdout)
securityGroups = response['SecurityGroups']['SecurityGroup']
assert len(securityGroups) < 2, 'Too many securityGroups.'
if not securityGroups:
return False
return True
class AliFirewall(network.BaseFirewall):
"""An object representing the AliCloud Firewall."""
CLOUD = providers.ALICLOUD
def __init__(self):
self.firewall_set = set()
self._lock = threading.Lock()
def AllowIcmp(self, vm):
"""Opens the ICMP protocol on the firewall.
Args:
vm: The BaseVirtualMachine object to open the ICMP protocol for.
"""
if vm.is_static:
return
with self._lock:
authorize_cmd = util.ALI_PREFIX + [
'ecs',
'AuthorizeSecurityGroup',
'--IpProtocol ICMP',
'--PortRange -1/-1',
'--SourceCidrIp 0.0.0.0/0',
'--RegionId %s' % vm.region,
'--SecurityGroupId %s' % vm.group_id]
if FLAGS.ali_use_vpc:
authorize_cmd.append('--NicType intranet')
authorize_cmd = util.GetEncodedCmd(authorize_cmd)
vm_util.IssueRetryableCommand(authorize_cmd)
def AllowPort(self, vm, start_port, end_port=None, source_range=None):
"""Opens a port on the firewall.
Args:
vm: The BaseVirtualMachine object to open the port for.
start_port: The first local port in a range of ports to open.
end_port: The last port in a range of ports to open. If None, only
start_port will be opened.
source_range: unsupported at present.
"""
if not end_port:
end_port = start_port
for port in range(start_port, end_port + 1):
self._AllowPort(vm, port)
def _AllowPort(self, vm, port):
"""Opens a port on the firewall.
Args:
vm: The BaseVirtualMachine object to open the port for.
port: The local port to open.
"""
if vm.is_static:
return
entry = (port, vm.group_id)
if entry in self.firewall_set:
return
with self._lock:
if entry in self.firewall_set:
return
for protocol in ('tcp', 'udp'):
authorize_cmd = util.ALI_PREFIX + [
'ecs',
'AuthorizeSecurityGroup',
'--IpProtocol %s' % protocol,
'--PortRange %s/%s' % (port, port),
'--SourceCidrIp 0.0.0.0/0',
'--RegionId %s' % vm.region,
'--SecurityGroupId %s' % vm.group_id]
if FLAGS.ali_use_vpc:
authorize_cmd.append('--NicType intranet')
authorize_cmd = util.GetEncodedCmd(authorize_cmd)
vm_util.IssueRetryableCommand(authorize_cmd)
self.firewall_set.add(entry)
def DisallowAllPorts(self):
"""Closes all ports on the firewall."""
pass
class AliNetwork(network.BaseNetwork):
"""Object representing a AliCloud Network."""
CLOUD = providers.ALICLOUD
def __init__(self, spec):
super(AliNetwork, self).__init__(spec)
self.name = (
'perfkit-%s-%s' % (FLAGS.run_uri, str(uuid.uuid4())[-12:]))
self.region = util.GetRegionByZone(spec.zone)
self.use_vpc = FLAGS.ali_use_vpc
if self.use_vpc:
self.vpc = AliVpc(self.name, self.region)
self.vswitch = None
self.security_group = None
else:
self.security_group = \
AliSecurityGroup(self.name, self.region, use_vpc=False)
@vm_util.Retry()
def Create(self):
"""Creates the network."""
if self.use_vpc:
self.vpc.Create()
self.vpc._WaitForVpcStatus(['Available'])
if self.vswitch is None:
self.vswitch = AliVSwitch(self.name, self.zone, self.vpc.id)
self.vswitch.Create()
if self.security_group is None:
self.security_group = AliSecurityGroup(self.name,
self.region,
use_vpc=True,
vpc_id=self.vpc.id)
self.security_group.Create()
else:
self.security_group.Create()
def Delete(self):
"""Deletes the network."""
if self.use_vpc:
self.security_group.Delete()
self.vswitch.Delete()
self.security_group.Delete()
self.vpc.Delete()
else:
self.security_group.Delete()
|
import itertools
import json
import logging
import re
import threading
import uuid
from enum import Enum
from perfkitbenchmarker import context
from perfkitbenchmarker import errors
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
flags.DEFINE_integer('vpn_service_tunnel_count', None,
'Number of tunnels to create for each VPN Gateway pair.')
flags.DEFINE_integer('vpn_service_gateway_count', None,
'Number of VPN Gateways to create for each vm_group.')
flags.DEFINE_string('vpn_service_name', None,
'If set, use this name for VPN Service.')
flags.DEFINE_string('vpn_service_shared_key', None,
'If set, use this PSK for VPNs.')
flags.DEFINE_integer('vpn_service_ike_version', None, 'IKE version')
class VPN_ROUTING_TYPE(Enum):
STATIC = 'static'
DYNAMIC = 'dynamic'
flags.DEFINE_enum(
'vpn_service_routing_type', None,
[VPN_ROUTING_TYPE.STATIC.value, VPN_ROUTING_TYPE.DYNAMIC.value],
'static or dynamic(BGP)')
FLAGS = flags.FLAGS
def GetVPNServiceClass():
"""Gets the VPNService class.
Args:
Returns:
Implementation class
"""
return resource.GetResourceClass(VPNService)
class VPN(object):
"""An object representing the VPN.
A VPN instance manages tunnel configurations for exactly 1 pair of endpoints.
"""
def __init__(self, *args, **kwargs):
return object.__init__(self, *args, **kwargs)
def getKeyFromGatewayPair(self, gateway_pair, suffix=''):
"""Return the VPN key for a pair of endpoints.
Args:
gateway_pair: A tuple of 2 VPN gateways which define the VPN tunnel.
      suffix: A unique suffix if multiple tunnels between this gateway pair
        exist.
Returns:
string. The VPN key.
"""
key = 'vpn' + ''.join(
gateway for gateway in gateway_pair) + suffix + FLAGS.run_uri
return key
def Create(self, gateway_pair, suffix=''):
self.GatewayPair = gateway_pair
self.name = self.getKeyFromGatewayPair(gateway_pair)
self.tunnel_config = TunnelConfig(tunnel_name=self.name, suffix=suffix)
def Delete(self):
pass
def GetVPN(self, gateway_pair, suffix=''):
"""Gets a VPN object for the gateway_pair or creates one if none exists.
    Args:
      gateway_pair: a tuple of two VpnGateways.
      suffix: A unique suffix if multiple tunnels between this gateway pair
        exist.
    Returns:
      VPN. The VPN object for the gateway pair.
    """
benchmark_spec = context.GetThreadBenchmarkSpec()
if benchmark_spec is None:
raise errors.Error('GetVPN called in a thread without a '
'BenchmarkSpec.')
with benchmark_spec.vpns_lock:
key = self.getKeyFromGatewayPair(gateway_pair, suffix)
if key not in benchmark_spec.vpns:
self.Create(gateway_pair, suffix)
benchmark_spec.vpns[key] = self
return benchmark_spec.vpns[key]
def ConfigureTunnel(self):
"""Configure the VPN tunnel."""
benchmark_spec = context.GetThreadBenchmarkSpec()
vpn_gateway_0 = benchmark_spec.vpn_gateways[self.GatewayPair[0]]
vpn_gateway_1 = benchmark_spec.vpn_gateways[self.GatewayPair[1]]
both_gateways_are_passive = (vpn_gateway_0.require_target_to_init
and vpn_gateway_1.require_target_to_init)
    assert not both_gateways_are_passive, (
        "Can't connect two passive VPN gateways")
tunnel_config_hash = None
# In this loop we hand each vpn_gateway endpoint the tunnel_config.
    # Each endpoint will try to set up its tunnel from the latest target
    # endpoint dictionary and update its own endpoint dictionary with any new
    # values. This continues until either both endpoint tunnels have enough
    # information to set up their tunnel (isTunnelConfigured is True), or
    # there isn't enough information to configure the tunnel (raises
    # ValueError).
while not self.isTunnelConfigured():
vpn_gateway_0.ConfigureTunnel(self.tunnel_config)
vpn_gateway_1.ConfigureTunnel(self.tunnel_config)
if self.tunnel_config.hash() == tunnel_config_hash:
raise ValueError('Not enough info to configure tunnel.')
tunnel_config_hash = self.tunnel_config.hash()
tunnel_status = self.isTunnelReady()
logging.debug('Tunnel is ready?: %s ', tunnel_status)
def isTunnelConfigured(self):
"""Returns True if the tunnel configuration is complete.
Returns:
boolean.
"""
is_tunnel_configured = False
if len(self.tunnel_config.endpoints) == 2:
if (self.tunnel_config.endpoints[self.GatewayPair[0]]['is_configured'] and
self.tunnel_config.endpoints[self.GatewayPair[1]]['is_configured']):
logging.debug('Tunnel is configured.')
is_tunnel_configured = True
return is_tunnel_configured
@vm_util.Retry(retryable_exceptions=errors.Resource.RetryableCreationError)
def isTunnelReady(self):
"""Returns True if the tunnel is up.
Returns:
boolean.
"""
benchmark_spec = context.GetThreadBenchmarkSpec()
logging.debug('Tunnel endpoints configured. Waiting for tunnel...')
ready = (
benchmark_spec.vpn_gateways[self.GatewayPair[0]].IsTunnelReady(
self.tunnel_config.endpoints[self.GatewayPair[0]]['tunnel_id']) and
benchmark_spec.vpn_gateways[self.GatewayPair[1]].IsTunnelReady(
self.tunnel_config.endpoints[self.GatewayPair[1]]['tunnel_id']))
if not ready:
raise errors.Resource.RetryableCreationError()
return ready
class TunnelConfig(object):
"""Object to hold all parms needed to configure a tunnel.
  tunnel_config =
    { tunnel_name = ''
      routing = ''
      psk = ''
      endpoints = [ep1={...}, ep2={...}]
    }
  endpoint =
    { name = ''
      ip = ''
      cidr = ''
      require_target_to_init = t/f
      tunnel_id = ''
    }
"""
_tunnelconfig_lock = threading.Lock()
def __init__(self, **kwargs):
super(TunnelConfig, self).__init__()
self.tunnel_name = kwargs.get('tunnel_name', 'unnamed_tunnel')
self.endpoints = {}
self.routing = kwargs.get('routing', None)
self.ike_version = kwargs.get('ike_version', 2)
self.shared_key = kwargs.get('shared_key', 'key' + FLAGS.run_uri)
self.suffix = kwargs.get('suffix', '')
def setConfig(self, **kwargs):
with self._tunnelconfig_lock:
for key in kwargs:
setattr(self, key, kwargs[key])
def __str__(self):
return str(json.dumps(self.__dict__, sort_keys=True, default=str))
def hash(self):
"""Hash the current tunnel config.
Returns:
      int: An integer that changes if any properties have changed.
"""
return hash(json.dumps(self.__dict__, sort_keys=True, default=str))
class VPNService(resource.BaseResource):
"""Service class to manage VPN lifecycle."""
RESOURCE_TYPE = 'BaseVPNService'
REQUIRED_ATTRS = ['SERVICE']
def __init__(self, spec):
"""Initialize the VPN Service object.
Args:
      spec: spec of the VPN service.
"""
super(VPNService, self).__init__()
self.name = spec.name
self.tunnel_count = spec.tunnel_count
self.gateway_count = FLAGS.vpn_service_gateway_count
self.routing = spec.routing_type
self.ike_version = spec.ike_version
self.shared_key = spec.shared_key
self.spec = spec
self.vpns = {}
self.vpn_properties = {
'tunnel_count': self.tunnel_count,
'gateway_count': self.gateway_count,
'routing': self.routing,
'ike_version': self.ike_version,
'shared_key': self.shared_key,
}
def GetResourceMetadata(self):
"""Returns a dictionary of metadata about the resource."""
if not self.created:
return {}
result = self.metadata.copy()
if self.routing is not None:
result['vpn_service_routing_type'] = self.routing
if self.ike_version is not None:
result['vpn_service_ike_version'] = self.ike_version
if self.tunnel_count is not None:
result['vpn_service_tunnel_count'] = self.tunnel_count
if self.gateway_count is not None:
result['gateway_count'] = self.gateway_count
# if self.psk is not None: # probably don't want to publish this.
# result['vpn_service_shared_key'] = self.psk
return result
def _Create(self):
"""Creates VPN objects for VpnGateway pairs."""
benchmark_spec = context.GetThreadBenchmarkSpec()
if benchmark_spec is None:
      raise errors.Error('Create VPN Service called in a thread without a '
                         'BenchmarkSpec.')
self.vpn_gateway_pairs = self.GetVpnGatewayPairs(
benchmark_spec.vpn_gateways)
for gateway_pair in self.vpn_gateway_pairs:
# creates the vpn if it doesn't exist and registers in bm_spec.vpns
suffix = self.GetNewSuffix()
vpn_id = VPN().getKeyFromGatewayPair(gateway_pair, suffix)
self.vpns[vpn_id] = VPN().GetVPN(gateway_pair, suffix)
self.vpns[vpn_id].tunnel_config.setConfig(**self.vpn_properties)
vm_util.RunThreaded(lambda vpn: self.vpns[vpn].ConfigureTunnel(),
list(self.vpns.keys()))
def _Delete(self):
pass
def GetNewSuffix(self):
"""Names for tunnels, fr's, routes, etc need to be unique.
Returns:
string. A random string value.
"""
return format(uuid.uuid4().fields[1], 'x')
def GetMetadata(self):
"""Return a dictionary of the metadata for VPNs created."""
basic_data = {
'vpn_service_name': self.name,
'vpn_service_routing_type': self.routing,
'vpn_service_ike_version': self.ike_version,
'vpn_service_tunnel_count': self.tunnel_count,
'vpn_service_gateway_count': self.gateway_count,
# 'vpn_service_psk': self.psk,
}
return basic_data
def GetVpnGatewayPairs(self, vpn_gateways):
"""Returns pairs of gateways to create VPNs between.
Currently creates a pair between all non-matching region endpoints (mesh).
--vpn_service_gateway_count flag dictates how many gateways are created in
each vm_group(region).
--vpn_service_tunnel_count flag dictates how many VPN tunnels to create for
each gateway pair.
@TODO Add more pairing strategies as needed.
Args:
vpn_gateways: The dict of gateways created.
Returns:
list. The list of tuples of gateway pairs to create VPNs for.
"""
vpn_gateway_pairs = itertools.combinations(vpn_gateways, 2)
r = re.compile(
r'(?P<gateway_prefix>.*-.*-.*)?-(?P<gateway_tnum>[0-9])-(?P<run_id>.*)')
def filterGateways(gateway_pair):
return r.search(gateway_pair[0]).group('gateway_prefix') != r.search(
gateway_pair[1]).group('gateway_prefix')
return list(filter(filterGateways, vpn_gateway_pairs))
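# Hedged sketch (added for illustration; the gateway names are made up): the
# regex used by GetVpnGatewayPairs keeps only pairs whose gateway_prefix
# differs, i.e. gateways belonging to different vm_groups/regions.
if __name__ == '__main__':
  sample_gateways = ['vpngw-us-east1-0-runid',
                     'vpngw-us-east1-1-runid',
                     'vpngw-eu-west1-0-runid']
  pattern = re.compile(
      r'(?P<gateway_prefix>.*-.*-.*)?-(?P<gateway_tnum>[0-9])-(?P<run_id>.*)')
  cross_region_pairs = [
      pair for pair in itertools.combinations(sample_gateways, 2)
      if pattern.search(pair[0]).group('gateway_prefix') !=
      pattern.search(pair[1]).group('gateway_prefix')
  ]
  print(cross_region_pairs)  # only the us-east1 <-> eu-west1 pairs remain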
|
import logging
from velbus.util import VelbusException
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_FLASH,
ATTR_TRANSITION,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from . import VelbusEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Velbus light based on config_entry."""
cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
modules_data = hass.data[DOMAIN][entry.entry_id]["light"]
entities = []
for address, channel in modules_data:
module = cntrl.get_module(address)
entities.append(VelbusLight(module, channel))
async_add_entities(entities)
class VelbusLight(VelbusEntity, LightEntity):
"""Representation of a Velbus light."""
@property
def name(self):
"""Return the display name of this entity."""
if self._module.light_is_buttonled(self._channel):
return f"LED {self._module.get_name(self._channel)}"
return self._module.get_name(self._channel)
@property
def supported_features(self):
"""Flag supported features."""
if self._module.light_is_buttonled(self._channel):
return SUPPORT_FLASH
return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
@property
def entity_registry_enabled_default(self):
"""Disable Button LEDs by default."""
if self._module.light_is_buttonled(self._channel):
return False
return True
@property
def is_on(self):
"""Return true if the light is on."""
return self._module.is_on(self._channel)
@property
def brightness(self):
"""Return the brightness of the light."""
return int((self._module.get_dimmer_state(self._channel) * 255) / 100)
def turn_on(self, **kwargs):
"""Instruct the Velbus light to turn on."""
if self._module.light_is_buttonled(self._channel):
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_LONG:
attr, *args = "set_led_state", self._channel, "slow"
elif kwargs[ATTR_FLASH] == FLASH_SHORT:
attr, *args = "set_led_state", self._channel, "fast"
else:
attr, *args = "set_led_state", self._channel, "on"
else:
attr, *args = "set_led_state", self._channel, "on"
else:
if ATTR_BRIGHTNESS in kwargs:
# Make sure a low but non-zero value is not rounded down to zero
if kwargs[ATTR_BRIGHTNESS] == 0:
brightness = 0
else:
brightness = max(int((kwargs[ATTR_BRIGHTNESS] * 100) / 255), 1)
attr, *args = (
"set_dimmer_state",
self._channel,
brightness,
kwargs.get(ATTR_TRANSITION, 0),
)
else:
attr, *args = (
"restore_dimmer_state",
self._channel,
kwargs.get(ATTR_TRANSITION, 0),
)
try:
getattr(self._module, attr)(*args)
except VelbusException as err:
_LOGGER.error("A Velbus error occurred: %s", err)
def turn_off(self, **kwargs):
"""Instruct the velbus light to turn off."""
if self._module.light_is_buttonled(self._channel):
attr, *args = "set_led_state", self._channel, "off"
else:
attr, *args = (
"set_dimmer_state",
self._channel,
0,
kwargs.get(ATTR_TRANSITION, 0),
)
try:
getattr(self._module, attr)(*args)
except VelbusException as err:
_LOGGER.error("A Velbus error occurred: %s", err)
|
from unittest import mock
import sip
import pytest
from PyQt5.QtCore import QObject
from helpers import utils
from qutebrowser.misc import throttle
DELAY = 500 if utils.ON_CI else 100
@pytest.fixture
def func():
return mock.Mock(spec=[])
@pytest.fixture
def throttled(func):
return throttle.Throttle(func, DELAY)
def test_immediate(throttled, func, qapp):
throttled("foo")
throttled("foo")
func.assert_called_once_with("foo")
def test_immediate_kwargs(throttled, func, qapp):
throttled(foo="bar")
throttled(foo="bar")
func.assert_called_once_with(foo="bar")
def test_delayed(throttled, func, qtbot):
throttled("foo")
throttled("foo")
throttled("foo")
throttled("bar")
func.assert_called_once_with("foo")
func.reset_mock()
qtbot.wait(2 * DELAY)
func.assert_called_once_with("bar")
def test_delayed_immediate_delayed(throttled, func, qtbot):
throttled("foo")
throttled("foo")
throttled("foo")
throttled("bar")
func.assert_called_once_with("foo")
func.reset_mock()
qtbot.wait(4 * DELAY)
func.assert_called_once_with("bar")
func.reset_mock()
throttled("baz")
throttled("baz")
throttled("bop")
func.assert_called_once_with("baz")
func.reset_mock()
qtbot.wait(2 * DELAY)
func.assert_called_once_with("bop")
def test_delayed_delayed(throttled, func, qtbot):
throttled("foo")
throttled("foo")
throttled("foo")
throttled("bar")
func.assert_called_once_with("foo")
func.reset_mock()
qtbot.wait(int(1.5 * DELAY))
func.assert_called_once_with("bar")
func.reset_mock()
throttled("baz")
throttled("baz")
throttled("bop")
qtbot.wait(2 * DELAY)
func.assert_called_once_with("bop")
func.reset_mock()
def test_cancel(throttled, func, qtbot):
throttled("foo")
throttled("foo")
throttled("foo")
throttled("bar")
func.assert_called_once_with("foo")
func.reset_mock()
throttled.cancel()
qtbot.wait(int(1.5 * DELAY))
func.assert_not_called()
func.reset_mock()
def test_set(func, qtbot):
throttled = throttle.Throttle(func, DELAY)
throttled.set_delay(DELAY)
throttled("foo")
throttled("foo")
throttled("foo")
throttled("bar")
func.assert_called_once_with("foo")
func.reset_mock()
qtbot.wait(int(1.5 * DELAY))
func.assert_called_once_with("bar")
func.reset_mock()
def test_deleted_object(qtbot):
class Obj(QObject):
def func(self):
self.setObjectName("test")
obj = Obj()
throttled = throttle.Throttle(obj.func, DELAY, parent=obj)
throttled()
throttled()
sip.delete(obj)
qtbot.wait(int(1.5 * DELAY))
|
import operator
from stash.tests.stashtest import StashTestCase
class LibVersionTests(StashTestCase):
"""Tests for the 'libversion' module"""
def test_is_loaded(self):
"""assert that the 'libversion' module is loaded by StaSh"""
assert hasattr(self.stash, "libversion")
self.assertIsNotNone(self.stash)
def test_import(self):
"""test that the libversion module can be imported"""
# $STASH_ROOT/lib/ *should* be in sys.path, thus an import should be possible
import libversion
def test_version_specifier_parse(self):
"""test 'libversion.VersionSpecifier.parse_requirement()'"""
to_test = [
# format: (requirement_str, pkg_name, [(op1, v1), ...], extras)
("noversion",
"noversion",
None,
[]),
("test==1.2.3",
"test",
[(operator.eq,
"1.2.3")],
[]),
("test_2 == 7.3.12",
"test_2",
[(operator.eq,
"7.3.12")],
[]),
("with1number == 923.1512.12412",
"with1number",
[(operator.eq,
"923.1512.12412")],
[]),
("tge >= 2.0.1",
"tge",
[(operator.ge,
"2.0.1")],
[]),
("tgt > 3.0.0",
"tgt",
[(operator.gt,
"3.0.0")],
[]),
("tne != 7.0.0",
"tne",
[(operator.ne,
"7.0.0")],
[]),
("pkg_b (< 0.7.0",
"pkg_b",
[(operator.lt,
"0.7.0")],
[]),
("nondigitver <= 1.5.3b",
"nondigitver",
[(operator.le,
"1.5.3b")],
[]),
("wrapt < 2, >= 1",
"wrapt",
[(operator.lt,
"2"),
(operator.ge,
"1")],
[]),
("extras[simple]",
"extras",
None,
["simple"]),
("extras[multi, values]",
"extras",
None,
["multi",
"values"]),
("extras == 9.8.7 [with_version]",
"extras",
[(operator.eq,
"9.8.7")],
["with_version"]),
]
for req, pkg, spec, exp_extras in to_test:
name, ver_spec, extras = self.stash.libversion.VersionSpecifier.parse_requirement(req)
self.assertEqual(name, pkg)
if self.stash.PY3:
# in py3, assertItemsEqual has been renamed to assertCountEqual
if spec is not None:
self.assertCountEqual(ver_spec.specs, spec)
self.assertCountEqual(exp_extras, extras)
else:
if spec is not None:
self.assertItemsEqual(ver_spec.specs, spec)
self.assertItemsEqual(exp_extras, extras)
def test_version_specifier_match(self):
"""test 'libversion.VersionSpecifier().match()'"""
to_test = [
# format: (requirement_str, [(testversion, result)])
(
"eqtest == 1.0.0",
[
("1.0.0",
True),
("1.0.1",
False),
("1.0.0.0",
False),
("11.0.0",
False),
("0.1.0.0",
False),
("0.9.0",
False)
]
),
(
"lttest <= 2.0.0",
[
("2.0.0",
True),
("2.0.1",
False),
("3.0.0",
False),
("1.0.0",
True),
("1.9.0",
True),
("11.0.0",
False),
("1.9.2b",
True)
]
),
(
"gttest >= 3.5.0",
[("3.5.0",
True),
("3.4.9",
False),
("3.6.0",
True),
("11.0.0",
True),
("3.5.0a",
False),
("1.0.0",
False)]
),
("eqstr == str",
[("1.0.0",
False),
("str",
True),
("str2",
False),
("s",
False),
("99.999.99",
False)]),
("wrapt < 2, >= 1",
[("0.0.1",
False),
("1.0.0",
True),
("1.5.0",
True),
("2.0.0",
False),
("1.9.9",
True)]),
]
for rs, test in to_test:
_, ver_spec, extras = self.stash.libversion.VersionSpecifier.parse_requirement(rs)
for ts, expected in test:
result = ver_spec.match(ts)
self.assertEqual(result, expected)
def test_sort_versions_main(self):
"""test 'libversion.sort_versions()' for main versions"""
raw = ["1.0.0", "0.5.0", "0.6.0", "0.5.9", "11.0.3", "11.0.4", "0.1.0", "5.7.0"]
expected = ["11.0.4", "11.0.3", "5.7.0", "1.0.0", "0.6.0", "0.5.9", "0.5.0", "0.1.0"]
sortedres = self.stash.libversion.sort_versions(raw)
self.assertEqual(sortedres, expected)
def test_sort_versions_post(self):
"""test 'libversion.sort_versions()' for post release number"""
raw = ["1.0.0", "1.0.0.post2", "1.0.0.post3", "1.0.0-post1", "1.0.0.post"]
expected = ["1.0.0.post3", "1.0.0.post2", "1.0.0-post1", "1.0.0.post", "1.0.0"]
sortedres = self.stash.libversion.sort_versions(raw)
self.assertEqual(sortedres, expected)
def test_sort_versions_type(self):
"""test 'libversion.sort_versions()' for release type"""
raw = [
"1.0.0b",
"1.0.0rc",
"1.0.0a",
"1.0.0a2",
"1.0.0",
"1.0.0.post1",
"1.0.0a.dev2",
"1.0.0a.dev3",
"1!0.5.0",
"0.5.0",
"1.0.0a.dev1"
]
expected = [
"1!0.5.0",
"1.0.0.post1",
"1.0.0",
"1.0.0rc",
"1.0.0b",
"1.0.0a2",
"1.0.0a",
"1.0.0a.dev3",
"1.0.0a.dev2",
"1.0.0a.dev1",
"0.5.0"
]
sortedres = self.stash.libversion.sort_versions(raw)
self.assertEqual(sortedres, expected)
def test_version_parse(self):
"""test 'libversion.Version.parse()''"""
to_test = [
# format: (s, {key_to_check: expected_value, ...})
("1.2.3",
{
"epoch": 0,
"versiontuple": (1,
2,
3),
"is_postrelease": False
}),
("1!2.3",
{
"epoch": 1,
"versiontuple": (2,
3),
"is_devrelease": False
}),
(
"5.5.4a.post5",
{
"versiontuple": (5,
5,
4),
"rtype": self.stash.libversion.Version.TYPE_ALPHA,
"is_postrelease": True,
"postrelease": 5
}
),
(
"0.0.1rc5.dev7",
{
"versiontuple": (0,
0,
1),
"rtype": self.stash.libversion.Version.TYPE_RELEASE_CANDIDATE,
"subversion": 5,
"is_devrelease": True,
"devrelease": 7,
"is_postrelease": False
}
),
("0.8.4.post.dev",
{
"versiontuple": (0,
8,
4),
"is_postrelease": True,
"postrelease": 0,
"is_devrelease": True
}),
]
for vs, ea in to_test:
version = self.stash.libversion.Version.parse(vs)
self.assertIsInstance(version, self.stash.libversion.Version)
for ean in ea.keys():
eav = ea[ean]
assert hasattr(version, ean)
self.assertEqual(getattr(version, ean), eav)
def test_version_cmp(self):
"""test comparsion of 'libversion.Version()'-instances"""
to_test = [
# format: (vs1, op, vs2, res)
# testdata for general equality
("1.0.0",
operator.eq,
"1.0.0",
True),
("1.0.0",
operator.eq,
"0!1.0.0",
True),
("1.0.0",
operator.eq,
"1!1.0.0",
False),
("1.0.0",
operator.eq,
"1.0.0.post",
False),
("1.0.0",
operator.eq,
"1.0.0a",
False),
("1.0.0",
operator.eq,
"1.0.0b",
False),
("1.0.0.post1",
operator.eq,
"1.0.0.post2",
False),
("1.0.0.post",
operator.eq,
"1.0.0.post0",
True),
("1.0.0.post",
operator.eq,
"1.0.0.dev",
False),
            # testdata for main version comparison
("1.2.3",
operator.eq,
"1.5.0",
False),
("2.0.3",
operator.gt,
"1.9.7",
True),
("1.9.7",
operator.gt,
"2.0.3",
False),
("1.9.7",
operator.lt,
"2.0.3",
True),
("1.9.7",
operator.lt,
"1.9.7",
False),
("1.9.7",
operator.le,
"1.9.7",
True),
("2.4.9",
operator.gt,
"11.0.5",
False),
("2.4.9",
operator.gt,
"1.0.5",
True),
("2.4.9",
operator.gt,
"2.5.1",
False),
("2.5.2",
operator.gt,
"2.5.1",
True),
            # testdata for rtype comparison
("1.0.0",
operator.eq,
"1.0.0a",
False),
("1.0.0",
operator.eq,
"1.0.0b",
False),
("1.0.0",
operator.eq,
"1.0.0rc",
False),
("1.0.0a",
operator.eq,
"1.0.0b",
False),
("1.0.0a",
operator.eq,
"1.0.0rc",
False),
("1.0.0b",
operator.eq,
"1.0.0rc",
False),
("1.0.0",
operator.gt,
"1.0.0rc",
True),
("1.0.0rc",
operator.gt,
"1.0.0b",
True),
("1.0.0b",
operator.gt,
"1.0.0a",
True),
("1.0.0",
operator.gt,
"1.0.0b",
True),
("1.0.0",
operator.gt,
"1.0.0a",
True),
("1.0.0rc",
operator.gt,
"1.0.0a",
True),
            # testdata for dev version comparison
("1.0.0",
operator.gt,
"1.0.0.dev",
True),
("1.0.0",
operator.gt,
"1.0.0.dev1000",
True),
("1.0.0.dev",
operator.gt,
"1.0.0",
False),
("1.0.0.dev2",
operator.gt,
"1.0.0.dev",
True),
]
for vs1, op, vs2, expected in to_test:
v1 = self.stash.libversion.Version.parse(vs1)
v2 = self.stash.libversion.Version.parse(vs2)
self.assertEqual(op(v1, v2), expected)
|
import traceback
import enum
from typing import TYPE_CHECKING, Sequence
from PyQt5.QtCore import pyqtSlot, Qt, QObject
from PyQt5.QtGui import QKeySequence, QKeyEvent
from qutebrowser.browser import hints
from qutebrowser.commands import cmdexc
from qutebrowser.config import config
from qutebrowser.keyinput import basekeyparser, keyutils, macros
from qutebrowser.utils import usertypes, log, message, objreg, utils
if TYPE_CHECKING:
from qutebrowser.commands import runners
STARTCHARS = ":/?"
class LastPress(enum.Enum):
"""Whether the last keypress filtered a text or was part of a keystring."""
none = enum.auto()
filtertext = enum.auto()
keystring = enum.auto()
class CommandKeyParser(basekeyparser.BaseKeyParser):
"""KeyChainParser for command bindings.
Attributes:
_commandrunner: CommandRunner instance.
"""
def __init__(self, *, mode: usertypes.KeyMode,
win_id: int,
commandrunner: 'runners.CommandRunner',
parent: QObject = None,
do_log: bool = True,
passthrough: bool = False,
supports_count: bool = True) -> None:
super().__init__(mode=mode, win_id=win_id, parent=parent,
do_log=do_log, passthrough=passthrough,
supports_count=supports_count)
self._commandrunner = commandrunner
def execute(self, cmdstr: str, count: int = None) -> None:
try:
self._commandrunner.run(cmdstr, count)
except cmdexc.Error as e:
message.error(str(e), stack=traceback.format_exc())
class NormalKeyParser(CommandKeyParser):
"""KeyParser for normal mode with added STARTCHARS detection and more.
Attributes:
_partial_timer: Timer to clear partial keypresses.
"""
def __init__(self, *, win_id: int,
commandrunner: 'runners.CommandRunner',
parent: QObject = None) -> None:
super().__init__(mode=usertypes.KeyMode.normal, win_id=win_id,
commandrunner=commandrunner, parent=parent)
self._partial_timer = usertypes.Timer(self, 'partial-match')
self._partial_timer.setSingleShot(True)
self._partial_timer.timeout.connect(self._clear_partial_match)
self._inhibited = False
self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited')
self._inhibited_timer.setSingleShot(True)
def __repr__(self) -> str:
return utils.get_repr(self)
def handle(self, e: QKeyEvent, *,
dry_run: bool = False) -> QKeySequence.SequenceMatch:
"""Override to abort if the key is a startchar."""
txt = e.text().strip()
if self._inhibited:
self._debug_log("Ignoring key '{}', because the normal mode is "
"currently inhibited.".format(txt))
return QKeySequence.NoMatch
match = super().handle(e, dry_run=dry_run)
if match == QKeySequence.PartialMatch and not dry_run:
timeout = config.val.input.partial_timeout
if timeout != 0:
self._partial_timer.setInterval(timeout)
self._partial_timer.start()
return match
def set_inhibited_timeout(self, timeout: int) -> None:
"""Ignore keypresses for the given duration."""
if timeout != 0:
self._debug_log("Inhibiting the normal mode for {}ms.".format(
timeout))
self._inhibited = True
self._inhibited_timer.setInterval(timeout)
self._inhibited_timer.timeout.connect(self._clear_inhibited)
self._inhibited_timer.start()
@pyqtSlot()
def _clear_partial_match(self) -> None:
"""Clear a partial keystring after a timeout."""
self._debug_log("Clearing partial keystring {}".format(
self._sequence))
self._sequence = keyutils.KeySequence()
self.keystring_updated.emit(str(self._sequence))
@pyqtSlot()
def _clear_inhibited(self) -> None:
"""Reset inhibition state after a timeout."""
self._debug_log("Releasing inhibition state of normal mode.")
self._inhibited = False
class HintKeyParser(basekeyparser.BaseKeyParser):
"""KeyChainParser for hints.
Attributes:
_filtertext: The text to filter with.
_hintmanager: The HintManager to use.
_last_press: The nature of the last keypress, a LastPress member.
"""
def __init__(self, *, win_id: int,
commandrunner: 'runners.CommandRunner',
hintmanager: hints.HintManager,
parent: QObject = None) -> None:
super().__init__(mode=usertypes.KeyMode.hint, win_id=win_id,
parent=parent, supports_count=False)
self._command_parser = CommandKeyParser(mode=usertypes.KeyMode.hint,
win_id=win_id,
commandrunner=commandrunner,
parent=self,
supports_count=False)
self._hintmanager = hintmanager
self._filtertext = ''
self._last_press = LastPress.none
self.keystring_updated.connect(self._hintmanager.handle_partial_key)
def _handle_filter_key(self, e: QKeyEvent) -> QKeySequence.SequenceMatch:
"""Handle keys for string filtering."""
log.keyboard.debug("Got filter key 0x{:x} text {}".format(
e.key(), e.text()))
if e.key() == Qt.Key_Backspace:
log.keyboard.debug("Got backspace, mode {}, filtertext '{}', "
"sequence '{}'".format(self._last_press,
self._filtertext,
self._sequence))
if self._last_press != LastPress.keystring and self._filtertext:
self._filtertext = self._filtertext[:-1]
self._hintmanager.filter_hints(self._filtertext)
return QKeySequence.ExactMatch
elif self._last_press == LastPress.keystring and self._sequence:
self._sequence = self._sequence[:-1]
self.keystring_updated.emit(str(self._sequence))
if not self._sequence and self._filtertext:
# Switch back to hint filtering mode (this can happen only
# in numeric mode after the number has been deleted).
self._hintmanager.filter_hints(self._filtertext)
self._last_press = LastPress.filtertext
return QKeySequence.ExactMatch
else:
return QKeySequence.NoMatch
elif self._hintmanager.current_mode() != 'number':
return QKeySequence.NoMatch
elif not e.text():
return QKeySequence.NoMatch
else:
self._filtertext += e.text()
self._hintmanager.filter_hints(self._filtertext)
self._last_press = LastPress.filtertext
return QKeySequence.ExactMatch
def handle(self, e: QKeyEvent, *,
dry_run: bool = False) -> QKeySequence.SequenceMatch:
"""Handle a new keypress and call the respective handlers."""
if dry_run:
return super().handle(e, dry_run=True)
assert not dry_run
if (self._command_parser.handle(e, dry_run=True) !=
QKeySequence.NoMatch):
log.keyboard.debug("Handling key via command parser")
self.clear_keystring()
return self._command_parser.handle(e)
match = super().handle(e)
if match == QKeySequence.PartialMatch:
self._last_press = LastPress.keystring
elif match == QKeySequence.ExactMatch:
self._last_press = LastPress.none
elif match == QKeySequence.NoMatch:
# We couldn't find a keychain so we check if it's a special key.
return self._handle_filter_key(e)
else:
raise ValueError("Got invalid match type {}!".format(match))
return match
def update_bindings(self, strings: Sequence[str],
preserve_filter: bool = False) -> None:
"""Update bindings when the hint strings changed.
Args:
strings: A list of hint strings.
preserve_filter: Whether to keep the current value of
`self._filtertext`.
"""
self._read_config()
self.bindings.update({keyutils.KeySequence.parse(s): s
for s in strings})
if not preserve_filter:
self._filtertext = ''
def execute(self, cmdstr: str, count: int = None) -> None:
assert count is None
self._hintmanager.handle_partial_key(cmdstr)
class RegisterKeyParser(CommandKeyParser):
"""KeyParser for modes that record a register key.
Attributes:
_register_mode: One of KeyMode.set_mark, KeyMode.jump_mark,
KeyMode.record_macro and KeyMode.run_macro.
"""
def __init__(self, *, win_id: int,
mode: usertypes.KeyMode,
commandrunner: 'runners.CommandRunner',
parent: QObject = None) -> None:
super().__init__(mode=usertypes.KeyMode.register, win_id=win_id,
commandrunner=commandrunner, parent=parent,
supports_count=False)
self._register_mode = mode
def handle(self, e: QKeyEvent, *,
dry_run: bool = False) -> QKeySequence.SequenceMatch:
"""Override to always match the next key and use the register."""
match = super().handle(e, dry_run=dry_run)
if match or dry_run:
return match
if keyutils.is_special(Qt.Key(e.key()), e.modifiers()):
# this is not a proper register key, let it pass and keep going
return QKeySequence.NoMatch
key = e.text()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
try:
if self._register_mode == usertypes.KeyMode.set_mark:
tabbed_browser.set_mark(key)
elif self._register_mode == usertypes.KeyMode.jump_mark:
tabbed_browser.jump_mark(key)
elif self._register_mode == usertypes.KeyMode.record_macro:
macros.macro_recorder.record_macro(key)
elif self._register_mode == usertypes.KeyMode.run_macro:
macros.macro_recorder.run_macro(self._win_id, key)
else:
raise ValueError("{} is not a valid register mode".format(
self._register_mode))
except cmdexc.Error as err:
message.error(str(err), stack=traceback.format_exc())
self.request_leave.emit(
self._register_mode, "valid register key", True)
return QKeySequence.ExactMatch
|
from django.test import SimpleTestCase
from weblate.utils.hash import (
calculate_checksum,
calculate_hash,
checksum_to_hash,
hash_to_checksum,
)
class HashTest(SimpleTestCase):
def test_hash(self):
"""Ensure hash is not changing."""
text = "Message"
text_hash = calculate_hash(text)
self.assertEqual(text_hash, 8445691827737211251)
self.assertEqual(text_hash, calculate_hash(text))
def test_hash_context(self):
"""Ensure hash works with context."""
text = "Message"
context = "Context"
text_hash = calculate_hash(context, text)
self.assertEqual(text_hash, -1602104568316855346)
self.assertEqual(text_hash, calculate_hash(context, text))
def test_hash_unicode(self):
"""Ensure hash works for unicode."""
text = "Příšerně žluťoučký kůň úpěl ďábelské ódy"
text_hash = calculate_hash(text)
self.assertEqual(text_hash, -4296353750398394478)
self.assertEqual(text_hash, calculate_hash(text))
def test_checksum(self):
"""Hash to checksum conversion."""
text_hash = calculate_hash("Message")
checksum = hash_to_checksum(text_hash)
self.assertEqual(checksum, "f5351ff85ab23173")
self.assertEqual(text_hash, checksum_to_hash(checksum))
def test_calculate_checksum(self):
self.assertEqual(calculate_checksum("Message"), "f5351ff85ab23173")
|
import collections
import typing
import numpy as np
from .stateful_unit import StatefulUnit
class FrequencyFilter(StatefulUnit):
"""
Frequency filter unit.
:param low: Lower bound, inclusive.
:param high: Upper bound, exclusive.
:param mode: One of `tf` (term frequency), `df` (document frequency),
and `idf` (inverse document frequency).
Examples::
>>> import matchzoo as mz
To filter based on term frequency (tf):
>>> tf_filter = mz.preprocessors.units.FrequencyFilter(
... low=2, mode='tf')
>>> tf_filter.fit([['A', 'B', 'B'], ['C', 'C', 'C']])
>>> tf_filter.transform(['A', 'B', 'C'])
['B', 'C']
To filter based on document frequency (df):
>>> tf_filter = mz.preprocessors.units.FrequencyFilter(
... low=2, mode='df')
>>> tf_filter.fit([['A', 'B'], ['B', 'C']])
>>> tf_filter.transform(['A', 'B', 'C'])
['B']
To filter based on inverse document frequency (idf):
>>> idf_filter = mz.preprocessors.units.FrequencyFilter(
... low=1.2, mode='idf')
>>> idf_filter.fit([['A', 'B'], ['B', 'C', 'D']])
>>> idf_filter.transform(['A', 'B', 'C'])
['A', 'C']
"""
def __init__(self, low: float = 0, high: float = float('inf'),
mode: str = 'df'):
"""Frequency filter unit."""
super().__init__()
self._low = low
self._high = high
self._mode = mode
def fit(self, list_of_tokens: typing.List[typing.List[str]]):
"""Fit `list_of_tokens` by calculating `mode` states."""
valid_terms = set()
if self._mode == 'tf':
stats = self._tf(list_of_tokens)
elif self._mode == 'df':
stats = self._df(list_of_tokens)
elif self._mode == 'idf':
stats = self._idf(list_of_tokens)
else:
raise ValueError(f"{self._mode} is not a valid filtering mode."
f"Mode must be one of `tf`, `df`, and `idf`.")
for k, v in stats.items():
if self._low <= v < self._high:
valid_terms.add(k)
self._context[self._mode] = valid_terms
def transform(self, input_: list) -> list:
"""Transform a list of tokens by filtering out unwanted words."""
valid_terms = self._context[self._mode]
return list(filter(lambda token: token in valid_terms, input_))
@classmethod
def _tf(cls, list_of_tokens: list) -> dict:
stats = collections.Counter()
for tokens in list_of_tokens:
stats.update(tokens)
return stats
@classmethod
def _df(cls, list_of_tokens: list) -> dict:
stats = collections.Counter()
for tokens in list_of_tokens:
stats.update(set(tokens))
return stats
@classmethod
def _idf(cls, list_of_tokens: list) -> dict:
num_docs = len(list_of_tokens)
stats = cls._df(list_of_tokens)
for key, val in stats.most_common():
stats[key] = np.log((1 + num_docs) / (1 + val)) + 1
return stats
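# Minimal usage sketch (hypothetical values, mirroring the `df` example in the
# class docstring above; guarded so importing the module is unaffected, and the
# relative import means it must be run via the package rather than as a script).
if __name__ == '__main__':
    df_filter = FrequencyFilter(low=2, mode='df')
    df_filter.fit([['A', 'B'], ['B', 'C']])
    # Only 'B' appears in at least two documents, so it is the only survivor.
    print(df_filter.transform(['A', 'B', 'C']))  # -> ['B']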
|
import re
import posixpath
from typing import Optional, Set
from PyQt5.QtCore import QUrl
from qutebrowser.browser import webelem
from qutebrowser.config import config
from qutebrowser.utils import objreg, urlutils, log, message, qtutils
from qutebrowser.mainwindow import mainwindow
class Error(Exception):
"""Raised when the navigation can't be done."""
# Order of the segments in a URL.
# Each list entry is a tuple of (path name (string), getter, setter).
# Note that the getters must not use the FullyDecoded mode to prevent loss
# of information. (host and path use FullyDecoded by default)
_URL_SEGMENTS = [
('host',
lambda url: url.host(QUrl.FullyEncoded),
lambda url, host: url.setHost(host, QUrl.StrictMode)),
('port',
lambda url: str(url.port()) if url.port() > 0 else '',
lambda url, x: url.setPort(int(x))),
('path',
lambda url: url.path(QUrl.FullyEncoded),
lambda url, path: url.setPath(path, QUrl.StrictMode)),
('query',
lambda url: url.query(QUrl.FullyEncoded),
lambda url, query: url.setQuery(query, QUrl.StrictMode)),
('anchor',
lambda url: url.fragment(QUrl.FullyEncoded),
lambda url, fragment: url.setFragment(fragment, QUrl.StrictMode)),
]
def _get_incdec_value(match, inc_or_dec, count):
"""Get an incremented/decremented URL based on a URL match."""
pre, zeroes, number, post = match.groups()
# This should always succeed because we match \d+
val = int(number)
if inc_or_dec == 'decrement':
if val < count:
raise Error("Can't decrement {} by {}!".format(val, count))
val -= count
elif inc_or_dec == 'increment':
val += count
else:
raise ValueError("Invalid value {} for inc_or_dec!".format(inc_or_dec))
if zeroes:
if len(number) < len(str(val)):
zeroes = zeroes[1:]
elif len(number) > len(str(val)):
zeroes += '0'
return ''.join([pre, zeroes, str(val), post])
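# Doctest-style sketch of the zero-padding behaviour above (hypothetical input;
# the pattern is the same one incdec() below feeds in):
#
#     >>> m = re.fullmatch(r'(.*\D|^)(?<!%)(?<!%.)(0*)(\d+)(.*)', '/page/009')
#     >>> _get_incdec_value(m, 'increment', 1)
#     '/page/010'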
def incdec(url, count, inc_or_dec):
"""Helper method for :navigate when `where' is increment/decrement.
Args:
url: The current url.
count: How much to increment or decrement by.
inc_or_dec: Either 'increment' or 'decrement'.
"""
urlutils.ensure_valid(url)
segments: Optional[Set[str]] = (
set(config.val.url.incdec_segments)
)
if segments is None:
segments = {'path', 'query'}
# Make a copy of the QUrl so we don't modify the original
url = QUrl(url)
# We're searching the last number so we walk the url segments backwards
for segment, getter, setter in reversed(_URL_SEGMENTS):
if segment not in segments:
continue
# Get the last number in a string not preceded by regex '%' or '%.'
match = re.fullmatch(r'(.*\D|^)(?<!%)(?<!%.)(0*)(\d+)(.*)',
getter(url))
if not match:
continue
setter(url, _get_incdec_value(match, inc_or_dec, count))
qtutils.ensure_valid(url)
return url
raise Error("No number found in URL!")
def path_up(url, count):
"""Helper method for :navigate when `where' is up.
Args:
url: The current url.
count: The number of levels to go up in the url.
"""
urlutils.ensure_valid(url)
url = url.adjusted(QUrl.RemoveFragment | QUrl.RemoveQuery)
path = url.path()
if not path or path == '/':
raise Error("Can't go up!")
for _i in range(0, min(count, path.count('/'))):
path = posixpath.join(path, posixpath.pardir)
path = posixpath.normpath(path)
url.setPath(path)
return url
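# Doctest-style sketch (hypothetical URL): going up one level drops the last
# path component while keeping the rest of the URL intact.
#
#     >>> path_up(QUrl('http://example.com/a/b/c'), 1).path()
#     '/a/b'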
def strip(url, count):
"""Strip fragment/query from a URL."""
if count != 1:
raise Error("Count is not supported when stripping URL components")
urlutils.ensure_valid(url)
return url.adjusted(QUrl.RemoveFragment | QUrl.RemoveQuery)
def _find_prevnext(prev, elems):
"""Find a prev/next element in the given list of elements."""
# First check for <link rel="prev(ious)|next"> as well as
# e.g. <a class="nav-(prev|next)"> (Hugo)
rel_values = {'prev', 'previous'} if prev else {'next'}
classes = {'nav-prev'} if prev else {'nav-next'}
for e in elems:
if e.tag_name() not in ['link', 'a']:
continue
if 'rel' in e and set(e['rel'].split(' ')) & rel_values:
log.hints.debug("Found {!r} with rel={}".format(e, e['rel']))
return e
elif e.classes() & classes:
log.hints.debug("Found {!r} with class={}".format(e, e.classes()))
return e
# Then check for regular links/buttons.
elems = [e for e in elems if e.tag_name() != 'link']
option = 'prev_regexes' if prev else 'next_regexes'
if not elems:
return None
# pylint: disable=bad-config-option
for regex in getattr(config.val.hints, option):
# pylint: enable=bad-config-option
log.hints.vdebug( # type: ignore[attr-defined]
"== Checking regex '{}'.".format(regex.pattern))
for e in elems:
text = str(e)
if not text:
continue
if regex.search(text):
log.hints.debug("Regex '{}' matched on '{}'.".format(
regex.pattern, text))
return e
else:
log.hints.vdebug( # type: ignore[attr-defined]
"No match on '{}'!".format(text))
return None
def prevnext(*, browsertab, win_id, baseurl, prev=False,
tab=False, background=False, window=False):
"""Click a "previous"/"next" element on the page.
Args:
browsertab: The WebKitTab/WebEngineTab of the page.
win_id: The window ID of the current window.
baseurl: The base URL of the current tab.
prev: True to open a "previous" link, False to open a "next" link.
tab: True to open in a new tab, False for the current tab.
background: True to open in a background tab.
window: True to open in a new window, False for the current one.
"""
def _prevnext_cb(elems):
elem = _find_prevnext(prev, elems)
word = 'prev' if prev else 'forward'
if elem is None:
message.error("No {} links found!".format(word))
return
url = elem.resolve_url(baseurl)
if url is None:
message.error("No {} links found!".format(word))
return
qtutils.ensure_valid(url)
cur_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if window:
new_window = mainwindow.MainWindow(
private=cur_tabbed_browser.is_private)
new_window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=new_window.win_id)
tabbed_browser.tabopen(url, background=False)
elif tab:
cur_tabbed_browser.tabopen(url, background=background)
else:
browsertab.load_url(url)
try:
link_selector = webelem.css_selector('links', baseurl)
except webelem.Error as e:
raise Error(str(e))
browsertab.elements.find_css(link_selector, callback=_prevnext_cb,
error_cb=lambda err: message.error(str(err)))
|
from mock import patch
from gitsweep.tests.testcases import CommandTestCase
class TestHelpMenu(CommandTestCase):
"""
Command-line tool can show the help menu.
"""
def test_help(self):
"""
The help menu is displayed when -h is given.
"""
(retcode, stdout, stderr) = self.gscommand('git-sweep -h')
self.assertResults('''
usage: git-sweep <action> [-h]
Clean up your Git remote branches.
optional arguments:
-h, --help show this help message and exit
action:
Preview changes or perform clean up
{preview,cleanup}
preview Preview the branches that will be deleted
cleanup Delete merged branches from the remote
''', stdout)
def test_fetch(self):
"""
Will fetch from the remote by default.
"""
(retcode, stdout, stderr) = self.gscommand('git-sweep preview')
self.assertResults('''
Fetching from the remote
No remote branches are available for cleaning up
''', stdout)
def test_no_fetch(self):
"""
Will not fetch if told not to.
"""
(retcode, stdout, stderr) = self.gscommand(
'git-sweep preview --nofetch')
self.assertResults('''
No remote branches are available for cleaning up
''', stdout)
def test_will_preview(self):
"""
Will preview the proposed deletes.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
(retcode, stdout, stderr) = self.gscommand('git-sweep preview')
self.assertResults('''
Fetching from the remote
These branches have been merged into master:
branch1
branch2
branch3
branch4
branch5
To delete them, run again with `git-sweep cleanup`
''', stdout)
def test_will_preserve_arguments(self):
"""
The recommended cleanup command contains the same arguments given.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
preview = 'git-sweep preview --master=master --origin=origin'
cleanup = 'git-sweep cleanup --master=master --origin=origin'
(retcode, stdout, stderr) = self.gscommand(preview)
self.assertResults('''
Fetching from the remote
These branches have been merged into master:
branch1
branch2
branch3
branch4
branch5
To delete them, run again with `{0}`
'''.format(cleanup), stdout)
def test_will_preview_none_found(self):
"""
Will preview when there is nothing to clean up.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
(retcode, stdout, stderr) = self.gscommand('git-sweep preview')
self.assertResults('''
Fetching from the remote
No remote branches are available for cleaning up
''', stdout)
def test_will_cleanup(self):
"""
Will delete the merged branches after confirmation.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
with patch('gitsweep.cli.raw_input', create=True) as ri:
ri.return_value = 'y'
(retcode, stdout, stderr) = self.gscommand('git-sweep cleanup')
self.assertResults('''
Fetching from the remote
These branches have been merged into master:
branch1
branch2
branch3
branch4
branch5
Delete these branches? (y/n)
deleting branch1 (done)
deleting branch2 (done)
deleting branch3 (done)
deleting branch4 (done)
deleting branch5 (done)
All done!
Tell everyone to run `git fetch --prune` to sync with this remote.
(you don't have to, yours is synced)
''', stdout)
def test_will_abort_cleanup(self):
"""
Will abort the cleanup when the user declines.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
with patch('gitsweep.cli.raw_input', create=True) as ri:
ri.return_value = 'n'
(retcode, stdout, stderr) = self.gscommand('git-sweep cleanup')
self.assertResults('''
Fetching from the remote
These branches have been merged into master:
branch1
branch2
branch3
branch4
branch5
Delete these branches? (y/n)
OK, aborting.
''', stdout)
def test_will_skip_certain_branches(self):
"""
Can be forced to skip certain branches.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
(retcode, stdout, stderr) = self.gscommand(
'git-sweep preview --skip=branch1,branch2')
cleanup = 'git-sweep cleanup --skip=branch1,branch2'
self.assertResults('''
Fetching from the remote
These branches have been merged into master:
branch3
branch4
branch5
To delete them, run again with `{0}`
'''.format(cleanup), stdout)
def test_will_force_clean(self):
"""
Will cleanup immediately if forced.
"""
for i in range(1, 6):
self.command('git checkout -b branch{0}'.format(i))
self.make_commit()
self.command('git checkout master')
self.make_commit()
self.command('git merge branch{0}'.format(i))
(retcode, stdout, stderr) = self.gscommand('git-sweep cleanup --force')
self.assertResults('''
Fetching from the remote
These branches have been merged into master:
branch1
branch2
branch3
branch4
branch5
deleting branch1 (done)
deleting branch2 (done)
deleting branch3 (done)
deleting branch4 (done)
deleting branch5 (done)
All done!
Tell everyone to run `git fetch --prune` to sync with this remote.
(you don't have to, yours is synced)
''', stdout)
|
import logging
import os
import tempfile
import unittest
try:
from mock import Mock
except ImportError:
from unittest.mock import Mock
import numpy as np
try:
import autograd # noqa:F401
autograd_installed = True
except ImportError:
autograd_installed = False
from gensim.models.poincare import PoincareRelations, PoincareModel, PoincareKeyedVectors
from gensim.test.utils import datapath
logger = logging.getLogger(__name__)
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_word2vec.tst')
class TestPoincareData(unittest.TestCase):
def test_encoding_handling(self):
"""Tests whether utf8 and non-utf8 data loaded correctly."""
non_utf8_file = datapath('poincare_cp852.tsv')
relations = [relation for relation in PoincareRelations(non_utf8_file, encoding='cp852')]
self.assertEqual(len(relations), 2)
self.assertEqual(relations[0], (u'tímto', u'budeš'))
utf8_file = datapath('poincare_utf8.tsv')
relations = [relation for relation in PoincareRelations(utf8_file)]
self.assertEqual(len(relations), 2)
self.assertEqual(relations[0], (u'tímto', u'budeš'))
class TestPoincareModel(unittest.TestCase):
def setUp(self):
self.data = PoincareRelations(datapath('poincare_hypernyms.tsv'))
self.data_large = PoincareRelations(datapath('poincare_hypernyms_large.tsv'))
def models_equal(self, model_1, model_2):
self.assertEqual(len(model_1.kv), len(model_2.kv))
self.assertEqual(set(model_1.kv.index_to_key), set(model_2.kv.index_to_key))
self.assertTrue(np.allclose(model_1.kv.vectors, model_2.kv.vectors))
def test_data_counts(self):
"""Tests whether data has been loaded correctly and completely."""
model = PoincareModel(self.data)
self.assertEqual(len(model.all_relations), 5)
self.assertEqual(len(model.node_relations[model.kv.get_index('kangaroo.n.01')]), 3)
self.assertEqual(len(model.kv), 7)
self.assertTrue('mammal.n.01' not in model.node_relations)
def test_data_counts_with_bytes(self):
"""Tests whether input bytes data is loaded correctly and completely."""
model = PoincareModel([(b'\x80\x01c', b'\x50\x71a'), (b'node.1', b'node.2')])
self.assertEqual(len(model.all_relations), 2)
self.assertEqual(len(model.node_relations[model.kv.get_index(b'\x80\x01c')]), 1)
self.assertEqual(len(model.kv), 4)
self.assertTrue(b'\x50\x71a' not in model.node_relations)
def test_persistence(self):
"""Tests whether the model is saved and loaded correctly."""
model = PoincareModel(self.data, burn_in=0, negative=3)
model.train(epochs=1)
model.save(testfile())
loaded = PoincareModel.load(testfile())
self.models_equal(model, loaded)
def test_persistence_separate_file(self):
"""Tests whether the model is saved and loaded correctly when the arrays are stored separately."""
model = PoincareModel(self.data, burn_in=0, negative=3)
model.train(epochs=1)
model.save(testfile(), sep_limit=1)
loaded = PoincareModel.load(testfile())
self.models_equal(model, loaded)
def test_online_learning(self):
"""Tests whether additional input data is loaded correctly and completely."""
model = PoincareModel(self.data, burn_in=0, negative=3)
self.assertEqual(len(model.kv), 7)
self.assertEqual(model.kv.get_vecattr('kangaroo.n.01', 'count'), 3)
self.assertEqual(model.kv.get_vecattr('cat.n.01', 'count'), 1)
model.build_vocab([('kangaroo.n.01', 'cat.n.01')], update=True) # update vocab
self.assertEqual(model.kv.get_vecattr('kangaroo.n.01', 'count'), 4)
self.assertEqual(model.kv.get_vecattr('cat.n.01', 'count'), 2)
def test_train_after_load(self):
"""Tests whether the model can be trained correctly after loading from disk."""
model = PoincareModel(self.data, burn_in=0, negative=3)
model.train(epochs=1)
model.save(testfile())
loaded = PoincareModel.load(testfile())
model.train(epochs=1)
loaded.train(epochs=1)
self.models_equal(model, loaded)
def test_persistence_old_model(self):
"""Tests whether model from older gensim version is loaded correctly."""
loaded = PoincareModel.load(datapath('poincare_test_3.4.0'))
self.assertEqual(loaded.kv.vectors.shape, (239, 2))
self.assertEqual(len(loaded.kv), 239)
self.assertEqual(loaded.size, 2)
self.assertEqual(len(loaded.all_relations), 200)
def test_train_old_model_after_load(self):
"""Tests whether loaded model from older gensim version can be trained correctly."""
loaded = PoincareModel.load(datapath('poincare_test_3.4.0'))
old_vectors = np.copy(loaded.kv.vectors)
loaded.train(epochs=2)
self.assertFalse(np.allclose(old_vectors, loaded.kv.vectors))
def test_invalid_data_raises_error(self):
"""Tests that error is raised on invalid input data."""
with self.assertRaises(ValueError):
PoincareModel([("a", "b", "c")])
with self.assertRaises(ValueError):
PoincareModel(["a", "b", "c"])
with self.assertRaises(ValueError):
PoincareModel("ab")
def test_vector_shape(self):
"""Tests whether vectors are initialized with the correct size."""
model = PoincareModel(self.data, size=20)
self.assertEqual(model.kv.vectors.shape, (7, 20))
def test_vector_dtype(self):
"""Tests whether vectors have the correct dtype before and after training."""
model = PoincareModel(self.data_large, dtype=np.float32, burn_in=0, negative=3)
self.assertEqual(model.kv.vectors.dtype, np.float32)
model.train(epochs=1)
self.assertEqual(model.kv.vectors.dtype, np.float32)
def test_training(self):
"""Tests that vectors are different before and after training."""
model = PoincareModel(self.data_large, burn_in=0, negative=3)
old_vectors = np.copy(model.kv.vectors)
model.train(epochs=2)
self.assertFalse(np.allclose(old_vectors, model.kv.vectors))
def test_training_multiple(self):
"""Tests that calling train multiple times results in different vectors."""
model = PoincareModel(self.data_large, burn_in=0, negative=3)
model.train(epochs=2)
old_vectors = np.copy(model.kv.vectors)
model.train(epochs=1)
self.assertFalse(np.allclose(old_vectors, model.kv.vectors))
old_vectors = np.copy(model.kv.vectors)
model.train(epochs=0)
self.assertTrue(np.allclose(old_vectors, model.kv.vectors))
def test_gradients_check(self):
"""Tests that the model is trained successfully with gradients check enabled."""
model = PoincareModel(self.data, negative=3)
try:
model.train(epochs=1, batch_size=1, check_gradients_every=1)
except Exception as e:
self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))
@unittest.skipIf(not autograd_installed, 'autograd needs to be installed for this test')
def test_wrong_gradients_raises_assertion(self):
"""Tests that discrepancy in gradients raises an error."""
model = PoincareModel(self.data, negative=3)
model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))
with self.assertRaises(AssertionError):
model.train(epochs=1, batch_size=1, check_gradients_every=1)
def test_reproducible(self):
"""Tests that vectors are same for two independent models trained with the same seed."""
model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)
model_1.train(epochs=2)
model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)
model_2.train(epochs=2)
self.assertTrue(np.allclose(model_1.kv.vectors, model_2.kv.vectors))
def test_burn_in(self):
"""Tests that vectors are different after burn-in."""
model = PoincareModel(self.data, burn_in=1, negative=3)
original_vectors = np.copy(model.kv.vectors)
model.train(epochs=0)
self.assertFalse(np.allclose(model.kv.vectors, original_vectors))
def test_burn_in_only_done_once(self):
"""Tests that burn-in does not happen when train is called a second time."""
model = PoincareModel(self.data, negative=3, burn_in=1)
model.train(epochs=0)
original_vectors = np.copy(model.kv.vectors)
model.train(epochs=0)
self.assertTrue(np.allclose(model.kv.vectors, original_vectors))
def test_negatives(self):
"""Tests that correct number of negatives are sampled."""
model = PoincareModel(self.data, negative=5)
self.assertEqual(len(model._get_candidate_negatives()), 5)
def test_error_if_negative_more_than_population(self):
"""Tests error is rased if number of negatives to sample is more than remaining nodes."""
model = PoincareModel(self.data, negative=5)
with self.assertRaises(ValueError):
model.train(epochs=1)
def test_no_duplicates_and_positives_in_negative_sample(self):
"""Tests that no duplicates or positively related nodes are present in negative samples."""
model = PoincareModel(self.data_large, negative=3)
positive_nodes = model.node_relations[0] # Positive nodes for node 0
num_samples = 100 # Repeat experiment multiple times
for i in range(num_samples):
negatives = model._sample_negatives(0)
self.assertFalse(positive_nodes & set(negatives))
self.assertEqual(len(negatives), len(set(negatives)))
def test_handle_duplicates(self):
"""Tests that correct number of negatives are used."""
vector_updates = np.array([[0.5, 0.5], [0.1, 0.2], [0.3, -0.2]])
node_indices = [0, 1, 0]
PoincareModel._handle_duplicates(vector_updates, node_indices)
vector_updates_expected = np.array([[0.0, 0.0], [0.1, 0.2], [0.8, 0.3]])
self.assertTrue((vector_updates == vector_updates_expected).all())
@classmethod
def tearDownClass(cls):
try:
os.unlink(testfile())
except OSError:
pass
class TestPoincareKeyedVectors(unittest.TestCase):
def setUp(self):
self.vectors = PoincareKeyedVectors.load_word2vec_format(datapath('poincare_vectors.bin'), binary=True)
def test_most_similar(self):
"""Test most_similar returns expected results."""
expected = [
'canine.n.02',
'hunting_dog.n.01',
'carnivore.n.01',
'placental.n.01',
'mammal.n.01'
]
predicted = [result[0] for result in self.vectors.most_similar('dog.n.01', topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_topn(self):
"""Test most_similar returns correct results when `topn` is specified."""
self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)
self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)
predicted = self.vectors.most_similar('dog.n.01', topn=None)
self.assertEqual(len(predicted), len(self.vectors) - 1)
self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')
def test_most_similar_raises_keyerror(self):
"""Test most_similar raises KeyError when input is out of vocab."""
with self.assertRaises(KeyError):
self.vectors.most_similar('not_in_vocab')
def test_most_similar_restrict_vocab(self):
"""Test most_similar returns handles restrict_vocab correctly."""
expected = set(self.vectors.index_to_key[:5])
predicted = set(result[0] for result in self.vectors.most_similar('dog.n.01', topn=5, restrict_vocab=5))
self.assertEqual(expected, predicted)
def test_most_similar_to_given(self):
"""Test most_similar_to_given returns correct results."""
predicted = self.vectors.most_similar_to_given('dog.n.01', ['carnivore.n.01', 'placental.n.01', 'mammal.n.01'])
self.assertEqual(predicted, 'carnivore.n.01')
def test_most_similar_with_vector_input(self):
"""Test most_similar returns expected results with an input vector instead of an input word."""
expected = [
'dog.n.01',
'canine.n.02',
'hunting_dog.n.01',
'carnivore.n.01',
'placental.n.01',
]
input_vector = self.vectors['dog.n.01']
predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]
self.assertEqual(expected, predicted)
def test_distance(self):
"""Test that distance returns expected values."""
self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))
self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)
def test_distances(self):
"""Test that distances between one word and multiple other words have expected values."""
distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])
self.assertTrue(np.allclose(distances, [4.5278745, 0]))
distances = self.vectors.distances('dog.n.01')
self.assertEqual(len(distances), len(self.vectors))
self.assertTrue(np.allclose(distances[-1], 10.04756))
def test_distances_with_vector_input(self):
"""Test that distances between input vector and a list of words have expected values."""
input_vector = self.vectors['dog.n.01']
distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])
self.assertTrue(np.allclose(distances, [4.5278745, 0]))
distances = self.vectors.distances(input_vector)
self.assertEqual(len(distances), len(self.vectors))
self.assertTrue(np.allclose(distances[-1], 10.04756))
def test_poincare_distances_batch(self):
"""Test that poincare_distance_batch returns correct distances."""
vector_1 = self.vectors['dog.n.01']
vectors_2 = self.vectors[['mammal.n.01', 'dog.n.01']]
distances = self.vectors.vector_distance_batch(vector_1, vectors_2)
self.assertTrue(np.allclose(distances, [4.5278745, 0]))
def test_poincare_distance(self):
"""Test that poincare_distance returns correct distance between two input vectors."""
vector_1 = self.vectors['dog.n.01']
vector_2 = self.vectors['mammal.n.01']
distance = self.vectors.vector_distance(vector_1, vector_2)
self.assertTrue(np.allclose(distance, 4.5278745))
distance = self.vectors.vector_distance(vector_1, vector_1)
self.assertTrue(np.allclose(distance, 0))
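# The expected values above follow the standard Poincare ball distance
# (as in the Poincare embeddings paper; not gensim-specific code):
#     d(u, v) = arccosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)))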
def test_closest_child(self):
"""Test closest_child returns expected value and returns None for lowest node in hierarchy."""
self.assertEqual(self.vectors.closest_child('dog.n.01'), 'terrier.n.01')
self.assertEqual(self.vectors.closest_child('harbor_porpoise.n.01'), None)
def test_closest_parent(self):
"""Test closest_parent returns expected value and returns None for highest node in hierarchy."""
self.assertEqual(self.vectors.closest_parent('dog.n.01'), 'canine.n.02')
self.assertEqual(self.vectors.closest_parent('mammal.n.01'), None)
def test_ancestors(self):
"""Test ancestors returns expected list and returns empty list for highest node in hierarchy."""
expected = ['canine.n.02', 'carnivore.n.01', 'placental.n.01', 'mammal.n.01']
self.assertEqual(self.vectors.ancestors('dog.n.01'), expected)
expected = []
self.assertEqual(self.vectors.ancestors('mammal.n.01'), expected)
def test_descendants(self):
"""Test descendants returns expected list and returns empty list for lowest node in hierarchy."""
expected = [
'terrier.n.01', 'sporting_dog.n.01', 'spaniel.n.01', 'water_spaniel.n.01', 'irish_water_spaniel.n.01'
]
self.assertEqual(self.vectors.descendants('dog.n.01'), expected)
self.assertEqual(self.vectors.descendants('dog.n.01', max_depth=3), expected[:3])
def test_similarity(self):
"""Test similarity returns expected value for two nodes, and for identical nodes."""
self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))
self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))
def test_norm(self):
"""Test norm returns expected value."""
self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))
self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))
def test_difference_in_hierarchy(self):
"""Test difference_in_hierarchy returns expected value for two nodes, and for identical nodes."""
self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'dog.n.01'), 0))
self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('mammal.n.01', 'dog.n.01'), 0.9384287))
self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'mammal.n.01'), -0.9384287))
def test_closer_than(self):
"""Test closer_than returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.closer_than('dog.n.01', 'dog.n.01'), [])
expected = set(['canine.n.02', 'hunting_dog.n.01'])
self.assertEqual(set(self.vectors.closer_than('dog.n.01', 'carnivore.n.01')), expected)
def test_rank(self):
"""Test rank returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)
self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
from collections import namedtuple
from google.protobuf.descriptor import FieldDescriptor
import json
from gmusicapi.exceptions import (
CallFailure, ParseException, ValidationException,
)
from gmusicapi.utils import utils
import requests
log = utils.DynamicClientLogger(__name__)
_auth_names = ('xt', 'sso', 'oauth', 'gpsoauth',)
"""
AuthTypes has fields for each type of auth, each of which store a bool:
xt: webclient xsrf param/cookie
sso: webclient Authorization header
oauth: musicmanager/mobileclient Bearer header
gpsoauth: mobileclient gpsoauth GoogleLogin header
"""
AuthTypes = namedtuple('AuthTypes', _auth_names)
def authtypes(**kwargs):
"""Convinience factory for AuthTypes that defaults authtypes to False."""
for name in _auth_names:
if name not in kwargs:
kwargs[name] = False
return AuthTypes(**kwargs)
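# e.g. authtypes(sso=True, xt=True) == AuthTypes(xt=True, sso=True, oauth=False, gpsoauth=False)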
class BuildRequestMeta(type):
"""Metaclass to create build_request from static/dynamic config."""
def __new__(cls, name, bases, dct):
# To not mess with mro and inheritance, build the class first.
new_cls = super().__new__(cls, name, bases, dct)
merge_keys = ('headers', 'params')
all_keys = ('method', 'url', 'files', 'data', 'verify', 'allow_redirects') + merge_keys
config = {} # stores key: val for static or f(*args, **kwargs) -> val for dyn
dyn = lambda key: 'dynamic_' + key # noqa
stat = lambda key: 'static_' + key # noqa
has_key = lambda key: hasattr(new_cls, key) # noqa
get_key = lambda key: getattr(new_cls, key) # noqa
for key in all_keys:
if not has_key(dyn(key)) and not has_key(stat(key)):
continue # this key will be ignored; requests will default it
if has_key(dyn(key)):
config[key] = get_key(dyn(key))
else:
config[key] = get_key(stat(key))
for key in merge_keys:
# merge case: dyn took precedence above, but stat also exists
if has_key(dyn(key)) and has_key(stat(key)):
def key_closure(stat_val=get_key(stat(key)), dyn_func=get_key(dyn(key))):
def build_key(*args, **kwargs):
dyn_val = dyn_func(*args, **kwargs)
stat_val.update(dyn_val)
return stat_val
return build_key
config[key] = key_closure()
# To explain some of the funkiness wrt closures, see:
# http://stackoverflow.com/questions/233673/lexical-closures-in-python
# create the actual build_request method
def req_closure(config=config):
def build_request(cls, *args, **kwargs):
req_kwargs = {}
for key, val in config.items():
if hasattr(val, '__call__'):
val = val(*args, **kwargs)
req_kwargs[key] = val
return req_kwargs
return build_request
new_cls.build_request = classmethod(req_closure())
return new_cls
class Call(metaclass=BuildRequestMeta):
"""
Clients should use Call.perform().
Calls define how to build their requests through static and dynamic data.
For example, a request might always send some user-agent: this is static.
Or, it might need the name of a song to modify: this is dynamic.
Specially named fields define the data, and correspond with requests.Request kwargs:
method: eg 'GET' or 'POST'
url: string
files: dictionary of {filename: fileobject} files to multipart upload.
data: the body of the request
If a dictionary is provided, form-encoding will take place.
A string will be sent as-is.
verify: if True, verify SSL certs
params (m): dictionary of URL parameters to append to the URL.
headers (m): dictionary
Static data should prepend static_ to a field name:
class SomeCall(Call):
static_url = 'http://foo.com/thiscall'
And dynamic data prepends dynamic_ to a method:
class SomeCall(Call):
#*args, **kwargs are passed from SomeCall.build_request (and Call.perform)
def dynamic_url(endpoint):
return 'http://foo.com/' + endpoint
Dynamic data takes precedence over static if both exist,
except for attributes marked with (m) above. These get merged, with dynamic overriding
on key conflicts (though all this really shouldn't be relied on).
Here's a contrived example that merges static and dynamic headers:
class SomeCall(Call):
static_headers = {'user-agent': "I'm totally a Google client!"}
@classmethod
def dynamic_headers(cls, keep_alive=False):
return {'Connection': keep_alive}
If neither a static nor dynamic member is defined,
the param is not used to create the requests.Request.
Calls declare the kind of auth they require with an AuthTypes object named required_auth.
Calls must define parse_response.
Calls can also define filter_response, validate and check_success.
Calls are organized semantically, so one endpoint might have multiple calls.
"""
gets_logged = True
fail_on_non_200 = True
required_auth = authtypes() # all false by default
@classmethod
def parse_response(cls, response):
"""Parses a requests.Response to data."""
raise NotImplementedError
@classmethod
def validate(cls, response, msg):
"""Raise ValidationException on problems.
:param response: a requests.Response
:param msg: the result of parse_response on response
"""
pass
@classmethod
def check_success(cls, response, msg):
"""Raise CallFailure on problems.
:param response: a requests.Response
:param msg: the result of parse_response on response
"""
pass
@classmethod
def filter_response(cls, msg):
"""Return a version of a parsed response appropriate for logging."""
return msg # default to identity
@classmethod
def perform(cls, session, validate, *args, **kwargs):
"""Send, parse, validate and check success of this call.
*args and **kwargs are passed to the call's build_request.
:param session: a PlaySession used to send this request.
:param validate: if False, do not validate.
:param required_auth: if in kwargs, overrides the static protocol required_auth.
"""
# TODO link up these docs
call_name = cls.__name__
if cls.gets_logged:
log.debug("%s(args=%s, kwargs=%s)",
call_name,
[utils.truncate(a) for a in args],
{k: utils.truncate(v) for (k, v) in kwargs.items()}
)
else:
log.debug("%s(<omitted>)", call_name)
required_auth = kwargs.pop('required_auth', cls.required_auth)
req_kwargs = cls.build_request(*args, **kwargs)
response = session.send(req_kwargs, required_auth)
# TODO trim the logged response if it's huge?
safe_req_kwargs = req_kwargs.copy()
if safe_req_kwargs.get('headers', {}).get('Authorization', None) is not None:
safe_req_kwargs['headers']['Authorization'] = '<omitted>'
if cls.fail_on_non_200:
try:
response.raise_for_status()
except requests.HTTPError as e:
err_msg = str(e)
if cls.gets_logged:
err_msg += "\n(requests kwargs: %r)" % (safe_req_kwargs)
err_msg += "\n(response was: %r)" % response.text
raise CallFailure(err_msg, call_name)
try:
parsed_response = cls.parse_response(response)
except ParseException:
err_msg = ("the server's response could not be understood."
" The call may still have succeeded, but it's unlikely.")
if cls.gets_logged:
err_msg += "\n(requests kwargs: %r)" % (safe_req_kwargs)
err_msg += "\n(response was: %r)" % response.text
log.exception("could not parse %s response: %r", call_name, response.text)
else:
log.exception("could not parse %s response: (omitted)", call_name)
raise CallFailure(err_msg, call_name)
if cls.gets_logged:
log.debug(cls.filter_response(parsed_response))
try:
# order is important; validate only has a schema for a successful response
cls.check_success(response, parsed_response)
if validate:
cls.validate(response, parsed_response)
except CallFailure as e:
if not cls.gets_logged:
raise
# otherwise, reraise a new exception with our req/res context
err_msg = ("{e_message}\n"
"(requests kwargs: {req_kwargs!r})\n"
"(response was: {content!r})").format(
e_message=str(e),
req_kwargs=safe_req_kwargs,
content=response.text)
raise CallFailure(err_msg, e.callname) from e
except ValidationException as e:
# TODO shouldn't be using formatting
err_msg = "the response format for %s was not recognized." % call_name
err_msg += "\n\n%s\n" % e
if cls.gets_logged:
raw_response = response.text
if len(raw_response) > 10000:
raw_response = raw_response[:10000] + '...'
err_msg += ("\nFirst, try the develop branch."
" If you can recreate this error with the most recent code"
" please [create an issue](http://goo.gl/qbAW8) that includes"
" the above ValidationException"
" and the following request/response:\n%r\n\n%r\n"
"\nA traceback follows:\n") % (safe_req_kwargs, raw_response)
log.exception(err_msg)
return parsed_response
@staticmethod
def _parse_json(text):
try:
return json.loads(text)
except ValueError as e:
raise ParseException(str(e)) from e
@staticmethod
def _filter_proto(msg, make_copy=True):
"""Filter all byte fields in the message and submessages."""
filtered = msg
if make_copy:
filtered = msg.__class__()
filtered.CopyFrom(msg)
fields = filtered.ListFields()
# eg of filtering a specific field
# if any(fd.name == 'field_name' for fd, val in fields):
# filtered.field_name = '<name>'
# Filter all byte fields.
for field_name, val in ((fd.name, val) for fd, val in fields
if fd.type == FieldDescriptor.TYPE_BYTES):
setattr(filtered, field_name, bytes("<%s bytes>" % len(val), 'utf8'))
# Filter submessages.
for field in (val for fd, val in fields
if fd.type == FieldDescriptor.TYPE_MESSAGE):
# protobuf repeated api is bad for reflection
is_repeated = hasattr(field, '__len__')
if not is_repeated:
Call._filter_proto(field, make_copy=False)
else:
for i in range(len(field)):
# repeatedComposite does not allow setting
old_fields = [f for f in field]
del field[:]
field.extend([Call._filter_proto(f, make_copy=False)
for f in old_fields])
return filtered
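# Hypothetical sketch (not one of the real protocol calls): shows how the
# static_*/dynamic_* naming convention documented on Call is turned into
# requests kwargs by BuildRequestMeta. Guarded so importing is unaffected.
if __name__ == '__main__':
    class _ExampleCall(Call):
        static_method = 'GET'
        static_url = 'http://example.invalid/endpoint'
        static_headers = {'User-Agent': 'example-agent'}

        @classmethod
        def dynamic_params(cls, song_id):
            return {'id': song_id}

    # -> {'method': 'GET', 'url': 'http://example.invalid/endpoint',
    #     'headers': {'User-Agent': 'example-agent'}, 'params': {'id': '123'}}
    print(_ExampleCall.build_request('123'))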
|
import logging
from eebrightbox import EEBrightBox, EEBrightBoxException
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_VERSION = "version"
CONF_DEFAULT_IP = "192.168.1.1"
CONF_DEFAULT_USERNAME = "admin"
CONF_DEFAULT_VERSION = 2
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_VERSION, default=CONF_DEFAULT_VERSION): cv.positive_int,
vol.Required(CONF_HOST, default=CONF_DEFAULT_IP): cv.string,
vol.Required(CONF_USERNAME, default=CONF_DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
def get_scanner(hass, config):
"""Return a router scanner instance."""
scanner = EEBrightBoxScanner(config[DOMAIN])
return scanner if scanner.check_config() else None
class EEBrightBoxScanner(DeviceScanner):
"""Scan EE Brightbox router."""
def __init__(self, config):
"""Initialise the scanner."""
self.config = config
self.devices = {}
def check_config(self):
"""Check if provided configuration and credentials are correct."""
try:
with EEBrightBox(self.config) as ee_brightbox:
return bool(ee_brightbox.get_devices())
except EEBrightBoxException:
_LOGGER.exception("Failed to connect to the router")
return False
def scan_devices(self):
"""Scan for devices."""
with EEBrightBox(self.config) as ee_brightbox:
self.devices = {d["mac"]: d for d in ee_brightbox.get_devices()}
macs = [d["mac"] for d in self.devices.values() if d["activity_ip"]]
_LOGGER.debug("Scan devices %s", macs)
return macs
def get_device_name(self, device):
"""Get the name of a device from hostname."""
if device in self.devices:
return self.devices[device]["hostname"] or None
return None
def get_extra_attributes(self, device):
"""
Get the extra attributes of a device.
Extra attributes include:
- ip
- mac
- port - ethX or wifiX
- last_active
"""
port_map = {
"wl1": "wifi5Ghz",
"wl0": "wifi2.4Ghz",
"eth0": "eth0",
"eth1": "eth1",
"eth2": "eth2",
"eth3": "eth3",
}
if device in self.devices:
return {
"ip": self.devices[device]["ip"],
"mac": self.devices[device]["mac"],
"port": port_map[self.devices[device]["port"]],
"last_active": self.devices[device]["time_last_active"],
}
return {}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
from perfkitbenchmarker import resource
from perfkitbenchmarker.providers.rackspace import util
import six
_RACK_PATH = 'path/rack'
class RackspaceResource(resource.BaseResource):
def __init__(self, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def _Create(self):
raise NotImplementedError()
def _Delete(self):
raise NotImplementedError()
class RackCLICommandTestCase(unittest.TestCase):
def setUp(self):
super(RackCLICommandTestCase, self).setUp()
p = mock.patch(util.__name__ + '.FLAGS')
self.mock_flags = p.start()
self.addCleanup(p.stop)
self.mock_flags.rack_path = _RACK_PATH
def testCommonFlagsWithoutOptionalFlags(self):
rack_resource = RackspaceResource(profile=None)
cmd = util.RackCLICommand(rack_resource, 'servers', 'image', 'list')
self.assertEqual(cmd._GetCommand(), [
'path/rack', 'servers', 'image', 'list', '--output', 'json'])
def testCommonFlagsWithOptionalFlags(self):
rack_resource = RackspaceResource(profile='US', region='DFW')
cmd = util.RackCLICommand(rack_resource, 'servers', 'keypair', 'list')
cmd.flags['all-pages'] = True
self.assertEqual(cmd._GetCommand(), [
'path/rack', 'servers', 'keypair', 'list', '--all-pages',
'--output', 'json', '--profile', 'US', '--region', 'DFW'])
if __name__ == '__main__':
unittest.main()
|
from homeassistant.components.ipp.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.core import HomeAssistant
from tests.components.ipp import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_not_ready(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the IPP configuration entry not ready."""
entry = await init_integration(hass, aioclient_mock, conn_error=True)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the IPP configuration entry unloading."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
assert entry.entry_id in hass.data[DOMAIN]
assert entry.state == ENTRY_STATE_LOADED
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.entry_id not in hass.data[DOMAIN]
assert entry.state == ENTRY_STATE_NOT_LOADED
|
import subprocess
import urwid
import os
import sys
factor_me = 362923067964327863989661926737477737673859044111968554257667
run_me = os.path.join(os.path.dirname(sys.argv[0]), 'subproc2.py')
output_widget = urwid.Text("Factors of %d:\n" % factor_me)
edit_widget = urwid.Edit("Type anything or press enter to exit:")
frame_widget = urwid.Frame(
header=edit_widget,
body=urwid.Filler(output_widget, valign='bottom'),
focus_part='header')
def exit_on_enter(key):
if key == 'enter': raise urwid.ExitMainLoop()
loop = urwid.MainLoop(frame_widget, unhandled_input=exit_on_enter)
def received_output(data):
output_widget.set_text(output_widget.text + data.decode('utf8'))
write_fd = loop.watch_pipe(received_output)
proc = subprocess.Popen(
['python', '-u', run_me, str(factor_me)],
stdout=write_fd,
close_fds=True)
loop.run()
proc.kill()
|
import sys
import functools
from typing import Optional
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, QUrl
from PyQt5.QtNetwork import (QNetworkProxy, QNetworkRequest, QHostInfo,
QNetworkReply, QNetworkAccessManager,
QHostAddress)
from PyQt5.QtQml import QJSEngine, QJSValue
from qutebrowser.utils import log, utils, qtutils
class ParseProxyError(Exception):
"""Error while parsing PAC result string."""
class EvalProxyError(Exception):
"""Error while evaluating PAC script."""
def _js_slot(*args):
"""Wrap a methods as a JavaScript function.
Register a PACContext method as a JavaScript function, and catch
exceptions returning them as JavaScript Error objects.
Args:
args: Types of method arguments.
Return: Wrapped method.
"""
def _decorator(method):
@functools.wraps(method)
def new_method(self, *args, **kwargs):
"""Call the underlying function."""
try:
return method(self, *args, **kwargs)
except:
e = str(sys.exc_info()[0])
log.network.exception("PAC evaluation error")
# pylint: disable=protected-access
return self._error_con.callAsConstructor([e])
# pylint: enable=protected-access
deco = pyqtSlot(*args, result=QJSValue)
return deco(new_method)
return _decorator
class _PACContext(QObject):
"""Implementation of PAC API functions that require native calls.
See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Necko/Proxy_Auto-Configuration_(PAC)_file
"""
JS_DEFINITIONS = """
function dnsResolve(host) {
return PAC.dnsResolve(host);
}
function myIpAddress() {
return PAC.myIpAddress();
}
"""
def __init__(self, engine):
"""Create a new PAC API implementation instance.
Args:
engine: QJSEngine which is used for running PAC.
"""
super().__init__(parent=engine)
self._engine = engine
self._error_con = engine.globalObject().property("Error")
@_js_slot(str)
def dnsResolve(self, host):
"""Resolve a DNS hostname.
Resolves the given DNS hostname into an IP address, and returns it
in the dot-separated format as a string.
Args:
host: hostname to resolve.
"""
ips = QHostInfo.fromName(host)
if ips.error() != QHostInfo.NoError or not ips.addresses():
err_f = "Failed to resolve host during PAC evaluation: {}"
log.network.info(err_f.format(host))
return QJSValue(QJSValue.NullValue)
else:
return ips.addresses()[0].toString()
@_js_slot()
def myIpAddress(self):
"""Get host IP address.
Return the server IP address of the current machine, as a string in
the dot-separated integer format.
"""
return QHostAddress(QHostAddress.LocalHost).toString()
class PACResolver:
"""Evaluate PAC script files and resolve proxies."""
@staticmethod
def _parse_proxy_host(host_str):
host, _colon, port_str = host_str.partition(':')
try:
port = int(port_str)
except ValueError:
raise ParseProxyError("Invalid port number")
return (host, port)
@staticmethod
def _parse_proxy_entry(proxy_str):
"""Parse one proxy string entry, as described in PAC specification."""
config = [c.strip() for c in proxy_str.split(' ') if c]
if not config:
raise ParseProxyError("Empty proxy entry")
if config[0] == "DIRECT":
if len(config) != 1:
raise ParseProxyError("Invalid number of parameters for " +
"DIRECT")
return QNetworkProxy(QNetworkProxy.NoProxy)
elif config[0] == "PROXY":
if len(config) != 2:
raise ParseProxyError("Invalid number of parameters for PROXY")
host, port = PACResolver._parse_proxy_host(config[1])
return QNetworkProxy(QNetworkProxy.HttpProxy, host, port)
elif config[0] in ["SOCKS", "SOCKS5"]:
if len(config) != 2:
raise ParseProxyError("Invalid number of parameters for SOCKS")
host, port = PACResolver._parse_proxy_host(config[1])
return QNetworkProxy(QNetworkProxy.Socks5Proxy, host, port)
else:
err = "Unknown proxy type: {}"
raise ParseProxyError(err.format(config[0]))
@staticmethod
def _parse_proxy_string(proxy_str):
proxies = proxy_str.split(';')
return [PACResolver._parse_proxy_entry(x) for x in proxies]
def _evaluate(self, js_code, js_file):
ret = self._engine.evaluate(js_code, js_file)
if ret.isError():
err = "JavaScript error while evaluating PAC file: {}"
raise EvalProxyError(err.format(ret.toString()))
def __init__(self, pac_str):
"""Create a PAC resolver.
Args:
pac_str: JavaScript code containing PAC resolver.
"""
self._engine = QJSEngine()
self._engine.installExtensions(QJSEngine.ConsoleExtension)
self._ctx = _PACContext(self._engine)
self._engine.globalObject().setProperty(
"PAC", self._engine.newQObject(self._ctx))
self._evaluate(_PACContext.JS_DEFINITIONS, "pac_js_definitions")
self._evaluate(utils.read_file("javascript/pac_utils.js"), "pac_utils")
proxy_config = self._engine.newObject()
proxy_config.setProperty("bindings", self._engine.newObject())
self._engine.globalObject().setProperty("ProxyConfig", proxy_config)
self._evaluate(pac_str, "pac")
global_js_object = self._engine.globalObject()
self._resolver = global_js_object.property("FindProxyForURL")
if not self._resolver.isCallable():
err = "Cannot resolve FindProxyForURL function, got '{}' instead"
raise EvalProxyError(err.format(self._resolver.toString()))
def resolve(self, query, from_file=False):
"""Resolve a proxy via PAC.
Args:
query: QNetworkProxyQuery.
from_file: Whether the proxy info is coming from a file.
Return:
A list of QNetworkProxy objects in order of preference.
"""
qtutils.ensure_valid(query.url())
if from_file:
string_flags = QUrl.PrettyDecoded
else:
string_flags = QUrl.RemoveUserInfo # type: ignore[assignment]
if query.url().scheme() == 'https':
string_flags |= QUrl.RemovePath # type: ignore[assignment]
string_flags |= QUrl.RemoveQuery # type: ignore[assignment]
result = self._resolver.call([query.url().toString(string_flags),
query.peerHostName()])
result_str = result.toString()
if not result.isString():
err = "Got strange value from FindProxyForURL: '{}'"
raise EvalProxyError(err.format(result_str))
return self._parse_proxy_string(result_str)
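# Hypothetical sketch of the PAC return-value grammar parsed above (made-up
# host and port; guarded so importing this module is unaffected).
if __name__ == '__main__':
    for proxy in PACResolver._parse_proxy_string(
            "PROXY proxy.example.invalid:8080; DIRECT"):
        print(proxy.type(), proxy.hostName(), proxy.port())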
class PACFetcher(QObject):
"""Asynchronous fetcher of PAC files."""
finished = pyqtSignal()
def __init__(self, url, parent=None):
"""Resolve a PAC proxy from URL.
Args:
url: QUrl of a PAC proxy.
"""
super().__init__(parent)
pac_prefix = "pac+"
assert url.scheme().startswith(pac_prefix)
url.setScheme(url.scheme()[len(pac_prefix):])
self._pac_url = url
self._manager: Optional[QNetworkAccessManager] = QNetworkAccessManager()
self._manager.setProxy(QNetworkProxy(QNetworkProxy.NoProxy))
self._pac = None
self._error_message = None
self._reply = None
def __eq__(self, other):
return self._pac_url == other._pac_url
def __repr__(self):
return utils.get_repr(self, url=self._pac_url, constructor=True)
def fetch(self):
"""Fetch the proxy from the remote URL."""
assert self._manager is not None
self._reply = self._manager.get(QNetworkRequest(self._pac_url))
self._reply.finished.connect( # type: ignore[attr-defined]
self._finish)
@pyqtSlot()
def _finish(self):
assert self._reply is not None
if self._reply.error() != QNetworkReply.NoError:
error = "Can't fetch PAC file from URL, error code {}: {}"
self._error_message = error.format(
self._reply.error(), self._reply.errorString())
log.network.error(self._error_message)
else:
try:
pacscript = bytes(self._reply.readAll()).decode("utf-8")
except UnicodeError as e:
error = "Invalid encoding of a PAC file: {}"
self._error_message = error.format(e)
log.network.exception(self._error_message)
try:
self._pac = PACResolver(pacscript)
log.network.debug("Successfully evaluated PAC file.")
except EvalProxyError as e:
error = "Error in PAC evaluation: {}"
self._error_message = error.format(e)
log.network.exception(self._error_message)
self._manager = None
self._reply = None
self.finished.emit()
def _wait(self):
"""Wait until a reply from the remote server is received."""
if self._manager is not None:
loop = qtutils.EventLoop()
self.finished.connect(loop.quit)
loop.exec_()
def fetch_error(self):
"""Check if PAC script is successfully fetched.
Return None iff PAC script is downloaded and evaluated successfully,
error string otherwise.
"""
self._wait()
return self._error_message
def resolve(self, query):
"""Resolve a query via PAC.
Args:
query: QNetworkProxyQuery.
Return a list of QNetworkProxy objects in order of preference.
"""
self._wait()
assert self._pac is not None
from_file = self._pac_url.scheme() == 'file'
try:
return self._pac.resolve(query, from_file=from_file)
except (EvalProxyError, ParseProxyError) as e:
log.network.exception("Error in PAC resolution: {}.".format(e))
# .invalid is guaranteed to be inaccessible in RFC 6761.
# Port 9 is for DISCARD protocol -- DISCARD servers act like
# /dev/null.
# Later NetworkManager.createRequest will detect this and display
# an error message.
error_host = "pac-resolve-error.qutebrowser.invalid"
return [QNetworkProxy(QNetworkProxy.HttpProxy, error_host, 9)]
|
from libpyvivotek import VivotekCamera
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PASSWORD,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.helpers import config_validation as cv
CONF_FRAMERATE = "framerate"
CONF_SECURITY_LEVEL = "security_level"
CONF_STREAM_PATH = "stream_path"
DEFAULT_CAMERA_BRAND = "VIVOTEK"
DEFAULT_NAME = "VIVOTEK Camera"
DEFAULT_EVENT_0_KEY = "event_i0_enable"
DEFAULT_SECURITY_LEVEL = "admin"
DEFAULT_STREAM_SOURCE = "live.sdp"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_FRAMERATE, default=2): cv.positive_int,
vol.Optional(CONF_SECURITY_LEVEL, default=DEFAULT_SECURITY_LEVEL): cv.string,
vol.Optional(CONF_STREAM_PATH, default=DEFAULT_STREAM_SOURCE): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Vivotek IP Camera."""
creds = f"{config[CONF_USERNAME]}:{config[CONF_PASSWORD]}"
args = {
"config": config,
"cam": VivotekCamera(
host=config[CONF_IP_ADDRESS],
port=(443 if config[CONF_SSL] else 80),
verify_ssl=config[CONF_VERIFY_SSL],
usr=config[CONF_USERNAME],
pwd=config[CONF_PASSWORD],
digest_auth=config[CONF_AUTHENTICATION] == HTTP_DIGEST_AUTHENTICATION,
sec_lvl=config[CONF_SECURITY_LEVEL],
),
"stream_source": f"rtsp://{creds}@{config[CONF_IP_ADDRESS]}:554/{config[CONF_STREAM_PATH]}",
}
add_entities([VivotekCam(**args)], True)
class VivotekCam(Camera):
"""A Vivotek IP camera."""
def __init__(self, config, cam, stream_source):
"""Initialize a Vivotek camera."""
super().__init__()
self._cam = cam
self._frame_interval = 1 / config[CONF_FRAMERATE]
self._motion_detection_enabled = False
self._model_name = None
self._name = config[CONF_NAME]
self._stream_source = stream_source
@property
def supported_features(self):
"""Return supported features for this camera."""
return SUPPORT_STREAM
@property
def frame_interval(self):
"""Return the interval between frames of the mjpeg stream."""
return self._frame_interval
def camera_image(self):
"""Return bytes of camera image."""
return self._cam.snapshot()
@property
def name(self):
"""Return the name of this device."""
return self._name
async def stream_source(self):
"""Return the source of the stream."""
return self._stream_source
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._motion_detection_enabled
def disable_motion_detection(self):
"""Disable motion detection in camera."""
response = self._cam.set_param(DEFAULT_EVENT_0_KEY, 0)
self._motion_detection_enabled = int(response) == 1
def enable_motion_detection(self):
"""Enable motion detection in camera."""
response = self._cam.set_param(DEFAULT_EVENT_0_KEY, 1)
self._motion_detection_enabled = int(response) == 1
@property
def brand(self):
"""Return the camera brand."""
return DEFAULT_CAMERA_BRAND
@property
def model(self):
"""Return the camera model."""
return self._model_name
def update(self):
"""Update entity status."""
self._model_name = self._cam.model_name
|
import re
import sys
from collections import namedtuple
from docker.errors import APIError
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.utils import _log
from paasta_tools.utils import DEFAULT_LOGLEVEL
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import load_system_paasta_config
# Sorry to any non-yelpers but this won't
# do much as our metrics and logging libs
# are not open source
try:
import yelp_meteorite
except ImportError:
yelp_meteorite = None
try:
import clog
except ImportError:
clog = None
LogLine = namedtuple(
"LogLine",
[
"timestamp",
"hostname",
"container_id",
"cluster",
"service",
"instance",
"process_name",
"mesos_container_id",
"mem_limit",
],
)
def capture_oom_events_from_stdin():
process_name_regex = re.compile(
r"^\d+\s[a-zA-Z0-9\-]+\s.*\]\s(.+)\sinvoked\soom-killer:"
)
oom_regex_docker = re.compile(
r"^(\d+)\s([a-zA-Z0-9\-]+)\s.*Task in /docker/(\w{12})\w+ killed as a"
)
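    # Illustrative example (hypothetical log line, assumed format): an entry
    # like "1500316300 host1 ... Task in /docker/0123456789abcdef killed as a"
    # matches oom_regex_docker and makes the generator yield
    # (1500316300, "host1", "0123456789ab", process_name), where process_name
    # was captured from the preceding "invoked oom-killer:" line.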
oom_regex_kubernetes = re.compile(
r"""
^(\d+)\s # timestamp
([a-zA-Z0-9\-]+) # hostname
\s.*Task\sin\s/kubepods/(?:[a-zA-Z]+/)? # start of message; non capturing, optional group for the qos cgroup
pod[-\w]+/(\w{12})\w+\s # containerid
killed\sas\sa* # eom
""",
re.VERBOSE,
)
process_name = ""
while True:
syslog = sys.stdin.readline()
if not syslog:
break
r = process_name_regex.search(syslog)
if r:
process_name = r.group(1)
r = oom_regex_docker.search(syslog)
if r:
yield (int(r.group(1)), r.group(2), r.group(3), process_name)
process_name = ""
r = oom_regex_kubernetes.search(syslog)
if r:
yield (int(r.group(1)), r.group(2), r.group(3), process_name)
process_name = ""
def get_container_env_as_dict(docker_inspect):
env_vars = {}
config = docker_inspect.get("Config")
if config is not None:
env = config.get("Env", [])
for i in env:
name, _, value = i.partition("=")
env_vars[name] = value
return env_vars
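# For example, a hypothetical inspect payload of
#   {"Config": {"Env": ["PAASTA_SERVICE=foo", "PAASTA_INSTANCE=main"]}}
# becomes {"PAASTA_SERVICE": "foo", "PAASTA_INSTANCE": "main"}.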
def log_to_clog(log_line):
"""Send the event to 'tmp_paasta_oom_events'."""
line = (
'{"timestamp": %d, "hostname": "%s", "container_id": "%s", "cluster": "%s", '
'"service": "%s", "instance": "%s", "process_name": "%s", '
'"mesos_container_id": "%s", "mem_limit": "%s"}'
% (
log_line.timestamp,
log_line.hostname,
log_line.container_id,
log_line.cluster,
log_line.service,
log_line.instance,
log_line.process_name,
log_line.mesos_container_id,
log_line.mem_limit,
)
)
clog.log_line("tmp_paasta_oom_events", line)
def log_to_paasta(log_line):
"""Add the event to the standard PaaSTA logging backend."""
line = "oom-killer killed {} on {} (container_id: {}).".format(
"a %s process" % log_line.process_name
if log_line.process_name
else "a process",
log_line.hostname,
log_line.container_id,
)
_log(
service=log_line.service,
instance=log_line.instance,
component="oom",
cluster=log_line.cluster,
level=DEFAULT_LOGLEVEL,
line=line,
)
def send_sfx_event(service, instance, cluster):
if yelp_meteorite:
service_instance_config = get_instance_config(
service=service, instance=instance, cluster=cluster
)
dimensions = {
"paasta_cluster": cluster,
"paasta_instance": instance,
"paasta_service": service,
"paasta_pool": service_instance_config.get_pool(),
}
yelp_meteorite.events.emit_event(
"paasta.service.oom_events", dimensions=dimensions,
)
counter = yelp_meteorite.create_counter(
"paasta.service.oom_count", default_dimensions=dimensions,
)
counter.count()
def main():
if clog is None:
print("CLog logger unavailable, exiting.", file=sys.stderr)
sys.exit(1)
clog.config.configure(
scribe_host="169.254.255.254",
scribe_port=1463,
monk_disable=False,
scribe_disable=False,
)
cluster = load_system_paasta_config().get_cluster()
client = get_docker_client()
for (
timestamp,
hostname,
container_id,
process_name,
) in capture_oom_events_from_stdin():
try:
docker_inspect = client.inspect_container(resource_id=container_id)
        except APIError:
continue
env_vars = get_container_env_as_dict(docker_inspect)
service = env_vars.get("PAASTA_SERVICE", "unknown")
instance = env_vars.get("PAASTA_INSTANCE", "unknown")
mesos_container_id = env_vars.get("MESOS_CONTAINER_NAME", "mesos-null")
mem_limit = env_vars.get("PAASTA_RESOURCE_MEM", "unknown")
log_line = LogLine(
timestamp=timestamp,
hostname=hostname,
container_id=container_id,
cluster=cluster,
service=service,
instance=instance,
process_name=process_name,
mesos_container_id=mesos_container_id,
mem_limit=mem_limit,
)
log_to_clog(log_line)
log_to_paasta(log_line)
send_sfx_event(service, instance, cluster)
if __name__ == "__main__":
main()
|
from gi.repository import Gdk, Gio, GObject, Gtk
KEYBINDING_FLAGS = GObject.SignalFlags.RUN_LAST | GObject.SignalFlags.ACTION
class MeldNotebook(Gtk.Notebook):
"""Notebook subclass with tab switch and reordering behaviour
MeldNotebook implements some fairly generic tab switching shortcuts
and a popup menu for simple tab controls, as well as some
Meld-specific tab label handling.
"""
__gtype_name__ = "MeldNotebook"
__gsignals__ = {
'tab-switch': (KEYBINDING_FLAGS, None, (int,)),
'page-label-changed': (0, None, (GObject.TYPE_STRING,)),
}
    # Python 3.4 compatibility: bytes do not support % formatting, so keep
    # this CSS as a plain bytes literal.
css = (
b"""
@binding-set TabSwitchBindings {
bind "<Alt>1" { "tab-switch" (0) };
bind "<Alt>2" { "tab-switch" (1) };
bind "<Alt>3" { "tab-switch" (2) };
bind "<Alt>4" { "tab-switch" (3) };
bind "<Alt>5" { "tab-switch" (4) };
bind "<Alt>6" { "tab-switch" (5) };
bind "<Alt>7" { "tab-switch" (6) };
bind "<Alt>8" { "tab-switch" (7) };
bind "<Alt>9" { "tab-switch" (8) };
bind "<Alt>0" { "tab-switch" (9) };
}
notebook.meld-notebook { -gtk-key-bindings: TabSwitchBindings; }
"""
)
ui = """
<?xml version="1.0" encoding="UTF-8"?>
<interface>
<menu id="tab-menu">
<item>
<attribute name="label">Move _Left</attribute>
<attribute name="action">popup.tabmoveleft</attribute>
</item>
<item>
<attribute name="label">Move _Right</attribute>
<attribute name="action">popup.tabmoveright</attribute>
</item>
<item>
<attribute name="label">_Close</attribute>
<attribute name="action">win.close</attribute>
</item>
</menu>
</interface>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.action_group = Gio.SimpleActionGroup()
actions = (
("tabmoveleft", self.on_tab_move_left),
("tabmoveright", self.on_tab_move_right),
)
for (name, callback) in actions:
action = Gio.SimpleAction.new(name, None)
action.connect('activate', callback)
self.action_group.add_action(action)
self.insert_action_group("popup", self.action_group)
builder = Gtk.Builder.new_from_string(self.ui, -1)
self.popup_menu = builder.get_object("tab-menu")
provider = Gtk.CssProvider()
provider.load_from_data(self.css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(), provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
stylecontext = self.get_style_context()
stylecontext.add_class('meld-notebook')
self.connect('button-press-event', self.on_button_press_event)
self.connect('popup-menu', self.on_popup_menu)
self.connect('page-added', self.on_page_added)
self.connect('page-removed', self.on_page_removed)
def do_tab_switch(self, page_num):
self.set_current_page(page_num)
def on_popup_menu(self, widget, event=None):
self.action_group.lookup_action("tabmoveleft").set_enabled(
self.get_current_page() > 0)
self.action_group.lookup_action("tabmoveright").set_enabled(
self.get_current_page() < self.get_n_pages() - 1)
popup = Gtk.Menu.new_from_model(self.popup_menu)
popup.attach_to_widget(widget, None)
popup.show_all()
if event:
popup.popup_at_pointer(event)
else:
popup.popup_at_widget(
widget,
Gdk.Gravity.NORTH_WEST,
Gdk.Gravity.NORTH_WEST,
event,
)
return True
def on_button_press_event(self, widget, event):
if (event.triggers_context_menu() and
event.type == Gdk.EventType.BUTTON_PRESS):
return self.on_popup_menu(widget, event)
return False
def on_tab_move_left(self, *args):
page_num = self.get_current_page()
child = self.get_nth_page(page_num)
page_num = page_num - 1 if page_num > 0 else 0
self.reorder_child(child, page_num)
def on_tab_move_right(self, *args):
page_num = self.get_current_page()
child = self.get_nth_page(page_num)
self.reorder_child(child, page_num + 1)
def on_page_added(self, notebook, child, page_num, *args):
child.connect("label-changed", self.on_label_changed)
self.props.show_tabs = self.get_n_pages() > 1
def on_page_removed(self, notebook, child, page_num, *args):
child.disconnect_by_func(self.on_label_changed)
self.props.show_tabs = self.get_n_pages() > 1
def on_label_changed(self, page, text, tooltip):
nbl = self.get_tab_label(page)
nbl.props.label_text = text
# Only update the window title if the current page is active
if self.get_current_page() == self.page_num(page):
self.emit('page-label-changed', text)
self.child_set_property(page, "menu-label", text)
|
import os
from unittest import SkipTest
from os import path as op
import sys
import pytest
from mne.utils import run_tests_if_main, _TempDir, _get_root_dir
skip_files = (
# known crlf
'FreeSurferColorLUT.txt',
'test_edf_stim_channel.txt',
'FieldTrip.py',
'license.txt',
# part of testing compatibility with older BV formats is testing
# the line endings and coding schemes used there
'test_old_layout_latin1_software_filter.vhdr',
'test_old_layout_latin1_software_filter.vmrk',
'searchindex.dat',
)
def _assert_line_endings(dir_):
"""Check line endings for a directory."""
if sys.platform == 'win32':
raise SkipTest('Skipping line endings check on Windows')
report = list()
good_exts = ('.py', '.dat', '.sel', '.lout', '.css', '.js', '.lay', '.txt',
'.elc', '.csd', '.sfp', '.json', '.hpts', '.vmrk', '.vhdr',
'.head', '.eve', '.ave', '.cov', '.label')
for dirpath, dirnames, filenames in os.walk(dir_):
for fname in filenames:
if op.splitext(fname)[1] not in good_exts or fname in skip_files:
continue
filename = op.join(dirpath, fname)
relfilename = op.relpath(filename, dir_)
try:
with open(filename, 'rb') as fid:
text = fid.read().decode('utf-8')
except UnicodeDecodeError:
report.append('In %s found non-decodable bytes' % relfilename)
else:
crcount = text.count('\r')
if crcount:
report.append('In %s found %i/%i CR/LF' %
(relfilename, crcount, text.count('\n')))
if len(report) > 0:
raise AssertionError('Found %s files with incorrect endings:\n%s'
% (len(report), '\n'.join(report)))
def test_line_endings():
"""Test line endings of mne-python."""
tempdir = _TempDir()
with open(op.join(tempdir, 'foo'), 'wb') as fid:
fid.write('bad\r\ngood\n'.encode('ascii'))
_assert_line_endings(tempdir)
with open(op.join(tempdir, 'bad.py'), 'wb') as fid:
fid.write(b'\x97')
pytest.raises(AssertionError, _assert_line_endings, tempdir)
with open(op.join(tempdir, 'bad.py'), 'wb') as fid:
fid.write('bad\r\ngood\n'.encode('ascii'))
pytest.raises(AssertionError, _assert_line_endings, tempdir)
# now check mne
_assert_line_endings(_get_root_dir())
run_tests_if_main()
|
from functools import partial
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _picks_to_idx
from ..utils import logger, verbose, _time_mask, _check_option
from .multitaper import psd_array_multitaper
def _decomp_aggregate_mask(epoch, func, average, freq_sl):
_, _, spect = func(epoch)
spect = spect[..., freq_sl, :]
# Do the averaging here (per epoch) to save memory
if average == 'mean':
spect = np.nanmean(spect, axis=-1)
elif average == 'median':
spect = np.nanmedian(spect, axis=-1)
return spect
def _spect_func(epoch, func, freq_sl, average):
"""Aux function."""
# Decide if we should split this to save memory or not, since doing
# multiple calls will incur some performance overhead. Eventually we might
# want to write (really, go back to) our own spectrogram implementation
# that, if possible, averages after each transform, but this will incur
# a lot of overhead because of the many Python calls required.
kwargs = dict(func=func, average=average, freq_sl=freq_sl)
if epoch.nbytes > 10e6:
spect = np.apply_along_axis(
_decomp_aggregate_mask, -1, epoch, **kwargs)
else:
spect = _decomp_aggregate_mask(epoch, **kwargs)
return spect
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
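# Worked example (illustrative only): for a 1000-sample signal,
# _check_nfft(1000, 256, None, 128) returns (256, 256, 128): n_per_seg is
# copied from n_fft, giving 256-sample segments with 50% overlap. Asking for
# n_fft=2048 with n_per_seg=None on the same signal raises ValueError, since
# zero-padding requires an explicit n_per_seg <= n_times.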
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or'
'Evoked. Got type {}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
picks = _picks_to_idx(inst.info, picks, 'data', with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data(picks=picks)[:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, average='mean', window='hamming',
verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
%(n_jobs)s
%(average-psd)s
.. versionadded:: 0.19.0
%(window-psd)s
.. versionadded:: 0.22.0
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs) or (..., n_freqs, n_segments)
        The power spectral densities. If ``average='mean'`` or
``average='median'``, the returned array will have the same shape
as the input data plus an additional frequency dimension.
If ``average=None``, the returned array will have the same shape as
the input data plus two additional dimensions corresponding to
frequencies and the unaggregated segments, respectively.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
_check_option('average', average, (None, 'mean', 'median'))
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
if not freq_mask.any():
raise ValueError(
f'No frequencies found between fmin={fmin} and fmax={fmax}')
freq_sl = slice(*(np.where(freq_mask)[0][[0, -1]] + [0, 1]))
del freq_mask
freqs = freqs[freq_sl]
# Parallelize across first N-1 dimensions
x_splits = np.array_split(x, n_jobs)
logger.debug(
        f'Spectrogram using {n_fft}-point FFT on {n_per_seg} samples with '
f'{n_overlap} overlap and {window} window')
from scipy.signal import spectrogram
parallel, my_spect_func, n_jobs = parallel_func(_spect_func, n_jobs=n_jobs)
func = partial(spectrogram, noverlap=n_overlap, nperseg=n_per_seg,
nfft=n_fft, fs=sfreq, window=window)
f_spect = parallel(my_spect_func(d, func=func, freq_sl=freq_sl,
average=average)
for d in x_splits)
psds = np.concatenate(f_spect, axis=0)
shape = dshape + (len(freqs),)
if average is None:
shape = shape + (-1,)
psds.shape = shape
return psds, freqs
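# Usage sketch (illustrative only): for 5 channels of 10 s of data sampled at
# 1000 Hz,
#   x = np.random.RandomState(0).randn(5, 10000)
#   psds, freqs = psd_array_welch(x, sfreq=1000., fmin=1., fmax=40., n_fft=2048)
# psds has shape (5, n_freqs) with freqs spanning 1-40 Hz; passing average=None
# instead appends a trailing segments dimension.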
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, average='mean', window='hamming',
verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
tmin : float | None
Min time of interest.
tmax : float | None
Max time of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be <= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
%(picks_good_data_noref)s
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
%(n_jobs)s
%(reject_by_annotation_raw)s
.. versionadded:: 0.15.0
%(average-psd)s
.. versionadded:: 0.19.0
%(window-psd)s
.. versionadded:: 0.22.0
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs) or (..., n_freqs, n_segments)
        The power spectral densities. If ``average='mean'`` or
``average='median'`` and input is of type Raw or Evoked, then psds will
be of shape (n_channels, n_freqs); if input is of type Epochs, then
psds will be of shape (n_epochs, n_channels, n_freqs).
If ``average=None``, the returned array will have an additional
dimension corresponding to the unaggregated segments.
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
average=average, n_jobs=n_jobs, window=window,
verbose=verbose)
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
tmin : float | None
Min time of interest.
tmax : float | None
Max time of interest.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(picks_good_data_noref)s
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
%(n_jobs)s
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs)
        The power spectral densities. If input is of type Raw, then psds will
        be of shape (n_channels, n_freqs); if input is of type Epochs, then
        psds will be of shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Slepian, D. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] Percival D.B. and Walden A.T. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
|
import logging
from homeassistant.components.notify import ATTR_TARGET, BaseNotificationService
from . import DOMAIN as DOVADO_DOMAIN
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the Dovado Router SMS notification service."""
return DovadoSMSNotificationService(hass.data[DOVADO_DOMAIN].client)
class DovadoSMSNotificationService(BaseNotificationService):
"""Implement the notification service for the Dovado SMS component."""
def __init__(self, client):
"""Initialize the service."""
self._client = client
def send_message(self, message, **kwargs):
"""Send SMS to the specified target phone number."""
target = kwargs.get(ATTR_TARGET)
if not target:
_LOGGER.error("One target is required")
return
self._client.send_sms(target, message)
|
from random import shuffle
from typing import Optional
import aiohttp
from redbot.core.i18n import Translator, cog_i18n
from redbot.core import checks, Config, commands
from redbot.core.commands import UserInputOptional
_ = Translator("Image", __file__)
@cog_i18n(_)
class Image(commands.Cog):
"""Image related commands."""
default_global = {"imgur_client_id": None}
def __init__(self, bot):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, identifier=2652104208, force_registration=True)
self.config.register_global(**self.default_global)
self.session = aiohttp.ClientSession()
self.imgur_base_url = "https://api.imgur.com/3/"
def cog_unload(self):
self.session.detach()
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
async def initialize(self) -> None:
"""Move the API keys from cog stored config to core bot config if they exist."""
imgur_token = await self.config.imgur_client_id()
if imgur_token is not None:
if not await self.bot.get_shared_api_tokens("imgur"):
await self.bot.set_shared_api_tokens("imgur", client_id=imgur_token)
await self.config.imgur_client_id.clear()
@commands.group(name="imgur")
async def _imgur(self, ctx):
"""Retrieve pictures from Imgur.
Make sure to set the Client ID using `[p]imgurcreds`.
"""
pass
@_imgur.command(name="search")
async def imgur_search(self, ctx, count: UserInputOptional[int] = 1, *, term: str):
"""Search Imgur for the specified term.
Use `count` to choose how many images should be returned.
Command can return up to 5 images.
"""
if count < 1 or count > 5:
await ctx.send(_("Image count has to be between 1 and 5."))
return
url = self.imgur_base_url + "gallery/search/time/all/0"
params = {"q": term}
imgur_client_id = (await ctx.bot.get_shared_api_tokens("imgur")).get("client_id")
if not imgur_client_id:
await ctx.send(
_(
"A Client ID has not been set! Please set one with `{prefix}imgurcreds`."
).format(prefix=ctx.clean_prefix)
)
return
headers = {"Authorization": "Client-ID {}".format(imgur_client_id)}
async with self.session.get(url, headers=headers, params=params) as search_get:
data = await search_get.json()
if data["success"]:
results = data["data"]
if not results:
await ctx.send(_("Your search returned no results."))
return
shuffle(results)
msg = _("Search results...\n")
for r in results[:count]:
msg += r["gifv"] if "gifv" in r else r["link"]
msg += "\n"
await ctx.send(msg)
else:
await ctx.send(
_("Something went wrong. Error code is {code}.").format(code=data["status"])
)
@_imgur.command(name="subreddit")
async def imgur_subreddit(
self,
ctx,
subreddit: str,
count: Optional[int] = 1,
sort_type: str = "top",
window: str = "day",
):
"""Get images from a subreddit.
You can customize the search with the following options:
- `<count>`: number of images to return (up to 5)
- `<sort_type>`: new, top
- `<window>`: day, week, month, year, all
"""
if count < 1 or count > 5:
await ctx.send(_("Image count has to be between 1 and 5."))
return
sort_type = sort_type.lower()
window = window.lower()
if sort_type == "new":
sort = "time"
elif sort_type == "top":
sort = "top"
else:
await ctx.send(_("Only 'new' and 'top' are a valid sort type."))
return
if window not in ("day", "week", "month", "year", "all"):
await ctx.send_help()
return
imgur_client_id = (await ctx.bot.get_shared_api_tokens("imgur")).get("client_id")
if not imgur_client_id:
await ctx.send(
_(
"A Client ID has not been set! Please set one with `{prefix}imgurcreds`."
).format(prefix=ctx.clean_prefix)
)
return
links = []
headers = {"Authorization": "Client-ID {}".format(imgur_client_id)}
url = self.imgur_base_url + "gallery/r/{}/{}/{}/0".format(subreddit, sort, window)
async with self.session.get(url, headers=headers) as sub_get:
data = await sub_get.json()
if data["success"]:
items = data["data"]
if items:
for item in items[:count]:
link = item["gifv"] if "gifv" in item else item["link"]
links.append("{}\n{}".format(item["title"], link))
if links:
await ctx.send("\n".join(links))
else:
await ctx.send(_("No results found."))
else:
await ctx.send(
_("Something went wrong. Error code is {code}.").format(code=data["status"])
)
@checks.is_owner()
@commands.command()
async def imgurcreds(self, ctx):
"""Explain how to set imgur API tokens."""
message = _(
"To get an Imgur Client ID:\n"
"1. Login to an Imgur account.\n"
"2. Visit this page https://api.imgur.com/oauth2/addclient.\n"
"3. Enter a name for your application.\n"
"4. Select *Anonymous usage without user authorization* for the auth type.\n"
"5. Set the authorization callback URL to `https://localhost`.\n"
"6. Leave the app website blank.\n"
"7. Enter a valid email address and a description.\n"
"8. Check the captcha box and click next.\n"
"9. Your Client ID will be on the next page.\n"
"10. Run the command `{prefix}set api imgur client_id <your_client_id_here>`.\n"
).format(prefix=ctx.clean_prefix)
await ctx.maybe_send_embed(message)
@commands.guild_only()
@commands.command()
async def gif(self, ctx, *, keywords):
"""Retrieve the first search result from Giphy."""
giphy_api_key = (await ctx.bot.get_shared_api_tokens("GIPHY")).get("api_key")
if not giphy_api_key:
await ctx.send(
_("An API key has not been set! Please set one with `{prefix}giphycreds`.").format(
prefix=ctx.clean_prefix
)
)
return
url = "http://api.giphy.com/v1/gifs/search"
async with self.session.get(url, params={"api_key": giphy_api_key, "q": keywords}) as r:
result = await r.json()
if r.status == 200:
if result["data"]:
await ctx.send(result["data"][0]["url"])
else:
await ctx.send(_("No results found."))
else:
await ctx.send(_("Error contacting the Giphy API."))
@commands.guild_only()
@commands.command()
async def gifr(self, ctx, *, keywords):
"""Retrieve a random GIF from a Giphy search."""
giphy_api_key = (await ctx.bot.get_shared_api_tokens("GIPHY")).get("api_key")
if not giphy_api_key:
await ctx.send(
_("An API key has not been set! Please set one with `{prefix}giphycreds`.").format(
prefix=ctx.clean_prefix
)
)
return
url = "http://api.giphy.com/v1/gifs/random"
async with self.session.get(url, params={"api_key": giphy_api_key, "tag": keywords}) as r:
result = await r.json()
if r.status == 200:
if result["data"]:
await ctx.send(result["data"]["url"])
else:
await ctx.send(_("No results found."))
else:
await ctx.send(_("Error contacting the API."))
@checks.is_owner()
@commands.command()
async def giphycreds(self, ctx):
"""Explains how to set GIPHY API tokens."""
message = _(
"To get a GIPHY API Key:\n"
"1. Login to (or create) a GIPHY account.\n"
"2. Visit this page: https://developers.giphy.com/dashboard.\n"
"3. Press *Create an App*.\n"
"4. Click *Select API*, then *Next Step*.\n"
"5. Add an app name, for example *Red*.\n"
"6. Add an app description, for example *Used for Red's image cog*.\n"
"7. Click *Create App*. You'll need to agree to the GIPHY API Terms.\n"
"8. Copy the API Key.\n"
"9. In Discord, run the command {command}.\n"
).format(
command="`{prefix}set api GIPHY api_key {placeholder}`".format(
prefix=ctx.clean_prefix, placeholder=_("<your_api_key_here>")
)
)
await ctx.maybe_send_embed(message)
|
import re
from django.utils.translation import gettext_lazy as _
from weblate.trans.autofixes.base import AutoFix
QUOTE_PARAM = re.compile(r"'(\{[^}]+\})'")
SINGLE_APO = re.compile(r"'{1,3}")
DOUBLE_APO = re.compile(r"'{4,}")
REPLACEMENT = "__weblate:quote__"
REPLACE_STRING = r"{0}\1{0}".format(REPLACEMENT)
class DoubleApostrophes(AutoFix):
"""Ensures apostrophes are escaped in Java Properties MessageFormat string.
- all apostrophes except ones around {} vars are doubled
    Note: This fix is not generically applicable to all strings, which is
    why it is not enabled by default.
"""
fix_id = "java-messageformat"
name = _("Apostrophes in Java MessageFormat")
def fix_single_target(self, target, source, unit):
flags = unit.all_flags
if ("auto-java-messageformat" not in flags or "{0" not in source) and (
"java-messageformat" not in flags
):
return target, False
# Split on apostrophe
new = SINGLE_APO.sub(
"''", DOUBLE_APO.sub("''''", QUOTE_PARAM.sub(REPLACE_STRING, target))
).replace(REPLACEMENT, "'")
return new, new != target
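# Illustrative behaviour (hypothetical unit carrying the java-messageformat
# flag): fix_single_target("it's {0} o'clock", source, unit) returns
# ("it''s {0} o''clock", True): lone apostrophes are doubled, while
# apostrophes wrapped directly around a placeholder, as in "'{0}'", are
# protected by the __weblate:quote__ marker and left as single quotes.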
|
import logging
import pywink
from homeassistant.const import DEGREE, TEMP_CELSIUS
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = ["temperature", "humidity", "balance", "proximity"]
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
for sensor in pywink.get_sensors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
if sensor.capability() in SENSOR_TYPES:
add_entities([WinkSensorDevice(sensor, hass)])
for eggtray in pywink.get_eggtrays():
_id = eggtray.object_id() + eggtray.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkSensorDevice(eggtray, hass)])
for tank in pywink.get_propane_tanks():
_id = tank.object_id() + tank.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkSensorDevice(tank, hass)])
for piggy_bank in pywink.get_piggy_banks():
_id = piggy_bank.object_id() + piggy_bank.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
try:
if piggy_bank.capability() in SENSOR_TYPES:
add_entities([WinkSensorDevice(piggy_bank, hass)])
except AttributeError:
_LOGGER.info("Device is not a sensor")
class WinkSensorDevice(WinkDevice):
"""Representation of a Wink sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
super().__init__(wink, hass)
self.capability = self.wink.capability()
if self.wink.unit() == DEGREE:
self._unit_of_measurement = TEMP_CELSIUS
else:
self._unit_of_measurement = self.wink.unit()
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["sensor"].append(self)
@property
def state(self):
"""Return the state."""
state = None
if self.capability == "humidity":
if self.wink.state() is not None:
state = round(self.wink.state())
elif self.capability == "temperature":
if self.wink.state() is not None:
state = round(self.wink.state(), 1)
elif self.capability == "balance":
if self.wink.state() is not None:
state = round(self.wink.state() / 100, 2)
elif self.capability == "proximity":
if self.wink.state() is not None:
state = self.wink.state()
else:
state = self.wink.state()
return state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
super_attrs = super().device_state_attributes
try:
super_attrs["egg_times"] = self.wink.eggs()
except AttributeError:
# Ignore error, this sensor isn't an eggminder
pass
return super_attrs
|
import numpy as np
from mne.channels import make_dig_montage
from mne.transforms import _sph_to_cart, _topo_to_sph
# XXX: This is a workaround to get the previous behavior.
def _read_eeglab_montage(fname):
"""Read an EEGLAB digitization file.
Parameters
----------
fname : str
The filepath of Polhemus ISOTrak formatted file.
File extension is expected to be '.loc', '.locs' or '.eloc'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
make_dig_montage
"""
ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist()
topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])
sph = _topo_to_sph(topo)
pos = _sph_to_cart(sph)
pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]
return make_dig_montage(
ch_pos=dict(zip(ch_names, pos)),
coord_frame='head',
)
|
from ... import event
from .._widget import Widget
class BaseButton(Widget):
""" Abstract button class.
"""
DEFAULT_MIN_SIZE = 10, 24
CSS = """
.flx-BaseButton {
white-space: nowrap;
padding: 0.2em 0.4em;
border-radius: 3px;
color: #333;
}
.flx-BaseButton, .flx-BaseButton > input {
margin: 2px; /* room for outline */
}
.flx-BaseButton:focus, .flx-BaseButton > input:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
.flx-Button, .flx-ToggleButton{
background: #e8e8e8;
border: 1px solid #ccc;
transition: background 0.3s;
}
.flx-Button:hover, .flx-ToggleButton:hover {
background: #e8eaff;
}
.flx-ToggleButton {
text-align: left;
}
.flx-ToggleButton.flx-checked {
background: #e8eaff;
}
.flx-ToggleButton::before {
content: '\\2610\\00a0 ';
}
.flx-ToggleButton.flx-checked::before {
content: '\\2611\\00a0 ';
}
.flx-RadioButton > input, .flx-CheckBox > input{
margin-left: 0.3em;
margin-right: 0.3em;
}
.flx-RadioButton > input, .flx-CheckBox > input {
color: #333;
}
.flx-RadioButton:hover > input, .flx-CheckBox:hover > input {
color: #036;
}
"""
text = event.StringProp('', settable=True, doc="""
The text on the button.
""")
checked = event.BoolProp(False, settable=True, doc="""
Whether the button is checked.
""")
disabled = event.BoolProp(False, settable=True, doc="""
Whether the button is disabled.
""")
@event.reaction('pointer_click')
def __on_pointer_click(self, e):
self.node.blur()
@event.emitter
def user_checked(self, checked):
""" Event emitted when the user (un)checks this button. Has
``old_value`` and ``new_value`` attributes.
"""
d = {'old_value': self.checked, 'new_value': checked}
self.set_checked(checked)
return d
class Button(BaseButton):
""" A push button.
The ``node`` of this widget is a
`<button> <https://developer.mozilla.org/docs/Web/HTML/Element/button>`_.
"""
DEFAULT_MIN_SIZE = 10, 28
def _create_dom(self):
global window
node = window.document.createElement('button')
# node = window.document.createElement('input')
# node.setAttribute('type', 'button')
return node
def _render_dom(self):
return [self.text]
@event.reaction('disabled')
def __disabled_changed(self, *events):
if events[-1].new_value:
self.node.setAttribute("disabled", "disabled")
else:
self.node.removeAttribute("disabled")
class ToggleButton(BaseButton):
""" A button that can be toggled. It behaves like a checkbox, while
looking more like a regular button.
The ``node`` of this widget is a
`<button> <https://developer.mozilla.org/docs/Web/HTML/Element/button>`_.
"""
DEFAULT_MIN_SIZE = 10, 28
def _create_dom(self):
global window
node = window.document.createElement('button')
return node
def _render_dom(self):
return [self.text]
@event.reaction('pointer_click')
def __toggle_checked(self, *events):
self.user_checked(not self.checked)
@event.reaction('checked')
def __check_changed(self, *events):
if self.checked:
self.node.classList.add('flx-checked')
else:
self.node.classList.remove('flx-checked')
class RadioButton(BaseButton):
""" A radio button. Of any group of radio buttons that share the
same parent, only one can be active.
The ``outernode`` of this widget is a
`<label> <https://developer.mozilla.org/docs/Web/HTML/Element/label>`_,
and the ``node`` a radio
`<input> <https://developer.mozilla.org/docs/Web/HTML/Element/input>`_.
"""
def _create_dom(self):
global window
outernode = window.document.createElement('label')
node = window.document.createElement('input')
outernode.appendChild(node)
node.setAttribute('type', 'radio')
node.setAttribute('id', self.id)
outernode.setAttribute('for', self.id)
return outernode, node
def _render_dom(self):
return [self.node, self.text]
@event.reaction('parent')
def __update_group(self, *events):
if self.parent:
self.node.name = self.parent.id
@event.reaction('checked')
def __check_changed(self, *events):
self.node.checked = self.checked
@event.emitter
def pointer_click(self, e):
""" This method is called on JS a click event. We *first* update
the checked properties, and then emit the Flexx click event.
That way, one can connect to the click event and have an
up-to-date checked props (even on Py).
"""
# Turn off any radio buttons in the same group
if self.parent:
for child in self.parent.children:
if isinstance(child, RadioButton) and child is not self:
child.set_checked(child.node.checked)
# Turn on this button (last)
self.user_checked(self.node.checked) # instead of set_checked
# Process actual click event
super().pointer_click(e)
class CheckBox(BaseButton):
""" A checkbox button.
The ``outernode`` of this widget is a
`<label> <https://developer.mozilla.org/docs/Web/HTML/Element/label>`_,
and the ``node`` a checkbox
`<input> <https://developer.mozilla.org/docs/Web/HTML/Element/input>`_.
"""
def _create_dom(self):
global window
outernode = window.document.createElement('label')
node = window.document.createElement('input')
outernode.appendChild(node)
node.setAttribute('type', 'checkbox')
node.setAttribute('id', self.id)
outernode.setAttribute('for', self.id)
self._addEventListener(node, 'click', self._check_changed_from_dom, 0)
return outernode, node
def _render_dom(self):
return [self.node, self.text]
@event.reaction('checked')
def __check_changed(self, *events):
self.node.checked = self.checked
def _check_changed_from_dom(self, ev):
self.user_checked(self.node.checked)
|
from paasta_tools.autoscaling import forecasting
def test_moving_average_forecast_policy():
historical_load = [
(1, 100),
(2, 120),
(3, 140),
(4, 160),
(5, 180),
(6, 200),
(7, 220),
]
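    # The load grows by 20 per time step. A 5-second window ending at t=7
    # averages the points at t >= 2: (120+140+160+180+200+220) / 6 == 170,
    # while a 0.5-second window keeps only the last point, giving 220.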
assert 170 == forecasting.moving_average_forecast_policy(
historical_load, moving_average_window_seconds=5
)
assert 220 == forecasting.moving_average_forecast_policy(
historical_load, moving_average_window_seconds=0.5
)
def test_linreg_forecast_policy():
historical_load = [
(1, 100),
(2, 120),
(3, 140),
(4, 160),
(5, 180),
(6, 200),
(7, 220),
]
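    # The load fits a line with slope 20 per second. A 7-second window covers
    # all points, so the value at t=7 is 220 and extrapolating 39 s ahead
    # gives 220 + 39 * 20 == 1000. A zero-second window leaves a single point,
    # so the forecast stays at 220 unless linreg_default_slope is supplied
    # (220 + 78 * 10 == 1000 in the final case).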
assert 220 == forecasting.linreg_forecast_policy(
historical_load, linreg_window_seconds=7, linreg_extrapolation_seconds=0
)
assert 1000 == forecasting.linreg_forecast_policy(
historical_load, linreg_window_seconds=7, linreg_extrapolation_seconds=39
)
# We should handle the case where there's only 1 data point within the window.
assert 220 == forecasting.linreg_forecast_policy(
historical_load, linreg_window_seconds=0, linreg_extrapolation_seconds=0
)
assert 220 == forecasting.linreg_forecast_policy(
historical_load, linreg_window_seconds=0, linreg_extrapolation_seconds=10
)
assert 1000 == forecasting.linreg_forecast_policy(
historical_load,
linreg_window_seconds=0,
linreg_extrapolation_seconds=78,
linreg_default_slope=10,
)
historical_load_2 = [
(1, 100),
(2, 100),
(3, 100),
(4, 100),
(5, 100),
(6, 100),
(1, 100),
(2, 200),
(3, 300),
(4, 400),
(5, 500),
(6, 600),
]
assert 350 == forecasting.linreg_forecast_policy(
historical_load_2, linreg_window_seconds=7, linreg_extrapolation_seconds=0
)
|
from datetime import datetime, timedelta
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
import homeassistant.helpers.sun as sun
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
def test_next_events(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
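    # Each loop below scans day offsets starting from yesterday (mod = -1)
    # and stops at the first occurrence of the event after utc_now.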
mod = -1
while True:
next_dawn = astral.dawn_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dawn > utc_now:
break
mod += 1
mod = -1
while True:
next_dusk = astral.dusk_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dusk > utc_now:
break
mod += 1
mod = -1
while True:
next_midnight = astral.solar_midnight_utc(
utc_today + timedelta(days=mod), longitude
)
if next_midnight > utc_now:
break
mod += 1
mod = -1
while True:
next_noon = astral.solar_noon_utc(utc_today + timedelta(days=mod), longitude)
if next_noon > utc_now:
break
mod += 1
mod = -1
while True:
next_rising = astral.sunrise_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_rising > utc_now:
break
mod += 1
mod = -1
while True:
next_setting = astral.sunset_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_setting > utc_now:
break
mod += 1
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
assert next_dawn == sun.get_astral_event_next(hass, "dawn")
assert next_dusk == sun.get_astral_event_next(hass, "dusk")
assert next_midnight == sun.get_astral_event_next(hass, "solar_midnight")
assert next_noon == sun.get_astral_event_next(hass, "solar_noon")
assert next_rising == sun.get_astral_event_next(hass, SUN_EVENT_SUNRISE)
assert next_setting == sun.get_astral_event_next(hass, SUN_EVENT_SUNSET)
def test_date_events(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
dawn = astral.dawn_utc(utc_today, latitude, longitude)
dusk = astral.dusk_utc(utc_today, latitude, longitude)
midnight = astral.solar_midnight_utc(utc_today, longitude)
noon = astral.solar_noon_utc(utc_today, longitude)
sunrise = astral.sunrise_utc(utc_today, latitude, longitude)
sunset = astral.sunset_utc(utc_today, latitude, longitude)
assert dawn == sun.get_astral_event_date(hass, "dawn", utc_today)
assert dusk == sun.get_astral_event_date(hass, "dusk", utc_today)
assert midnight == sun.get_astral_event_date(hass, "solar_midnight", utc_today)
assert noon == sun.get_astral_event_date(hass, "solar_noon", utc_today)
assert sunrise == sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, utc_today)
assert sunset == sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, utc_today)
def test_date_events_default_date(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
dawn = astral.dawn_utc(utc_today, latitude, longitude)
dusk = astral.dusk_utc(utc_today, latitude, longitude)
midnight = astral.solar_midnight_utc(utc_today, longitude)
noon = astral.solar_noon_utc(utc_today, longitude)
sunrise = astral.sunrise_utc(utc_today, latitude, longitude)
sunset = astral.sunset_utc(utc_today, latitude, longitude)
with patch("homeassistant.util.dt.now", return_value=utc_now):
assert dawn == sun.get_astral_event_date(hass, "dawn", utc_today)
assert dusk == sun.get_astral_event_date(hass, "dusk", utc_today)
assert midnight == sun.get_astral_event_date(hass, "solar_midnight", utc_today)
assert noon == sun.get_astral_event_date(hass, "solar_noon", utc_today)
assert sunrise == sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, utc_today)
assert sunset == sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, utc_today)
def test_date_events_accepts_datetime(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
dawn = astral.dawn_utc(utc_today, latitude, longitude)
dusk = astral.dusk_utc(utc_today, latitude, longitude)
midnight = astral.solar_midnight_utc(utc_today, longitude)
noon = astral.solar_noon_utc(utc_today, longitude)
sunrise = astral.sunrise_utc(utc_today, latitude, longitude)
sunset = astral.sunset_utc(utc_today, latitude, longitude)
assert dawn == sun.get_astral_event_date(hass, "dawn", utc_now)
assert dusk == sun.get_astral_event_date(hass, "dusk", utc_now)
assert midnight == sun.get_astral_event_date(hass, "solar_midnight", utc_now)
assert noon == sun.get_astral_event_date(hass, "solar_noon", utc_now)
assert sunrise == sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, utc_now)
assert sunset == sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, utc_now)
def test_is_up(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 12, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
assert not sun.is_up(hass)
utc_now = datetime(2016, 11, 1, 18, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
assert sun.is_up(hass)
def test_norway_in_june(hass):
"""Test location in Norway where the sun doesn't set in summer."""
hass.config.latitude = 69.6
hass.config.longitude = 18.8
june = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, datetime(2017, 7, 25)))
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, datetime(2017, 7, 25)))
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, datetime(2017, 7, 26)))
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, datetime(2017, 7, 26)))
assert sun.get_astral_event_next(hass, SUN_EVENT_SUNRISE, june) == datetime(
2016, 7, 25, 23, 23, 39, tzinfo=dt_util.UTC
)
assert sun.get_astral_event_next(hass, SUN_EVENT_SUNSET, june) == datetime(
2016, 7, 26, 22, 19, 1, tzinfo=dt_util.UTC
)
assert sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, june) is None
assert sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, june) is None
|
from time import sleep
import voluptuous as vol
from homeassistant.components import rpi_gpio
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import setup_reload_service
from . import DOMAIN, PLATFORMS
CONF_COVERS = "covers"
CONF_RELAY_PIN = "relay_pin"
CONF_RELAY_TIME = "relay_time"
CONF_STATE_PIN = "state_pin"
CONF_STATE_PULL_MODE = "state_pull_mode"
CONF_INVERT_STATE = "invert_state"
CONF_INVERT_RELAY = "invert_relay"
DEFAULT_RELAY_TIME = 0.2
DEFAULT_STATE_PULL_MODE = "UP"
DEFAULT_INVERT_STATE = False
DEFAULT_INVERT_RELAY = False
_COVERS_SCHEMA = vol.All(
cv.ensure_list,
[
vol.Schema(
{
CONF_NAME: cv.string,
CONF_RELAY_PIN: cv.positive_int,
CONF_STATE_PIN: cv.positive_int,
}
)
],
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_COVERS): _COVERS_SCHEMA,
vol.Optional(CONF_STATE_PULL_MODE, default=DEFAULT_STATE_PULL_MODE): cv.string,
vol.Optional(CONF_RELAY_TIME, default=DEFAULT_RELAY_TIME): cv.positive_int,
vol.Optional(CONF_INVERT_STATE, default=DEFAULT_INVERT_STATE): cv.boolean,
vol.Optional(CONF_INVERT_RELAY, default=DEFAULT_INVERT_RELAY): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the RPi cover platform."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
relay_time = config.get(CONF_RELAY_TIME)
state_pull_mode = config.get(CONF_STATE_PULL_MODE)
invert_state = config.get(CONF_INVERT_STATE)
invert_relay = config.get(CONF_INVERT_RELAY)
covers = []
covers_conf = config.get(CONF_COVERS)
for cover in covers_conf:
covers.append(
RPiGPIOCover(
cover[CONF_NAME],
cover[CONF_RELAY_PIN],
cover[CONF_STATE_PIN],
state_pull_mode,
relay_time,
invert_state,
invert_relay,
)
)
add_entities(covers)
class RPiGPIOCover(CoverEntity):
"""Representation of a Raspberry GPIO cover."""
def __init__(
self,
name,
relay_pin,
state_pin,
state_pull_mode,
relay_time,
invert_state,
invert_relay,
):
"""Initialize the cover."""
self._name = name
self._state = False
self._relay_pin = relay_pin
self._state_pin = state_pin
self._state_pull_mode = state_pull_mode
self._relay_time = relay_time
self._invert_state = invert_state
self._invert_relay = invert_relay
rpi_gpio.setup_output(self._relay_pin)
rpi_gpio.setup_input(self._state_pin, self._state_pull_mode)
rpi_gpio.write_output(self._relay_pin, 0 if self._invert_relay else 1)
@property
def name(self):
"""Return the name of the cover if any."""
return self._name
def update(self):
"""Update the state of the cover."""
self._state = rpi_gpio.read_input(self._state_pin)
@property
def is_closed(self):
"""Return true if cover is closed."""
return self._state != self._invert_state
def _trigger(self):
"""Trigger the cover."""
rpi_gpio.write_output(self._relay_pin, 1 if self._invert_relay else 0)
sleep(self._relay_time)
rpi_gpio.write_output(self._relay_pin, 0 if self._invert_relay else 1)
def close_cover(self, **kwargs):
"""Close the cover."""
if not self.is_closed:
self._trigger()
def open_cover(self, **kwargs):
"""Open the cover."""
if self.is_closed:
self._trigger()
|
from homeassistant.const import (
DEVICE_CLASS_POWER,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
)
from homeassistant.helpers.entity import Entity
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the KEBA charging station platform."""
if discovery_info is None:
return
keba = hass.data[DOMAIN]
sensors = [
KebaSensor(
keba,
"Curr user",
"Max Current",
"max_current",
"mdi:flash",
ELECTRICAL_CURRENT_AMPERE,
),
KebaSensor(
keba,
"Setenergy",
"Energy Target",
"energy_target",
"mdi:gauge",
ENERGY_KILO_WATT_HOUR,
),
KebaSensor(
keba,
"P",
"Charging Power",
"charging_power",
"mdi:flash",
"kW",
DEVICE_CLASS_POWER,
),
KebaSensor(
keba,
"E pres",
"Session Energy",
"session_energy",
"mdi:gauge",
ENERGY_KILO_WATT_HOUR,
),
KebaSensor(
keba,
"E total",
"Total Energy",
"total_energy",
"mdi:gauge",
ENERGY_KILO_WATT_HOUR,
),
]
async_add_entities(sensors)
class KebaSensor(Entity):
"""The entity class for KEBA charging stations sensors."""
def __init__(self, keba, key, name, entity_type, icon, unit, device_class=None):
"""Initialize the KEBA Sensor."""
self._keba = keba
self._key = key
self._name = name
self._entity_type = entity_type
self._icon = icon
self._unit = unit
self._device_class = device_class
self._state = None
self._attributes = {}
@property
def should_poll(self):
"""Deactivate polling. Data updated by KebaHandler."""
return False
@property
def unique_id(self):
"""Return the unique ID of the binary sensor."""
return f"{self._keba.device_id}_{self._entity_type}"
@property
def name(self):
"""Return the name of the device."""
return f"{self._keba.device_name} {self._name}"
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Get the unit of measurement."""
return self._unit
@property
def device_state_attributes(self):
"""Return the state attributes of the binary sensor."""
return self._attributes
async def async_update(self):
"""Get latest cached states from the device."""
self._state = self._keba.get_value(self._key)
if self._key == "P":
self._attributes["power_factor"] = self._keba.get_value("PF")
self._attributes["voltage_u1"] = str(self._keba.get_value("U1"))
self._attributes["voltage_u2"] = str(self._keba.get_value("U2"))
self._attributes["voltage_u3"] = str(self._keba.get_value("U3"))
self._attributes["current_i1"] = str(self._keba.get_value("I1"))
self._attributes["current_i2"] = str(self._keba.get_value("I2"))
self._attributes["current_i3"] = str(self._keba.get_value("I3"))
elif self._key == "Curr user":
self._attributes["max_current_hardware"] = self._keba.get_value("Curr HW")
def update_callback(self):
"""Schedule a state update."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add update callback after being added to hass."""
self._keba.add_update_listener(self.update_callback)
|
import gzip
import io
import sys
import time
import types
import unittest
import operator
from http.client import IncompleteRead
import cherrypy
from cherrypy import tools
from cherrypy._cpcompat import ntou
from cherrypy.test import helper, _test_decorators
*PY_VER_MINOR, _ = PY_VER_PATCH = sys.version_info[:3]
# Refs:
# bugs.python.org/issue39389
# docs.python.org/3.7/whatsnew/changelog.html#python-3-7-7-release-candidate-1
# docs.python.org/3.8/whatsnew/changelog.html#python-3-8-2-release-candidate-1
HAS_GZIP_COMPRESSION_HEADER_FIXED = PY_VER_PATCH >= (3, 8, 2) or (
    # PY_VER_MINOR is a list (from starred unpacking), so convert it before
    # comparing to a tuple; a bare list == tuple comparison is always False.
    tuple(PY_VER_MINOR) == (3, 7) and PY_VER_PATCH >= (3, 7, 7)
)
timeout = 0.2
europoundUnicode = ntou('\x80\xa3')
# Client-side code #
class ToolTests(helper.CPWebCase):
@staticmethod
def setup_server():
# Put check_access in a custom toolbox with its own namespace
myauthtools = cherrypy._cptools.Toolbox('myauth')
def check_access(default=False):
if not getattr(cherrypy.request, 'userid', default):
raise cherrypy.HTTPError(401)
myauthtools.check_access = cherrypy.Tool(
'before_request_body', check_access)
def numerify():
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
class NumTool(cherrypy.Tool):
def _setup(self):
def makemap():
m = self._merged_args().get('map', {})
cherrypy.request.numerify_map = list(m.items())
cherrypy.request.hooks.attach('on_start_resource', makemap)
def critical():
cherrypy.request.error_response = cherrypy.HTTPError(
502).set_response
critical.failsafe = True
cherrypy.request.hooks.attach('on_start_resource', critical)
cherrypy.request.hooks.attach(self._point, self.callable)
tools.numerify = NumTool('before_finalize', numerify)
# It's not mandatory to inherit from cherrypy.Tool.
class NadsatTool:
def __init__(self):
self.ended = {}
self._name = 'nadsat'
def nadsat(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace(b'good', b'horrorshow')
chunk = chunk.replace(b'piece', b'lomtick')
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
nadsat.priority = 0
def cleanup(self):
# This runs after the request has been completely written out.
cherrypy.response.body = [b'razdrez']
id = cherrypy.request.params.get('id')
if id:
self.ended[id] = True
cleanup.failsafe = True
def _setup(self):
cherrypy.request.hooks.attach('before_finalize', self.nadsat)
cherrypy.request.hooks.attach('on_end_request', self.cleanup)
tools.nadsat = NadsatTool()
def pipe_body():
cherrypy.request.process_request_body = False
clen = int(cherrypy.request.headers['Content-Length'])
cherrypy.request.body = cherrypy.request.rfile.read(clen)
# Assert that we can use a callable object instead of a function.
class Rotator(object):
def __call__(self, scale):
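                # Shift every byte of the collapsed response body by `scale` (mod 256).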
r = cherrypy.response
r.collapse_body()
r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
def stream_handler(next_handler, *args, **kwargs):
actual = cherrypy.request.config.get('tools.streamer.arg')
assert actual == 'arg value'
cherrypy.response.output = o = io.BytesIO()
try:
next_handler(*args, **kwargs)
# Ignore the response and return our accumulated output
# instead.
return o.getvalue()
finally:
o.close()
cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(
stream_handler)
class Root:
@cherrypy.expose
def index(self):
return 'Howdy earth!'
@cherrypy.expose
@cherrypy.config(**{
'tools.streamer.on': True,
'tools.streamer.arg': 'arg value',
})
def tarfile(self):
actual = cherrypy.request.config.get('tools.streamer.arg')
assert actual == 'arg value'
cherrypy.response.output.write(b'I am ')
cherrypy.response.output.write(b'a tarfile')
@cherrypy.expose
def euro(self):
hooks = list(cherrypy.request.hooks['before_finalize'])
hooks.sort()
cbnames = [x.callback.__name__ for x in hooks]
assert cbnames == ['gzip'], cbnames
priorities = [x.priority for x in hooks]
assert priorities == [80], priorities
yield ntou('Hello,')
yield ntou('world')
yield europoundUnicode
# Bare hooks
@cherrypy.expose
@cherrypy.config(**{'hooks.before_request_body': pipe_body})
def pipe(self):
return cherrypy.request.body
# Multiple decorators; include kwargs just for fun.
# Note that rotator must run before gzip.
@cherrypy.expose
def decorated_euro(self, *vpath):
yield ntou('Hello,')
yield ntou('world')
yield europoundUnicode
decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
decorated_euro = tools.rotator(scale=3)(decorated_euro)
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each
subclass, and adds an instance of the subclass as an attribute
of root.
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
for value in dct.values():
if isinstance(value, types.FunctionType):
cherrypy.expose(value)
setattr(root, name.lower(), cls())
Test = TestType('Test', (object,), {})
# METHOD ONE:
# Declare Tools in _cp_config
@cherrypy.config(**{'tools.nadsat.on': True})
class Demo(Test):
def index(self, id=None):
return 'A good piece of cherry pie'
def ended(self, id):
return repr(tools.nadsat.ended[id])
def err(self, id=None):
raise ValueError()
def errinstream(self, id=None):
yield 'nonconfidential'
raise ValueError()
yield 'confidential'
# METHOD TWO: decorator using Tool()
# We support Python 2.3, but the @-deco syntax would look like
# this:
# @tools.check_access()
def restricted(self):
return 'Welcome!'
restricted = myauthtools.check_access()(restricted)
userid = restricted
def err_in_onstart(self):
return 'success!'
@cherrypy.config(**{'response.stream': True})
def stream(self, id=None):
for x in range(100000000):
yield str(x)
conf = {
# METHOD THREE:
# Declare Tools in detached config
'/demo': {
'tools.numerify.on': True,
'tools.numerify.map': {b'pie': b'3.14159'},
},
'/demo/restricted': {
'request.show_tracebacks': False,
},
'/demo/userid': {
'request.show_tracebacks': False,
'myauth.check_access.default': True,
},
'/demo/errinstream': {
'response.stream': True,
},
'/demo/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'tools.numerify.map': 'pie->3.14159'
},
# Combined tools
'/euro': {
'tools.gzip.on': True,
'tools.encode.on': True,
},
# Priority specified in config
'/decorated_euro/subpath': {
'tools.gzip.priority': 10,
},
# Handler wrappers
'/tarfile': {'tools.streamer.on': True}
}
app = cherrypy.tree.mount(root, config=conf)
app.request_class.namespaces['myauth'] = myauthtools
root.tooldecs = _test_decorators.ToolExamples()
def testHookErrors(self):
self.getPage('/demo/?id=1')
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody('A horrorshow lomtick of cherry 3.14159')
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/demo/err?id=3')
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(502, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/3')
self.assertBody('True')
# If body is "razdrez", then on_end_request is being called too early.
if (cherrypy.server.protocol_version == 'HTTP/1.0' or
getattr(cherrypy.server, 'using_apache', False)):
self.getPage('/demo/errinstream?id=5')
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus('200 OK')
self.assertBody('nonconfidential')
else:
# Because this error is raised after the response body has
# started, and because it's chunked output, an error is raised by
# the HTTP client when it encounters incomplete output.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
'/demo/errinstream?id=5')
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/5')
self.assertBody('True')
# Test the "__call__" technique (compile-time decorator).
self.getPage('/demo/restricted')
self.assertErrorPage(401)
# Test compile-time decorator with kwargs from config.
self.getPage('/demo/userid')
self.assertBody('Welcome!')
def testEndRequestOnDrop(self):
old_timeout = None
try:
httpserver = cherrypy.server.httpserver
old_timeout = httpserver.timeout
except (AttributeError, IndexError):
return self.skip()
try:
httpserver.timeout = timeout
# Test that on_end_request is called even if the client drops.
self.persistent = True
try:
conn = self.HTTP_CONN
conn.putrequest('GET', '/demo/stream?id=9', skip_host=True)
conn.putheader('Host', self.HOST)
conn.endheaders()
# Skip the rest of the request and close the conn. This will
# cause the server's active socket to error, which *should*
# result in the request being aborted, and request.close being
# called all the way up the stack (including WSGI middleware),
# eventually calling our on_end_request hook.
finally:
self.persistent = False
time.sleep(timeout * 2)
# Test that the on_end_request hook was called.
self.getPage('/demo/ended/9')
self.assertBody('True')
finally:
if old_timeout is not None:
httpserver.timeout = old_timeout
def testGuaranteedHooks(self):
# The 'critical' on_start_resource hook is 'failsafe' (guaranteed
# to run even if there are failures in other on_start methods).
# This is NOT true of the other hooks.
# Here, we have set up a failure in NumerifyTool.numerify_map,
# but our 'critical' hook should run and set the error to 502.
self.getPage('/demo/err_in_onstart')
self.assertErrorPage(502)
tmpl = "AttributeError: 'str' object has no attribute '{attr}'"
expected_msg = tmpl.format(attr='items')
self.assertInBody(expected_msg)
def testCombinedTools(self):
expectedResult = (ntou('Hello,world') +
europoundUnicode).encode('utf-8')
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(expectedResult)
zfile.close()
self.getPage('/euro',
headers=[
('Accept-Encoding', 'gzip'),
('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')])
self.assertInBody(zbuf.getvalue()[:3])
if not HAS_GZIP_COMPRESSION_HEADER_FIXED:
# NOTE: CherryPy adopts a fix from the CPython bug 39389
# NOTE: introducing a variable compression XFL flag that
# NOTE: was hardcoded to "best compression" before. And so
# NOTE: we can only test it on CPython versions that also
# NOTE: implement this fix.
return
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
zfile.write(expectedResult)
zfile.close()
self.getPage('/decorated_euro', headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(zbuf.getvalue()[:3])
# This returns a different value because gzip's priority was
# lowered in conf, allowing the rotator to run after gzip.
# Of course, we don't want breakage in production apps,
# but it proves the priority was changed.
self.getPage('/decorated_euro/subpath',
headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(bytes([(x + 3) % 256 for x in zbuf.getvalue()]))
def testBareHooks(self):
content = 'bit of a pain in me gulliver'
self.getPage('/pipe',
headers=[('Content-Length', str(len(content))),
('Content-Type', 'text/plain')],
method='POST', body=content)
self.assertBody(content)
def testHandlerWrapperTool(self):
self.getPage('/tarfile')
self.assertBody('I am a tarfile')
def testToolWithConfig(self):
if not sys.version_info >= (2, 5):
return self.skip('skipped (Python 2.5+ only)')
self.getPage('/tooldecs/blah')
self.assertHeader('Content-Type', 'application/data')
def testWarnToolOn(self):
# get
try:
cherrypy.tools.numerify.on
except AttributeError:
pass
else:
raise AssertionError('Tool.on did not error as it should have.')
# set
try:
cherrypy.tools.numerify.on = True
except AttributeError:
pass
else:
raise AssertionError('Tool.on did not error as it should have.')
def testDecorator(self):
@cherrypy.tools.register('on_start_resource')
def example():
pass
self.assertTrue(isinstance(cherrypy.tools.example, cherrypy.Tool))
self.assertEqual(cherrypy.tools.example._point, 'on_start_resource')
@cherrypy.tools.register( # noqa: F811
'before_finalize', name='renamed', priority=60,
)
def example(): # noqa: F811
pass
self.assertTrue(isinstance(cherrypy.tools.renamed, cherrypy.Tool))
self.assertEqual(cherrypy.tools.renamed._point, 'before_finalize')
self.assertEqual(cherrypy.tools.renamed._name, 'renamed')
self.assertEqual(cherrypy.tools.renamed._priority, 60)
class SessionAuthTest(unittest.TestCase):
def test_login_screen_returns_bytes(self):
"""
login_screen must return bytes even if unicode parameters are passed.
Issue 1132 revealed that login_screen would return unicode if the
username and password were unicode.
"""
sa = cherrypy.lib.cptools.SessionAuth()
res = sa.login_screen(None, username=str('nobody'),
password=str('anypass'))
self.assertTrue(isinstance(res, bytes))
class TestHooks:
def test_priorities(self):
"""
Hooks should sort by priority order.
"""
Hook = cherrypy._cprequest.Hook
hooks = [
Hook(None, priority=48),
Hook(None),
Hook(None, priority=49),
]
hooks.sort()
by_priority = operator.attrgetter('priority')
priorities = list(map(by_priority, hooks))
assert priorities == [48, 49, 50]
|
from functools import partial
from aiohttp import hdrs
import voluptuous as vol
from homeassistant.const import CONF_PLATFORM, CONF_WEBHOOK_ID
from homeassistant.core import HassJob, callback
import homeassistant.helpers.config_validation as cv
# mypy: allow-untyped-defs
DEPENDENCIES = ("webhook",)
TRIGGER_SCHEMA = vol.Schema(
{vol.Required(CONF_PLATFORM): "webhook", vol.Required(CONF_WEBHOOK_ID): cv.string}
)
async def _handle_webhook(job, hass, webhook_id, request):
"""Handle incoming webhook."""
result = {"platform": "webhook", "webhook_id": webhook_id}
if "json" in request.headers.get(hdrs.CONTENT_TYPE, ""):
result["json"] = await request.json()
else:
result["data"] = await request.post()
result["query"] = request.query
result["description"] = "webhook"
hass.async_run_hass_job(job, {"trigger": result})
async def async_attach_trigger(hass, config, action, automation_info):
"""Trigger based on incoming webhooks."""
webhook_id = config.get(CONF_WEBHOOK_ID)
job = HassJob(action)
hass.components.webhook.async_register(
automation_info["domain"],
automation_info["name"],
webhook_id,
partial(_handle_webhook, job),
)
@callback
def unregister():
"""Unregister webhook."""
hass.components.webhook.async_unregister(webhook_id)
return unregister
|
import asyncio
from datetime import datetime, timedelta
import logging
import threading
from google_nest_sdm.event import EventCallback, EventMessage
from google_nest_sdm.google_nest_subscriber import GoogleNestSubscriber
from nest import Nest
from nest.nest import APIError, AuthorizationError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_BINARY_SENSORS,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_FILENAME,
CONF_MONITORED_CONDITIONS,
CONF_SENSORS,
CONF_STRUCTURE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import (
aiohttp_client,
config_entry_oauth2_flow,
config_validation as cv,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from . import api, config_flow, local_auth
from .const import (
API_URL,
DATA_SDM,
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
SIGNAL_NEST_UPDATE,
)
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
CONF_PROJECT_ID = "project_id"
CONF_SUBSCRIBER_ID = "subscriber_id"
# Configuration for the legacy nest API
SERVICE_CANCEL_ETA = "cancel_eta"
SERVICE_SET_ETA = "set_eta"
DATA_NEST = "nest"
DATA_NEST_CONFIG = "nest_config"
NEST_CONFIG_FILE = "nest.conf"
ATTR_ETA = "eta"
ATTR_ETA_WINDOW = "eta_window"
ATTR_STRUCTURE = "structure"
ATTR_TRIP_ID = "trip_id"
AWAY_MODE_AWAY = "away"
AWAY_MODE_HOME = "home"
ATTR_AWAY_MODE = "away_mode"
SERVICE_SET_AWAY_MODE = "set_away_mode"
SENSOR_SCHEMA = vol.Schema(
{vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list)}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
# Required to use the new API (optional for compatibility)
vol.Optional(CONF_PROJECT_ID): cv.string,
vol.Optional(CONF_SUBSCRIBER_ID): cv.string,
# Config that only currently works on the old API
vol.Optional(CONF_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SENSORS): SENSOR_SCHEMA,
vol.Optional(CONF_BINARY_SENSORS): SENSOR_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["sensor", "camera"]
# Services for the legacy API
SET_AWAY_MODE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_AWAY_MODE): vol.In([AWAY_MODE_AWAY, AWAY_MODE_HOME]),
vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
}
)
SET_ETA_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ETA): cv.time_period,
vol.Optional(ATTR_TRIP_ID): cv.string,
vol.Optional(ATTR_ETA_WINDOW): cv.time_period,
vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
}
)
CANCEL_ETA_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TRIP_ID): cv.string,
vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
}
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up Nest components with dispatch between old/new flows."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
if CONF_PROJECT_ID not in config[DOMAIN]:
return await async_setup_legacy(hass, config)
if CONF_SUBSCRIBER_ID not in config[DOMAIN]:
_LOGGER.error("Configuration option '{CONF_SUBSCRIBER_ID}' required")
return False
# For setup of ConfigEntry below
hass.data[DOMAIN][DATA_NEST_CONFIG] = config[DOMAIN]
project_id = config[DOMAIN][CONF_PROJECT_ID]
config_flow.NestFlowHandler.register_sdm_api(hass)
config_flow.NestFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE.format(project_id=project_id),
OAUTH2_TOKEN,
),
)
return True
class SignalUpdateCallback(EventCallback):
"""An EventCallback invoked when new events arrive from subscriber."""
def __init__(self, hass: HomeAssistant):
"""Initialize EventCallback."""
self._hass = hass
def handle_event(self, event_message: EventMessage):
"""Process an incoming EventMessage."""
_LOGGER.debug("Update %s @ %s", event_message.event_id, event_message.timestamp)
traits = event_message.resource_update_traits
if traits:
_LOGGER.debug("Trait update %s", traits.keys())
events = event_message.resource_update_events
if events:
_LOGGER.debug("Event Update %s", events.keys())
if not event_message.resource_update_traits:
# Note: Currently ignoring events like camera motion
return
# This event triggered an update to a device that changed some
# properties which the DeviceManager should already have received.
# Send a signal to refresh state of all listening devices.
async_dispatcher_send(self._hass, SIGNAL_NEST_UPDATE)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Nest from a config entry with dispatch between old/new flows."""
if DATA_SDM not in entry.data:
return await async_setup_legacy_entry(hass, entry)
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
config = hass.data[DOMAIN][DATA_NEST_CONFIG]
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
auth = api.AsyncConfigEntryAuth(
aiohttp_client.async_get_clientsession(hass),
session,
API_URL,
)
subscriber = GoogleNestSubscriber(
auth, config[CONF_PROJECT_ID], config[CONF_SUBSCRIBER_ID]
)
subscriber.set_update_callback(SignalUpdateCallback(hass))
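    # Start consuming pub/sub events as a background task.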
asyncio.create_task(subscriber.start_async())
hass.data[DOMAIN][entry.entry_id] = subscriber
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
if DATA_SDM not in entry.data:
# Legacy API
return True
subscriber = hass.data[DOMAIN][entry.entry_id]
subscriber.stop_async()
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
def nest_update_event_broker(hass, nest):
"""
Dispatch SIGNAL_NEST_UPDATE to devices when nest stream API received data.
Used for the legacy nest API.
Runs in its own thread.
"""
_LOGGER.debug("Listening for nest.update_event")
while hass.is_running:
nest.update_event.wait()
if not hass.is_running:
break
nest.update_event.clear()
_LOGGER.debug("Dispatching nest data update")
dispatcher_send(hass, SIGNAL_NEST_UPDATE)
_LOGGER.debug("Stop listening for nest.update_event")
async def async_setup_legacy(hass, config):
"""Set up Nest components using the legacy nest API."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
local_auth.initialize(hass, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET])
filename = config.get(CONF_FILENAME, NEST_CONFIG_FILE)
access_token_cache_file = hass.config.path(filename)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"nest_conf_path": access_token_cache_file},
)
)
# Store config to be used during entry setup
hass.data[DATA_NEST_CONFIG] = conf
return True
async def async_setup_legacy_entry(hass, entry):
"""Set up Nest from legacy config entry."""
nest = Nest(access_token=entry.data["tokens"]["access_token"])
_LOGGER.debug("proceeding with setup")
conf = hass.data.get(DATA_NEST_CONFIG, {})
hass.data[DATA_NEST] = NestLegacyDevice(hass, conf, nest)
if not await hass.async_add_executor_job(hass.data[DATA_NEST].initialize):
return False
for component in "climate", "camera", "sensor", "binary_sensor":
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
def validate_structures(target_structures):
all_structures = [structure.name for structure in nest.structures]
for target in target_structures:
if target not in all_structures:
_LOGGER.info("Invalid structure: %s", target)
def set_away_mode(service):
"""Set the away mode for a Nest structure."""
if ATTR_STRUCTURE in service.data:
target_structures = service.data[ATTR_STRUCTURE]
validate_structures(target_structures)
else:
target_structures = hass.data[DATA_NEST].local_structure
for structure in nest.structures:
if structure.name in target_structures:
_LOGGER.info(
"Setting away mode for: %s to: %s",
structure.name,
service.data[ATTR_AWAY_MODE],
)
structure.away = service.data[ATTR_AWAY_MODE]
def set_eta(service):
"""Set away mode to away and include ETA for a Nest structure."""
if ATTR_STRUCTURE in service.data:
target_structures = service.data[ATTR_STRUCTURE]
validate_structures(target_structures)
else:
target_structures = hass.data[DATA_NEST].local_structure
for structure in nest.structures:
if structure.name in target_structures:
if structure.thermostats:
_LOGGER.info(
"Setting away mode for: %s to: %s",
structure.name,
AWAY_MODE_AWAY,
)
structure.away = AWAY_MODE_AWAY
now = datetime.utcnow()
trip_id = service.data.get(
ATTR_TRIP_ID, f"trip_{int(now.timestamp())}"
)
eta_begin = now + service.data[ATTR_ETA]
eta_window = service.data.get(ATTR_ETA_WINDOW, timedelta(minutes=1))
eta_end = eta_begin + eta_window
_LOGGER.info(
"Setting ETA for trip: %s, "
"ETA window starts at: %s and ends at: %s",
trip_id,
eta_begin,
eta_end,
)
structure.set_eta(trip_id, eta_begin, eta_end)
else:
_LOGGER.info(
"No thermostats found in structure: %s, unable to set ETA",
structure.name,
)
def cancel_eta(service):
"""Cancel ETA for a Nest structure."""
if ATTR_STRUCTURE in service.data:
target_structures = service.data[ATTR_STRUCTURE]
validate_structures(target_structures)
else:
target_structures = hass.data[DATA_NEST].local_structure
for structure in nest.structures:
if structure.name in target_structures:
if structure.thermostats:
trip_id = service.data[ATTR_TRIP_ID]
_LOGGER.info("Cancelling ETA for trip: %s", trip_id)
structure.cancel_eta(trip_id)
else:
_LOGGER.info(
"No thermostats found in structure: %s, "
"unable to cancel ETA",
structure.name,
)
hass.services.async_register(
DOMAIN, SERVICE_SET_AWAY_MODE, set_away_mode, schema=SET_AWAY_MODE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_SET_ETA, set_eta, schema=SET_ETA_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_CANCEL_ETA, cancel_eta, schema=CANCEL_ETA_SCHEMA
)
@callback
def start_up(event):
"""Start Nest update event listener."""
threading.Thread(
name="Nest update listener",
target=nest_update_event_broker,
args=(hass, nest),
).start()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_up)
@callback
def shut_down(event):
"""Stop Nest update event listener."""
nest.update_event.set()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shut_down)
_LOGGER.debug("async_setup_nest is done")
return True
class NestLegacyDevice:
"""Structure Nest functions for hass for legacy API."""
def __init__(self, hass, conf, nest):
"""Init Nest Devices."""
self.hass = hass
self.nest = nest
self.local_structure = conf.get(CONF_STRUCTURE)
def initialize(self):
"""Initialize Nest."""
try:
            # Do not optimize the next statement; it initializes the
            # persistent Nest API connection.
structure_names = [s.name for s in self.nest.structures]
if self.local_structure is None:
self.local_structure = structure_names
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
return False
return True
def structures(self):
"""Generate a list of structures."""
try:
for structure in self.nest.structures:
if structure.name not in self.local_structure:
_LOGGER.debug(
"Ignoring structure %s, not in %s",
structure.name,
self.local_structure,
)
continue
yield structure
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
def thermostats(self):
"""Generate a list of thermostats."""
return self._devices("thermostats")
def smoke_co_alarms(self):
"""Generate a list of smoke co alarms."""
return self._devices("smoke_co_alarms")
def cameras(self):
"""Generate a list of cameras."""
return self._devices("cameras")
def _devices(self, device_type):
"""Generate a list of Nest devices."""
try:
for structure in self.nest.structures:
if structure.name not in self.local_structure:
_LOGGER.debug(
"Ignoring structure %s, not in %s",
structure.name,
self.local_structure,
)
continue
for device in getattr(structure, device_type, []):
try:
                        # Do not optimize the next statement;
                        # it verifies Nest API permissions.
device.name_long
except KeyError:
_LOGGER.warning(
"Cannot retrieve device name for [%s]"
", please check your Nest developer "
"account permission settings",
device.serial,
)
continue
yield (structure, device)
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
class NestSensorDevice(Entity):
"""Representation of a Nest sensor."""
def __init__(self, structure, device, variable):
"""Initialize the sensor."""
self.structure = structure
self.variable = variable
if device is not None:
# device specific
self.device = device
self._name = f"{self.device.name_long} {self.variable.replace('_', ' ')}"
else:
# structure only
self.device = structure
self._name = f"{self.structure.name} {self.variable.replace('_', ' ')}"
self._state = None
self._unit = None
@property
def name(self):
"""Return the name of the nest, if any."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@property
def should_poll(self):
"""Do not need poll thanks using Nest streaming API."""
return False
@property
def unique_id(self):
"""Return unique id based on device serial and variable."""
return f"{self.device.serial}-{self.variable}"
@property
def device_info(self):
"""Return information about the device."""
if not hasattr(self.device, "name_long"):
name = self.structure.name
model = "Structure"
else:
name = self.device.name_long
if self.device.is_thermostat:
model = "Thermostat"
elif self.device.is_camera:
model = "Camera"
elif self.device.is_smoke_co_alarm:
model = "Nest Protect"
else:
model = None
return {
"identifiers": {(DOMAIN, self.device.serial)},
"name": name,
"manufacturer": "Nest Labs",
"model": model,
}
def update(self):
"""Do not use NestSensorDevice directly."""
raise NotImplementedError
async def async_added_to_hass(self):
"""Register update signal handler."""
async def async_update_state():
"""Update sensor state."""
await self.async_update_ha_state(True)
self.async_on_remove(
async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE, async_update_state)
)
|
from django.utils.translation import gettext_lazy as _
from weblate.checks.base import CountingCheck, TargetCheck, TargetCheckParametrized
from weblate.checks.markup import strip_entities
from weblate.checks.parser import single_value_flag
KASHIDA_CHARS = (
"\u0640",
"\uFCF2",
"\uFCF3",
"\uFCF4",
"\uFE71",
"\uFE77",
"\uFE79",
"\uFE7B",
"\uFE7D",
"\uFE7F",
)
FRENCH_PUNCTUATION = {";", ":", "?", "!"}
class BeginNewlineCheck(TargetCheck):
"""Check for newlines at beginning."""
check_id = "begin_newline"
name = _("Starting newline")
description = _("Source and translation do not both start with a newline")
def check_single(self, source, target, unit):
return self.check_chars(source, target, 0, ["\n"])
class EndNewlineCheck(TargetCheck):
"""Check for newlines at end."""
check_id = "end_newline"
name = _("Trailing newline")
description = _("Source and translation do not both end with a newline")
def check_single(self, source, target, unit):
return self.check_chars(source, target, -1, ["\n"])
class BeginSpaceCheck(TargetCheck):
"""Whitespace check, starting whitespace usually is important for UI."""
check_id = "begin_space"
name = _("Starting spaces")
description = _(
"Source and translation do not both start with same number of spaces"
)
def check_single(self, source, target, unit):
        # One-letter strings are usually decimal/thousand separators
if len(source) <= 1 and len(target) <= 1:
return False
stripped_target = target.lstrip(" ")
stripped_source = source.lstrip(" ")
# String translated to spaces only
if not stripped_target:
return False
# Count space chars in source and target
source_space = len(source) - len(stripped_source)
target_space = len(target) - len(stripped_target)
# Compare numbers
return source_space != target_space
def get_fixup(self, unit):
source = unit.source_string
stripped_source = source.lstrip(" ")
spaces = len(source) - len(stripped_source)
if spaces:
replacement = source[:spaces]
else:
replacement = ""
return [("^ *", replacement, "u")]
class EndSpaceCheck(TargetCheck):
"""Whitespace check."""
check_id = "end_space"
name = _("Trailing space")
description = _("Source and translation do not both end with a space")
def check_single(self, source, target, unit):
        # One-letter strings are usually decimal/thousand separators
if len(source) <= 1 and len(target) <= 1:
return False
if not source or not target:
return False
stripped_target = target.rstrip(" ")
stripped_source = source.rstrip(" ")
# String translated to spaces only
if not stripped_target:
return False
# Count space chars in source and target
source_space = len(source) - len(stripped_source)
target_space = len(target) - len(stripped_target)
# Compare numbers
return source_space != target_space
def get_fixup(self, unit):
source = unit.source_string
stripped_source = source.rstrip(" ")
spaces = len(source) - len(stripped_source)
if spaces:
replacement = source[-spaces:]
else:
replacement = ""
return [(" *$", replacement, "u")]
class DoubleSpaceCheck(TargetCheck):
"""Doublespace check."""
check_id = "double_space"
name = _("Double space")
description = _("Translation contains double space")
def check_single(self, source, target, unit):
        # One-letter strings are usually decimal/thousand separators
if len(source) <= 1 and len(target) <= 1:
return False
if not source or not target:
return False
if " " in source:
return False
# Check if target contains double space
return " " in target
def get_fixup(self, unit):
return [(" {2,}", " ")]
class EndStopCheck(TargetCheck):
"""Check for final stop."""
check_id = "end_stop"
name = _("Mismatched full stop")
description = _("Source and translation do not both end with a full stop")
def check_single(self, source, target, unit):
if len(source) <= 4:
# Might need to use shortcut in translation
return False
if not target:
return False
        # Thai and Lojban do not have a full stop
if self.is_language(unit, ("th", "jbo")):
return False
# Allow ... to be translated into ellipsis
if source.endswith("...") and target[-1] == "…":
return False
if self.is_language(unit, ("ja",)) and source[-1] in (":", ";"):
# Japanese sentence might need to end with full stop
# in case it's used before list.
return self.check_chars(source, target, -1, (";", ":", ":", ".", "。"))
if self.is_language(unit, ("hy",)):
return self.check_chars(
source,
target,
-1,
(".", "。", "।", "۔", "։", "·", "෴", "។", ":", "՝", "?", "!", "`"),
)
if self.is_language(unit, ("hi", "bn", "or")):
# Using | instead of । is not typographically correct, but
# seems to be quite usual
return self.check_chars(source, target, -1, (".", "।", "|"))
if self.is_language(unit, ("sat",)):
# Santali uses "᱾" as full stop
return self.check_chars(source, target, -1, (".", "᱾"))
return self.check_chars(
source, target, -1, (".", "。", "।", "۔", "։", "·", "෴", "។")
)
class EndColonCheck(TargetCheck):
"""Check for final colon."""
check_id = "end_colon"
name = _("Mismatched colon")
description = _("Source and translation do not both end with a colon")
def _check_hy(self, source, target):
if source[-1] == ":":
return self.check_chars(source, target, -1, (":", "՝", "`"))
return False
def _check_ja(self, source, target):
# Japanese sentence might need to end with full stop
# in case it's used before list.
if source[-1] in (":", ";"):
return self.check_chars(source, target, -1, (";", ":", ":", ".", "。"))
return False
def check_single(self, source, target, unit):
if not source or not target:
return False
if self.is_language(unit, ("jbo",)):
return False
if self.is_language(unit, ("hy",)):
return self._check_hy(source, target)
if self.is_language(unit, ("ja",)):
return self._check_ja(source, target)
return self.check_chars(source, target, -1, (":", ":", "៖"))
class EndQuestionCheck(TargetCheck):
"""Check for final question mark."""
check_id = "end_question"
name = _("Mismatched question mark")
description = _("Source and translation do not both end with a question mark")
question_el = ("?", ";", ";")
def _check_hy(self, source, target):
if source[-1] == "?":
return self.check_chars(source, target, -1, ("?", "՞", "։"))
return False
def _check_el(self, source, target):
if source[-1] != "?":
return False
return target[-1] not in self.question_el
def check_single(self, source, target, unit):
if not source or not target:
return False
if self.is_language(unit, ("jbo",)):
return False
if self.is_language(unit, ("hy",)):
return self._check_hy(source, target)
if self.is_language(unit, ("el",)):
return self._check_el(source, target)
return self.check_chars(
source, target, -1, ("?", "՞", "؟", "⸮", "?", "፧", "꘏", "⳺")
)
class EndExclamationCheck(TargetCheck):
"""Check for final exclamation mark."""
check_id = "end_exclamation"
name = _("Mismatched exclamation mark")
description = _("Source and translation do not both end with an exclamation mark")
def check_single(self, source, target, unit):
if not source or not target:
return False
if (
self.is_language(unit, ("eu",))
and source[-1] == "!"
and "¡" in target
and "!" in target
):
return False
if self.is_language(unit, ("hy", "jbo")):
return False
if source.endswith("Texy!") or target.endswith("Texy!"):
return False
return self.check_chars(source, target, -1, ("!", "!", "՜", "᥄", "႟", "߹"))
class EndEllipsisCheck(TargetCheck):
"""Check for ellipsis at the end of string."""
check_id = "end_ellipsis"
name = _("Mismatched ellipsis")
description = _("Source and translation do not both end with an ellipsis")
def check_single(self, source, target, unit):
if not target:
return False
if self.is_language(unit, ("jbo",)):
return False
# Allow ... to be translated into ellipsis
if source.endswith("...") and target[-1] == "…":
return False
return self.check_chars(source, target, -1, ("…",))
class EscapedNewlineCountingCheck(CountingCheck):
r"""Check whether there is same amount of escaped \n strings."""
string = "\\n"
check_id = "escaped_newline"
name = _("Mismatched \\n")
description = _("Number of \\n in translation does not match source")
class NewLineCountCheck(CountingCheck):
"""Check whether there is same amount of new lines."""
string = "\n"
check_id = "newline-count"
name = _("Mismatching line breaks")
description = _("Number of new lines in translation does not match source")
class ZeroWidthSpaceCheck(TargetCheck):
"""Check for zero width space char (<U+200B>)."""
check_id = "zero-width-space"
name = _("Zero-width space")
description = _("Translation contains extra zero-width space character")
def check_single(self, source, target, unit):
if self.is_language(unit, ("km",)):
return False
if "\u200b" in source:
return False
return "\u200b" in target
def get_fixup(self, unit):
return [("\u200b", "", "gu")]
class MaxLengthCheck(TargetCheckParametrized):
"""Check for maximum length of translation."""
check_id = "max-length"
name = _("Maximum length of translation")
description = _("Translation should not exceed given length")
default_disabled = True
@property
def param_type(self):
return single_value_flag(int)
def check_target_params(self, sources, targets, unit, value):
replace = self.get_replacement_function(unit)
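        # Length is measured after applying the unit's replacement function.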
return any(len(replace(target)) > value for target in targets)
class EndSemicolonCheck(TargetCheck):
"""Check for semicolon at end."""
check_id = "end_semicolon"
name = _("Mismatched semicolon")
description = _("Source and translation do not both end with a semicolon")
def check_single(self, source, target, unit):
if self.is_language(unit, ("el",)) and source and source[-1] == "?":
# Complement to question mark check
return False
return self.check_chars(
strip_entities(source), strip_entities(target), -1, [";"]
)
class KashidaCheck(TargetCheck):
check_id = "kashida"
name = _("Kashida letter used")
description = _("The decorative kashida letters should not be used")
def check_single(self, source, target, unit):
return any(x in target for x in KASHIDA_CHARS)
def get_fixup(self, unit):
return [("[{}]".format("".join(KASHIDA_CHARS)), "", "gu")]
class PunctuationSpacingCheck(TargetCheck):
check_id = "punctuation_spacing"
name = _("Punctuation spacing")
description = _("Missing non breakable space before double punctuation sign")
def check_single(self, source, target, unit):
if (
not self.is_language(unit, ("fr", "br"))
or unit.translation.language.code == "fr_CA"
):
return False
# Remove XML/HTML entities to simplify parsing
target = strip_entities(target)
whitespace = {" ", "\u00A0", "\u202F", "\u2009"}
total = len(target)
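        # A sign directly followed by another non-space character (e.g. "http://")
        # is skipped; otherwise it must be preceded by a (narrow) no-break space.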
for i, char in enumerate(target):
if char in FRENCH_PUNCTUATION:
if i + 1 < total and not target[i + 1].isspace():
continue
if i == 0 or target[i - 1] not in whitespace:
return True
return False
def get_fixup(self, unit):
return [
# First fix possibly wrong whitespace
(
"([ \u00A0\u2009])([{}])".format("".join(FRENCH_PUNCTUATION)),
"\u202F$2",
"gu",
),
# Then add missing ones
(
"([^\u202F])([{}])".format("".join(FRENCH_PUNCTUATION)),
"$1\u202F$2",
"gu",
),
]
|
import os
import shutil
import unittest
import subprocess
import shlex
def run_cmd(app, cmd):
"""Run a command and return a tuple with (stdout, stderr, exit_code)"""
os.environ['FLASK_APP'] = app
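    # The flask CLI reads FLASK_APP to locate the application for the "flask db" commands.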
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
print('\n$ ' + cmd)
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
return stdout, stderr, process.wait()
class TestMigrate(unittest.TestCase):
def setUp(self):
os.chdir(os.path.split(os.path.abspath(__file__))[0])
try:
os.remove('app.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
try:
shutil.rmtree('temp_folder')
except OSError:
pass
def tearDown(self):
try:
os.remove('app.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
try:
shutil.rmtree('temp_folder')
except OSError:
pass
def test_alembic_version(self):
from flask_migrate import alembic_version
self.assertEqual(len(alembic_version), 3)
for v in alembic_version:
self.assertTrue(isinstance(v, int))
def test_migrate_upgrade(self):
(o, e, s) = run_cmd('app.py', 'flask db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app.py', 'flask db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app.py', 'flask db upgrade')
self.assertTrue(s == 0)
from .app import db, User
db.session.add(User(name='test'))
db.session.commit()
def test_custom_directory(self):
(o, e, s) = run_cmd('app_custom_directory.py', 'flask db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_custom_directory.py', 'flask db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_custom_directory.py', 'flask db upgrade')
self.assertTrue(s == 0)
from .app_custom_directory import db, User
db.session.add(User(name='test'))
db.session.commit()
def test_custom_directory_path(self):
(o, e, s) = run_cmd('app_custom_directory_path.py', 'flask db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_custom_directory_path.py', 'flask db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_custom_directory_path.py', 'flask db upgrade')
self.assertTrue(s == 0)
from .app_custom_directory_path import db, User
db.session.add(User(name='test'))
db.session.commit()
def test_compare_type(self):
(o, e, s) = run_cmd('app_compare_type1.py', 'flask db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_compare_type1.py', 'flask db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_compare_type1.py', 'flask db upgrade')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('app_compare_type2.py', 'flask db migrate')
self.assertTrue(s == 0)
self.assertTrue(b'Detected type change from VARCHAR(length=128) '
b'to String(length=10)' in e)
|
from absl import flags
flags.DEFINE_string('ceph_secret', None,
'Name of the Ceph Secret used by Kubernetes in order to '
'authenticate with Ceph. If provided, overrides keyring.')
flags.DEFINE_string('ceph_keyring', '/etc/ceph/keyring',
'Path to the Ceph keyring file.')
flags.DEFINE_string('rbd_pool', 'rbd',
'Name of RBD pool for Ceph volumes.')
flags.DEFINE_string('rbd_user', 'admin',
'Name of RADOS user.')
flags.DEFINE_list('ceph_monitors', [],
'IP addresses and ports of Ceph Monitors. '
'Must be provided when Ceph scratch disk is required. '
'Example: "127.0.0.1:6789,192.168.1.1:6789"')
flags.DEFINE_string('username', 'root',
'User name that Perfkit will attempt to use in order to '
'SSH into Docker instance.')
flags.DEFINE_boolean('docker_in_privileged_mode', True,
'If set to True, will attempt to create Docker containers '
'in a privileged mode. Note that some benchmarks execute '
'commands which are only allowed in privileged mode.')
flags.DEFINE_boolean('kubernetes_anti_affinity', True,
'If set to True, PKB pods will not be scheduled on the '
'same nodes as other PKB pods.')
flags.DEFINE_multi_string('k8s_volume_parameters', None,
'A colon separated key-value pair that will be '
'added to Kubernetes storage class parameters.')
_K8S_PROVISIONERS = [
'kubernetes.io/azure-disk', 'kubernetes.io/gce-pd', 'kubernetes.io/aws-ebs', 'kubernetes.io/glusterfs'
]
flags.DEFINE_enum('k8s_volume_provisioner', None, _K8S_PROVISIONERS,
'The name of the provisioner to use for K8s storage '
'classes.')
|
import datetime
from app import SQLAlchemyDB as db, socketio
from app.database.base import BaseMethod
from app.utils import JsonUtil
class User(db.Model, BaseMethod):
'''user'''
id = db.Column(db.String(32), primary_key=True)
name = db.Column(db.String(32))
location = db.Column(db.String(32))
avatar = db.Column(db.String(128))
src = db.Column(db.String(4), default="gh") # useless
last_login = db.Column(db.DateTime, default=datetime.datetime.now)
def dict(self):
rst = {}
rst['id'] = self.id
rst['name'] = self.name
rst['location'] = self.location
rst['avatar'] = self.avatar
rst['src'] = self.src
rst['last_login'] = self.last_login
return rst
class Server(db.Model, BaseMethod):
'''server list'''
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32))
ip = db.Column(db.String(16))
port = db.Column(db.Integer)
account = db.Column(db.String(32))
pkey = db.Column(db.Text)
user_id = db.Column(db.String(32), db.ForeignKey(User.id))
user = db.relationship(User)
add_time = db.Column(db.DateTime, default=datetime.datetime.now)
deleted = db.Column(db.Boolean, default=False)
def dict(self, with_pkey=False):
rst = {}
rst['id'] = self.id
rst['name'] = self.name
rst['ip'] = self.ip
rst['port'] = self.port
rst['account'] = self.account
rst['pkey'] = with_pkey and self.pkey or ''
rst['user_id'] = self.user_id
rst['add_time'] = self.add_time
return rst
class WebHook(db.Model, BaseMethod):
'''webhook'''
id = db.Column(db.Integer, primary_key=True)
repo = db.Column(db.String(32)) # repo name
branch = db.Column(db.String(32)) # repo branch
shell = db.Column(db.Text) # do what
user_id = db.Column(db.String(32), db.ForeignKey(User.id))
user = db.relationship(User)
server_id = db.Column(db.Integer, db.ForeignKey(Server.id))
server = db.relationship(Server)
add_time = db.Column(db.DateTime, default=datetime.datetime.now)
deleted = db.Column(db.Boolean, default=False)
    key = db.Column(db.String(32), unique=True) # used for the webhook; kept private, simply an md5 with salt
# 1:waiting, 2:ing, 3:error, 4:success, 5:except, other
status = db.Column(db.String(1))
    lastUpdate = db.Column(
        db.DateTime, default=datetime.datetime.now) # latest execution time
def dict(self, with_key=False):
rst = {}
rst['id'] = self.id
rst['repo'] = self.repo
rst['branch'] = self.branch
rst['shell'] = self.shell
rst['user_id'] = self.user_id
rst['server_id'] = self.server_id
rst['server'] = self.server and self.server.dict() or {}
rst['add_time'] = self.add_time
rst['key'] = with_key and self.key or ''
rst['status'] = self.status
rst['lastUpdate'] = self.lastUpdate
return rst
def updateStatus(self, status):
self.status = status
self.lastUpdate = datetime.datetime.now()
self.save()
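        # Push the updated status to clients listening on this webhook's room.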
socketio.emit('webhook',
JsonUtil.object_2_json(self.dict()),
room=self.id)
class History(db.Model, BaseMethod):
'''push history'''
# md5, notice, output, push_name, push_email, success, add_time
id = db.Column(db.Integer, primary_key=True)
# 1:waiting, 2:ing, 3:error, 4:success, 5:except, other
status = db.Column(db.String(1))
shell_log = db.Column(db.Text) # hook shell log
data = db.Column(db.Text) # git push data
push_user = db.Column(db.String(64)) # git push user(name<email>)
add_time = db.Column(
db.DateTime, default=datetime.datetime.now) # git push time
update_time = db.Column(
db.DateTime, default=datetime.datetime.now) # last update time
webhook_id = db.Column(db.Integer, db.ForeignKey(WebHook.id))
webhook = db.relationship(WebHook)
def dict(self):
rst = {}
rst['id'] = self.id
rst['status'] = self.status
rst['shell_log'] = self.shell_log
rst['data'] = self.data # json
rst['push_user'] = self.push_user
rst['add_time'] = self.add_time
rst['update_time'] = self.update_time
rst['webhook_id'] = self.webhook_id
return rst
def updateStatus(self, status):
self.update_time = datetime.datetime.now()
self.status = status
self.save()
socketio.emit('history',
JsonUtil.object_2_json(self.dict()),
room=self.webhook.id)
class Collaborator(db.Model, BaseMethod):
'''Collaborator'''
id = db.Column(db.Integer, primary_key=True)
# webhook
webhook_id = db.Column(db.Integer, db.ForeignKey(WebHook.id))
webhook = db.relationship(WebHook)
# user
user_id = db.Column(db.String(32), db.ForeignKey(User.id))
user = db.relationship(User)
add_time = db.Column(db.DateTime, default=datetime.datetime.now)
def dict(self):
rst = {}
rst['id'] = self.id
rst['webhook_id'] = self.webhook_id
rst['user_id'] = self.user_id
rst['user'] = {}
if self.user:
rst['user'] = self.user.dict()
rst['add_time'] = self.add_time
return rst
|
import sys
import time
import smart_open
open_fn = smart_open.smart_open
# open_fn = open
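# Stream the file line by line and print throughput every `report_every` rows.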
def report_time_iterate_rows(file_name, report_every=100000):
start = time.time()
last = start
with open_fn(file_name, 'r') as f:
for i, line in enumerate(f, start=1):
if not (i % report_every):
current = time.time()
time_taken = current - last
print('Time taken for %d rows: %.2f seconds, %.2f rows/s' % (
report_every, time_taken, report_every / time_taken))
last = current
total = time.time() - start
print('Total: %d rows, %.2f seconds, %.2f rows/s' % (
i, total, i / total))
report_time_iterate_rows(sys.argv[1])
|
import marshal, new, opcode, sys, types
from lnotab import lnotab_numbers, lnotab_string
class PycFile:
def read(self, f):
if isinstance(f, basestring):
f = open(f, "rb")
self.magic = f.read(4)
self.modtime = f.read(4)
self.code = marshal.load(f)
def write(self, f):
if isinstance(f, basestring):
f = open(f, "wb")
f.write(self.magic)
f.write(self.modtime)
marshal.dump(self.code, f)
def hack_line_numbers(self):
self.code = hack_line_numbers(self.code)
def hack_line_numbers(code):
""" Replace a code object's line number information to claim that every
byte of the bytecode is a new source line. Returns a new code
object. Also recurses to hack the line numbers in nested code objects.
"""
    # Create a new lnotab table.  Each opcode is claimed to be at
    # 100000000 + 1000*lineno + (opcode number within line), so for example,
    # the opcodes on source line 12 will be given new line numbers 100012000,
    # 100012001, 100012002, etc.
old_num = list(lnotab_numbers(code.co_lnotab, code.co_firstlineno))
n_bytes = len(code.co_code)
new_num = []
line = 0
opnum_in_line = 0
i_byte = 0
while i_byte < n_bytes:
if old_num and i_byte == old_num[0][0]:
line = old_num.pop(0)[1]
opnum_in_line = 0
new_num.append((i_byte, 100000000 + 1000*line + opnum_in_line))
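        # In this (pre-3.6) bytecode format, opcodes with an argument occupy 3 bytes.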
if ord(code.co_code[i_byte]) >= opcode.HAVE_ARGUMENT:
i_byte += 3
else:
i_byte += 1
opnum_in_line += 1
# new_num is a list of pairs, (byteoff, lineoff). Turn it into an lnotab.
new_firstlineno = new_num[0][1]-1
new_lnotab = lnotab_string(new_num, new_firstlineno)
# Recurse into code constants in this code object.
new_consts = []
for const in code.co_consts:
if type(const) == types.CodeType:
new_consts.append(hack_line_numbers(const))
else:
new_consts.append(const)
# Create a new code object, just like the old one, except with new
# line numbers.
new_code = new.code(
code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags,
code.co_code, tuple(new_consts), code.co_names, code.co_varnames,
code.co_filename, code.co_name, new_firstlineno, new_lnotab
)
return new_code
def hack_file(f):
pyc = PycFile()
pyc.read(f)
pyc.hack_line_numbers()
pyc.write(f)
if __name__ == '__main__':
hack_file(sys.argv[1])
|
import itertools
from unittest import mock
import pytest
from vcr import matchers
from vcr import request
# the dict contains requests with corresponding to its key difference
# with 'base' request.
REQUESTS = {
"base": request.Request("GET", "http://host.com/p?a=b", "", {}),
"method": request.Request("POST", "http://host.com/p?a=b", "", {}),
"scheme": request.Request("GET", "https://host.com:80/p?a=b", "", {}),
"host": request.Request("GET", "http://another-host.com/p?a=b", "", {}),
"port": request.Request("GET", "http://host.com:90/p?a=b", "", {}),
"path": request.Request("GET", "http://host.com/x?a=b", "", {}),
"query": request.Request("GET", "http://host.com/p?c=d", "", {}),
}
def assert_matcher(matcher_name):
matcher = getattr(matchers, matcher_name)
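    # Each non-base request differs from "base" only in the attribute named by its
    # key, so a matcher should fail exactly for pairs involving its own attribute.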
for k1, k2 in itertools.permutations(REQUESTS, 2):
expecting_assertion_error = matcher_name in {k1, k2}
if expecting_assertion_error:
with pytest.raises(AssertionError):
matcher(REQUESTS[k1], REQUESTS[k2])
else:
assert matcher(REQUESTS[k1], REQUESTS[k2]) is None
def test_uri_matcher():
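    # The uri matcher ignores the HTTP method, so only the base/method pair compares equal.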
for k1, k2 in itertools.permutations(REQUESTS, 2):
expecting_assertion_error = {k1, k2} != {"base", "method"}
if expecting_assertion_error:
with pytest.raises(AssertionError):
matchers.uri(REQUESTS[k1], REQUESTS[k2])
else:
assert matchers.uri(REQUESTS[k1], REQUESTS[k2]) is None
req1_body = (
b"<?xml version='1.0'?><methodCall><methodName>test</methodName>"
b"<params><param><value><array><data><value><struct>"
b"<member><name>a</name><value><string>1</string></value></member>"
b"<member><name>b</name><value><string>2</string></value></member>"
b"</struct></value></data></array></value></param></params></methodCall>"
)
req2_body = (
b"<?xml version='1.0'?><methodCall><methodName>test</methodName>"
b"<params><param><value><array><data><value><struct>"
b"<member><name>b</name><value><string>2</string></value></member>"
b"<member><name>a</name><value><string>1</string></value></member>"
b"</struct></value></data></array></value></param></params></methodCall>"
)
boto3_bytes_headers = {
"X-Amz-Content-SHA256": b"UNSIGNED-PAYLOAD",
"Cache-Control": b"max-age=31536000, public",
"X-Amz-Date": b"20191102T143910Z",
"User-Agent": b"Boto3/1.9.102 Python/3.5.3 Linux/4.15.0-54-generic Botocore/1.12.253 Resource",
"Content-MD5": b"GQqjEXsRqrPyxfTl99nkAg==",
"Content-Type": b"text/plain",
"Expect": b"100-continue",
"Content-Length": "21",
}
@pytest.mark.parametrize(
"r1, r2",
[
(
request.Request("POST", "http://host.com/", "123", {}),
request.Request("POST", "http://another-host.com/", "123", {"Some-Header": "value"}),
),
(
request.Request(
"POST", "http://host.com/", "a=1&b=2", {"Content-Type": "application/x-www-form-urlencoded"}
),
request.Request(
"POST", "http://host.com/", "b=2&a=1", {"Content-Type": "application/x-www-form-urlencoded"}
),
),
(
request.Request("POST", "http://host.com/", "123", {}),
request.Request("POST", "http://another-host.com/", "123", {"Some-Header": "value"}),
),
(
request.Request(
"POST", "http://host.com/", "a=1&b=2", {"Content-Type": "application/x-www-form-urlencoded"}
),
request.Request(
"POST", "http://host.com/", "b=2&a=1", {"Content-Type": "application/x-www-form-urlencoded"}
),
),
(
request.Request(
"POST", "http://host.com/", '{"a": 1, "b": 2}', {"Content-Type": "application/json"}
),
request.Request(
"POST", "http://host.com/", '{"b": 2, "a": 1}', {"content-type": "application/json"}
),
),
(
request.Request(
"POST", "http://host.com/", req1_body, {"User-Agent": "xmlrpclib", "Content-Type": "text/xml"}
),
request.Request(
"POST",
"http://host.com/",
req2_body,
{"user-agent": "somexmlrpc", "content-type": "text/xml"},
),
),
(
request.Request(
"POST", "http://host.com/", '{"a": 1, "b": 2}', {"Content-Type": "application/json"}
),
request.Request(
"POST", "http://host.com/", '{"b": 2, "a": 1}', {"content-type": "application/json"}
),
),
(
# special case for boto3 bytes headers
request.Request("POST", "http://aws.custom.com/", b"123", boto3_bytes_headers),
request.Request("POST", "http://aws.custom.com/", b"123", boto3_bytes_headers),
),
],
)
def test_body_matcher_does_match(r1, r2):
assert matchers.body(r1, r2) is None
@pytest.mark.parametrize(
"r1, r2",
[
(
request.Request("POST", "http://host.com/", '{"a": 1, "b": 2}', {}),
request.Request("POST", "http://host.com/", '{"b": 2, "a": 1}', {}),
),
(
request.Request(
"POST", "http://host.com/", '{"a": 1, "b": 3}', {"Content-Type": "application/json"}
),
request.Request(
"POST", "http://host.com/", '{"b": 2, "a": 1}', {"content-type": "application/json"}
),
),
(
request.Request("POST", "http://host.com/", req1_body, {"Content-Type": "text/xml"}),
request.Request("POST", "http://host.com/", req2_body, {"content-type": "text/xml"}),
),
],
)
def test_body_match_does_not_match(r1, r2):
with pytest.raises(AssertionError):
matchers.body(r1, r2)
def test_query_matcher():
req1 = request.Request("GET", "http://host.com/?a=b&c=d", "", {})
req2 = request.Request("GET", "http://host.com/?c=d&a=b", "", {})
assert matchers.query(req1, req2) is None
req1 = request.Request("GET", "http://host.com/?a=b&a=b&c=d", "", {})
req2 = request.Request("GET", "http://host.com/?a=b&c=d&a=b", "", {})
req3 = request.Request("GET", "http://host.com/?c=d&a=b&a=b", "", {})
assert matchers.query(req1, req2) is None
assert matchers.query(req1, req3) is None
def test_matchers():
assert_matcher("method")
assert_matcher("scheme")
assert_matcher("host")
assert_matcher("port")
assert_matcher("path")
assert_matcher("query")
def test_evaluate_matcher_does_match():
def bool_matcher(r1, r2):
return True
def assertion_matcher(r1, r2):
assert 1 == 1
r1, r2 = None, None
for matcher in [bool_matcher, assertion_matcher]:
match, assertion_msg = matchers._evaluate_matcher(matcher, r1, r2)
assert match is True
assert assertion_msg is None
def test_evaluate_matcher_does_not_match():
def bool_matcher(r1, r2):
return False
def assertion_matcher(r1, r2):
# This is like the "assert" statement preventing pytest to recompile it
raise AssertionError()
r1, r2 = None, None
for matcher in [bool_matcher, assertion_matcher]:
match, assertion_msg = matchers._evaluate_matcher(matcher, r1, r2)
assert match is False
assert not assertion_msg
def test_evaluate_matcher_does_not_match_with_assert_message():
def assertion_matcher(r1, r2):
# This is like the "assert" statement preventing pytest to recompile it
raise AssertionError("Failing matcher")
r1, r2 = None, None
match, assertion_msg = matchers._evaluate_matcher(assertion_matcher, r1, r2)
assert match is False
assert assertion_msg == "Failing matcher"
def test_get_assertion_message():
assert matchers.get_assertion_message(None) is None
assert matchers.get_assertion_message("") == ""
def test_get_assertion_message_with_details():
assertion_msg = "q1=1 != q2=1"
expected = assertion_msg
assert matchers.get_assertion_message(assertion_msg) == expected
@pytest.mark.parametrize(
"r1, r2, expected_successes, expected_failures",
[
(
request.Request("GET", "http://host.com/p?a=b", "", {}),
request.Request("GET", "http://host.com/p?a=b", "", {}),
["method", "path"],
[],
),
(
request.Request("GET", "http://host.com/p?a=b", "", {}),
request.Request("POST", "http://host.com/p?a=b", "", {}),
["path"],
["method"],
),
(
request.Request("GET", "http://host.com/p?a=b", "", {}),
request.Request("POST", "http://host.com/path?a=b", "", {}),
[],
["method", "path"],
),
],
)
def test_get_matchers_results(r1, r2, expected_successes, expected_failures):
successes, failures = matchers.get_matchers_results(r1, r2, [matchers.method, matchers.path])
assert successes == expected_successes
assert len(failures) == len(expected_failures)
for i, expected_failure in enumerate(expected_failures):
assert failures[i][0] == expected_failure
assert failures[i][1] is not None
@mock.patch("vcr.matchers.get_matchers_results")
@pytest.mark.parametrize(
"successes, failures, expected_match",
[(["method", "path"], [], True), (["method"], ["path"], False), ([], ["method", "path"], False)],
)
def test_requests_match(mock_get_matchers_results, successes, failures, expected_match):
mock_get_matchers_results.return_value = (successes, failures)
r1 = request.Request("GET", "http://host.com/p?a=b", "", {})
r2 = request.Request("GET", "http://host.com/p?a=b", "", {})
match = matchers.requests_match(r1, r2, [matchers.method, matchers.path])
assert match is expected_match
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import flip_point
class TestFlipPoint(unittest.TestCase):
def test_flip_point_ndarray(self):
point = np.random.uniform(
low=0., high=32., size=(3, 12, 2))
out = flip_point(point, size=(34, 32), y_flip=True)
point_expected = point.copy()
point_expected[:, :, 0] = 34 - point[:, :, 0]
np.testing.assert_equal(out, point_expected)
out = flip_point(point, size=(34, 32), x_flip=True)
point_expected = point.copy()
point_expected[:, :, 1] = 32 - point[:, :, 1]
np.testing.assert_equal(out, point_expected)
def test_flip_point_list(self):
point = [
np.random.uniform(low=0., high=32., size=(12, 2)),
np.random.uniform(low=0., high=32., size=(10, 2)),
]
out = flip_point(point, size=(34, 32), y_flip=True)
for i, pnt in enumerate(point):
pnt_expected = pnt.copy()
pnt_expected[:, 0] = 34 - pnt[:, 0]
np.testing.assert_equal(out[i], pnt_expected)
out = flip_point(point, size=(34, 32), x_flip=True)
for i, pnt in enumerate(point):
pnt_expected = pnt.copy()
pnt_expected[:, 1] = 32 - pnt[:, 1]
np.testing.assert_equal(out[i], pnt_expected)
testing.run_module(__name__, __file__)
|
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, VOLT
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
BASE_CFG = {
"platform": "sma",
"host": "1.1.1.1",
"password": "",
"custom": {"my_sensor": {"key": "1234567890123", "unit": VOLT}},
}
async def test_sma_config(hass):
"""Test new config."""
sensors = ["current_consumption"]
with assert_setup_component(1):
assert await async_setup_component(
hass, DOMAIN, {DOMAIN: dict(BASE_CFG, sensors=sensors)}
)
await hass.async_block_till_done()
state = hass.states.get("sensor.current_consumption")
assert state
assert ATTR_UNIT_OF_MEASUREMENT in state.attributes
assert "current_consumption" not in state.attributes
state = hass.states.get("sensor.my_sensor")
assert state
|
import string
import sys
import types
try:
classtype = (type, types.ClassType)
except AttributeError:
classtype = type
import cherrypy
class PageHandler(object):
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
@property
def args(self):
"""The ordered args should be accessible from post dispatch hooks."""
return cherrypy.serving.request.args
@args.setter
def args(self, args):
cherrypy.serving.request.args = args
return cherrypy.serving.request.args
@property
def kwargs(self):
"""The named kwargs should be accessible from post dispatch hooks."""
return cherrypy.serving.request.kwargs
@kwargs.setter
def kwargs(self, kwargs):
cherrypy.serving.request.kwargs = kwargs
return cherrypy.serving.request.kwargs
def __call__(self):
try:
return self.callable(*self.args, **self.kwargs)
except TypeError:
x = sys.exc_info()[1]
try:
test_callable_spec(self.callable, self.args, self.kwargs)
except cherrypy.HTTPError:
raise sys.exc_info()[1]
except Exception:
raise x
raise
def test_callable_spec(callable, callable_args, callable_kwargs):
"""
Inspect callable and test to see if the given args are suitable for it.
When an error occurs during the handler's invoking stage there are 2
erroneous cases:
1. Too many parameters passed to a function which doesn't define
one of *args or **kwargs.
    2. Too few parameters are passed to the function.
There are 3 sources of parameters to a cherrypy handler.
1. query string parameters are passed as keyword parameters to the
handler.
2. body parameters are also passed as keyword parameters.
3. when partial matching occurs, the final path atoms are passed as
positional args.
Both the query string and path atoms are part of the URI. If they are
    incorrect, then a 404 Not Found should be raised. Conversely, the body
    parameters are part of the request; if they are invalid, a 400 Bad
    Request should be raised.
"""
show_mismatched_params = getattr(
cherrypy.serving.request, 'show_mismatched_params', False)
try:
(args, varargs, varkw, defaults) = getargspec(callable)
except TypeError:
if isinstance(callable, object) and hasattr(callable, '__call__'):
(args, varargs, varkw,
defaults) = getargspec(callable.__call__)
else:
# If it wasn't one of our own types, re-raise
# the original error
raise
if args and (
# For callable objects, which have a __call__(self) method
hasattr(callable, '__call__') or
# For normal methods
inspect.ismethod(callable)
):
# Strip 'self'
args = args[1:]
arg_usage = dict([(arg, 0,) for arg in args])
vararg_usage = 0
varkw_usage = 0
extra_kwargs = set()
for i, value in enumerate(callable_args):
try:
arg_usage[args[i]] += 1
except IndexError:
vararg_usage += 1
for key in callable_kwargs.keys():
try:
arg_usage[key] += 1
except KeyError:
varkw_usage += 1
extra_kwargs.add(key)
# figure out which args have defaults.
args_with_defaults = args[-len(defaults or []):]
for i, val in enumerate(defaults or []):
# Defaults take effect only when the arg hasn't been used yet.
if arg_usage[args_with_defaults[i]] == 0:
arg_usage[args_with_defaults[i]] += 1
missing_args = []
multiple_args = []
for key, usage in arg_usage.items():
if usage == 0:
missing_args.append(key)
elif usage > 1:
multiple_args.append(key)
if missing_args:
# In the case where the method allows body arguments
# there are 3 potential errors:
# 1. not enough query string parameters -> 404
# 2. not enough body parameters -> 400
# 3. not enough path parts (partial matches) -> 404
#
# We can't actually tell which case it is,
# so I'm raising a 404 because that covers 2/3 of the
# possibilities
#
# In the case where the method does not allow body
# arguments it's definitely a 404.
message = None
if show_mismatched_params:
message = 'Missing parameters: %s' % ','.join(missing_args)
raise cherrypy.HTTPError(404, message=message)
# the extra positional arguments come from the path - 404 Not Found
if not varargs and vararg_usage > 0:
raise cherrypy.HTTPError(404)
body_params = cherrypy.serving.request.body.params or {}
body_params = set(body_params.keys())
qs_params = set(callable_kwargs.keys()) - body_params
if multiple_args:
if qs_params.intersection(set(multiple_args)):
# If any of the multiple parameters came from the query string then
# it's a 404 Not Found
error = 404
else:
# Otherwise it's a 400 Bad Request
error = 400
message = None
if show_mismatched_params:
message = 'Multiple values for parameters: '\
'%s' % ','.join(multiple_args)
raise cherrypy.HTTPError(error, message=message)
if not varkw and varkw_usage > 0:
# If there were extra query string parameters, it's a 404 Not Found
extra_qs_params = set(qs_params).intersection(extra_kwargs)
if extra_qs_params:
message = None
if show_mismatched_params:
message = 'Unexpected query string '\
'parameters: %s' % ', '.join(extra_qs_params)
raise cherrypy.HTTPError(404, message=message)
        # If there were any extra body parameters, it's a 400 Bad Request
extra_body_params = set(body_params).intersection(extra_kwargs)
if extra_body_params:
message = None
if show_mismatched_params:
message = 'Unexpected body parameters: '\
'%s' % ', '.join(extra_body_params)
raise cherrypy.HTTPError(400, message=message)
try:
import inspect
except ImportError:
def test_callable_spec(callable, args, kwargs): # noqa: F811
return None
else:
getargspec = inspect.getargspec
# Python 3 requires using getfullargspec if
# keyword-only arguments are present
if hasattr(inspect, 'getfullargspec'):
def getargspec(callable):
return inspect.getfullargspec(callable)[:4]
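# Illustrative sketch (not part of CherryPy): test_callable_spec leans on the
# getargspec/getfullargspec tuple above to tell "missing" parameters apart from
# "multiply supplied" ones. The handler below is hypothetical.
def _example_handler(page_id, verbose=False):  # hypothetical, for illustration
    return page_id, verbose
# getargspec(_example_handler) yields (['page_id', 'verbose'], None, None,
# (False,)), so a dispatch that supplies no 'page_id' leaves that arg unused
# (reported as "Missing parameters" -> 404), while supplying it both as a path
# atom and as a query-string/body kwarg counts it twice ("Multiple values" ->
# 404 or 400, depending on where the duplicate came from).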
class LateParamPageHandler(PageHandler):
"""When passing cherrypy.request.params to the page handler, we do not
want to capture that dict too early; we want to give tools like the
decoding tool a chance to modify the params dict in-between the lookup
of the handler and the actual calling of the handler. This subclass
takes that into account, and allows request.params to be 'bound late'
(it's more complicated than that, but that's the effect).
"""
@property
def kwargs(self):
"""Page handler kwargs (with cherrypy.request.params copied in)."""
kwargs = cherrypy.serving.request.params.copy()
if self._kwargs:
kwargs.update(self._kwargs)
return kwargs
@kwargs.setter
def kwargs(self, kwargs):
cherrypy.serving.request.kwargs = kwargs
self._kwargs = kwargs
if sys.version_info < (3, 0):
punctuation_to_underscores = string.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, str) or len(t) != 256:
raise ValueError(
'The translate argument must be a str of len 256.')
else:
punctuation_to_underscores = str.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, dict):
raise ValueError('The translate argument must be a dict.')
class Dispatcher(object):
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
The tree is rooted at cherrypy.request.app.root, and each hierarchical
component in the path_info argument is matched to a corresponding nested
attribute of the root object. Matching handlers must have an 'exposed'
attribute which evaluates to True. The special method name "index"
matches a URI which ends in a slash ("/"). The special method name
"default" may match a portion of the path_info (but only when no longer
substring of the path_info matches some other object).
This is the default, built-in dispatcher for CherryPy.
"""
dispatch_method_name = '_cp_dispatch'
"""
The name of the dispatch method that nodes may optionally implement
to provide their own dynamic dispatch algorithm.
"""
def __init__(self, dispatch_method_name=None,
translate=punctuation_to_underscores):
validate_translator(translate)
self.translate = translate
if dispatch_method_name:
self.dispatch_method_name = dispatch_method_name
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
func, vpath = self.find_handler(path_info)
if func:
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace('%2F', '/') for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.NotFound()
def find_handler(self, path):
"""Return the appropriate page handler, plus any virtual path.
This will return two objects. The first will be a callable,
which can be used to generate page output. Any parameters from
the query string or request body will be sent to that callable
as keyword arguments.
The callable is found by traversing the application's tree,
starting from cherrypy.request.app.root, and matching path
components to successive objects in the tree. For example, the
URL "/path/to/handler" might return root.path.to.handler.
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
and were not used when looking up the handler.
These virtual path components are passed to the handler as
positional arguments.
"""
request = cherrypy.serving.request
app = request.app
root = app.root
dispatch_name = self.dispatch_method_name
# Get config for the root object/path.
fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
fullpath_len = len(fullpath)
segleft = fullpath_len
nodeconf = {}
if hasattr(root, '_cp_config'):
nodeconf.update(root._cp_config)
if '/' in app.config:
nodeconf.update(app.config['/'])
object_trail = [['root', root, nodeconf, segleft]]
node = root
iternames = fullpath[:]
while iternames:
name = iternames[0]
# map to legal Python identifiers (e.g. replace '.' with '_')
objname = name.translate(self.translate)
nodeconf = {}
subnode = getattr(node, objname, None)
pre_len = len(iternames)
if subnode is None:
dispatch = getattr(node, dispatch_name, None)
if dispatch and hasattr(dispatch, '__call__') and not \
getattr(dispatch, 'exposed', False) and \
pre_len > 1:
# Don't expose the hidden 'index' token to _cp_dispatch
# We skip this if pre_len == 1 since it makes no sense
# to call a dispatcher when we have no tokens left.
index_name = iternames.pop()
subnode = dispatch(vpath=iternames)
iternames.append(index_name)
else:
# We didn't find a path, but keep processing in case there
# is a default() handler.
iternames.pop(0)
else:
# We found the path, remove the vpath entry
iternames.pop(0)
segleft = len(iternames)
if segleft > pre_len:
# No path segment was removed. Raise an error.
raise cherrypy.CherryPyException(
'A vpath segment was added. Custom dispatchers may only '
'remove elements. While trying to process '
'{0} in {1}'.format(name, fullpath)
)
elif segleft == pre_len:
# Assume that the handler used the current path segment, but
# did not pop it. This allows things like
# return getattr(self, vpath[0], None)
iternames.pop(0)
segleft -= 1
node = subnode
if node is not None:
# Get _cp_config attached to this node.
if hasattr(node, '_cp_config'):
nodeconf.update(node._cp_config)
# Mix in values from app.config for this path.
existing_len = fullpath_len - pre_len
if existing_len != 0:
curpath = '/' + '/'.join(fullpath[0:existing_len])
else:
curpath = ''
new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
for seg in new_segs:
curpath += '/' + seg
if curpath in app.config:
nodeconf.update(app.config[curpath])
object_trail.append([name, node, nodeconf, segleft])
def set_conf():
"""Collapse all object_trail config into cherrypy.request.config.
"""
base = cherrypy.config.copy()
# Note that we merge the config from each node
# even if that node was None.
for name, obj, conf, segleft in object_trail:
base.update(conf)
if 'tools.staticdir.dir' in conf:
base['tools.staticdir.section'] = '/' + \
'/'.join(fullpath[0:fullpath_len - segleft])
return base
# Try successive objects (reverse order)
num_candidates = len(object_trail) - 1
for i in range(num_candidates, -1, -1):
name, candidate, nodeconf, segleft = object_trail[i]
if candidate is None:
continue
# Try a "default" method on the current leaf.
if hasattr(candidate, 'default'):
defhandler = candidate.default
if getattr(defhandler, 'exposed', False):
# Insert any extra _cp_config from the default handler.
conf = getattr(defhandler, '_cp_config', {})
object_trail.insert(
i + 1, ['default', defhandler, conf, segleft])
request.config = set_conf()
# See https://github.com/cherrypy/cherrypy/issues/613
request.is_index = path.endswith('/')
return defhandler, fullpath[fullpath_len - segleft:-1]
# Uncomment the next line to restrict positional params to
# "default".
# if i < num_candidates - 2: continue
# Try the current leaf.
if getattr(candidate, 'exposed', False):
request.config = set_conf()
if i == num_candidates:
# We found the extra ".index". Mark request so tools
# can redirect if path_info has no trailing slash.
request.is_index = True
else:
# We're not at an 'index' handler. Mark request so tools
# can redirect if path_info has NO trailing slash.
# Note that this also includes handlers which take
# positional parameters (virtual paths).
request.is_index = False
return candidate, fullpath[fullpath_len - segleft:-1]
# We didn't find anything
request.config = set_conf()
return None, []
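# Usage sketch (hypothetical application, kept in comments so this module is
# unchanged at import time): with the default Dispatcher, URL atoms map onto
# attributes of the mounted root object, e.g.
#     class Root(object):
#         @cherrypy.expose
#         def index(self):
#             return 'home'
#         @cherrypy.expose
#         def default(self, *vpath):
#             return 'fallback for %r' % (vpath,)
# "/" resolves to Root.index and "/a/b" falls through to Root.default('a', 'b'),
# with the unmatched path atoms passed as positional (virtual path) arguments.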
class MethodDispatcher(Dispatcher):
"""Additional dispatch based on cherrypy.request.method.upper().
Methods named GET, POST, etc will be called on an exposed class.
The method names must be all caps; the appropriate Allow header
will be output showing all capitalized method names as allowable
HTTP verbs.
Note that the containing class must be exposed, not the methods.
"""
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
resource, vpath = self.find_handler(path_info)
if resource:
# Set Allow header
avail = [m for m in dir(resource) if m.isupper()]
if 'GET' in avail and 'HEAD' not in avail:
avail.append('HEAD')
avail.sort()
cherrypy.serving.response.headers['Allow'] = ', '.join(avail)
# Find the subhandler
meth = request.method.upper()
func = getattr(resource, meth, None)
if func is None and meth == 'HEAD':
func = getattr(resource, 'GET', None)
if func:
# Grab any _cp_config on the subhandler.
if hasattr(func, '_cp_config'):
request.config.update(func._cp_config)
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace('%2F', '/') for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.HTTPError(405)
else:
request.handler = cherrypy.NotFound()
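# Usage sketch (hypothetical resource, in comments): with MethodDispatcher the
# class itself is exposed and the HTTP verb picks the handler, e.g.
#     class Item(object):
#         exposed = True
#         def GET(self):
#             return 'read'
#         def PUT(self, body=None):
#             return 'replaced'
# GET calls Item.GET, HEAD falls back to Item.GET, and any other verb gets a
# 405 with "Allow: GET, HEAD, PUT" built from the upper-case method names.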
class RoutesDispatcher(object):
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False, **mapper_options):
"""
Routes dispatcher
Set full_result to True if you wish the controller
and the action to be passed on to the page handler
parameters. By default they won't be.
"""
import routes
self.full_result = full_result
self.controllers = {}
self.mapper = routes.Mapper(**mapper_options)
self.mapper.controller_scan = self.controllers.keys
def connect(self, name, route, controller, **kwargs):
self.controllers[name] = controller
self.mapper.connect(name, route, controller=name, **kwargs)
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def __call__(self, path_info):
"""Set handler and config for the current request."""
func = self.find_handler(path_info)
if func:
cherrypy.serving.request.handler = LateParamPageHandler(func)
else:
cherrypy.serving.request.handler = cherrypy.NotFound()
def find_handler(self, path_info):
"""Find the right page handler, and set request.config."""
import routes
request = cherrypy.serving.request
config = routes.request_config()
config.mapper = self.mapper
if hasattr(request, 'wsgi_environ'):
config.environ = request.wsgi_environ
config.host = request.headers.get('Host', None)
config.protocol = request.scheme
config.redirect = self.redirect
result = self.mapper.match(path_info)
config.mapper_dict = result
params = {}
if result:
params = result.copy()
if not self.full_result:
params.pop('controller', None)
params.pop('action', None)
request.params.update(params)
# Get config for the root object/path.
request.config = base = cherrypy.config.copy()
curpath = ''
def merge(nodeconf):
if 'tools.staticdir.dir' in nodeconf:
nodeconf['tools.staticdir.section'] = curpath or '/'
base.update(nodeconf)
app = request.app
root = app.root
if hasattr(root, '_cp_config'):
merge(root._cp_config)
if '/' in app.config:
merge(app.config['/'])
# Mix in values from app.config.
atoms = [x for x in path_info.split('/') if x]
if atoms:
last = atoms.pop()
else:
last = None
for atom in atoms:
curpath = '/'.join((curpath, atom))
if curpath in app.config:
merge(app.config[curpath])
handler = None
if result:
controller = result.get('controller')
controller = self.controllers.get(controller, controller)
if controller:
if isinstance(controller, classtype):
controller = controller()
# Get config from the controller.
if hasattr(controller, '_cp_config'):
merge(controller._cp_config)
action = result.get('action')
if action is not None:
handler = getattr(controller, action, None)
# Get config from the handler
if hasattr(handler, '_cp_config'):
merge(handler._cp_config)
else:
handler = controller
# Do the last path atom here so it can
# override the controller's _cp_config.
if last:
curpath = '/'.join((curpath, last))
if curpath in app.config:
merge(app.config[curpath])
return handler
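# Usage sketch (hypothetical controller, in comments; requires the third-party
# 'routes' package):
#     d = RoutesDispatcher()
#     d.connect('entry', '/blog/{entry_id}', controller=BlogController())
#     cherrypy.tree.mount(root=None, config={'/': {'request.dispatch': d}})
# Matched route variables (here 'entry_id') are merged into request.params;
# the 'controller' and 'action' entries are dropped unless full_result=True.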
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
from cherrypy.lib import xmlrpcutil
def xmlrpc_dispatch(path_info):
path_info = xmlrpcutil.patched_path(path_info)
return next_dispatcher(path_info)
return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True,
**domains):
"""
Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
website structure. For example::
http://www.domain.example -> root
http://www.domain2.example -> root/domain2/
http://www.domain2.example:443 -> root/secure
can be accomplished via the following config::
[/]
request.dispatch = cherrypy.dispatch.VirtualHost(
**{'www.domain2.example': '/domain2',
'www.domain2.example:443': '/secure',
})
next_dispatcher
The next dispatcher object in the dispatch chain.
The VirtualHost dispatcher adds a prefix to the URL and calls
another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
use_x_forwarded_host
If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
``**domains``
A dict of {host header value: virtual prefix} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding "virtual prefix"
value will be prepended to the URL path before calling the
next dispatcher. Note that you often need separate entries
for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
from cherrypy.lib import httputil
def vhost_dispatch(path_info):
request = cherrypy.serving.request
header = request.headers.get
domain = header('Host', '')
if use_x_forwarded_host:
domain = header('X-Forwarded-Host', domain)
prefix = domains.get(domain, '')
if prefix:
path_info = httputil.urljoin(prefix, path_info)
result = next_dispatcher(path_info)
# Touch up staticdir config. See
# https://github.com/cherrypy/cherrypy/issues/614.
section = request.config.get('tools.staticdir.section')
if section:
section = section[len(prefix):]
request.config['tools.staticdir.section'] = section
return result
return vhost_dispatch
|
import requests
from homeassistant import config_entries, setup
from homeassistant.components.nuheat.const import CONF_SERIAL_NUMBER, DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, HTTP_INTERNAL_SERVER_ERROR
from .mocks import _get_mock_thermostat_run
from tests.async_mock import MagicMock, patch
async def test_form_user(hass):
"""Test we get the form with user source."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_thermostat = _get_mock_thermostat_run()
with patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.authenticate",
return_value=True,
), patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.get_thermostat",
return_value=mock_thermostat,
), patch(
"homeassistant.components.nuheat.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.nuheat.async_setup_entry", return_value=True
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Master bathroom"
assert result2["data"] == {
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import(hass):
"""Test we get the form with import source."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_thermostat = _get_mock_thermostat_run()
with patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.authenticate",
return_value=True,
), patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.get_thermostat",
return_value=mock_thermostat,
), patch(
"homeassistant.components.nuheat.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.nuheat.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "Master bathroom"
assert result["data"] == {
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.authenticate",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result["type"] == "form"
assert result["errors"] == {"base": "invalid_auth"}
response_mock = MagicMock()
type(response_mock).status_code = 401
with patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.authenticate",
side_effect=requests.HTTPError(response=response_mock),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_invalid_thermostat(hass):
"""Test we handle invalid thermostats."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
response_mock = MagicMock()
type(response_mock).status_code = HTTP_INTERNAL_SERVER_ERROR
with patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.authenticate",
return_value=True,
), patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.get_thermostat",
side_effect=requests.HTTPError(response=response_mock),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_thermostat"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nuheat.config_flow.nuheat.NuHeat.authenticate",
side_effect=requests.exceptions.Timeout,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_SERIAL_NUMBER: "12345",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
|
from homeassistant.components.binary_sensor import BinarySensorEntity
from .sensor import CONF_VALUE_ON, PLATFORM_SCHEMA, TcpSensor
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the TCP binary sensor."""
add_entities([TcpBinarySensor(hass, config)])
class TcpBinarySensor(BinarySensorEntity, TcpSensor):
"""A binary sensor which is on when its state == CONF_VALUE_ON."""
required = (CONF_VALUE_ON,)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state == self._config[CONF_VALUE_ON]
|
import unittest
import networkx as nx
import pandas as pd
import numpy as np
from pgmpy.models import NaiveBayes
from pgmpy.independencies import Independencies
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = NaiveBayes()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = NaiveBayes(feature_vars=["b", "c"], dependent_var="a")
self.assertCountEqual(list(self.g.nodes()), ["a", "b", "c"])
self.assertCountEqual(list(self.g.edges()), [("a", "b"), ("a", "c")])
self.assertEqual(self.g.dependent, "a")
self.assertSetEqual(self.g.features, {"b", "c"})
def test_class_init_with_data_nonstring(self):
self.g = NaiveBayes(feature_vars=[2, 3], dependent_var=1)
self.assertCountEqual(list(self.g.nodes()), [1, 2, 3])
self.assertCountEqual(list(self.g.edges()), [(1, 2), (1, 3)])
self.assertEqual(self.g.dependent, 1)
self.assertSetEqual(self.g.features, {2, 3})
def test_add_node_string(self):
self.G.add_node("a")
self.assertListEqual(list(self.G.nodes()), ["a"])
def test_add_node_nonstring(self):
self.G.add_node(1)
self.assertListEqual(list(self.G.nodes()), [1])
def test_add_nodes_from_string(self):
self.G.add_nodes_from(["a", "b", "c", "d"])
self.assertCountEqual(list(self.G.nodes()), ["a", "b", "c", "d"])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
self.assertCountEqual(list(self.G.nodes()), [1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge("a", "b")
self.assertCountEqual(list(self.G.nodes()), ["a", "b"])
self.assertListEqual(list(self.G.edges()), [("a", "b")])
self.assertEqual(self.G.dependent, "a")
self.assertSetEqual(self.G.features, {"b"})
self.G.add_nodes_from(["c", "d"])
self.G.add_edge("a", "c")
self.G.add_edge("a", "d")
self.assertCountEqual(list(self.G.nodes()), ["a", "b", "c", "d"])
self.assertCountEqual(
list(self.G.edges()), [("a", "b"), ("a", "c"), ("a", "d")]
)
self.assertEqual(self.G.dependent, "a")
self.assertSetEqual(self.G.features, {"b", "c", "d"})
self.assertRaises(ValueError, self.G.add_edge, "b", "c")
self.assertRaises(ValueError, self.G.add_edge, "d", "f")
self.assertRaises(ValueError, self.G.add_edge, "e", "f")
self.assertRaises(ValueError, self.G.add_edges_from, [("a", "e"), ("b", "f")])
self.assertRaises(ValueError, self.G.add_edges_from, [("b", "f")])
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
self.assertCountEqual(list(self.G.nodes()), [1, 2])
self.assertListEqual(list(self.G.edges()), [(1, 2)])
self.assertEqual(self.G.dependent, 1)
self.assertSetEqual(self.G.features, {2})
self.G.add_nodes_from([3, 4])
self.G.add_edge(1, 3)
self.G.add_edge(1, 4)
self.assertCountEqual(list(self.G.nodes()), [1, 2, 3, 4])
self.assertCountEqual(list(self.G.edges()), [(1, 2), (1, 3), (1, 4)])
self.assertEqual(self.G.dependent, 1)
self.assertSetEqual(self.G.features, {2, 3, 4})
self.assertRaises(ValueError, self.G.add_edge, 2, 3)
self.assertRaises(ValueError, self.G.add_edge, 3, 6)
self.assertRaises(ValueError, self.G.add_edge, 5, 6)
self.assertRaises(ValueError, self.G.add_edges_from, [(1, 5), (2, 6)])
self.assertRaises(ValueError, self.G.add_edges_from, [(2, 6)])
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, "a", "a")
self.assertRaises(ValueError, self.G.add_edge, 1, 1)
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from, [("a", "a")])
def test_update_node_parents_bm_constructor(self):
self.g = NaiveBayes(feature_vars=["b", "c"], dependent_var="a")
self.assertListEqual(list(self.g.predecessors("a")), [])
self.assertListEqual(list(self.g.predecessors("b")), ["a"])
self.assertListEqual(list(self.g.predecessors("c")), ["a"])
def test_update_node_parents(self):
self.G.add_nodes_from(["a", "b", "c"])
self.G.add_edges_from([("a", "b"), ("a", "c")])
self.assertListEqual(list(self.G.predecessors("a")), [])
self.assertListEqual(list(self.G.predecessors("b")), ["a"])
self.assertListEqual(list(self.G.predecessors("c")), ["a"])
def tearDown(self):
del self.G
class TestNaiveBayesMethods(unittest.TestCase):
def setUp(self):
self.G1 = NaiveBayes(feature_vars=["b", "c", "d", "e"], dependent_var="a")
self.G2 = NaiveBayes(feature_vars=["g", "l", "s"], dependent_var="d")
def test_local_independencies(self):
self.assertEqual(self.G1.local_independencies("a"), Independencies())
self.assertEqual(
self.G1.local_independencies("b"),
Independencies(["b", ["e", "c", "d"], "a"]),
)
self.assertEqual(
self.G1.local_independencies("c"),
Independencies(["c", ["e", "b", "d"], "a"]),
)
self.assertEqual(
self.G1.local_independencies("d"),
Independencies(["d", ["b", "c", "e"], "a"]),
)
def test_active_trail_nodes(self):
self.assertListEqual(
sorted(self.G2.active_trail_nodes("d")), ["d", "g", "l", "s"]
)
self.assertListEqual(
sorted(self.G2.active_trail_nodes("g")), ["d", "g", "l", "s"]
)
self.assertListEqual(
sorted(self.G2.active_trail_nodes("l")), ["d", "g", "l", "s"]
)
self.assertListEqual(
sorted(self.G2.active_trail_nodes("s")), ["d", "g", "l", "s"]
)
def test_active_trail_nodes_args(self):
self.assertListEqual(
sorted(self.G2.active_trail_nodes("d", observed="g")), ["d", "l", "s"]
)
self.assertListEqual(
sorted(self.G2.active_trail_nodes("l", observed="g")), ["d", "l", "s"]
)
self.assertListEqual(
sorted(self.G2.active_trail_nodes("s", observed=["g", "l"])), ["d", "s"]
)
self.assertListEqual(
sorted(self.G2.active_trail_nodes("s", observed=["d", "l"])), ["s"]
)
def test_get_ancestors_of(self):
self.assertListEqual(sorted(self.G1._get_ancestors_of("b")), ["a", "b"])
self.assertListEqual(sorted(self.G1._get_ancestors_of("e")), ["a", "e"])
self.assertListEqual(sorted(self.G1._get_ancestors_of("a")), ["a"])
self.assertListEqual(
sorted(self.G1._get_ancestors_of(["b", "e"])), ["a", "b", "e"]
)
def tearDown(self):
del self.G1
del self.G2
class TestNaiveBayesFit(unittest.TestCase):
def setUp(self):
self.model1 = NaiveBayes()
self.model2 = NaiveBayes(feature_vars=["B"], dependent_var="A")
def test_fit_model_creation(self):
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1000, 5)),
columns=["A", "B", "C", "D", "E"],
)
self.model1.fit(values, "A")
self.assertCountEqual(self.model1.nodes(), ["A", "B", "C", "D", "E"])
self.assertCountEqual(
self.model1.edges(), [("A", "B"), ("A", "C"), ("A", "D"), ("A", "E")]
)
self.assertEqual(self.model1.dependent, "A")
self.assertSetEqual(self.model1.features, {"B", "C", "D", "E"})
self.model2.fit(values)
self.assertCountEqual(self.model1.nodes(), ["A", "B", "C", "D", "E"])
self.assertCountEqual(
self.model1.edges(), [("A", "B"), ("A", "C"), ("A", "D"), ("A", "E")]
)
self.assertEqual(self.model2.dependent, "A")
self.assertSetEqual(self.model2.features, {"B", "C", "D", "E"})
def test_fit_model_creation_exception(self):
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1000, 5)),
columns=["A", "B", "C", "D", "E"],
)
values2 = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1000, 3)), columns=["C", "D", "E"]
)
self.assertRaises(ValueError, self.model1.fit, values)
self.assertRaises(ValueError, self.model1.fit, values2)
self.assertRaises(ValueError, self.model2.fit, values2, "A")
def tearDown(self):
del self.model1
del self.model2
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import ntpath
import time
import xml.etree.ElementTree
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
import six
FLAGS = flags.FLAGS
flags.DEFINE_integer('ntttcp_threads', 1,
'The number of client and server threads for NTttcp '
'to run with.')
flags.DEFINE_integer('ntttcp_time', 60,
'The number of seconds for NTttcp to run.')
flags.DEFINE_bool('ntttcp_udp', False, 'Whether to run a UDP test.')
flags.DEFINE_integer('ntttcp_cooldown_time', 60,
'Time to wait between the test runs.')
flags.DEFINE_integer('ntttcp_packet_size', None,
'The size of the packet being used in the test.')
flags.DEFINE_integer('ntttcp_sender_sb', -1,
'The size of the send buffer, in Kilo Bytes, on the '
'sending VM. The default is the OS default.')
flags.DEFINE_integer('ntttcp_sender_rb', -1,
'The size of the receive buffer, in Kilo Bytes, on the '
'sending VM. The default is the OS default.')
flags.DEFINE_integer('ntttcp_receiver_sb', -1,
'The size of the send buffer, in Kilo Bytes, on the '
'receiving VM. The default is the OS default.')
flags.DEFINE_integer('ntttcp_receiver_rb', -1,
'The size of the receive buffer, in Kilo Bytes, on the '
'receiving VM. The default is the OS default.')
flags.DEFINE_list(
'ntttcp_config_list', '',
'comma separated list of configs to run with ntttcp. The '
'format for a single config is UDP:THREADS:RUNTIME_S:IP_TYPE:PACKET_SIZE, '
'for example True:4:60:INTERNAL:0,False:8:60:EXTERNAL:150')
# When adding new fields to the ntttcp config format, increase this value
_NUM_PARAMS_IN_CONFIG = 5
CONTROL_PORT = 6001
BASE_DATA_PORT = 5001
NTTTCP_RETRIES = 10
NTTTCP_DIR = 'NTttcp-v5.33'
NTTTCP_ZIP = NTTTCP_DIR + '.zip'
NTTTCP_URL = ('https://gallery.technet.microsoft.com/NTttcp-Version-528-'
'Now-f8b12769/file/159655/1/' + NTTTCP_ZIP)
TRUE_VALS = ['True', 'true', 't']
FALSE_VALS = ['False', 'false', 'f']
# named tuple used in passing configs around
NtttcpConf = collections.namedtuple('NtttcpConf',
'udp threads time_s ip_type packet_size')
def NtttcpConfigListValidator(value):
"""Returns whether or not the config list flag is valid."""
if len(value) == 1 and not value[0]:
# not using the config list here
return True
for config in value:
config_vals = config.split(':')
if len(config_vals) < _NUM_PARAMS_IN_CONFIG:
return False
try:
udp = config_vals[0]
threads = int(config_vals[1])
time_s = int(config_vals[2])
ip_type = config_vals[3]
packet_size = int(config_vals[4])
except ValueError:
return False
if udp not in TRUE_VALS + FALSE_VALS:
return False
if threads < 1:
return False
if time_s < 1:
return False
if packet_size < 0:
return False
# verify the ip type
if ip_type not in [
vm_util.IpAddressSubset.EXTERNAL, vm_util.IpAddressSubset.INTERNAL
]:
return False
return True
flags.register_validator('ntttcp_config_list', NtttcpConfigListValidator,
'malformed config list')
def ParseConfigList():
"""Get the list of configs for the test from the flags."""
if not FLAGS.ntttcp_config_list:
# config is the empty string.
return [
NtttcpConf(
udp=FLAGS.ntttcp_udp,
threads=FLAGS.ntttcp_threads,
time_s=FLAGS.ntttcp_time,
ip_type=FLAGS.ip_addresses,
packet_size=FLAGS.ntttcp_packet_size)
]
conf_list = []
for config in FLAGS.ntttcp_config_list:
confs = config.split(':')
conf_list.append(
NtttcpConf(
udp=(confs[0] in TRUE_VALS),
threads=int(confs[1]),
time_s=int(confs[2]),
ip_type=confs[3],
packet_size=int(confs[4])))
return conf_list
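# Example (as a comment): the flag value shown in the help text above,
# 'True:4:60:INTERNAL:0,False:8:60:EXTERNAL:150', parses into
#   NtttcpConf(udp=True, threads=4, time_s=60, ip_type='INTERNAL', packet_size=0)
#   NtttcpConf(udp=False, threads=8, time_s=60, ip_type='EXTERNAL', packet_size=150)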
def Install(vm):
"""Installs the NTttcp package on the VM."""
zip_path = ntpath.join(vm.temp_dir, NTTTCP_ZIP)
vm.DownloadFile(NTTTCP_URL, zip_path)
vm.UnzipFile(zip_path, vm.temp_dir)
@vm_util.Retry(poll_interval=60, fuzz=1, max_retries=NTTTCP_RETRIES)
def _TaskKillNtttcp(vm):
kill_command = 'taskkill /IM ntttcp /F'
vm.RemoteCommand(kill_command, ignore_failure=True, suppress_warning=True)
def _RunNtttcp(vm, options):
timeout_duration = 3 * FLAGS.ntttcp_time
ntttcp_exe_dir = ntpath.join(vm.temp_dir, 'x86fre')
command = 'cd {ntttcp_exe_dir}; .\\NTttcp.exe {ntttcp_options}'.format(
ntttcp_exe_dir=ntttcp_exe_dir, ntttcp_options=options)
vm.RobustRemoteCommand(command, timeout=timeout_duration)
def _RemoveXml(vm):
ntttcp_exe_dir = ntpath.join(vm.temp_dir, 'x86fre')
rm_command = 'cd {ntttcp_exe_dir}; rm xml.txt'.format(
ntttcp_exe_dir=ntttcp_exe_dir)
vm.RemoteCommand(rm_command, ignore_failure=True, suppress_warning=True)
def _CatXml(vm):
ntttcp_exe_dir = ntpath.join(vm.temp_dir, 'x86fre')
cat_command = 'cd {ntttcp_exe_dir}; cat xml.txt'.format(
ntttcp_exe_dir=ntttcp_exe_dir)
ntttcp_xml, _ = vm.RemoteCommand(cat_command)
return ntttcp_xml
def _GetSockBufferSize(sock_buff_size):
return '%dK' % sock_buff_size if sock_buff_size != -1 else sock_buff_size
@vm_util.Retry(max_retries=NTTTCP_RETRIES)
def RunNtttcp(sending_vm, receiving_vm, receiving_ip_address, ip_type, udp,
threads, time_s, packet_size, cooldown):
"""Run NTttcp and return the samples collected from the run."""
if cooldown:
time.sleep(FLAGS.ntttcp_cooldown_time)
  # Clean up any stray ntttcp processes in case this is a retry.
_TaskKillNtttcp(sending_vm)
_TaskKillNtttcp(receiving_vm)
packet_size_string = ''
if packet_size:
packet_size_string = ' -l %d ' % packet_size
shared_options = '-xml -t {time} -p {port} {packet_size}'.format(
time=time_s, port=BASE_DATA_PORT, packet_size=packet_size_string)
udp_string = '-u' if udp else ''
sending_options = shared_options + (
'-s {udp} -m \'{threads},*,{ip}\' -rb {rb} -sb {sb}').format(
udp=udp_string,
threads=threads,
ip=receiving_ip_address,
rb=_GetSockBufferSize(FLAGS.ntttcp_sender_rb),
sb=_GetSockBufferSize(FLAGS.ntttcp_sender_sb))
receiving_options = shared_options + (
'-r {udp} -m \'{threads},*,0.0.0.0\' -rb {rb} -sb {sb}').format(
udp=udp_string,
threads=threads,
rb=_GetSockBufferSize(FLAGS.ntttcp_receiver_rb),
sb=_GetSockBufferSize(FLAGS.ntttcp_receiver_sb))
# NTttcp will append to the xml file when it runs, which causes parsing
# to fail if there was a preexisting xml file. To be safe, try deleting
# the xml file.
_RemoveXml(sending_vm)
_RemoveXml(receiving_vm)
process_args = [(_RunNtttcp, (sending_vm, sending_options), {}),
(_RunNtttcp, (receiving_vm, receiving_options), {})]
background_tasks.RunParallelProcesses(process_args, 200)
sender_xml = _CatXml(sending_vm)
receiver_xml = _CatXml(receiving_vm)
metadata = {'ip_type': ip_type}
for vm_specifier, vm in ('receiving', receiving_vm), ('sending', sending_vm):
for k, v in six.iteritems(vm.GetResourceMetadata()):
metadata['{0}_{1}'.format(vm_specifier, k)] = v
return ParseNtttcpResults(sender_xml, receiver_xml, metadata)
def ParseNtttcpResults(sender_xml_results, receiver_xml_results, metadata):
"""Parses the xml output from NTttcp and returns a list of samples.
The list of samples contains total throughput and per thread throughput
metrics (if there is more than a single thread).
Args:
sender_xml_results: ntttcp test output from the sender.
receiver_xml_results: ntttcp test output from the receiver.
metadata: metadata to be included as part of the samples.
Returns:
list of samples from the results of the ntttcp tests.
"""
sender_xml_root = xml.etree.ElementTree.fromstring(sender_xml_results)
receiver_xml_root = xml.etree.ElementTree.fromstring(receiver_xml_results)
samples = []
metadata = metadata.copy()
# Get the parameters from the sender XML output, but skip the throughput and
# thread info. Those will be used in the samples, not the metadata.
for item in list(sender_xml_root):
if item.tag == 'parameters':
for param in list(item):
metadata[param.tag] = param.text
elif item.tag == 'throughput' or item.tag == 'thread':
continue
else:
metadata['sender %s' % item.tag] = item.text
# We do not want the parameters from the receiver (we already got those
# from the sender), but we do want everything else and have it marked as
# coming from the receiver.
for item in list(receiver_xml_root):
if item.tag == 'parameters' or item.tag == 'thread':
continue
elif item.tag == 'throughput':
if item.attrib['metric'] == 'mbps':
metadata['receiver throughput'] = item.text
else:
metadata['receiver %s' % item.tag] = item.text
metadata['sender rb'] = FLAGS.ntttcp_sender_rb
  metadata['sender sb'] = FLAGS.ntttcp_sender_sb
metadata['receiver rb'] = FLAGS.ntttcp_receiver_rb
metadata['receiver sb'] = FLAGS.ntttcp_receiver_sb
throughput_element = sender_xml_root.find('./throughput[@metric="mbps"]')
samples.append(
sample.Sample('Total Throughput', float(throughput_element.text), 'Mbps',
metadata))
thread_elements = sender_xml_root.findall('./thread')
if len(thread_elements) > 1:
for element in thread_elements:
throughput_element = element.find('./throughput[@metric="mbps"]')
metadata = metadata.copy()
metadata['thread_index'] = element.attrib['index']
samples.append(sample.Sample('Thread Throughput',
float(throughput_element.text),
'Mbps',
metadata))
return samples
|
import numpy as np
from functools import reduce
from string import ascii_uppercase
from ..utils import _check_option
# The following function is a rewriting of scipy.stats.f_oneway.
# Unlike the scipy.stats.f_oneway implementation, it does not copy the data
# while still leaving the inputs unchanged.
def ttest_1samp_no_p(X, sigma=0, method='relative'):
"""Perform one-sample t-test.
This is a modified version of :func:`scipy.stats.ttest_1samp` that avoids
a (relatively) time-consuming p-value calculation, and can adjust
for implausibly small variance values :footcite:`RidgwayEtAl2012`.
Parameters
----------
X : array
Array to return t-values for.
sigma : float
The variance estimate will be given by ``var + sigma * max(var)`` or
``var + sigma``, depending on "method". By default this is 0 (no
adjustment). See Notes for details.
method : str
If 'relative', the minimum variance estimate will be sigma * max(var),
if 'absolute' the minimum variance estimate will be sigma.
Returns
-------
t : array
T-values, potentially adjusted using the hat method.
Notes
-----
To use the "hat" adjustment method :footcite:`RidgwayEtAl2012`, a value
of ``sigma=1e-3`` may be a reasonable choice.
You can use the conversion from ``scipy.stats.distributions.t.ppf``::
        thresh = -scipy.stats.distributions.t.ppf(p_thresh / 2., n_samples - 1)
    to convert a desired p-value threshold into a 2-tailed t-value threshold.
    For one-tailed tests, pass ``p_thresh`` directly (without halving it), and
    for ``tail=-1`` negate the resulting threshold.
References
----------
.. footbibliography::
"""
_check_option('method', method, ['absolute', 'relative'])
var = np.var(X, axis=0, ddof=1)
if sigma > 0:
limit = sigma * np.max(var) if method == 'relative' else sigma
var += limit
return np.mean(X, axis=0) / np.sqrt(var / X.shape[0])
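# Usage sketch (comment only, numpy already imported above): t-values for 20
# observations of 5 variables, with the "hat" variance regularization suggested
# in the Notes:
#     rng = np.random.RandomState(0)
#     t = ttest_1samp_no_p(rng.randn(20, 5), sigma=1e-3, method='relative')
# t has shape (5,), one t-value per variable.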
def ttest_ind_no_p(a, b, equal_var=True, sigma=0.):
"""Independent samples t-test without p calculation.
This is a modified version of :func:`scipy.stats.ttest_ind`. It operates
along the first axis. The ``sigma`` parameter provides an optional "hat"
adjustment (see :func:`ttest_1samp_no_p` and :footcite:`RidgwayEtAl2012`).
Parameters
----------
a : array-like
The first array.
b : array-like
The second array.
equal_var : bool
Assume equal variance. See :func:`scipy.stats.ttest_ind`.
sigma : float
The regularization. See :func:`ttest_1samp_no_p`.
Returns
-------
t : array
T values.
References
----------
.. footbibliography::
"""
v1 = np.var(a, axis=0, ddof=1)
v2 = np.var(b, axis=0, ddof=1)
n1 = a.shape[0]
n2 = b.shape[0]
if equal_var:
df = n1 + n2 - 2.0
var = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
var = var * (1.0 / n1 + 1.0 / n2)
else:
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
var = vn1 + vn2
if sigma > 0:
var += sigma * np.max(var)
denom = np.sqrt(var)
d = np.mean(a, 0) - np.mean(b, 0)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
return t
def f_oneway(*args):
"""Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes [1]_.
This is a modified version of :func:`scipy.stats.f_oneway` that avoids
computing the associated p-value.
Parameters
----------
*args : array_like
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (:func:`scipy.stats.kruskal`)
    although with some loss of power.
The algorithm is from Heiman [2]_, pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
n_samples_per_class = np.array([len(a) for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = reduce(lambda x, y: x + y,
[np.sum(a ** 2, axis=0) for a in args])
sums_args = [np.sum(a, axis=0) for a in args]
square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
return f
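# Example (comment only): a column-wise F-value for three groups of unequal
# size; 2D inputs are reduced along the first axis, so
#     rng = np.random.RandomState(0)
#     F = f_oneway(rng.randn(10, 3), rng.randn(12, 3), rng.randn(8, 3))
# returns an array of shape (3,), one F-value per column.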
def _map_effects(n_factors, effects):
"""Map effects to indices."""
if n_factors > len(ascii_uppercase):
raise ValueError('Maximum number of factors supported is 26')
factor_names = list(ascii_uppercase[:n_factors])
if isinstance(effects, str):
if '*' in effects and ':' in effects:
            raise ValueError('Cannot combine "*" and ":" in effects')
        elif '+' in effects and ':' in effects:
            raise ValueError('Cannot combine "+" and ":" in effects')
elif effects == 'all':
effects = None
elif len(effects) == 1 or ':' in effects:
effects = [effects]
elif '+' in effects:
# all main effects
effects = effects.split('+')
elif '*' in effects:
pass # handle later
else:
raise ValueError('"{}" is not a valid option for "effects"'
.format(effects))
if isinstance(effects, list):
bad_names = [e for e in effects if e not in factor_names]
        if bad_names:
            raise ValueError('Effect names: {} are not valid. They should '
                             'be the first `n_factors` ({}) characters from '
                             'the alphabet'.format(bad_names, n_factors))
indices = list(np.arange(2 ** n_factors - 1))
names = list()
for this_effect in indices:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
this_name = [factor_names[e] for e in this_code]
this_name.sort()
names.append(':'.join(this_name))
if effects is None or isinstance(effects, str):
effects_ = names
else:
effects_ = effects
selection = [names.index(sel) for sel in effects_]
names = [names[sel] for sel in selection]
if isinstance(effects, str):
if '*' in effects:
# hierarchical order of effects
# the * based effect can be used as stop index
sel_ind = names.index(effects.replace('*', ':')) + 1
names = names[:sel_ind]
selection = selection[:sel_ind]
return selection, names
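# For example, _map_effects(2, 'A*B') returns ([0, 1, 2], ['A', 'B', 'A:B']):
# both main effects plus their interaction, in hierarchical order.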
def _get_contrast_indices(effect_idx, n_factors): # noqa: D401
"""Henson's factor coding, see num2binvec."""
binrepr = np.binary_repr(effect_idx, n_factors)
return np.array([int(i) for i in binrepr], dtype=int)
def _iter_contrasts(n_subjects, factor_levels, effect_picks):
"""Set up contrasts."""
from scipy.signal import detrend
sc = []
n_factors = len(factor_levels)
# prepare computation of Kronecker products
for n_levels in factor_levels:
# for each factor append
# 1) column vector of length == number of levels,
# 2) square matrix with diagonal == number of levels
# main + interaction effects for contrasts
sc.append([np.ones([n_levels, 1]),
detrend(np.eye(n_levels), type='constant')])
for this_effect in effect_picks:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
c_ = sc[0][contrast_idx[n_factors - 1]]
for i_contrast in range(1, n_factors):
this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
c_ = np.kron(c_, sc[i_contrast][this_contrast])
df1 = np.linalg.matrix_rank(c_)
df2 = df1 * (n_subjects - 1)
yield c_, df1, df2
def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
"""Compute F-value thresholds for a two-way ANOVA.
Parameters
----------
n_subjects : int
The number of subjects to be analyzed.
factor_levels : list-like
The number of levels per factor.
effects : str
A string denoting the effect to be returned. The following
mapping is currently supported:
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
pvalue : float
The p-value to be thresholded.
Returns
-------
F_threshold : list | float
List of F-values for each effect if the number of effects
requested > 2, else float.
See Also
--------
f_oneway
f_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
effect_picks, _ = _map_effects(len(factor_levels), effects)
F_threshold = []
for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
effect_picks):
F_threshold.append(f(df1, df2).isf(pvalue))
return F_threshold if len(F_threshold) > 1 else F_threshold[0]
def f_mway_rm(data, factor_levels, effects='all',
correction=False, return_pvals=True):
"""Compute M-way repeated measures ANOVA for fully balanced designs.
Parameters
----------
data : ndarray
3D array where the first two dimensions are compliant
with a subjects X conditions scheme where the first
factor repeats slowest::
A1B1 A1B2 A2B1 A2B2
subject 1 1.34 2.53 0.97 1.74
subject ... .... .... .... ....
subject k 2.45 7.90 3.09 4.76
The last dimensions is thought to carry the observations
for mass univariate analysis.
factor_levels : list-like
The number of levels per factor.
effects : str | list
A string denoting the effect to be returned. The following
mapping is currently supported (example with 2 factors):
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
* ``'all'``: all effects (equals 'A*B' in a 2 way design)
If list, effect names are used: ``['A', 'B', 'A:B']``.
correction : bool
The correction method to be employed if one factor has more than two
levels. If True, sphericity correction using the Greenhouse-Geisser
method will be applied.
return_pvals : bool
If True, return p-values corresponding to F-values.
Returns
-------
F_vals : ndarray
An array of F-statistics with length corresponding to the number
of effects estimated. The shape depends on the number of effects
estimated.
p_vals : ndarray
If not requested via return_pvals, defaults to an empty array.
See Also
--------
f_oneway
f_threshold_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
if data.ndim == 2: # general purpose support, e.g. behavioural data
data = data[:, :, np.newaxis]
elif data.ndim > 3: # let's allow for some magic here.
data = data.reshape(
data.shape[0], data.shape[1], np.prod(data.shape[2:]))
effect_picks, _ = _map_effects(len(factor_levels), effects)
n_obs = data.shape[2]
n_replications = data.shape[0]
# put last axis in front to 'iterate' over mass univariate instances.
data = np.rollaxis(data, 2)
fvalues, pvalues = [], []
for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
effect_picks):
y = np.dot(data, c_)
b = np.mean(y, axis=1)[:, np.newaxis, :]
ss = np.sum(np.sum(y * b, axis=2), axis=1)
mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
fvals = ss / mse
fvalues.append(fvals)
if correction:
# sample covariances, leave off "/ (y.shape[1] - 1)" norm because
# it falls out.
v = np.array([np.dot(y_.T, y_) for y_ in y])
v = (np.array([np.trace(vv) for vv in v]) ** 2 /
(df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
eps = v
df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
if correction:
# numerical imprecision can cause eps=0.99999999999999989
# even with a single category, so never let our degrees of
# freedom drop below 1.
df1, df2 = [np.maximum(d[None, :] * eps, 1.) for d in (df1, df2)]
if return_pvals:
pvals = f(df1, df2).sf(fvals)
else:
pvals = np.empty(0)
pvalues.append(pvals)
# handle single effect returns
return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
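# A hedged usage sketch (not in the original source): the function below is
# illustrative only. It runs f_mway_rm on random data for a balanced 2 x 2
# design with 10 subjects and 5 mass-univariate observations per condition,
# requesting all three effects ('A', 'B', 'A:B').
def _example_f_mway_rm_2x2():
    rng = np.random.RandomState(42)
    # subjects x conditions (A1B1, A1B2, A2B1, A2B2) x observations
    data = rng.randn(10, 4, 5)
    fvals, pvals = f_mway_rm(data, factor_levels=[2, 2], effects='A*B')
    # Both arrays have shape (3, 5): one row per effect, one column per
    # observation.
    return fvals, pvals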
def _parametric_ci(arr, ci=.95):
"""Calculate the `ci`% parametric confidence interval for `arr`."""
mean = arr.mean(0)
if len(arr) < 2: # can't compute standard error
sigma = np.full_like(mean, np.nan)
return mean, sigma
from scipy import stats
sigma = stats.sem(arr, 0)
return stats.t.interval(ci, loc=mean, scale=sigma, df=arr.shape[0])
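# Hedged usage sketch (illustrative only): _parametric_ci over the first axis
# of a (n_samples, n_observations) array returns the lower and upper bounds of
# the 95% Student-t confidence interval around the mean of each observation.
def _example_parametric_ci():
    rng = np.random.RandomState(0)
    arr = rng.randn(20, 3) + 5.0  # 20 samples of 3 observations each
    lower, upper = _parametric_ci(arr, ci=.95)
    return lower, upper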
|
import os
import sys
import shutil
from setuptools import setup, find_packages
from setuptools.command.install import install
from setuptools.command.build_py import build_py
with open('requirements.txt', 'r') as fh:
    dependencies = [l.split("#")[0].strip() for l in fh
                    if l.split("#")[0].strip()]
extras = {}
with open('requirements-extras.txt', 'r') as fh:
extras['extras'] = [l.strip() for l in fh][1:]
# Alternative name.
extras['full'] = extras['extras']
with open('requirements-tests.txt', 'r') as fh:
extras['tests'] = [l.strip() for l in fh][1:]
# ########## platform specific stuff #############
if sys.version_info[0] == 2:
raise Exception('Python 2 is not supported')
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
raise Exception('Python 3 version < 3.5 is not supported')
##################################################
# Defined at module level so you can extend these lists instead of
# replicating them:
standard_exclude = ('*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
with open('README.rst') as f:
long_description = f.read()
def copy_messages():
themes_directory = os.path.join(
os.path.dirname(__file__), 'nikola', 'data', 'themes')
original_messages_directory = os.path.join(
themes_directory, 'default', 'messages')
for theme in ('orphan', 'monospace'):
theme_messages_directory = os.path.join(
themes_directory, theme, 'messages')
if os.path.exists(theme_messages_directory):
shutil.rmtree(theme_messages_directory)
shutil.copytree(original_messages_directory, theme_messages_directory)
def expands_symlinks_for_windows():
"""replaces the symlinked files with a copy of the original content.
In windows (msysgit), a symlink is converted to a text file with a
path to the file it points to. If not corrected, installing from a git
clone will end with some files with bad content
After install the working copy will be dirty (symlink markers overwritten
with real content)
"""
if sys.platform != 'win32':
return
# apply the fix
localdir = os.path.dirname(os.path.abspath(__file__))
oldpath = sys.path[:]
sys.path.insert(0, os.path.join(localdir, 'nikola'))
winutils = __import__('winutils')
failures = winutils.fix_all_git_symlinked(localdir)
sys.path = oldpath
del sys.modules['winutils']
if failures != -1:
        print('WARNING: your working copy is now dirty due to changes in '
              'samplesite, sphinx and themes')
if failures > 0:
raise Exception("Error: \n\tnot all symlinked files could be fixed." +
"\n\tYour best bet is to start again from clean.")
def remove_old_files(self):
tree = os.path.join(self.install_lib, 'nikola')
try:
shutil.rmtree(tree, ignore_errors=True)
except Exception:
pass
class nikola_install(install):
def run(self):
expands_symlinks_for_windows()
remove_old_files(self)
install.run(self)
class nikola_build_py(build_py):
def run(self):
expands_symlinks_for_windows()
build_py.run(self)
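# Both custom commands run the Windows symlink fix before delegating to the
# stock setuptools commands, so that building or installing from a git
# checkout on Windows (msysgit) works with real file contents; the install
# command additionally removes any previously installed 'nikola' tree to
# avoid leaving stale files behind.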
setup(name='Nikola',
version='8.1.2',
description='A modular, fast, simple, static website and blog generator',
long_description=long_description,
author='Roberto Alsina and others',
author_email='[email protected]',
url='https://getnikola.com/',
packages=find_packages(exclude=('tests', 'tests.*')),
license='MIT',
keywords='website, blog, static',
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Plugins',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Text Processing :: Markup'],
install_requires=dependencies,
extras_require=extras,
include_package_data=True,
python_requires='>=3.5',
cmdclass={'install': nikola_install, 'build_py': nikola_build_py},
data_files=[
('share/doc/nikola', [
'docs/manual.rst',
'docs/theming.rst',
'docs/extending.rst']),
('share/man/man1', ['docs/man/nikola.1.gz']),
],
entry_points={
'console_scripts': [
'nikola = nikola.__main__:main'
]
},
)
|