from datetime import datetime
from functools import partial
from json import JSONEncoder, dumps
import math
import os
import sys
from tempfile import mkdtemp
import unittest
import pytest
from homeassistant.core import Event, State
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.json import (
SerializationError,
find_paths_unserializable_data,
load_json,
save_json,
)
from tests.async_mock import Mock
# Test data that can be saved as JSON
TEST_JSON_A = {"a": 1, "B": "two"}
TEST_JSON_B = {"a": "one", "B": 2}
# Test data that cannot be loaded as JSON
TEST_BAD_SERIALIZED = "THIS IS NOT JSON\n"
TMP_DIR = None
def setup():
"""Set up for tests."""
global TMP_DIR
TMP_DIR = mkdtemp()
def teardown():
"""Clean up after tests."""
for fname in os.listdir(TMP_DIR):
os.remove(os.path.join(TMP_DIR, fname))
os.rmdir(TMP_DIR)
def _path_for(leaf_name):
return os.path.join(TMP_DIR, f"{leaf_name}.json")
def test_save_and_load():
"""Test saving and loading back."""
fname = _path_for("test1")
save_json(fname, TEST_JSON_A)
data = load_json(fname)
assert data == TEST_JSON_A
# Skipped on Windows
@unittest.skipIf(
sys.platform.startswith("win"), "private permissions not supported on Windows"
)
def test_save_and_load_private():
"""Test we can load private files and that they are protected."""
fname = _path_for("test2")
save_json(fname, TEST_JSON_A, private=True)
data = load_json(fname)
assert data == TEST_JSON_A
stats = os.stat(fname)
assert stats.st_mode & 0o77 == 0
def test_overwrite_and_reload():
"""Test that we can overwrite an existing file and read back."""
fname = _path_for("test3")
save_json(fname, TEST_JSON_A)
save_json(fname, TEST_JSON_B)
data = load_json(fname)
assert data == TEST_JSON_B
def test_save_bad_data():
"""Test error from trying to save unserialisable data."""
with pytest.raises(SerializationError) as excinfo:
save_json("test4", {"hello": set()})
assert (
"Failed to serialize to JSON: test4. Bad data at $.hello=set()(<class 'set'>"
in str(excinfo.value)
)
def test_load_bad_data():
"""Test error from trying to load unserialisable data."""
fname = _path_for("test5")
with open(fname, "w") as fh:
        fh.write(TEST_BAD_SERIALIZED)
with pytest.raises(HomeAssistantError):
load_json(fname)
def test_custom_encoder():
"""Test serializing with a custom encoder."""
class MockJSONEncoder(JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
fname = _path_for("test6")
save_json(fname, Mock(), encoder=MockJSONEncoder)
data = load_json(fname)
assert data == "9"
def test_find_unserializable_data():
"""Find unserializeable data."""
assert find_paths_unserializable_data(1) == {}
assert find_paths_unserializable_data([1, 2]) == {}
assert find_paths_unserializable_data({"something": "yo"}) == {}
assert find_paths_unserializable_data({"something": set()}) == {
"$.something": set()
}
assert find_paths_unserializable_data({"something": [1, set()]}) == {
"$.something[1]": set()
}
assert find_paths_unserializable_data([1, {"bla": set(), "blub": set()}]) == {
"$[1].bla": set(),
"$[1].blub": set(),
}
assert find_paths_unserializable_data({("A",): 1}) == {"$<key: ('A',)>": ("A",)}
assert math.isnan(
find_paths_unserializable_data(
float("nan"), dump=partial(dumps, allow_nan=False)
)["$"]
)
# Test custom encoder + State support.
class MockJSONEncoder(JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
bad_data = object()
assert (
find_paths_unserializable_data(
[State("mock_domain.mock_entity", "on", {"bad": bad_data})],
dump=partial(dumps, cls=MockJSONEncoder),
)
== {"$[0](state: mock_domain.mock_entity).attributes.bad": bad_data}
)
assert (
find_paths_unserializable_data(
[Event("bad_event", {"bad_attribute": bad_data})],
dump=partial(dumps, cls=MockJSONEncoder),
)
== {"$[0](event: bad_event).data.bad_attribute": bad_data}
)
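# Illustrative addition, not part of the original test module: a minimal sketch showing
# how find_paths_unserializable_data keys nested unserializable values by their
# JSONPath-style location, using only helpers already imported above.
def test_find_unserializable_data_nested_sketch():
    """Sketch (illustrative): nested containers are reported with JSONPath-like keys."""
    assert find_paths_unserializable_data({"outer": [1, {"inner": set()}]}) == {
        "$.outer[1].inner": set()
    }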
|
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.switch import SwitchEntity
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add switches for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsSwitch(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "switch")
]
)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
# Must be able to be turned on/off.
if Capability.switch in capabilities:
return [Capability.switch, Capability.energy_meter, Capability.power_meter]
return None
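# Illustrative note, not part of the original module: get_capabilities only requires the
# basic switch capability; when it is present, the optional energy and power meter
# capabilities are claimed as well. For example, get_capabilities([Capability.switch])
# would return [Capability.switch, Capability.energy_meter, Capability.power_meter],
# while a capability list without Capability.switch would yield None.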
class SmartThingsSwitch(SmartThingsEntity, SwitchEntity):
"""Define a SmartThings switch."""
async def async_turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
await self._device.switch_off(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_turn_on(self, **kwargs) -> None:
"""Turn the switch on."""
await self._device.switch_on(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self._device.status.attributes[Attribute.power].value
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return self._device.status.attributes[Attribute.energy].value
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
|
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'snmp'))
from snmp import SNMPCollector as parent_SNMPCollector
from diamond.metric import Metric
class SNMPRawCollector(parent_SNMPCollector):
def process_config(self):
super(SNMPRawCollector, self).process_config()
        # list of non-existing OIDs per device, to avoid repeating errors in
        # logging. Signal HUP to diamond/collector to flush this list
self.skip_list = []
def get_default_config(self):
"""
Override SNMPCollector.get_default_config method to provide
        default_config for the SNMPRawCollector
"""
default_config = super(SNMPRawCollector,
self).get_default_config()
default_config.update({
'oids': {},
'path_prefix': 'servers',
'path_suffix': 'snmp',
})
return default_config
def _precision(self, value):
"""
Return the precision of the number
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
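    # Illustrative note, not part of the original collector: _precision counts the digits
    # after the decimal point of the value's string form, e.g. self._precision(3.14) -> 2
    # and self._precision("10") -> 0, so published GAUGE metrics keep the precision of
    # the raw SNMP value.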
def _skip(self, device, oid, reason=None):
self.skip_list.append((device, oid))
if reason is not None:
self.log.warn('Muted \'{}\' on \'{}\', because: {}'.format(
oid, device, reason))
def _get_value_walk(self, device, oid, host, port, community):
data = self.walk(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#2)')
return
self.log.debug('Data received from WALK \'{}\': [{}]'.format(
device, data))
if len(data) != 1:
self._skip(
device,
oid,
'unexpected response, data has {} entries'.format(
len(data)))
return
        # because we only allow 1-key dicts, we can pick the value with an
        # absolute index (list() keeps this working on Python 3 dict views)
        value = list(data.items())[0][1]
return value
def _get_value(self, device, oid, host, port, community):
data = self.get(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#1)')
return
self.log.debug('Data received from GET \'{}\': [{}]'.format(
device, data))
if len(data) == 0:
self._skip(device, oid, 'empty response, device down?')
return
if oid not in data:
# oid is not even in hierarchy, happens when using 9.9.9.9
# but not when using 1.9.9.9
self._skip(device, oid, 'no object at OID (#1)')
return
value = data[oid]
if value == 'No Such Object currently exists at this OID':
self._skip(device, oid, 'no object at OID (#2)')
return
if value == 'No Such Instance currently exists at this OID':
return self._get_value_walk(device, oid, host, port, community)
return value
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
self.log.debug(
'Collecting raw SNMP statistics from device \'{}\''.format(device))
dev_config = self.config['devices'][device]
if 'oids' in dev_config:
for oid, metricName in dev_config['oids'].items():
if (device, oid) in self.skip_list:
self.log.debug(
'Skipping OID \'{}\' ({}) on device \'{}\''.format(
oid, metricName, device))
continue
timestamp = time.time()
value = self._get_value(device, oid, host, port, community)
if value is None:
continue
self.log.debug(
'\'{}\' ({}) on device \'{}\' - value=[{}]'.format(
oid, metricName, device, value))
path = '.'.join([self.config['path_prefix'], device,
self.config['path_suffix'], metricName])
metric = Metric(path=path, value=value, timestamp=timestamp,
precision=self._precision(value),
metric_type='GAUGE')
self.publish_metric(metric)
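# Illustrative note, not part of the original collector: with the defaults above, a device
# named 'router1' configured with {'oids': {'1.3.6.1.2.1.1.3.0': 'uptime'}} (hypothetical
# names) would publish a GAUGE metric on the path 'servers.router1.snmp.uptime', assembled
# from path_prefix, the device name, path_suffix and the configured metric name.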
|
import unittest
import numpy as np
import numpy.testing as np_test
from pgmpy.factors.distributions import GaussianDistribution as GD
class TestGDInit(unittest.TestCase):
def test_class_init(self):
phi1 = GD(
["x1", "x2", "x3"],
np.array([[1], [-3], [4]]),
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertEqual(phi1.variables, ["x1", "x2", "x3"])
np_test.assert_array_equal(phi1.mean, np.asarray([[1], [-3], [4]], dtype=float))
np_test.assert_array_equal(
phi1.covariance,
np.asarray([[4, 2, -2], [2, 5, -5], [-2, -5, 8]], dtype=float),
)
self.assertEqual(phi1._precision_matrix, None)
phi2 = GD(
["x1", "x2", "x3"],
[1, 2, 5],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertEqual(phi2.variables, ["x1", "x2", "x3"])
np_test.assert_array_equal(phi2.mean, np.asarray([[1], [2], [5]], dtype=float))
np_test.assert_array_equal(
phi2.covariance,
np.asarray([[4, 2, -2], [2, 5, -5], [-2, -5, 8]], dtype=float),
)
self.assertEqual(phi2._precision_matrix, None)
phi3 = GD(["x"], [0], [[1]])
self.assertEqual(phi3.variables, ["x"])
np_test.assert_array_equal(phi3.mean, np.asarray([[0]], dtype=float))
np_test.assert_array_equal(phi3.covariance, np.asarray([[1]], dtype=float))
self.assertEqual(phi3._precision_matrix, None)
phi1 = GD(
["1", 2, (1, 2, "x")],
np.array([[1], [-3], [4]]),
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertEqual(phi1.variables, ["1", 2, (1, 2, "x")])
np_test.assert_array_equal(phi1.mean, np.asarray([[1], [-3], [4]], dtype=float))
np_test.assert_array_equal(
phi1.covariance,
np.asarray([[4, 2, -2], [2, 5, -5], [-2, -5, 8]], dtype=float),
)
self.assertEqual(phi1._precision_matrix, None)
phi2 = GD(
["1", 7, (1, 2, "x")],
[1, 2, 5],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertEqual(phi2.variables, ["1", 7, (1, 2, "x")])
np_test.assert_array_equal(phi2.mean, np.asarray([[1], [2], [5]], dtype=float))
np_test.assert_array_equal(
phi2.covariance,
np.asarray([[4, 2, -2], [2, 5, -5], [-2, -5, 8]], dtype=float),
)
self.assertEqual(phi2._precision_matrix, None)
phi3 = GD([23], [0], [[1]])
self.assertEqual(phi3.variables, [23])
np_test.assert_array_equal(phi3.mean, np.asarray([[0]], dtype=float))
np_test.assert_array_equal(phi3.covariance, np.asarray([[1]], dtype=float))
self.assertEqual(phi3._precision_matrix, None)
def test_class_init_valueerror(self):
self.assertRaises(
ValueError,
GD,
["x1", "x2", "x3"],
[1, -3],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertRaises(
ValueError,
GD,
["x1", "x2"],
[1, -3, 4],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertRaises(
ValueError,
GD,
["x1", "x2", "x3"],
[[1, -3, 4]],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertRaises(
ValueError,
GD,
["x1", "x2", "x3"],
[1],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertRaises(
ValueError,
GD,
["x1", "x2", "x3"],
[[1], [-3]],
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.assertRaises(
ValueError,
GD,
["x1", "x2", "x3"],
[1, -3, 4],
np.array([[4, 2, -2], [2, 5, -5]]),
)
self.assertRaises(
ValueError, GD, ["x1", "x2", "x3"], [1, -3, 4], np.array([[4, 2, -2]])
)
self.assertRaises(
ValueError,
GD,
["x1", "x2", "x3"],
[1, -3],
np.array([[4, 2], [2, 5], [-2, -5]]),
)
self.assertRaises(
ValueError, GD, ["x1", "x2", "x3"], [1, -3], np.array([[4], [2], [-2]])
)
self.assertRaises(ValueError, GD, ["x1", "x2", "x3"], [1, -3], np.array([[-2]]))
class TestJGDMethods(unittest.TestCase):
def setUp(self):
self.phi1 = GD(
["x1", "x2", "x3"],
np.array([[1], [-3], [4]]),
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]),
)
self.phi2 = GD(["x"], [0], [[1]])
self.phi3 = self.phi1.copy()
def test_precision_matrix(self):
self.assertEqual(self.phi1._precision_matrix, None)
np_test.assert_almost_equal(
self.phi1.precision_matrix,
np.array(
[
[0.3125, -0.125, 0],
[-0.125, 0.5833333, 0.3333333],
[0, 0.3333333, 0.3333333],
]
),
)
np_test.assert_almost_equal(
self.phi1._precision_matrix,
np.array(
[
[0.3125, -0.125, 0],
[-0.125, 0.5833333, 0.3333333],
[0, 0.3333333, 0.3333333],
]
),
)
self.assertEqual(self.phi2._precision_matrix, None)
np_test.assert_almost_equal(self.phi2.precision_matrix, np.array([[1]]))
np_test.assert_almost_equal(self.phi2._precision_matrix, np.array([[1]]))
def test_marginalize(self):
phi = self.phi1.marginalize(["x3"], inplace=False)
self.assertEqual(phi.variables, ["x1", "x2"])
np_test.assert_array_equal(phi.mean, np.asarray([[1], [-3]], dtype=float))
np_test.assert_array_equal(
phi.covariance, np.asarray([[4, 2], [2, 5]], dtype=float)
)
self.assertEqual(phi._precision_matrix, None)
phi = self.phi1.marginalize(["x3", "x2"], inplace=False)
self.assertEqual(phi.variables, ["x1"])
np_test.assert_array_equal(phi.mean, np.asarray([[1]], dtype=float))
np_test.assert_array_equal(phi.covariance, np.asarray([[4]], dtype=float))
self.assertEqual(phi._precision_matrix, None)
self.phi1.marginalize(["x3"])
self.assertEqual(self.phi1.variables, ["x1", "x2"])
np_test.assert_array_equal(self.phi1.mean, np.asarray([[1], [-3]], dtype=float))
np_test.assert_array_equal(
self.phi1.covariance, np.asarray([[4, 2], [2, 5]], dtype=float)
)
self.assertEqual(self.phi1._precision_matrix, None)
self.phi1 = self.phi3
self.phi1.marginalize(["x3", "x2"])
self.assertEqual(self.phi1.variables, ["x1"])
np_test.assert_array_equal(self.phi1.mean, np.asarray([[1]], dtype=float))
np_test.assert_array_equal(self.phi1.covariance, np.asarray([[4]], dtype=float))
self.assertEqual(self.phi1._precision_matrix, None)
self.phi1 = self.phi3
def test_copy(self):
copy_phi1 = self.phi1.copy()
self.assertEqual(copy_phi1.variables, self.phi1.variables)
np_test.assert_array_equal(copy_phi1.mean, self.phi1.mean)
np_test.assert_array_equal(copy_phi1.covariance, self.phi1.covariance)
np_test.assert_array_equal(
copy_phi1._precision_matrix, self.phi1._precision_matrix
)
copy_phi1.marginalize(["x3"])
self.assertEqual(self.phi1.variables, ["x1", "x2", "x3"])
np_test.assert_array_equal(
self.phi1.mean, np.asarray([[1], [-3], [4]], dtype=float)
)
np_test.assert_array_equal(
self.phi1.covariance,
np.asarray([[4, 2, -2], [2, 5, -5], [-2, -5, 8]], dtype=float),
)
self.assertEqual(self.phi1._precision_matrix, None)
self.phi1.marginalize(["x2"])
self.assertEqual(copy_phi1.variables, ["x1", "x2"])
np_test.assert_array_equal(copy_phi1.mean, np.asarray([[1], [-3]], dtype=float))
np_test.assert_array_equal(
copy_phi1.covariance, np.asarray([[4, 2], [2, 5]], dtype=float)
)
self.assertEqual(copy_phi1._precision_matrix, None)
self.phi1 = self.phi3
def test_assignment(self):
np_test.assert_almost_equal(self.phi1.assignment(*[1, 2, 3]), 2.797826e-05)
np_test.assert_almost_equal(
self.phi1.assignment(*[[1, 2, 3], [0, 0, 0]]),
np.array([2.79782602e-05, 1.48056313e-03]),
)
np_test.assert_almost_equal(self.phi2.assignment(0), 0.3989422804)
np_test.assert_almost_equal(
self.phi2.assignment(*[0, 1, -1]),
np.array([0.39894228, 0.24197072, 0.24197072]),
)
def test_reduce(self):
phi = self.phi1.reduce([("x1", 7)], inplace=False)
self.assertEqual(phi.variables, ["x2", "x3"])
np_test.assert_array_equal(phi.mean, np.asarray([[0], [1]], dtype=float))
np_test.assert_array_equal(
phi.covariance, np.asarray([[4, -4], [-4, 7]], dtype=float)
)
self.assertEqual(phi._precision_matrix, None)
phi = self.phi1.reduce([("x1", 3), ("x2", 1)], inplace=False)
self.assertEqual(phi.variables, ["x3"])
np_test.assert_array_equal(phi.mean, np.array([[0]], dtype=float))
np_test.assert_array_equal(phi.covariance, np.asarray([[3]], dtype=float))
self.assertEqual(phi._precision_matrix, None)
self.phi1.reduce([("x1", 7)])
self.assertEqual(self.phi1.variables, ["x2", "x3"])
np_test.assert_array_equal(self.phi1.mean, np.asarray([[0], [1]], dtype=float))
np_test.assert_array_equal(
self.phi1.covariance, np.asarray([[4, -4], [-4, 7]], dtype=float)
)
self.assertEqual(self.phi1._precision_matrix, None)
self.phi1 = self.phi3.copy()
self.phi1.reduce([("x1", 3), ("x2", 1)])
self.assertEqual(self.phi1.variables, ["x3"])
np_test.assert_array_equal(self.phi1.mean, np.asarray([[0]], dtype=float))
np_test.assert_array_equal(self.phi1.covariance, np.asarray([[3]], dtype=float))
self.assertEqual(self.phi1._precision_matrix, None)
self.phi1 = self.phi3.copy()
self.phi1.reduce([("x2", 1), ("x1", 3)])
self.assertEqual(self.phi1.variables, ["x3"])
np_test.assert_array_equal(self.phi1.mean, np.asarray([[0]], dtype=float))
np_test.assert_array_equal(self.phi1.covariance, np.asarray([[3]], dtype=float))
self.assertEqual(self.phi1._precision_matrix, None)
def test_normalize(self):
phi = self.phi1.copy()
phi.normalize()
self.assertEqual(self.phi1.variables, phi.variables)
np_test.assert_array_equal(self.phi1.mean, phi.mean)
np_test.assert_array_equal(self.phi1.covariance, phi.covariance)
self.assertEqual(self.phi1._precision_matrix, phi._precision_matrix)
phi = self.phi1.normalize(inplace=False)
self.assertEqual(self.phi1.variables, phi.variables)
np_test.assert_array_equal(self.phi1.mean, phi.mean)
np_test.assert_array_equal(self.phi1.covariance, phi.covariance)
self.assertEqual(self.phi1._precision_matrix, phi._precision_matrix)
def test_product(self):
pass
def test_divide(self):
pass
def test_eq(self):
pass
def test_repr(self):
pass
def tearDown(self):
del self.phi1
del self.phi2
del self.phi3
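# Illustrative addition, not part of the original suite: a minimal sketch verifying that
# the lazily computed precision matrix checked above is simply the inverse of the
# covariance matrix, using only imports already present in this module.
class TestPrecisionMatrixSketch(unittest.TestCase):
    def test_precision_is_covariance_inverse(self):
        """Sketch (illustrative): precision_matrix equals the inverse covariance."""
        covariance = np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]])
        phi = GD(["x1", "x2", "x3"], [1, -3, 4], covariance)
        np_test.assert_almost_equal(phi.precision_matrix, np.linalg.inv(covariance))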
|
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Uptime"
ICON = "mdi:clock"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="days"): vol.All(
cv.string, vol.In(["minutes", "hours", "days", "seconds"])
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the uptime sensor platform."""
name = config.get(CONF_NAME)
units = config.get(CONF_UNIT_OF_MEASUREMENT)
async_add_entities([UptimeSensor(name, units)], True)
class UptimeSensor(Entity):
"""Representation of an uptime sensor."""
def __init__(self, name, unit):
"""Initialize the uptime sensor."""
self._name = name
self._unit = unit
self.initial = dt_util.now()
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to display in the front end."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement the value is expressed in."""
return self._unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_update(self):
"""Update the state of the sensor."""
delta = dt_util.now() - self.initial
div_factor = 3600
if self.unit_of_measurement == "days":
div_factor *= 24
elif self.unit_of_measurement == "minutes":
div_factor /= 60
elif self.unit_of_measurement == "seconds":
div_factor /= 3600
delta = delta.total_seconds() / div_factor
self._state = round(delta, 2)
_LOGGER.debug("New value: %s", delta)
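# Illustrative note, not part of the original platform: async_update divides the elapsed
# seconds by a unit-dependent factor, i.e. div_factor is 1 for "seconds", 60 for
# "minutes", 3600 for "hours" (the starting value) and 86400 for "days", so the state is
# the uptime expressed in the configured unit, rounded to two decimals.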
|
import pytest
boto = pytest.importorskip("boto")
import boto # NOQA
import boto.iam # NOQA
from boto.s3.connection import S3Connection # NOQA
from boto.s3.key import Key # NOQA
from configparser import DuplicateSectionError # NOQA
import vcr # NOQA
def test_boto_stubs(tmpdir):
with vcr.use_cassette(str(tmpdir.join("boto-stubs.yml"))):
# Perform the imports within the patched context so that
# CertValidatingHTTPSConnection refers to the patched version.
from boto.https_connection import CertValidatingHTTPSConnection
from vcr.stubs.boto_stubs import VCRCertValidatingHTTPSConnection
# Prove that the class was patched by the stub and that we can instantiate it.
assert issubclass(CertValidatingHTTPSConnection, VCRCertValidatingHTTPSConnection)
CertValidatingHTTPSConnection("hostname.does.not.matter")
def test_boto_without_vcr():
s3_conn = S3Connection()
s3_bucket = s3_conn.get_bucket("boto-demo-1394171994") # a bucket you can access
k = Key(s3_bucket)
k.key = "test.txt"
k.set_contents_from_string("hello world i am a string")
def test_boto_medium_difficulty(tmpdir):
s3_conn = S3Connection()
s3_bucket = s3_conn.get_bucket("boto-demo-1394171994") # a bucket you can access
with vcr.use_cassette(str(tmpdir.join("boto-medium.yml"))):
k = Key(s3_bucket)
k.key = "test.txt"
k.set_contents_from_string("hello world i am a string")
with vcr.use_cassette(str(tmpdir.join("boto-medium.yml"))):
k = Key(s3_bucket)
k.key = "test.txt"
k.set_contents_from_string("hello world i am a string")
def test_boto_hardcore_mode(tmpdir):
with vcr.use_cassette(str(tmpdir.join("boto-hardcore.yml"))):
s3_conn = S3Connection()
s3_bucket = s3_conn.get_bucket("boto-demo-1394171994") # a bucket you can access
k = Key(s3_bucket)
k.key = "test.txt"
k.set_contents_from_string("hello world i am a string")
with vcr.use_cassette(str(tmpdir.join("boto-hardcore.yml"))):
s3_conn = S3Connection()
s3_bucket = s3_conn.get_bucket("boto-demo-1394171994") # a bucket you can access
k = Key(s3_bucket)
k.key = "test.txt"
k.set_contents_from_string("hello world i am a string")
def test_boto_iam(tmpdir):
try:
boto.config.add_section("Boto")
except DuplicateSectionError:
pass
# Ensure that boto uses HTTPS
boto.config.set("Boto", "is_secure", "true")
# Ensure that boto uses CertValidatingHTTPSConnection
boto.config.set("Boto", "https_validate_certificates", "true")
with vcr.use_cassette(str(tmpdir.join("boto-iam.yml"))):
iam_conn = boto.iam.connect_to_region("universal")
iam_conn.get_all_users()
with vcr.use_cassette(str(tmpdir.join("boto-iam.yml"))):
iam_conn = boto.iam.connect_to_region("universal")
iam_conn.get_all_users()
|
import logging
from typing import Dict
from typing import Iterable
from typing import List
from typing import Tuple
from typing import Type
from service_configuration_lib import read_service_configuration
from paasta_tools import utils
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InstanceConfig_T
from paasta_tools.utils import list_clusters
from paasta_tools.utils import load_service_instance_configs
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoDeploymentsAvailable
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class PaastaServiceConfigLoader:
"""PaastaServiceConfigLoader provides useful methods for reading soa-configs and
iterating instance names or InstanceConfigs objects.
:Example:
>>> from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
>>> from paasta_tools.utils import DEFAULT_SOA_DIR
>>>
>>> sc = PaastaServiceConfigLoader(service='fake_service', soa_dir=DEFAULT_SOA_DIR)
>>>
>>> for instance in sc.instances(cluster='fake_cluster', instance_type='marathon'):
... print(instance)
...
main
canary
>>>
>>> for instance_config in sc.instance_configs(cluster='fake_cluster', instance_type='marathon'):
... print(instance_config.get_instance())
...
main
canary
>>>
"""
_framework_configs: Dict[Tuple[str, type], Dict[str, utils.InstanceConfigDict]]
_clusters: List[str]
_deployments_json: utils.DeploymentsJsonV2
def __init__(
self,
service: str,
soa_dir: str = DEFAULT_SOA_DIR,
load_deployments: bool = True,
) -> None:
self._service = service
self._soa_dir = soa_dir
self._load_deployments = load_deployments
self._clusters = None
self._general_config = None
self._deployments_json = None
self._framework_configs = {}
@property
def clusters(self) -> Iterable[str]:
"""Returns an iterator that yields cluster names for the service.
:returns: iterator that yields cluster names.
"""
if self._clusters is None:
self._clusters = list_clusters(service=self._service, soa_dir=self._soa_dir)
for cluster in self._clusters:
yield cluster
def instances(
self, cluster: str, instance_type_class: Type[InstanceConfig_T]
) -> Iterable[str]:
"""Returns an iterator that yields instance names as strings.
:param cluster: The cluster name
        :param instance_type_class: The InstanceConfig subclass for one of paasta_tools.utils.INSTANCE_TYPES
:returns: an iterator that yields instance names
"""
if (cluster, instance_type_class) not in self._framework_configs:
self._refresh_framework_config(cluster, instance_type_class)
for instance in self._framework_configs.get((cluster, instance_type_class), []):
yield instance
def instance_configs(
self, cluster: str, instance_type_class: Type[InstanceConfig_T]
) -> Iterable[InstanceConfig_T]:
"""Returns an iterator that yields InstanceConfig objects.
:param cluster: The cluster name
        :param instance_type_class: The InstanceConfig subclass for one of paasta_tools.utils.INSTANCE_TYPES
:returns: an iterator that yields instances of MarathonServiceConfig, etc.
:raises NotImplementedError: when it doesn't know how to create a config for instance_type
"""
if (cluster, instance_type_class) not in self._framework_configs:
self._refresh_framework_config(cluster, instance_type_class)
for instance, config in self._framework_configs.get(
(cluster, instance_type_class), {}
).items():
try:
yield self._create_service_config(
cluster, instance, config, instance_type_class
)
except NoDeploymentsAvailable:
pass
def _framework_config_filename(
self, cluster: str, instance_type_class: Type[InstanceConfig_T]
):
return f"{instance_type_class.config_filename_prefix}-{cluster}"
def _refresh_framework_config(
self, cluster: str, instance_type_class: Type[InstanceConfig_T]
):
instances = load_service_instance_configs(
service=self._service,
instance_type=instance_type_class.config_filename_prefix,
cluster=cluster,
soa_dir=self._soa_dir,
)
self._framework_configs[(cluster, instance_type_class)] = instances
def _get_branch_dict(
self, cluster: str, instance: str, config: utils.InstanceConfig
) -> utils.BranchDictV2:
if self._deployments_json is None:
self._deployments_json = load_v2_deployments_json(
self._service, soa_dir=self._soa_dir
)
branch = config.get_branch()
deploy_group = config.get_deploy_group()
return self._deployments_json.get_branch_dict(
self._service, branch, deploy_group
)
def _get_merged_config(
self, config: utils.InstanceConfigDict
) -> utils.InstanceConfigDict:
if self._general_config is None:
self._general_config = read_service_configuration(
service_name=self._service, soa_dir=self._soa_dir
)
return deep_merge_dictionaries(overrides=config, defaults=self._general_config)
def _create_service_config(
self,
cluster: str,
instance: str,
config: utils.InstanceConfigDict,
config_class: Type[InstanceConfig_T],
) -> InstanceConfig_T:
"""Create a service instance's configuration for marathon.
:param cluster: The cluster to read the configuration for
:param instance: The instance of the service to retrieve
:param config: the framework instance config.
:returns: An instance of config_class
"""
merged_config = self._get_merged_config(config)
temp_instance_config = config_class(
service=self._service,
cluster=cluster,
instance=instance,
config_dict=merged_config,
branch_dict=None,
soa_dir=self._soa_dir,
)
branch_dict = self._get_branch_dict(cluster, instance, temp_instance_config)
return config_class(
service=self._service,
cluster=cluster,
instance=instance,
config_dict=merged_config,
branch_dict=branch_dict,
soa_dir=self._soa_dir,
)
|
import datetime
from homeassistant.components.switch import SwitchEntity
import homeassistant.util.dt as dt_util
from .const import DOMAIN, DOOR_STATION, DOOR_STATION_INFO
from .entity import DoorBirdEntity
IR_RELAY = "__ir_light__"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the DoorBird switch platform."""
entities = []
config_entry_id = config_entry.entry_id
doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
doorstation_info = hass.data[DOMAIN][config_entry_id][DOOR_STATION_INFO]
relays = doorstation_info["RELAYS"]
relays.append(IR_RELAY)
for relay in relays:
switch = DoorBirdSwitch(doorstation, doorstation_info, relay)
entities.append(switch)
async_add_entities(entities)
class DoorBirdSwitch(DoorBirdEntity, SwitchEntity):
"""A relay in a DoorBird device."""
def __init__(self, doorstation, doorstation_info, relay):
"""Initialize a relay in a DoorBird device."""
super().__init__(doorstation, doorstation_info)
self._doorstation = doorstation
self._relay = relay
self._state = False
self._assume_off = datetime.datetime.min
if relay == IR_RELAY:
self._time = datetime.timedelta(minutes=5)
else:
self._time = datetime.timedelta(seconds=5)
self._unique_id = f"{self._mac_addr}_{self._relay}"
@property
def unique_id(self):
"""Switch unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the switch."""
if self._relay == IR_RELAY:
return f"{self._doorstation.name} IR"
return f"{self._doorstation.name} Relay {self._relay}"
@property
def icon(self):
"""Return the icon to display."""
return "mdi:lightbulb" if self._relay == IR_RELAY else "mdi:dip-switch"
@property
def is_on(self):
"""Get the assumed state of the relay."""
return self._state
def turn_on(self, **kwargs):
"""Power the relay."""
if self._relay == IR_RELAY:
self._state = self._doorstation.device.turn_light_on()
else:
self._state = self._doorstation.device.energize_relay(self._relay)
now = dt_util.utcnow()
self._assume_off = now + self._time
def turn_off(self, **kwargs):
"""Turn off the relays is not needed. They are time-based."""
raise NotImplementedError("DoorBird relays cannot be manually turned off.")
async def async_update(self):
"""Wait for the correct amount of assumed time to pass."""
if self._state and self._assume_off <= dt_util.utcnow():
self._state = False
self._assume_off = datetime.datetime.min
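# Illustrative note, not part of the original platform: turn_on records an "assumed off"
# time of now plus self._time (five minutes for the IR light, five seconds for a normal
# relay), and async_update flips is_on back to False once that moment has passed, since
# the DoorBird hardware de-energizes relays on its own rather than via turn_off.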
|
import unittest
import numpy as np
import pandas as pd
import datashader as ds
import datashader.transfer_functions as tf
class TestDatashader(unittest.TestCase):
# based on https://github.com/pyviz/datashader/blob/master/datashader/tests/test_pipeline.py
def test_pipeline(self):
df = pd.DataFrame({
'x': np.array(([0.] * 10 + [1] * 10)),
'y': np.array(([0.] * 5 + [1] * 5 + [0] * 5 + [1] * 5)),
'f64': np.arange(20, dtype='f8')
})
df.f64.iloc[2] = np.nan
cvs = ds.Canvas(plot_width=2, plot_height=2, x_range=(0, 1), y_range=(0, 1))
pipeline = ds.Pipeline(df, ds.Point('x', 'y'))
img = pipeline((0, 1), (0, 1), 2, 2)
agg = cvs.points(df, 'x', 'y', ds.count())
self.assertTrue(img.equals(tf.shade(agg)))
color_fn = lambda agg: tf.shade(agg, 'pink', 'red')
pipeline.color_fn = color_fn
img = pipeline((0, 1), (0, 1), 2, 2)
self.assertTrue(img.equals(color_fn(agg)))
transform_fn = lambda agg: agg + 1
pipeline.transform_fn = transform_fn
img = pipeline((0, 1), (0, 1), 2, 2)
self.assertTrue(img.equals(color_fn(transform_fn(agg))))
pipeline = ds.Pipeline(df, ds.Point('x', 'y'), ds.sum('f64'))
img = pipeline((0, 1), (0, 1), 2, 2)
agg = cvs.points(df, 'x', 'y', ds.sum('f64'))
self.assertTrue(img.equals(tf.shade(agg)))
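# Illustrative note, not part of the original test: as the assertions above show,
# ds.Pipeline composes the three stages so the rendered image is effectively
# color_fn(transform_fn(agg)), where agg comes from Canvas.points with the pipeline's
# glyph and reduction (tf.shade and the identity transform being the defaults).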
|
from aiofreepybox.exceptions import (
AuthorizationError,
HttpRequestError,
InvalidTokenError,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.freebox.const import DOMAIN
from homeassistant.config_entries import SOURCE_DISCOVERY, SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_PORT
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
HOST = "myrouter.freeboxos.fr"
PORT = 1234
@pytest.fixture(name="connect")
def mock_controller_connect():
"""Mock a successful connection."""
with patch("homeassistant.components.freebox.router.Freepybox") as service_mock:
service_mock.return_value.open = AsyncMock()
service_mock.return_value.system.get_config = AsyncMock(
return_value={
"mac": "abcd",
"model_info": {"pretty_name": "Pretty Model"},
"firmware_version": "123",
}
)
service_mock.return_value.lan.get_hosts_list = AsyncMock()
service_mock.return_value.connection.get_status = AsyncMock()
service_mock.return_value.close = AsyncMock()
yield service_mock
async def test_user(hass):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_import(hass):
"""Test import step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_discovery(hass):
"""Test discovery step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_DISCOVERY},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_link(hass, connect):
"""Test linking."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == HOST
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
async def test_abort_if_already_setup(hass):
"""Test we abort if component is already setup."""
MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: HOST, CONF_PORT: PORT}, unique_id=HOST
).add_to_hass(hass)
# Should fail, same HOST (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_on_link_failed(hass):
"""Test when we have errors during linking the router."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=AuthorizationError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "register_failed"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=HttpRequestError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=InvalidTokenError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
|
import requests.exceptions
from requests_mock import ANY
from upcloud_api import UpCloudAPIError
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.upcloud.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from tests.common import MockConfigEntry
FIXTURE_USER_INPUT = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
}
FIXTURE_USER_INPUT_OPTIONS = {
CONF_SCAN_INTERVAL: "120",
}
async def test_show_set_form(hass):
"""Test that the setup form is served."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_connection_error(hass, requests_mock):
"""Test we show user form on connection error."""
requests_mock.request(ANY, ANY, exc=requests.exceptions.ConnectionError())
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_login_error(hass, requests_mock):
"""Test we show user form with appropriate error on response failure."""
requests_mock.request(
ANY,
ANY,
exc=UpCloudAPIError(
error_code="AUTHENTICATION_FAILED",
error_message="Authentication failed using the given username and password.",
),
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_auth"}
async def test_success(hass, requests_mock):
"""Test successful flow provides entry creation data."""
requests_mock.request(ANY, ANY, text='{"account":{"username":"user"}}')
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_USERNAME] == FIXTURE_USER_INPUT[CONF_USERNAME]
assert result["data"][CONF_PASSWORD] == FIXTURE_USER_INPUT[CONF_PASSWORD]
async def test_options(hass):
"""Test options produce expected data."""
config_entry = MockConfigEntry(
domain=DOMAIN, data=FIXTURE_USER_INPUT, options=FIXTURE_USER_INPUT_OPTIONS
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=FIXTURE_USER_INPUT_OPTIONS,
)
assert result["data"][CONF_SCAN_INTERVAL] == int(
FIXTURE_USER_INPUT_OPTIONS[CONF_SCAN_INTERVAL]
)
|
import pytest
from homeassistant.components.frontend import DOMAIN
from homeassistant.setup import async_setup_component
@pytest.fixture(autouse=True)
def setup_frontend(hass):
"""Fixture to setup the frontend."""
hass.loop.run_until_complete(async_setup_component(hass, "frontend", {}))
async def test_get_user_data_empty(hass, hass_ws_client, hass_storage):
"""Test get_user_data command."""
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "frontend/get_user_data", "key": "non-existing-key"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"] is None
async def test_get_user_data(hass, hass_ws_client, hass_admin_user, hass_storage):
"""Test get_user_data command."""
storage_key = f"{DOMAIN}.user_data_{hass_admin_user.id}"
hass_storage[storage_key] = {
"key": storage_key,
"version": 1,
"data": {"test-key": "test-value", "test-complex": [{"foo": "bar"}]},
}
client = await hass_ws_client(hass)
# Get a simple string key
await client.send_json(
{"id": 6, "type": "frontend/get_user_data", "key": "test-key"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"] == "test-value"
# Get a more complex key
await client.send_json(
{"id": 7, "type": "frontend/get_user_data", "key": "test-complex"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"][0]["foo"] == "bar"
# Get all data (no key)
await client.send_json({"id": 8, "type": "frontend/get_user_data"})
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"]["test-key"] == "test-value"
assert res["result"]["value"]["test-complex"][0]["foo"] == "bar"
async def test_set_user_data_empty(hass, hass_ws_client, hass_storage):
"""Test set_user_data command."""
client = await hass_ws_client(hass)
# test creating
await client.send_json(
{"id": 6, "type": "frontend/get_user_data", "key": "test-key"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"] is None
await client.send_json(
{
"id": 7,
"type": "frontend/set_user_data",
"key": "test-key",
"value": "test-value",
}
)
res = await client.receive_json()
assert res["success"], res
await client.send_json(
{"id": 8, "type": "frontend/get_user_data", "key": "test-key"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"] == "test-value"
async def test_set_user_data(hass, hass_ws_client, hass_storage, hass_admin_user):
"""Test set_user_data command with initial data."""
storage_key = f"{DOMAIN}.user_data_{hass_admin_user.id}"
hass_storage[storage_key] = {
"version": 1,
"data": {"test-key": "test-value", "test-complex": "string"},
}
client = await hass_ws_client(hass)
# test creating
await client.send_json(
{
"id": 5,
"type": "frontend/set_user_data",
"key": "test-non-existent-key",
"value": "test-value-new",
}
)
res = await client.receive_json()
assert res["success"], res
await client.send_json(
{"id": 6, "type": "frontend/get_user_data", "key": "test-non-existent-key"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"] == "test-value-new"
# test updating with complex data
await client.send_json(
{
"id": 7,
"type": "frontend/set_user_data",
"key": "test-complex",
"value": [{"foo": "bar"}],
}
)
res = await client.receive_json()
assert res["success"], res
await client.send_json(
{"id": 8, "type": "frontend/get_user_data", "key": "test-complex"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"][0]["foo"] == "bar"
# ensure other existing key was not modified
await client.send_json(
{"id": 9, "type": "frontend/get_user_data", "key": "test-key"}
)
res = await client.receive_json()
assert res["success"], res
assert res["result"]["value"] == "test-value"
|
import pytest
import nikola.utils
from nikola import shortcodes
@pytest.mark.parametrize(
"template, expected_result",
[
("test({{% noargs %}})", "test(noargs success!)"),
(
"test({{% noargs %}}\\hello world/{{% /noargs %}})",
"test(noargs \\hello world/ success!)",
),
],
)
def test_noargs(site, template, expected_result):
applied_shortcode = shortcodes.apply_shortcodes(template, site.shortcode_registry)[0]
assert applied_shortcode == expected_result
@pytest.mark.parametrize(
"template, expected_result",
[
("test({{% arg 1 %}})", "test(arg ('1',)/[]/)"),
("test({{% arg 1 2aa %}})", "test(arg ('1', '2aa')/[]/)"),
('test({{% arg "hello world" %}})', "test(arg ('hello world',)/[]/)"),
("test({{% arg back\\ slash arg2 %}})", "test(arg ('back slash', 'arg2')/[]/)"),
('test({{% arg "%}}" %}})', "test(arg ('%}}',)/[]/)"),
],
)
def test_positional_arguments(site, template, expected_result):
applied_shortcode = shortcodes.apply_shortcodes(template, site.shortcode_registry)[0]
assert applied_shortcode == expected_result
@pytest.mark.parametrize(
"template, expected_result",
[
("test({{% arg 1a=2b %}})", "test(arg ()/[('1a', '2b')]/)"),
(
'test({{% arg 1a="2b 3c" 4d=5f %}})',
"test(arg ()/[('1a', '2b 3c'), ('4d', '5f')]/)",
),
(
'test({{% arg 1a="2b 3c" 4d=5f back=slash\\ slash %}})',
"test(arg ()/[('1a', '2b 3c'), ('4d', '5f'), ('back', 'slash slash')]/)",
),
],
)
def test_arg_keyword(site, template, expected_result):
applied_shortcode = shortcodes.apply_shortcodes(template, site.shortcode_registry)[0]
assert applied_shortcode == expected_result
@pytest.mark.parametrize(
"template, expected_result",
[
("test({{% arg 123 %}}Hello!{{% /arg %}})", "test(arg ('123',)/[]/Hello!)"),
(
"test({{% arg 123 456 foo=bar %}}Hello world!{{% /arg %}})",
"test(arg ('123', '456')/[('foo', 'bar')]/Hello world!)",
),
(
'test({{% arg 123 456 foo=bar baz="quotes rock." %}}Hello test suite!{{% /arg %}})',
"test(arg ('123', '456')/[('baz', 'quotes rock.'), ('foo', 'bar')]/Hello test suite!)",
),
(
'test({{% arg "123 foo" foobar foo=bar baz="quotes rock." %}}Hello test suite!!{{% /arg %}})',
"test(arg ('123 foo', 'foobar')/[('baz', 'quotes rock.'), ('foo', 'bar')]/Hello test suite!!)",
),
],
)
def test_data(site, template, expected_result):
applied_shortcode = shortcodes.apply_shortcodes(template, site.shortcode_registry)[0]
assert applied_shortcode == expected_result
@pytest.mark.parametrize(
"template, expected_error_pattern",
[
(
"{{% start",
"^Shortcode 'start' starting at .* is not terminated correctly with '%}}'!",
),
(
"{{% wrong ending %%}",
"^Syntax error in shortcode 'wrong' at .*: expecting whitespace!",
),
(
"{{% start %}} {{% /end %}}",
"^Found shortcode ending '{{% /end %}}' which isn't closing a started shortcode",
),
('{{% start "asdf %}}', "^Unexpected end of unquoted string"),
("{{% start =b %}}", "^String starting at .* must be non-empty!"),
('{{% start "a\\', "^Unexpected end of data while escaping"),
("{{% start a\\", "^Unexpected end of data while escaping"),
('{{% start a"b" %}}', "^Unexpected quotation mark in unquoted string"),
(
'{{% start "a"b %}}',
"^Syntax error in shortcode 'start' at .*: expecting whitespace!",
),
("{{% %}}", "^Syntax error: '{{%' must be followed by shortcode name"),
("{{%", "^Syntax error: '{{%' must be followed by shortcode name"),
("{{% ", "^Syntax error: '{{%' must be followed by shortcode name"),
(
"{{% / %}}",
"^Found shortcode ending '{{% / %}}' which isn't closing a started shortcode",
),
("{{% / a %}}", "^Syntax error: '{{% /' must be followed by ' %}}'"),
(
"==> {{% <==",
"^Shortcode '<==' starting at .* is not terminated correctly with '%}}'!",
),
],
)
def test_errors(site, template, expected_error_pattern):
with pytest.raises(shortcodes.ParsingError, match=expected_error_pattern):
shortcodes.apply_shortcodes(
template, site.shortcode_registry, raise_exceptions=True
)
@pytest.mark.parametrize(
"input, expected",
[
("{{% foo %}}", (u"SC1", {u"SC1": u"{{% foo %}}"})),
(
"{{% foo %}} bar {{% /foo %}}",
(u"SC1", {u"SC1": u"{{% foo %}} bar {{% /foo %}}"}),
),
(
"AAA{{% foo %}} bar {{% /foo %}}BBB",
(u"AAASC1BBB", {u"SC1": u"{{% foo %}} bar {{% /foo %}}"}),
),
(
"AAA{{% foo %}} {{% bar %}} {{% /foo %}}BBB",
(u"AAASC1BBB", {u"SC1": u"{{% foo %}} {{% bar %}} {{% /foo %}}"}),
),
(
"AAA{{% foo %}} {{% /bar %}} {{% /foo %}}BBB",
(u"AAASC1BBB", {u"SC1": u"{{% foo %}} {{% /bar %}} {{% /foo %}}"}),
),
(
"AAA{{% foo %}} {{% bar %}} quux {{% /bar %}} {{% /foo %}}BBB",
(
u"AAASC1BBB",
{u"SC1": u"{{% foo %}} {{% bar %}} quux {{% /bar %}} {{% /foo %}}"},
),
),
(
"AAA{{% foo %}} BBB {{% bar %}} quux {{% /bar %}} CCC",
(
u"AAASC1 BBB SC2 CCC",
{u"SC1": u"{{% foo %}}", u"SC2": u"{{% bar %}} quux {{% /bar %}}"},
),
),
],
)
def test_extract_shortcodes(input, expected, monkeypatch):
i = iter("SC%d" % i for i in range(1, 100))
monkeypatch.setattr(shortcodes, "_new_sc_id", i.__next__)
extracted = shortcodes.extract_shortcodes(input)
assert extracted == expected
@pytest.fixture(scope="module")
def site():
s = FakeSiteWithShortcodeRegistry()
s.register_shortcode("noargs", noargs)
s.register_shortcode("arg", arg)
return s
class FakeSiteWithShortcodeRegistry:
def __init__(self):
self.shortcode_registry = {}
self.debug = True
# this code duplicated in nikola/nikola.py
def register_shortcode(self, name, f):
"""Register function f to handle shortcode "name"."""
if name in self.shortcode_registry:
nikola.utils.LOGGER.warn("Shortcode name conflict: %s", name)
return
self.shortcode_registry[name] = f
def noargs(site, data="", lang=""):
return "noargs {0} success!".format(data)
def arg(*args, **kwargs):
# don’t clutter the kwargs dict
kwargs.pop("site")
data = kwargs.pop("data")
kwargs.pop("lang")
return "arg {0}/{1}/{2}".format(args, sorted(kwargs.items()), data)
|
import asyncio
from datetime import timedelta
import logging
from typing import Dict, List
import aiohttp
import async_timeout
from smhi import Smhi
from smhi.smhi_lib import SmhiForecastException
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.util import Throttle, slugify
from .const import ATTR_SMHI_CLOUDINESS, ENTITY_ID_SENSOR_FORMAT
_LOGGER = logging.getLogger(__name__)
# Used to map condition from API results
CONDITION_CLASSES = {
"cloudy": [5, 6],
"fog": [7],
"hail": [],
"lightning": [21],
"lightning-rainy": [11],
"partlycloudy": [3, 4],
"pouring": [10, 20],
"rainy": [8, 9, 18, 19],
"snowy": [15, 16, 17, 25, 26, 27],
"snowy-rainy": [12, 13, 14, 22, 23, 24],
"sunny": [1, 2],
"windy": [],
"windy-variant": [],
"exceptional": [],
}
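# Illustrative note, not part of the original module: the condition property below looks
# up the first forecast's symbol code in these lists, e.g. symbol 1 or 2 maps to "sunny"
# and symbol 8 to "rainy"; a symbol that appears in no list yields a condition of None.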
# 5 minutes between retrying connect to API again
RETRY_TIMEOUT = 5 * 60
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=31)
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> bool:
"""Add a weather entity from map location."""
location = config_entry.data
name = slugify(location[CONF_NAME])
session = aiohttp_client.async_get_clientsession(hass)
entity = SmhiWeather(
location[CONF_NAME],
location[CONF_LATITUDE],
location[CONF_LONGITUDE],
session=session,
)
entity.entity_id = ENTITY_ID_SENSOR_FORMAT.format(name)
    async_add_entities([entity], True)
return True
class SmhiWeather(WeatherEntity):
"""Representation of a weather entity."""
def __init__(
self,
name: str,
latitude: str,
longitude: str,
session: aiohttp.ClientSession = None,
) -> None:
"""Initialize the SMHI weather entity."""
self._name = name
self._latitude = latitude
self._longitude = longitude
self._forecasts = None
self._fail_count = 0
self._smhi_api = Smhi(self._longitude, self._latitude, session=session)
@property
def unique_id(self) -> str:
"""Return a unique id."""
return f"{self._latitude}, {self._longitude}"
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self) -> None:
"""Refresh the forecast data from SMHI weather API."""
try:
with async_timeout.timeout(10):
self._forecasts = await self.get_weather_forecast()
self._fail_count = 0
except (asyncio.TimeoutError, SmhiForecastException):
_LOGGER.error("Failed to connect to SMHI API, retry in 5 minutes")
self._fail_count += 1
if self._fail_count < 3:
self.hass.helpers.event.async_call_later(
RETRY_TIMEOUT, self.retry_update
)
async def retry_update(self, _):
"""Retry refresh weather forecast."""
await self.async_update()
    async def get_weather_forecast(self) -> List:
"""Return the current forecasts from SMHI API."""
return await self._smhi_api.async_get_forecast()
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def temperature(self) -> int:
"""Return the temperature."""
if self._forecasts is not None:
return self._forecasts[0].temperature
return None
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self) -> int:
"""Return the humidity."""
if self._forecasts is not None:
return self._forecasts[0].humidity
return None
@property
def wind_speed(self) -> float:
"""Return the wind speed."""
if self._forecasts is not None:
# Convert from m/s to km/h
return round(self._forecasts[0].wind_speed * 18 / 5)
return None
@property
def wind_bearing(self) -> int:
"""Return the wind bearing."""
if self._forecasts is not None:
return self._forecasts[0].wind_direction
return None
@property
def visibility(self) -> float:
"""Return the visibility."""
if self._forecasts is not None:
return self._forecasts[0].horizontal_visibility
return None
@property
def pressure(self) -> int:
"""Return the pressure."""
if self._forecasts is not None:
return self._forecasts[0].pressure
return None
@property
def cloudiness(self) -> int:
"""Return the cloudiness."""
if self._forecasts is not None:
return self._forecasts[0].cloudiness
return None
@property
def condition(self) -> str:
"""Return the weather condition."""
if self._forecasts is None:
return None
return next(
(k for k, v in CONDITION_CLASSES.items() if self._forecasts[0].symbol in v),
None,
)
@property
def attribution(self) -> str:
"""Return the attribution."""
return "Swedish weather institute (SMHI)"
@property
def forecast(self) -> List:
"""Return the forecast."""
if self._forecasts is None or len(self._forecasts) < 2:
return None
data = []
for forecast in self._forecasts[1:]:
condition = next(
(k for k, v in CONDITION_CLASSES.items() if forecast.symbol in v), None
)
data.append(
{
ATTR_FORECAST_TIME: forecast.valid_time.isoformat(),
ATTR_FORECAST_TEMP: forecast.temperature_max,
ATTR_FORECAST_TEMP_LOW: forecast.temperature_min,
ATTR_FORECAST_PRECIPITATION: round(forecast.total_precipitation, 1),
ATTR_FORECAST_CONDITION: condition,
}
)
return data
@property
def device_state_attributes(self) -> Dict:
"""Return SMHI specific attributes."""
        if self.cloudiness is not None:
return {ATTR_SMHI_CLOUDINESS: self.cloudiness}
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_ON, STATE_OFF}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Alert states."""
# Reproduce states in parallel.
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
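# Illustrative note, not part of the original module: scene and script state restoration
# would call, for example, async_reproduce_states(hass, [State("switch.example", STATE_ON)])
# (hypothetical entity id), which results in a switch.turn_on service call only when the
# entity exists and its current state differs from the requested one.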
|
import copy
import errno
import json
DEFAULTS = {
"debug": "false",
"log_file": None,
"log_level": "warning",
"master": "localhost:5050",
"max_workers": 5,
"scheme": "http",
"response_timeout": 5,
}
def load_mesos_config(config_path, profile="default"):
on_disk = {}
try:
with open(config_path, "rt") as f:
on_disk = json.load(f)[profile]
except ValueError as e:
raise ValueError("Invalid JSON: {} in {}".format(str(e), config_path))
except IOError as e:
if e.errno != errno.ENOENT:
raise
config = copy.deepcopy(DEFAULTS)
config.update(on_disk)
return config
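# Illustrative sketch, not part of the original module: given a hypothetical file at
# /etc/mesos-cli.json containing {"default": {"master": "mesos.example.com:5050"}},
# load_mesos_config("/etc/mesos-cli.json") would return a copy of DEFAULTS with "master"
# overridden and every other key unchanged; a missing file silently falls back to
# DEFAULTS, while malformed JSON raises ValueError.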
|
try:
import _winreg as winreg
except ImportError:
import winreg
from .windows_tz import win_tz
_cache_tz = None
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    size = winreg.QueryInfoKey(key)[1]
    for i in range(size):
        data = winreg.EnumValue(key, i)
        result[data[0]] = data[1]
    return result
def get_localzone_name():
"""Get local zone name."""
# Windows is special. It has unique time zone names (in several
# meanings of the word) available, but unfortunately, they can be
# translated to the language of the operating system, so we need to
# do a backwards lookup, by going through all time zones and see which
# one matches.
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)
keyvalues = valuestodict(localtz)
localtz.Close()
if "TimeZoneKeyName" in keyvalues:
# Windows 7 (and Vista?)
# For some reason this returns a string with loads of NUL bytes at
# least on some systems. I don't know if this is a bug somewhere, I
# just work around it.
tzkeyname = keyvalues["TimeZoneKeyName"].split("\x00", 1)[0]
else:
# Windows 2000 or XP
# This is the localized name:
tzwin = keyvalues["StandardName"]
# Open the list of timezones to look up the real name:
TZKEYNAME = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
tzkey = winreg.OpenKey(handle, TZKEYNAME)
# Now, match this value to Time Zone information
tzkeyname = None
for i in range(winreg.QueryInfoKey(tzkey)[0]):
subkey = winreg.EnumKey(tzkey, i)
sub = winreg.OpenKey(tzkey, subkey)
data = valuestodict(sub)
sub.Close()
try:
if data["Std"] == tzwin:
tzkeyname = subkey
break
except KeyError:
# This timezone didn't have proper configuration.
# Ignore it.
pass
tzkey.Close()
handle.Close()
if tzkeyname is None:
raise LookupError("Can not find Windows timezone configuration")
timezone = win_tz.get(tzkeyname)
if timezone is None:
# Nope, that didn't work. Try adding "Standard Time",
# it seems to work a lot of times:
timezone = win_tz.get(tzkeyname + " Standard Time")
# Return what we have.
return timezone
def get_localzone():
"""Return the zoneinfo-based tzinfo object that matches the Windows-configured timezone."""
global _cache_tz
if _cache_tz is None:
_cache_tz = get_localzone_name()
return _cache_tz
def reload_localzone():
"""Reload the cached localzone. You need to call this if the timezone has changed."""
global _cache_tz
_cache_tz = get_localzone_name()
return _cache_tz
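# Usage sketch (illustrative): both helpers return an IANA zone *name* string
# such as "Europe/Stockholm", not a tzinfo object.
if __name__ == "__main__":
    print(get_localzone())      # cached lookup
    print(reload_localzone())   # re-read the registry, e.g. after a TZ change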
|
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.humidifier import DOMAIN, const, device_action
from homeassistant.const import STATE_ON
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a humidifier."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set("humidifier.test_5678", STATE_ON, {})
hass.states.async_set(
"humidifier.test_5678", "attributes", {"supported_features": 1}
)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "toggle",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "set_humidity",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "set_mode",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_action_no_modes(hass, device_reg, entity_reg):
"""Test we get the expected actions from a humidifier."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set("humidifier.test_5678", STATE_ON, {})
hass.states.async_set(
"humidifier.test_5678", "attributes", {"supported_features": 0}
)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "toggle",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "set_humidity",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_action_no_state(hass, device_reg, entity_reg):
"""Test we get the expected actions from a humidifier."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "toggle",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
{
"domain": DOMAIN,
"type": "set_humidity",
"device_id": device_entry.id,
"entity_id": "humidifier.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_action(hass):
"""Test for actions."""
hass.states.async_set(
"humidifier.entity",
STATE_ON,
{const.ATTR_AVAILABLE_MODES: [const.MODE_HOME, const.MODE_AWAY]},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_off",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "turn_off",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_on",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "turn_on",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_toggle"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "toggle",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_set_humidity",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "set_humidity",
"humidity": 35,
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_set_mode",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "set_mode",
"mode": const.MODE_AWAY,
},
},
]
},
)
set_humidity_calls = async_mock_service(hass, "humidifier", "set_humidity")
set_mode_calls = async_mock_service(hass, "humidifier", "set_mode")
turn_on_calls = async_mock_service(hass, "humidifier", "turn_on")
turn_off_calls = async_mock_service(hass, "humidifier", "turn_off")
toggle_calls = async_mock_service(hass, "humidifier", "toggle")
assert len(set_humidity_calls) == 0
assert len(set_mode_calls) == 0
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(toggle_calls) == 0
hass.bus.async_fire("test_event_set_humidity")
await hass.async_block_till_done()
assert len(set_humidity_calls) == 1
assert len(set_mode_calls) == 0
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(toggle_calls) == 0
hass.bus.async_fire("test_event_set_mode")
await hass.async_block_till_done()
assert len(set_humidity_calls) == 1
assert len(set_mode_calls) == 1
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(toggle_calls) == 0
hass.bus.async_fire("test_event_turn_off")
await hass.async_block_till_done()
assert len(set_humidity_calls) == 1
assert len(set_mode_calls) == 1
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 1
assert len(toggle_calls) == 0
hass.bus.async_fire("test_event_turn_on")
await hass.async_block_till_done()
assert len(set_humidity_calls) == 1
assert len(set_mode_calls) == 1
assert len(turn_on_calls) == 1
assert len(turn_off_calls) == 1
assert len(toggle_calls) == 0
hass.bus.async_fire("test_event_toggle")
await hass.async_block_till_done()
assert len(set_humidity_calls) == 1
assert len(set_mode_calls) == 1
assert len(turn_on_calls) == 1
assert len(turn_off_calls) == 1
assert len(toggle_calls) == 1
async def test_capabilities(hass):
"""Test getting capabilities."""
    # Test capabilities without state
capabilities = await device_action.async_get_action_capabilities(
hass,
{
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "set_mode",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "mode", "options": [], "required": True, "type": "select"}]
# Set state
hass.states.async_set(
"humidifier.entity",
STATE_ON,
{const.ATTR_AVAILABLE_MODES: [const.MODE_HOME, const.MODE_AWAY]},
)
# Set humidity
capabilities = await device_action.async_get_action_capabilities(
hass,
{
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "set_humidity",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "humidity", "required": True, "type": "integer"}]
# Set mode
capabilities = await device_action.async_get_action_capabilities(
hass,
{
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "humidifier.entity",
"type": "set_mode",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "mode",
"options": [("home", "home"), ("away", "away")],
"required": True,
"type": "select",
}
]
|
from unittest.mock import call, patch
import pytest
import zigpy.profiles.zha as zha
import zigpy.zcl.clusters.general as general
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.switch import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from .common import (
async_enable_traffic,
async_find_group_entity_id,
async_test_rejoin,
find_entity_id,
get_zha_gateway,
send_attributes_report,
)
from tests.common import mock_coro
ON = 1
OFF = 0
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e8"
@pytest.fixture
def zigpy_device(zigpy_device_mock):
"""Device tracker zigpy device."""
endpoints = {
1: {
"in_clusters": [general.Basic.cluster_id, general.OnOff.cluster_id],
"out_clusters": [],
"device_type": zha.DeviceType.ON_OFF_SWITCH,
}
}
return zigpy_device_mock(endpoints)
@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee="00:15:8d:00:02:32:4f:32",
nwk=0x0000,
node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_switch_1(hass, zigpy_device_mock, zha_device_joined):
"""Test zha switch platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.OnOff.cluster_id],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_switch_2(hass, zigpy_device_mock, zha_device_joined):
"""Test zha switch platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.OnOff.cluster_id],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE2,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
async def test_switch(hass, zha_device_joined_restored, zigpy_device):
"""Test zha switch platform."""
zha_device = await zha_device_joined_restored(zigpy_device)
cluster = zigpy_device.endpoints.get(1).on_off
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
assert hass.states.get(entity_id).state == STATE_OFF
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the switch was created and that its state is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
# test that the state has changed from unavailable to off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on at switch
await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 2})
assert hass.states.get(entity_id).state == STATE_ON
# turn off at switch
await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 2})
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
):
# turn on via UI
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
# turn off from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
):
# turn off via UI
await hass.services.async_call(
DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None, tsn=None
)
# test joining a new switch to the network and HA
await async_test_rejoin(hass, zigpy_device, [cluster], (1,))
async def async_test_zha_group_switch_entity(
hass, device_switch_1, device_switch_2, coordinator
):
"""Test the switch entity for a ZHA group."""
zha_gateway = get_zha_gateway(hass)
assert zha_gateway is not None
zha_gateway.coordinator_zha_device = coordinator
coordinator._zha_gateway = zha_gateway
device_switch_1._zha_gateway = zha_gateway
device_switch_2._zha_gateway = zha_gateway
member_ieee_addresses = [device_switch_1.ieee, device_switch_2.ieee]
# test creating a group with 2 members
zha_group = await zha_gateway.async_create_zigpy_group(
"Test Group", member_ieee_addresses
)
await hass.async_block_till_done()
assert zha_group is not None
assert len(zha_group.members) == 2
for member in zha_group.members:
assert member.ieee in member_ieee_addresses
entity_id = async_find_group_entity_id(hass, DOMAIN, zha_group)
assert hass.states.get(entity_id) is not None
group_cluster_on_off = zha_group.endpoint[general.OnOff.cluster_id]
dev1_cluster_on_off = device_switch_1.endpoints[1].on_off
dev2_cluster_on_off = device_switch_2.endpoints[1].on_off
    # test that the switch group entity was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_group.members)
    # test that the switch group entity is now off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
):
# turn on via UI
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(group_cluster_on_off.request.mock_calls) == 1
assert group_cluster_on_off.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
assert hass.states.get(entity_id).state == STATE_ON
# turn off from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
):
# turn off via UI
await hass.services.async_call(
DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(group_cluster_on_off.request.mock_calls) == 1
assert group_cluster_on_off.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None, tsn=None
)
assert hass.states.get(entity_id).state == STATE_OFF
# test some of the group logic to make sure we key off states correctly
await dev1_cluster_on_off.on()
await dev2_cluster_on_off.on()
    # test that the group switch is on
assert hass.states.get(entity_id).state == STATE_ON
await dev1_cluster_on_off.off()
    # test that the group switch is still on
assert hass.states.get(entity_id).state == STATE_ON
await dev2_cluster_on_off.off()
    # test that the group switch is now off
assert hass.states.get(entity_id).state == STATE_OFF
await dev1_cluster_on_off.on()
    # test that the group switch is now back on
assert hass.states.get(entity_id).state == STATE_ON
|
from typing import Optional
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.core import callback
from .base import ONVIFBaseEntity
from .const import DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a ONVIF binary sensor."""
device = hass.data[DOMAIN][config_entry.unique_id]
entities = {
event.uid: ONVIFBinarySensor(event.uid, device)
for event in device.events.get_platform("binary_sensor")
}
async_add_entities(entities.values())
@callback
def async_check_entities():
"""Check if we have added an entity for the event."""
new_entities = []
for event in device.events.get_platform("binary_sensor"):
if event.uid not in entities:
entities[event.uid] = ONVIFBinarySensor(event.uid, device)
new_entities.append(entities[event.uid])
async_add_entities(new_entities)
device.events.async_add_listener(async_check_entities)
return True
class ONVIFBinarySensor(ONVIFBaseEntity, BinarySensorEntity):
"""Representation of a binary ONVIF event."""
def __init__(self, uid, device):
"""Initialize the ONVIF binary sensor."""
ONVIFBaseEntity.__init__(self, device)
BinarySensorEntity.__init__(self)
self.uid = uid
@property
def is_on(self) -> bool:
"""Return true if event is active."""
return self.device.events.get_uid(self.uid).value
@property
def name(self) -> str:
"""Return the name of the event."""
return self.device.events.get_uid(self.uid).name
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return self.device.events.get_uid(self.uid).device_class
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self.uid
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self.device.events.get_uid(self.uid).entity_enabled
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.device.events.async_add_listener(self.async_write_ha_state)
)
|
import argparse
import glob
import os
import struct
import sys
def clamp_to_min_max(value, min, max):
if value > max:
value = max
elif value < min:
value = min
return value
def clamp_to_u8(value):
return clamp_to_min_max(value, 0, 255)
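# Quick check of the helpers above (illustrative values):
#   clamp_to_min_max(300, 0, 255) == 255
#   clamp_to_min_max(-4, 0, 255) == 0
#   clamp_to_u8(128) == 128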
def parse_args():
parser = argparse.ArgumentParser(description="Set the spectrum effect")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
args = parser.parse_args()
return args
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
byte_string = struct.pack(">B", 0x01)
spectrum_mode_filepath = os.path.join(mouse_dir, "mode_spectrum")
with open(spectrum_mode_filepath, 'wb') as spectrum_mode_file:
spectrum_mode_file.write(byte_string)
print("Done")
if __name__ == '__main__':
run()
|
from datetime import timedelta
import json
from airly.exceptions import AirlyError
from homeassistant.components.air_quality import ATTR_AQI, ATTR_PM_2_5, ATTR_PM_10
from homeassistant.components.airly.air_quality import (
ATTRIBUTION,
LABEL_ADVICE,
LABEL_AQI_DESCRIPTION,
LABEL_AQI_LEVEL,
LABEL_PM_2_5_LIMIT,
LABEL_PM_2_5_PERCENT,
LABEL_PM_10_LIMIT,
LABEL_PM_10_PERCENT,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.airly import init_integration
async def test_air_quality(hass):
"""Test states of the air_quality."""
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("air_quality.home")
assert state
assert state.state == "14"
assert state.attributes.get(ATTR_AQI) == 23
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(LABEL_ADVICE) == "Great air!"
assert state.attributes.get(ATTR_PM_10) == 19
assert state.attributes.get(ATTR_PM_2_5) == 14
assert state.attributes.get(LABEL_AQI_DESCRIPTION) == "Great air here today!"
assert state.attributes.get(LABEL_AQI_LEVEL) == "very low"
assert state.attributes.get(LABEL_PM_2_5_LIMIT) == 25.0
assert state.attributes.get(LABEL_PM_2_5_PERCENT) == 55
assert state.attributes.get(LABEL_PM_10_LIMIT) == 50.0
assert state.attributes.get(LABEL_PM_10_PERCENT) == 37
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
assert state.attributes.get(ATTR_ICON) == "mdi:blur"
entry = registry.async_get("air_quality.home")
assert entry
assert entry.unique_id == "55.55-122.12"
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when service causes an error."""
await init_integration(hass)
state = hass.states.get("air_quality.home")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "14"
future = utcnow() + timedelta(minutes=60)
with patch(
"airly._private._RequestsHandler.get",
side_effect=AirlyError(500, "Unexpected error"),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("air_quality.home")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=120)
with patch(
"airly._private._RequestsHandler.get",
return_value=json.loads(load_fixture("airly_valid_station.json")),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("air_quality.home")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "14"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeasasistant/update_entity."""
await init_integration(hass)
await async_setup_component(hass, "homeassistant", {})
with patch(
"homeassistant.components.airly.AirlyDataUpdateCoordinator._async_update_data"
) as mock_update:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["air_quality.home"]},
blocking=True,
)
assert mock_update.call_count == 1
|
import rumps
import time
def timez():
return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime())
@rumps.timer(1)
def a(sender):
print('%r %r' % (sender, timez()))
@rumps.clicked('Change timer')
def changeit(_):
response = rumps.Window('Enter new interval').run()
if response.clicked:
global_namespace_timer.interval = int(response.text)
@rumps.clicked('All timers')
def activetimers(_):
print(rumps.timers())
@rumps.clicked('Start timer')
def start_timer(_):
global_namespace_timer.start()
@rumps.clicked('Stop timer')
def stop_timer(_):
global_namespace_timer.stop()
if __name__ == "__main__":
global_namespace_timer = rumps.Timer(a, 4)
rumps.App('fuuu', menu=('Change timer', 'All timers', 'Start timer', 'Stop timer')).run()
|
import os, sys
import os.path as osp
import inspect
from logilab.common import STD_BLACKLIST
from logilab.common.shellutils import globfind
from logilab.common.modutils import load_module_from_file, modpath_from_file
def module_members(module):
members = []
for name, value in inspect.getmembers(module):
if getattr(value, '__module__', None) == module.__name__:
members.append( (name, value) )
return sorted(members)
def class_members(klass):
return sorted([name for name in vars(klass)
if name not in ('__doc__', '__module__',
'__dict__', '__weakref__')])
class ModuleGenerator:
file_header = """.. -*- coding: utf-8 -*-\n\n%s\n"""
module_def = """
:mod:`%s`
=======%s
.. automodule:: %s
:members: %s
"""
class_def = """
.. autoclass:: %s
:members: %s
"""
def __init__(self, project_title, code_dir):
self.title = project_title
self.code_dir = osp.abspath(code_dir)
def generate(self, dest_file, exclude_dirs=STD_BLACKLIST):
"""make the module file"""
self.fn = open(dest_file, 'w')
num = len(self.title) + 6
title = "=" * num + "\n %s API\n" % self.title + "=" * num
self.fn.write(self.file_header % title)
self.gen_modules(exclude_dirs=exclude_dirs)
self.fn.close()
def gen_modules(self, exclude_dirs):
"""generate all modules"""
for module in self.find_modules(exclude_dirs):
modname = module.__name__
classes = []
modmembers = []
for objname, obj in module_members(module):
if inspect.isclass(obj):
classmembers = class_members(obj)
classes.append( (objname, classmembers) )
else:
modmembers.append(objname)
self.fn.write(self.module_def % (modname, '=' * len(modname),
modname,
', '.join(modmembers)))
for klass, members in classes:
self.fn.write(self.class_def % (klass, ', '.join(members)))
def find_modules(self, exclude_dirs):
basepath = osp.dirname(self.code_dir)
basedir = osp.basename(basepath) + osp.sep
if basedir not in sys.path:
sys.path.insert(1, basedir)
for filepath in globfind(self.code_dir, '*.py', exclude_dirs):
if osp.basename(filepath) in ('setup.py', '__pkginfo__.py'):
continue
try:
module = load_module_from_file(filepath)
            except Exception:  # module might be broken or magic
dotted_path = modpath_from_file(filepath)
module = type('.'.join(dotted_path), (), {}) # mock it
yield module
if __name__ == '__main__':
# example :
title, code_dir, outfile = sys.argv[1:]
generator = ModuleGenerator(title, code_dir)
# XXX modnames = ['logilab']
generator.generate(outfile, ('test', 'tests', 'examples',
'data', 'doc', '.hg', 'migration'))
|
from typing import Callable
from pyisy.constants import ISY_VALUE_UNKNOWN
from homeassistant.components.fan import (
DOMAIN as FAN,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import _LOGGER, DOMAIN as ISY994_DOMAIN, ISY994_NODES, ISY994_PROGRAMS
from .entity import ISYNodeEntity, ISYProgramEntity
from .helpers import migrate_old_unique_ids
VALUE_TO_STATE = {
0: SPEED_OFF,
63: SPEED_LOW,
64: SPEED_LOW,
190: SPEED_MEDIUM,
191: SPEED_MEDIUM,
255: SPEED_HIGH,
}
# Reverse mapping; where several raw values share a speed the last entry wins
# (SPEED_LOW -> 64, SPEED_MEDIUM -> 191).
STATE_TO_VALUE = {state: value for value, state in VALUE_TO_STATE.items()}
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 fan platform."""
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
devices = []
for node in hass_isy_data[ISY994_NODES][FAN]:
devices.append(ISYFanEntity(node))
for name, status, actions in hass_isy_data[ISY994_PROGRAMS][FAN]:
devices.append(ISYFanProgramEntity(name, status, actions))
await migrate_old_unique_ids(hass, FAN, devices)
async_add_entities(devices)
class ISYFanEntity(ISYNodeEntity, FanEntity):
"""Representation of an ISY994 fan device."""
@property
def speed(self) -> str:
"""Return the current speed."""
return VALUE_TO_STATE.get(self._node.status)
@property
def is_on(self) -> bool:
"""Get if the fan is on."""
if self._node.status == ISY_VALUE_UNKNOWN:
return None
return self._node.status != 0
def set_speed(self, speed: str) -> None:
"""Send the set speed command to the ISY994 fan device."""
self._node.turn_on(val=STATE_TO_VALUE.get(speed, 255))
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn on command to the ISY994 fan device."""
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 fan device."""
self._node.turn_off()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
class ISYFanProgramEntity(ISYProgramEntity, FanEntity):
"""Representation of an ISY994 fan program."""
@property
def speed(self) -> str:
"""Return the current speed."""
return VALUE_TO_STATE.get(self._node.status)
@property
def is_on(self) -> bool:
"""Get if the fan is on."""
return self._node.status != 0
def turn_off(self, **kwargs) -> None:
"""Send the turn on command to ISY994 fan program."""
if not self._actions.run_then():
_LOGGER.error("Unable to turn off the fan")
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn off command to ISY994 fan program."""
if not self._actions.run_else():
_LOGGER.error("Unable to turn on the fan")
|
from django.apps import apps
from django.conf import settings
from django.db import models
from django.urls import reverse
from zinnia.managers import EntryRelatedPublishedManager
from zinnia.managers import entries_published
def safe_get_user_model():
"""
Safe loading of the User model, customized or not.
"""
user_app, user_model = settings.AUTH_USER_MODEL.split('.')
return apps.get_registered_model(user_app, user_model)
class AuthorPublishedManager(models.Model):
"""
Proxy model manager to avoid overriding of
the default User's manager and issue #307.
"""
published = EntryRelatedPublishedManager()
class Meta:
abstract = True
class Author(safe_get_user_model(),
AuthorPublishedManager):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
try:
return super(Author, self).get_absolute_url()
except AttributeError:
return reverse('zinnia:author_detail', args=[self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return (self.get_short_name()
or self.get_full_name()
or self.get_username())
class Meta:
"""
        Author's meta information.
"""
proxy = True
|
import pytest
from queue import Empty
from unittest.mock import Mock
from kombu.transport.consul import Channel, Transport
pytest.importorskip('consul')
class test_Consul:
def setup(self):
self.connection = Mock()
self.connection.client.transport_options = {}
self.connection.client.port = 303
self.consul = self.patching('consul.Consul').return_value
self.channel = Channel(connection=self.connection)
def test_driver_version(self):
assert Transport(self.connection.client).driver_version()
def test_failed_get(self):
self.channel._acquire_lock = Mock(return_value=False)
self.channel.client.kv.get.return_value = (1, None)
with pytest.raises(Empty):
self.channel._get('empty')()
def test_test_purge(self):
self.channel._destroy_session = Mock(return_value=True)
self.consul.kv.delete = Mock(return_value=True)
assert self.channel._purge('foo')
def test_variables(self):
assert self.channel.session_ttl == 30
assert self.channel.timeout == '10s'
def test_lock_key(self):
key = self.channel._lock_key('myqueue')
assert key == 'kombu/myqueue.lock'
def test_key_prefix(self):
key = self.channel._key_prefix('myqueue')
assert key == 'kombu/myqueue'
def test_get_or_create_session(self):
queue = 'myqueue'
session_id = '123456'
self.consul.session.create.return_value = session_id
assert self.channel._get_or_create_session(queue) == session_id
def test_create_delete_queue(self):
queue = 'mynewqueue'
self.consul.kv.put.return_value = True
assert self.channel._new_queue(queue)
self.consul.kv.delete.return_value = True
self.channel._destroy_session = Mock()
self.channel._delete(queue)
def test_size(self):
self.consul.kv.get.return_value = [(1, {}), (2, {})]
assert self.channel._size('q') == 2
def test_get(self):
self.channel._obtain_lock = Mock(return_value=True)
self.channel._release_lock = Mock(return_value=True)
self.consul.kv.get.return_value = [1, [
{'Key': 'myqueue', 'ModifyIndex': 1, 'Value': '1'},
]]
self.consul.kv.delete.return_value = True
assert self.channel._get('myqueue') is not None
def test_put(self):
self.consul.kv.put.return_value = True
assert self.channel._put('myqueue', 'mydata') is None
|
import attr
import itertools
import numpy
import scipy.optimize
import sys
def f(*args, simplify=False):
    """Build every product term (tuple of symbol names) of length 0..len(args)."""
p = ((),)
for l in range(len(args)):
l += 1
p = itertools.chain(p, itertools.product(*(args,), repeat=l))
if simplify:
p = {tuple(sorted(set(x))) for x in p}
p = sorted(p, key=lambda x: (len(x), x))
return p
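# Worked example (illustrative): f("a", "b", simplify=True) keeps one sorted
# copy of each distinct term, ordered by length then lexicographically:
#   [(), ("a",), ("b",), ("a", "b")]
# Without simplify the result is a lazy chain that also contains order/repeat
# variants such as ("a", "a") and ("b", "a").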
def m(*args):
    """Multiply the arguments together; an empty argument list yields 0 (not 1)."""
if len(args) == 0:
return 0
r = 1
for arg in args:
r *= arg
return r
class Poly:
def __init__(self, *names):
self.names = names
self.terms = f(*self.names, simplify=True)
def calculate(self, coefficients, **name_values):
for name in name_values:
if name not in self.names:
raise Exception('bad parameter')
substituted_terms = []
for term in self.terms:
substituted_terms.append(tuple(name_values[name] for name in term))
c_tuples = ((c,) for c in coefficients)
terms = tuple(a + b for a, b in zip(c_tuples, substituted_terms))
multiplied = tuple(m(*t) for t in terms)
total = sum(multiplied)
return total
poly = Poly('f', 'c', 'l')
#print('\n'.join(str(t) for t in poly.terms))
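# Illustrative sketch of what the Poly above produces (values are made up):
# poly.terms has 8 entries, [(), ('c',), ('f',), ('l',), ('c', 'f'), ('c', 'l'),
# ('f', 'l'), ('c', 'f', 'l')], so calculate() pairs 8 coefficients with them:
#   poly.calculate([5, 1, 0, 0, 0, 0, 0, 0], f=3, c=2, l=1) == 5 + 1 * 2 == 7
# (the constant term contributes 5 and the ('c',) term contributes 1 * c).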
@attr.s
class FCL:
f = attr.ib()
c = attr.ib()
l = attr.ib()
INPUT = """\
1,1,1,18,242,1119
1,1,2,18,242,1121
1,1,3,18,242,1123
1,1,4,18,242,1125
1,1,5,18,242,1127
1,2,1,18,243,1124
1,2,2,18,243,1128
1,2,3,18,243,1132
1,2,4,18,243,1136
1,2,5,18,243,1140
1,3,1,18,244,1129
1,3,2,18,244,1135
1,3,3,18,244,1141
1,3,4,18,244,1147
1,3,5,18,244,1153
1,4,1,18,245,1134
1,4,2,18,245,1142
1,4,3,18,245,1150
1,4,4,18,245,1158
1,4,5,18,245,1166
1,5,1,18,246,1139
1,5,2,18,246,1149
1,5,3,18,246,1159
1,5,4,18,246,1169
1,5,5,18,246,1179
2,1,1,19,399,1893
2,1,2,19,399,1897
2,1,3,19,399,1901
2,1,4,19,399,1905
2,1,5,19,399,1909
2,2,1,19,401,1903
2,2,2,19,401,1911
2,2,3,19,401,1919
2,2,4,19,401,1927
2,2,5,19,401,1935
2,3,1,19,403,1913
2,3,2,19,403,1925
2,3,3,19,403,1937
2,3,4,19,403,1949
2,3,5,19,403,1961
2,4,1,19,405,1923
2,4,2,19,405,1939
2,4,3,19,405,1955
2,4,4,19,405,1971
2,4,5,19,405,1987
2,5,1,19,407,1933
2,5,2,19,407,1953
2,5,3,19,407,1973
2,5,4,19,407,1993
2,5,5,19,407,2013
3,1,1,20,556,2667
3,1,2,20,556,2673
3,1,3,20,556,2679
3,1,4,20,556,2685
3,1,5,20,556,2691
3,2,1,20,559,2682
3,2,2,20,559,2694
3,2,3,20,559,2706
3,2,4,20,559,2718
3,2,5,20,559,2730
3,3,1,20,562,2697
3,3,2,20,562,2715
3,3,3,20,562,2733
3,3,4,20,562,2751
3,3,5,20,562,2769
3,4,1,20,565,2712
3,4,2,20,565,2736
3,4,3,20,565,2760
3,4,4,20,565,2784
3,4,5,20,565,2808
3,5,1,20,568,2727
3,5,2,20,568,2757
3,5,3,20,568,2787
3,5,4,20,568,2817
3,5,5,20,568,2847
4,1,1,21,713,3441
4,1,2,21,713,3449
4,1,3,21,713,3457
4,1,4,21,713,3465
4,1,5,21,713,3473
4,2,1,21,717,3461
4,2,2,21,717,3477
4,2,3,21,717,3493
4,2,4,21,717,3509
4,2,5,21,717,3525
4,3,1,21,721,3481
4,3,2,21,721,3505
4,3,3,21,721,3529
4,3,4,21,721,3553
4,3,5,21,721,3577
4,4,1,21,725,3501
4,4,2,21,725,3533
4,4,3,21,725,3565
4,4,4,21,725,3597
4,4,5,21,725,3629
4,5,1,21,729,3521
4,5,2,21,729,3561
4,5,3,21,729,3601
4,5,4,21,729,3641
4,5,5,21,729,3681
5,1,1,22,870,4215
5,1,2,22,870,4225
5,1,3,22,870,4235
5,1,4,22,870,4245
5,1,5,22,870,4255
5,2,1,22,875,4240
5,2,2,22,875,4260
5,2,3,22,875,4280
5,2,4,22,875,4300
5,2,5,22,875,4320
5,3,1,22,880,4265
5,3,2,22,880,4295
5,3,3,22,880,4325
5,3,4,22,880,4355
5,3,5,22,880,4385
5,4,1,22,885,4290
5,4,2,22,885,4330
5,4,3,22,885,4370
5,4,4,22,885,4410
5,4,5,22,885,4450
5,5,1,22,890,4315
5,5,2,22,890,4365
5,5,3,22,890,4415
5,5,4,22,890,4465
5,5,5,22,890,4515
"""
inputs_outputs = {}
for row in INPUT.splitlines():
row = [int(v) for v in row.split(",")]
inputs_outputs[FCL(*row[:3])] = FCL(*row[3:])
#print('\n'.join(str(t) for t in inputs_outputs.items()))
def calc_poly_coeff(poly, coefficients):
c_tuples = list(((c,) for c in coefficients))
poly = list(f(*poly))
poly = list(a + b for a, b in zip(c_tuples, poly))
multiplied = list(m(*t) for t in poly)
total = sum(multiplied)
return total
def calc_error(inputs, output, coefficients):
result = poly.calculate(coefficients, **inputs)
return result - output
def calc_total_error(inputs_outputs, coefficients, name):
total_error = 0
for inputs, outputs in inputs_outputs.items():
total_error += abs(calc_error(attr.asdict(inputs), attr.asdict(outputs)[name], coefficients))
return total_error
coefficient_count = len(poly.terms)
#print('count: {}'.format(coefficient_count))
x0 = numpy.array((0,) * coefficient_count)
#print(x0)
with open('results', 'w') as results_file:
for name in sorted(attr.asdict(FCL(0,0,0))):
c = scipy.optimize.minimize(
fun=lambda c: calc_total_error(inputs_outputs, c, name),
x0=x0
)
coefficients = [int(round(x)) for x in c.x]
terms = [''.join(t) for t in poly.terms]
message = "{}' = ".format(name)
message += ' + '.join("{}{}".format(coeff if coeff != 1 else '', term) for coeff, term in reversed(list(zip(coefficients, terms))) if coeff != 0)
print(message)
        results_file.write(message)
|
import os
import re
import subprocess
import diamond.collector
from diamond.collector import str_to_bool
class MountStatsCollector(diamond.collector.Collector):
"""Diamond collector for statistics from /proc/self/mountstats
"""
BYTES_MAP = ['normalreadbytes', 'normalwritebytes', 'directreadbytes',
'directwritebytes', 'serverreadbytes', 'serverwritebytes']
EVENTS_MAP = ['inoderevalidates', 'dentryrevalidates',
'datainvalidates', 'attrinvalidates', 'syncinodes',
'vfsopen', 'vfslookup', 'vfspermission', 'vfsreadpage',
'vfsreadpages', 'vfswritepage', 'vfswritepages',
'vfsreaddir', 'vfsflush', 'vfsfsync', 'vfsflock',
'vfsrelease', 'setattrtrunc', 'extendwrite',
'sillyrenames', 'shortreads', 'shortwrites', 'delay']
XPRT_MAP = {'rdma': ['port', 'bind_count', 'connect_count',
'connect_time', 'idle_time', 'rpcsends',
'rpcreceives', 'badxids', 'backlogutil',
'read_chunks', 'write_chunks', 'reply_chunks',
'total_rdma_req', 'total_dma_rep', 'pullup',
'fixup', 'hardway', 'failed_marshal', 'bad_reply'],
'tcp': ['port', 'bind_count', 'connect_count',
'connect_time', 'idle_time', 'rpcsends',
'rpcreceives', 'badxids', 'backlogutil'],
'udp': ['port', 'bind_count', 'rpcsends', 'rpcreceives',
'badxids', 'backlogutil']}
RPCS_MAP = ['ACCESS', 'CLOSE', 'COMMIT', 'CREATE', 'DELEGRETURN',
'FSINFO', 'FSSTAT', 'FS_LOCATIONS', 'GETACL', 'GETATTR',
'LINK', 'LOCK', 'LOCKT', 'LOCKU', 'LOOKUP', 'LOOKUP_ROOT',
'MKDIR', 'MKNOD', 'NULL', 'OPEN', 'OPEN_CONFIRM',
'OPEN_DOWNGRADE', 'OPEN_NOATTR', 'PATHCONF', 'READ',
'READDIR', 'READDIRPLUS', 'READLINK', 'REMOVE', 'RENAME',
'RENEW', 'RMDIR', 'SERVER_CAPS', 'SETACL', 'SETATTR',
'SETCLIENTID', 'SETCLIENTID_CONFIRM', 'STATFS', 'SYMLINK',
'WRITE']
MOUNTSTATS = '/proc/self/mountstats'
def process_config(self):
super(MountStatsCollector, self).process_config()
self.exclude_filters = self.config['exclude_filters']
if isinstance(self.exclude_filters, basestring):
self.exclude_filters = [self.exclude_filters]
if len(self.exclude_filters) > 0:
self.exclude_reg = re.compile('|'.join(self.exclude_filters))
else:
self.exclude_reg = None
self.include_filters = self.config['include_filters']
if isinstance(self.include_filters, basestring):
self.include_filters = [self.include_filters]
if len(self.include_filters) > 0:
self.include_reg = re.compile('|'.join(self.include_filters))
else:
self.include_reg = None
def get_default_config_help(self):
config_help = super(MountStatsCollector,
self).get_default_config_help()
config_help.update({
'exclude_filters':
"A list of regex patterns. Any filesystem" +
" matching any of these patterns will be excluded from" +
" mount stats metrics collection.",
'include_filters':
"A list of regex patterns. Any filesystem" +
" matching any of these patterns will be included from" +
" mount stats metrics collection.",
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
config = super(MountStatsCollector, self).get_default_config()
config.update({
'exclude_filters': [],
'include_filters': [],
'path': 'mountstats',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
"""Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats.
"""
if str_to_bool(self.config['use_sudo']):
if not os.access(self.config['sudo_cmd'], os.X_OK):
self.log.error("Cannot find or exec %s"
% self.config['sudo_cmd'])
return None
command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0][:-1]
lines = p.split("\n")
else:
if not os.access(self.MOUNTSTATS, os.R_OK):
self.log.error("Cannot read path %s" % self.MOUNTSTATS)
return None
f = open(self.MOUNTSTATS)
lines = f.readlines()
f.close()
path = None
for line in lines:
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'device':
path = tokens[4]
skip = False
if self.exclude_reg:
skip = self.exclude_reg.match(path)
if self.include_reg:
skip = not self.include_reg.match(path)
if skip:
self.log.debug("Ignoring %s", path)
else:
self.log.debug("Keeping %s", path)
path = path.replace('.', '_')
path = path.replace('/', '_')
elif skip:
# If we are in a skip state, don't pay any attention to
# anything that isn't the next device line
continue
elif tokens[0] == 'events:':
for i in range(0, len(self.EVENTS_MAP)):
metric_name = "%s.events.%s" % (path, self.EVENTS_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'bytes:':
for i in range(0, len(self.BYTES_MAP)):
metric_name = "%s.bytes.%s" % (path, self.BYTES_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'xprt:':
proto = tokens[1]
if not self.XPRT_MAP[proto]:
self.log.error("Unknown protocol %s", proto)
continue
for i in range(0, len(self.XPRT_MAP[proto])):
metric_name = "%s.xprt.%s.%s" % (path, proto,
self.XPRT_MAP[proto][i])
metric_value = long(tokens[i + 2])
self.publish_counter(metric_name, metric_value)
elif tokens[0][:-1] in self.RPCS_MAP:
rpc = tokens[0][:-1]
ops = long(tokens[1])
rtt = long(tokens[7])
exe = long(tokens[8])
metric_fmt = "%s.rpc.%s.%s"
ops_name = metric_fmt % (path, rpc.lower(), 'ops')
rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')
exe_name = metric_fmt % (path, rpc.lower(), 'exe')
self.publish_counter(ops_name, ops)
self.publish_counter(rtt_name, rtt)
self.publish_counter(exe_name, exe)
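# Illustrative mountstats fragment (made-up numbers) and the metrics it yields:
#   device fs01:/export mounted on /mnt/data with fstype nfs statvers=1.1
#           bytes:  1048576 0 0 0 1048576 0
# path becomes "_mnt_data" (dots and slashes replaced), so the first two
# fields publish the counters "_mnt_data.bytes.normalreadbytes" and
# "_mnt_data.bytes.normalwritebytes".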
|
import logging
from typing import Any, Dict, Optional, cast
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ICON,
CONF_ID,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
EVENT_CORE_CONFIG_UPDATE,
SERVICE_RELOAD,
STATE_UNAVAILABLE,
)
from homeassistant.core import Event, HomeAssistant, ServiceCall, State, callback
from homeassistant.helpers import (
collection,
config_validation as cv,
entity,
entity_component,
entity_registry,
service,
storage,
)
from homeassistant.loader import bind_hass
from homeassistant.util.location import distance
from .const import ATTR_PASSIVE, ATTR_RADIUS, CONF_PASSIVE, DOMAIN, HOME_ZONE
_LOGGER = logging.getLogger(__name__)
DEFAULT_PASSIVE = False
DEFAULT_RADIUS = 100
ENTITY_ID_FORMAT = "zone.{}"
ENTITY_ID_HOME = ENTITY_ID_FORMAT.format(HOME_ZONE)
ICON_HOME = "mdi:home"
ICON_IMPORT = "mdi:import"
CREATE_FIELDS = {
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_LATITUDE): cv.latitude,
vol.Required(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): vol.Coerce(float),
vol.Optional(CONF_PASSIVE, default=DEFAULT_PASSIVE): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS): vol.Coerce(float),
vol.Optional(CONF_PASSIVE): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}
def empty_value(value: Any) -> Any:
"""Test if the user has the default config value from adding "zone:"."""
if isinstance(value, dict) and len(value) == 0:
return []
raise vol.Invalid("Not a default value")
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default=[]): vol.Any(
vol.All(cv.ensure_list, [vol.Schema(CREATE_FIELDS)]),
empty_value,
)
},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
@bind_hass
def async_active_zone(
hass: HomeAssistant, latitude: float, longitude: float, radius: int = 0
) -> Optional[State]:
"""Find the active zone for given latitude, longitude.
This method must be run in the event loop.
"""
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (
cast(State, hass.states.get(entity_id))
for entity_id in sorted(hass.states.async_entity_ids(DOMAIN))
)
min_dist = None
closest = None
for zone in zones:
if zone.state == STATE_UNAVAILABLE or zone.attributes.get(ATTR_PASSIVE):
continue
zone_dist = distance(
latitude,
longitude,
zone.attributes[ATTR_LATITUDE],
zone.attributes[ATTR_LONGITUDE],
)
if zone_dist is None:
continue
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist # type: ignore
smaller_zone = (
zone_dist == min_dist
and zone.attributes[ATTR_RADIUS]
< cast(State, closest).attributes[ATTR_RADIUS]
)
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
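# Illustrative selection (made-up zones): if a point lies inside both a 500 m
# "neighbourhood" zone and a 100 m "office" zone, the zone whose centre is
# nearer wins; at equal distance the smaller ATTR_RADIUS breaks the tie, and
# passive or unavailable zones are never returned.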
def in_zone(zone: State, latitude: float, longitude: float, radius: float = 0) -> bool:
"""Test if given latitude, longitude is in given zone.
Async friendly.
"""
if zone.state == STATE_UNAVAILABLE:
return False
zone_dist = distance(
latitude,
longitude,
zone.attributes[ATTR_LATITUDE],
zone.attributes[ATTR_LONGITUDE],
)
if zone_dist is None or zone.attributes[ATTR_RADIUS] is None:
return False
return zone_dist - radius < cast(float, zone.attributes[ATTR_RADIUS])
class ZoneStorageCollection(collection.StorageCollection):
"""Zone collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: Dict) -> Dict:
"""Validate the config is valid."""
return cast(Dict, self.CREATE_SCHEMA(data))
@callback
def _get_suggested_id(self, info: Dict) -> str:
"""Suggest an ID based on the config."""
return cast(str, info[CONF_NAME])
async def _update_data(self, data: dict, update_data: Dict) -> Dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return {**data, **update_data}
async def async_setup(hass: HomeAssistant, config: Dict) -> bool:
"""Set up configured zones as well as Home Assistant zone if necessary."""
component = entity_component.EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.IDLessCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.attach_entity_component_collection(
component, yaml_collection, lambda conf: Zone(conf, False)
)
storage_collection = ZoneStorageCollection(
storage.Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.attach_entity_component_collection(
component, storage_collection, lambda conf: Zone(conf, True)
)
if config[DOMAIN]:
await yaml_collection.async_load(config[DOMAIN])
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
async def _collection_changed(change_type: str, item_id: str, config: Dict) -> None:
"""Handle a collection change: clean up entity registry on removals."""
if change_type != collection.CHANGE_REMOVED:
return
ent_reg = await entity_registry.async_get_registry(hass)
ent_reg.async_remove(
cast(str, ent_reg.async_get_entity_id(DOMAIN, DOMAIN, item_id))
)
storage_collection.async_add_listener(_collection_changed)
async def reload_service_handler(service_call: ServiceCall) -> None:
"""Remove all zones and load new ones from config."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
return
await yaml_collection.async_load(conf[DOMAIN])
service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
if component.get_entity("zone.home"):
return True
home_zone = Zone(
_home_conf(hass),
True,
)
home_zone.entity_id = ENTITY_ID_HOME
await component.async_add_entities([home_zone])
async def core_config_updated(_: Event) -> None:
"""Handle core config updated."""
await home_zone.async_update_config(_home_conf(hass))
hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, core_config_updated)
hass.data[DOMAIN] = storage_collection
return True
@callback
def _home_conf(hass: HomeAssistant) -> Dict:
"""Return the home zone config."""
return {
CONF_NAME: hass.config.location_name,
CONF_LATITUDE: hass.config.latitude,
CONF_LONGITUDE: hass.config.longitude,
CONF_RADIUS: DEFAULT_RADIUS,
CONF_ICON: ICON_HOME,
CONF_PASSIVE: False,
}
async def async_setup_entry(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
"""Set up zone as config entry."""
storage_collection = cast(ZoneStorageCollection, hass.data[DOMAIN])
data = dict(config_entry.data)
data.setdefault(CONF_PASSIVE, DEFAULT_PASSIVE)
data.setdefault(CONF_RADIUS, DEFAULT_RADIUS)
await storage_collection.async_create_item(data)
hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id))
return True
async def async_unload_entry(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
"""Will be called once we remove it."""
return True
class Zone(entity.Entity):
"""Representation of a Zone."""
def __init__(self, config: Dict, editable: bool):
"""Initialize the zone."""
self._config = config
self._editable = editable
self._attrs: Optional[Dict] = None
self._generate_attrs()
@property
def state(self) -> str:
"""Return the state property really does nothing for a zone."""
return "zoning"
@property
def name(self) -> str:
"""Return name."""
return cast(str, self._config[CONF_NAME])
@property
def unique_id(self) -> Optional[str]:
"""Return unique ID."""
return self._config.get(CONF_ID)
@property
def icon(self) -> Optional[str]:
"""Return the icon if any."""
return self._config.get(CONF_ICON)
@property
def state_attributes(self) -> Optional[Dict]:
"""Return the state attributes of the zone."""
return self._attrs
async def async_update_config(self, config: Dict) -> None:
"""Handle when the config is updated."""
if self._config == config:
return
self._config = config
self._generate_attrs()
self.async_write_ha_state()
@callback
def _generate_attrs(self) -> None:
"""Generate new attrs based on config."""
self._attrs = {
ATTR_LATITUDE: self._config[CONF_LATITUDE],
ATTR_LONGITUDE: self._config[CONF_LONGITUDE],
ATTR_RADIUS: self._config[CONF_RADIUS],
ATTR_PASSIVE: self._config[CONF_PASSIVE],
ATTR_EDITABLE: self._editable,
}
|
import asyncio
import logging
from homeassistant.components.vacuum import (
ATTR_STATUS,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STATUS,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.helpers.entity import Entity
from . import roomba_reported_state
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_CLEANED_AREA = "cleaned_area"
ATTR_ERROR = "error"
ATTR_ERROR_CODE = "error_code"
ATTR_POSITION = "position"
ATTR_SOFTWARE_VERSION = "software_version"
# Commonly supported features
SUPPORT_IROBOT = (
SUPPORT_BATTERY
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_SEND_COMMAND
| SUPPORT_START
| SUPPORT_STATE
| SUPPORT_STATUS
| SUPPORT_STOP
| SUPPORT_LOCATE
)
STATE_MAP = {
"": STATE_IDLE,
"charge": STATE_DOCKED,
"hmMidMsn": STATE_CLEANING, # Recharging at the middle of a cycle
"hmPostMsn": STATE_RETURNING, # Cycle finished
"hmUsrDock": STATE_RETURNING,
"pause": STATE_PAUSED,
"run": STATE_CLEANING,
"stop": STATE_IDLE,
"stuck": STATE_ERROR,
}
class IRobotEntity(Entity):
"""Base class for iRobot Entities."""
def __init__(self, roomba, blid):
"""Initialize the iRobot handler."""
self.vacuum = roomba
self._blid = blid
self.vacuum_state = roomba_reported_state(roomba)
self._name = self.vacuum_state.get("name")
self._version = self.vacuum_state.get("softwareVer")
self._sku = self.vacuum_state.get("sku")
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def robot_unique_id(self):
"""Return the uniqueid of the vacuum cleaner."""
return f"roomba_{self._blid}"
@property
def unique_id(self):
"""Return the uniqueid of the vacuum cleaner."""
return self.robot_unique_id
@property
def device_info(self):
"""Return the device info of the vacuum cleaner."""
return {
"identifiers": {(DOMAIN, self.robot_unique_id)},
"manufacturer": "iRobot",
"name": str(self._name),
"sw_version": self._version,
"model": self._sku,
}
@property
def _battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self.vacuum_state.get("batPct")
@property
def _robot_state(self):
"""Return the state of the vacuum cleaner."""
clean_mission_status = self.vacuum_state.get("cleanMissionStatus", {})
cycle = clean_mission_status.get("cycle")
phase = clean_mission_status.get("phase")
try:
state = STATE_MAP[phase]
except KeyError:
return STATE_ERROR
if cycle != "none" and state in (STATE_IDLE, STATE_DOCKED):
state = STATE_PAUSED
return state
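    # Example of the mapping above (illustrative): phase "stop" normally maps
    # to STATE_IDLE, but with an active cycle (cycle != "none") it is reported
    # as STATE_PAUSED; an unknown phase falls back to STATE_ERROR.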
async def async_added_to_hass(self):
"""Register callback function."""
self.vacuum.register_on_message_callback(self.on_message)
def new_state_filter(self, new_state): # pylint: disable=no-self-use
"""Filter out wifi state messages."""
return len(new_state) > 1 or "signal" not in new_state
def on_message(self, json_data):
"""Update state on message change."""
state = json_data.get("state", {}).get("reported", {})
if self.new_state_filter(state):
self.schedule_update_ha_state()
class IRobotVacuum(IRobotEntity, StateVacuumEntity):
"""Base class for iRobot robots."""
def __init__(self, roomba, blid):
"""Initialize the iRobot handler."""
super().__init__(roomba, blid)
self._cap_position = self.vacuum_state.get("cap", {}).get("pose") == 1
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_IROBOT
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self._battery_level
@property
def state(self):
"""Return the state of the vacuum cleaner."""
return self._robot_state
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True # Always available, otherwise setup will fail
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
state = self.vacuum_state
# Roomba software version
software_version = state.get("softwareVer")
# Set properties that are to appear in the GUI
state_attrs = {ATTR_SOFTWARE_VERSION: software_version}
        # Set legacy status to avoid breaking changes
state_attrs[ATTR_STATUS] = self.vacuum.current_state
# Only add cleaning time and cleaned area attrs when the vacuum is
# currently on
if self.state == STATE_CLEANING:
# Get clean mission status
mission_state = state.get("cleanMissionStatus", {})
cleaning_time = mission_state.get("mssnM")
cleaned_area = mission_state.get("sqft") # Imperial
# Convert to m2 if the unit_system is set to metric
if cleaned_area and self.hass.config.units.is_metric:
cleaned_area = round(cleaned_area * 0.0929)
state_attrs[ATTR_CLEANING_TIME] = cleaning_time
state_attrs[ATTR_CLEANED_AREA] = cleaned_area
# Error
if self.vacuum.error_code != 0:
state_attrs[ATTR_ERROR] = self.vacuum.error_message
state_attrs[ATTR_ERROR_CODE] = self.vacuum.error_code
# Not all Roombas expose position data
# https://github.com/koalazak/dorita980/issues/48
if self._cap_position:
pos_state = state.get("pose", {})
position = None
pos_x = pos_state.get("point", {}).get("x")
pos_y = pos_state.get("point", {}).get("y")
theta = pos_state.get("theta")
if all(item is not None for item in [pos_x, pos_y, theta]):
position = f"({pos_x}, {pos_y}, {theta})"
state_attrs[ATTR_POSITION] = position
return state_attrs
def on_message(self, json_data):
"""Update state on message change."""
state = json_data.get("state", {}).get("reported", {})
if self.new_state_filter(state):
_LOGGER.debug("Got new state from the vacuum: %s", json_data)
self.schedule_update_ha_state()
async def async_start(self):
"""Start or resume the cleaning task."""
if self.state == STATE_PAUSED:
await self.hass.async_add_executor_job(self.vacuum.send_command, "resume")
else:
await self.hass.async_add_executor_job(self.vacuum.send_command, "start")
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner."""
await self.hass.async_add_executor_job(self.vacuum.send_command, "stop")
async def async_pause(self):
"""Pause the cleaning cycle."""
await self.hass.async_add_executor_job(self.vacuum.send_command, "pause")
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
if self.state == STATE_CLEANING:
await self.async_pause()
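            # Wait up to ten seconds for the robot to report the paused state
            # before sending the dock command.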
for _ in range(0, 10):
if self.state == STATE_PAUSED:
break
await asyncio.sleep(1)
await self.hass.async_add_executor_job(self.vacuum.send_command, "dock")
async def async_locate(self, **kwargs):
"""Located vacuum."""
await self.hass.async_add_executor_job(self.vacuum.send_command, "find")
async def async_send_command(self, command, params=None, **kwargs):
"""Send raw command."""
_LOGGER.debug("async_send_command %s (%s), %s", command, params, kwargs)
await self.hass.async_add_executor_job(
self.vacuum.send_command, command, params
)
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
data_name = 'mtrf'
has_mtrf_data = partial(has_dataset, name=data_name)
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name=data_name,
download=download)
data_path.__doc__ = _data_path_doc.format(name=data_name,
conf='MNE_DATASETS_MTRF_PATH')
def get_version(): # noqa: D103
return _get_version(data_name)
get_version.__doc__ = _version_doc.format(name=data_name)
|
import configparser
import io
import json.decoder
import os
import shutil
import sys
import time
import requests
import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
from pkg_resources import resource_filename
from nikola.plugin_categories import Command
from nikola import utils
LOGGER = utils.get_logger('theme')
class CommandTheme(Command):
"""Manage themes."""
json = None
name = "theme"
doc_usage = "[-u url] [-i theme_name] [-r theme_name] [-l] [--list-installed] [-g] [-n theme_name] [-c template_name]"
doc_purpose = "manage themes"
output_dir = 'themes'
cmd_options = [
{
'name': 'install',
'short': 'i',
'long': 'install',
'type': str,
'default': '',
'help': 'Install a theme.'
},
{
'name': 'uninstall',
'long': 'uninstall',
'short': 'r',
'type': str,
'default': '',
'help': 'Uninstall a theme.'
},
{
'name': 'list',
'short': 'l',
'long': 'list',
'type': bool,
'default': False,
'help': 'Show list of available themes.'
},
{
'name': 'list_installed',
'long': 'list-installed',
'type': bool,
'help': "List the installed themes with their location.",
'default': False
},
{
'name': 'url',
'short': 'u',
'long': 'url',
'type': str,
'help': "URL for the theme repository",
'default': 'https://themes.getnikola.com/v8/themes.json'
},
{
'name': 'getpath',
'short': 'g',
'long': 'get-path',
'type': str,
'default': '',
'help': "Print the path for installed theme",
},
{
'name': 'copy-template',
'short': 'c',
'long': 'copy-template',
'type': str,
'default': '',
'help': 'Copy a built-in template into templates/ or your theme',
},
{
'name': 'new',
'short': 'n',
'long': 'new',
'type': str,
'default': '',
'help': 'Create a new theme',
},
{
'name': 'new_engine',
'long': 'engine',
'type': str,
'default': 'mako',
'help': 'Engine to use for new theme (mako or jinja)',
},
{
'name': 'new_parent',
'long': 'parent',
'type': str,
'default': 'base',
'help': 'Parent to use for new theme',
},
{
'name': 'new_legacy_meta',
'long': 'legacy-meta',
'type': bool,
'default': False,
'help': 'Create legacy meta files for new theme',
},
]
def _execute(self, options, args):
"""Install theme into current site."""
url = options['url']
# See the "mode" we need to operate in
install = options.get('install')
uninstall = options.get('uninstall')
list_available = options.get('list')
list_installed = options.get('list_installed')
get_path = options.get('getpath')
copy_template = options.get('copy-template')
new = options.get('new')
new_engine = options.get('new_engine')
new_parent = options.get('new_parent')
new_legacy_meta = options.get('new_legacy_meta')
command_count = [bool(x) for x in (
install,
uninstall,
list_available,
list_installed,
get_path,
copy_template,
new)].count(True)
if command_count > 1 or command_count == 0:
print(self.help())
return 2
if list_available:
return self.list_available(url)
elif list_installed:
return self.list_installed()
elif install:
return self.do_install_deps(url, install)
elif uninstall:
return self.do_uninstall(uninstall)
elif get_path:
return self.get_path(get_path)
elif copy_template:
return self.copy_template(copy_template)
elif new:
return self.new_theme(new, new_engine, new_parent, new_legacy_meta)
def do_install_deps(self, url, name):
"""Install themes and their dependencies."""
data = self.get_json(url)
# `name` may be modified by the while loop.
origname = name
installstatus = self.do_install(name, data)
# See if the theme's parent is available. If not, install it
while True:
parent_name = utils.get_parent_theme_name(utils.get_theme_path_real(name, self.site.themes_dirs))
if parent_name is None:
break
try:
utils.get_theme_path_real(parent_name, self.site.themes_dirs)
break
except Exception: # Not available
self.do_install(parent_name, data)
name = parent_name
if installstatus:
LOGGER.info('Remember to set THEME="{0}" in conf.py to use this theme.'.format(origname))
def do_install(self, name, data):
"""Download and install a theme."""
if name in data:
utils.makedirs(self.output_dir)
url = data[name]
LOGGER.info("Downloading '{0}'".format(url))
try:
zip_data = requests.get(url).content
except requests.exceptions.SSLError:
LOGGER.warning("SSL error, using http instead of https (press ^C to abort)")
time.sleep(1)
url = url.replace('https', 'http', 1)
zip_data = requests.get(url).content
zip_file = io.BytesIO()
zip_file.write(zip_data)
LOGGER.info("Extracting '{0}' into themes/".format(name))
utils.extract_all(zip_file)
dest_path = os.path.join(self.output_dir, name)
else:
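            # The theme is not in the remote index; report whether it is
            # already installed locally or simply unknown.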
dest_path = os.path.join(self.output_dir, name)
try:
theme_path = utils.get_theme_path_real(name, self.site.themes_dirs)
LOGGER.error("Theme '{0}' is already installed in {1}".format(name, theme_path))
except Exception:
LOGGER.error("Can't find theme {0}".format(name))
return False
confpypath = os.path.join(dest_path, 'conf.py.sample')
if os.path.exists(confpypath):
LOGGER.warning('This theme has a sample config file. Integrate it with yours in order to make this theme work!')
print('Contents of the conf.py.sample file:\n')
with io.open(confpypath, 'r', encoding='utf-8-sig') as fh:
if self.site.colorful:
print(pygments.highlight(fh.read(), PythonLexer(), TerminalFormatter()))
else:
print(fh.read())
return True
def do_uninstall(self, name):
"""Uninstall a theme."""
try:
path = utils.get_theme_path_real(name, self.site.themes_dirs)
except Exception:
LOGGER.error('Unknown theme: {0}'.format(name))
return 1
# Don't uninstall builtin themes (Issue #2510)
blocked = os.path.dirname(utils.__file__)
if path.startswith(blocked):
LOGGER.error("Can't delete builtin theme: {0}".format(name))
return 1
LOGGER.warning('About to uninstall theme: {0}'.format(name))
LOGGER.warning('This will delete {0}'.format(path))
sure = utils.ask_yesno('Are you sure?')
if sure:
LOGGER.warning('Removing {0}'.format(path))
shutil.rmtree(path)
return 0
return 1
def get_path(self, name):
"""Get path for an installed theme."""
try:
path = utils.get_theme_path_real(name, self.site.themes_dirs)
print(path)
except Exception:
print("not installed")
return 0
def list_available(self, url):
"""List all available themes."""
data = self.get_json(url)
print("Available Themes:")
print("-----------------")
for theme in sorted(data.keys()):
print(theme)
return 0
def list_installed(self):
"""List all installed themes."""
print("Installed Themes:")
print("-----------------")
themes = []
themes_dirs = self.site.themes_dirs + [resource_filename('nikola', os.path.join('data', 'themes'))]
for tdir in themes_dirs:
if os.path.isdir(tdir):
themes += [(i, os.path.join(tdir, i)) for i in os.listdir(tdir)]
for tname, tpath in sorted(set(themes)):
if os.path.isdir(tpath):
print("{0} at {1}".format(tname, tpath))
def copy_template(self, template):
"""Copy the named template file from the parent to a local theme or to templates/."""
# Find template
t = self.site.template_system.get_template_path(template)
if t is None:
LOGGER.error("Cannot find template {0} in the lookup.".format(template))
return 2
# Figure out where to put it.
# Check if a local theme exists.
theme_path = utils.get_theme_path(self.site.THEMES[0])
if theme_path.startswith('themes' + os.sep):
# Theme in local themes/ directory
base = os.path.join(theme_path, 'templates')
else:
# Put it in templates/
base = 'templates'
if not os.path.exists(base):
os.mkdir(base)
LOGGER.info("Created directory {0}".format(base))
try:
out = shutil.copy(t, base)
LOGGER.info("Copied template from {0} to {1}".format(t, out))
except shutil.SameFileError:
LOGGER.error("This file already exists in your templates directory ({0}).".format(base))
return 3
def new_theme(self, name, engine, parent, create_legacy_meta=False):
"""Create a new theme."""
base = 'themes'
themedir = os.path.join(base, name)
LOGGER.info("Creating theme {0} with parent {1} and engine {2} in {3}".format(name, parent, engine, themedir))
if not os.path.exists(base):
os.mkdir(base)
LOGGER.info("Created directory {0}".format(base))
# Check if engine and parent match
parent_engine = utils.get_template_engine(utils.get_theme_chain(parent, self.site.themes_dirs))
if parent_engine != engine:
LOGGER.error("Cannot use engine {0} because parent theme '{1}' uses {2}".format(engine, parent, parent_engine))
return 2
# Create theme
if not os.path.exists(themedir):
os.mkdir(themedir)
LOGGER.info("Created directory {0}".format(themedir))
else:
LOGGER.error("Theme already exists")
return 2
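        # Write the <name>.theme metadata file (INI format); with the default
        # options it will contain:
        #   [Theme]
        #   engine = mako
        #   parent = base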
cp = configparser.ConfigParser()
cp['Theme'] = {
'engine': engine,
'parent': parent
}
theme_meta_path = os.path.join(themedir, name + '.theme')
with io.open(theme_meta_path, 'w', encoding='utf-8') as fh:
cp.write(fh)
LOGGER.info("Created file {0}".format(theme_meta_path))
if create_legacy_meta:
with io.open(os.path.join(themedir, 'parent'), 'w', encoding='utf-8') as fh:
fh.write(parent + '\n')
LOGGER.info("Created file {0}".format(os.path.join(themedir, 'parent')))
with io.open(os.path.join(themedir, 'engine'), 'w', encoding='utf-8') as fh:
fh.write(engine + '\n')
LOGGER.info("Created file {0}".format(os.path.join(themedir, 'engine')))
LOGGER.info("Theme {0} created successfully.".format(themedir))
LOGGER.info('Remember to set THEME="{0}" in conf.py to use this theme.'.format(name))
def get_json(self, url):
"""Download the JSON file with all plugins."""
if self.json is None:
try:
try:
self.json = requests.get(url).json()
except requests.exceptions.SSLError:
LOGGER.warning("SSL error, using http instead of https (press ^C to abort)")
time.sleep(1)
url = url.replace('https', 'http', 1)
self.json = requests.get(url).json()
except json.decoder.JSONDecodeError as e:
LOGGER.error("Failed to decode JSON data in response from server.")
LOGGER.error("JSON error encountered:" + str(e))
LOGGER.error("This issue might be caused by server-side issues, or by to unusual activity in your "
"network (as determined by CloudFlare). Please visit https://themes.getnikola.com/ in "
"a browser.")
sys.exit(2)
return self.json
|
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from scipy.io import savemat
import mne
from mne.datasets import testing
from mne.beamformer import make_lcmv, apply_lcmv, apply_lcmv_cov
from mne.beamformer.tests.test_lcmv import _get_data
from mne.externals.pymatreader import read_mat
from mne.utils import run_tests_if_main
data_path = testing.data_path(download=False)
ft_data_path = op.join(data_path, 'fieldtrip', 'beamformer')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
reject = dict(grad=4000e-13, mag=4e-12)
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def _get_bf_data(save_fieldtrip=False):
raw, epochs, evoked, data_cov, _, _, _, _, _, fwd = _get_data(proj=False)
if save_fieldtrip is True:
# raw needs to be saved with all channels and picked in FieldTrip
raw.save(op.join(ft_data_path, 'raw.fif'), overwrite=True)
# src (tris are not available in fwd['src'] once imported into MATLAB)
src = fwd['src'].copy()
mne.write_source_spaces(op.join(ft_data_path, 'src.fif'), src,
verbose='error', overwrite=True)
# pick gradiometers only:
epochs.pick_types(meg='grad')
evoked.pick_types(meg='grad')
# compute covariance matrix (ignore false alarm about no baseline)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145,
method='empirical', verbose='error')
if save_fieldtrip is True:
# if the covariance matrix and epochs need resaving:
# data covariance:
cov_savepath = op.join(ft_data_path, 'sample_cov.mat')
sample_cov = {'sample_cov': data_cov['data']}
savemat(cov_savepath, sample_cov)
# evoked data:
ev_savepath = op.join(ft_data_path, 'sample_evoked.mat')
data_ev = {'sample_evoked': evoked.data}
savemat(ev_savepath, data_ev)
return evoked, data_cov, fwd
# beamformer types to be tested: unit-gain (vector and scalar) and
# unit-noise-gain (time series and power output [apply_lcmv_cov])
@pytest.mark.parametrize('bf_type, weight_norm, pick_ori, pwr', [
['ug_scal', None, 'max-power', False],
['ung', 'unit-noise-gain', 'max-power', False],
['ung_pow', 'unit-noise-gain', 'max-power', True],
['ug_vec', None, 'vector', False],
['ung_vec', 'unit-noise-gain', 'vector', False],
])
def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr):
"""Test LCMV vs fieldtrip output."""
evoked, data_cov, fwd = _get_bf_data
# run the MNE-Python beamformer
filters = make_lcmv(evoked.info, fwd, data_cov=data_cov,
noise_cov=None, pick_ori=pick_ori, reg=0.05,
weight_norm=weight_norm)
if pwr:
stc_mne = apply_lcmv_cov(data_cov, filters)
else:
stc_mne = apply_lcmv(evoked, filters)
# load the FieldTrip output
ft_fname = op.join(ft_data_path, 'ft_source_' + bf_type + '-vol.mat')
stc_ft_data = read_mat(ft_fname)['stc']
if stc_ft_data.ndim == 1:
stc_ft_data.shape = (stc_ft_data.size, 1)
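    # LCMV beamformer output is defined only up to a sign flip, so align the
    # MNE solution with the FieldTrip solution before comparing amplitudes.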
if stc_mne.data.ndim == 2:
signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True))
if pwr:
assert_array_equal(signs, 1.)
stc_mne.data *= signs
assert stc_ft_data.shape == stc_mne.data.shape
if pick_ori == 'vector':
# compare norms first
assert_allclose(np.linalg.norm(stc_mne.data, axis=1),
np.linalg.norm(stc_ft_data, axis=1), rtol=1e-6)
assert_allclose(stc_mne.data, stc_ft_data, rtol=1e-6)
run_tests_if_main()
|
import json
import arrow
import pem
import requests
import sys
from cryptography import x509
from flask import current_app, g
from lemur.common.utils import validate_conf, convert_pkcs7_bytes_to_pem
from lemur.extensions import metrics
from lemur.plugins import lemur_digicert as digicert
from lemur.plugins.bases import IssuerPlugin, SourcePlugin
from retrying import retry
def log_status_code(r, *args, **kwargs):
"""
    Request hook that logs the status code of every DigiCert API response.
:param r:
:param args:
:param kwargs:
:return:
"""
log_data = {
"reason": (r.reason if r.reason else ""),
"status_code": r.status_code,
"url": (r.url if r.url else ""),
}
metrics.send("digicert_status_code_{}".format(r.status_code), "counter", 1)
current_app.logger.info(log_data)
def signature_hash(signing_algorithm):
"""Converts Lemur's signing algorithm into a format DigiCert understands.
:param signing_algorithm:
:return: str digicert specific algorithm string
"""
if not signing_algorithm:
return current_app.config.get("DIGICERT_DEFAULT_SIGNING_ALGORITHM", "sha256")
if signing_algorithm == "sha256WithRSA":
return "sha256"
elif signing_algorithm == "sha384WithRSA":
return "sha384"
elif signing_algorithm == "sha512WithRSA":
return "sha512"
raise Exception("Unsupported signing algorithm.")
def determine_validity_years(years):
"""
    Because the maximum allowed certificate validity period is 397 days, this
    method never returns more than one year of validity; it always returns 1.
    Lemur will change this method in the future to handle validity in months
    (determine_validity_months) instead of years, allowing short-lived certificates.
:param years:
:return: 1
"""
return 1
def determine_end_date(end_date):
"""
Determine appropriate end date
:param end_date:
:return: validity_end
"""
default_days = current_app.config.get("DIGICERT_DEFAULT_VALIDITY_DAYS", 397)
max_validity_end = arrow.utcnow().shift(days=current_app.config.get("DIGICERT_MAX_VALIDITY_DAYS", default_days))
if not end_date:
end_date = arrow.utcnow().shift(days=default_days)
if end_date > max_validity_end:
end_date = max_validity_end
return end_date
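# Example: with the default DIGICERT_DEFAULT_VALIDITY_DAYS of 397, a requested
# end date two years from now is truncated to arrow.utcnow() + 397 days.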
def get_additional_names(options):
"""
    Return a list of strings to be added to a SAN certificate.
:param options:
:return:
"""
names = []
# add SANs if present
if options.get("extensions"):
for san in options["extensions"]["sub_alt_names"]["names"]:
if isinstance(san, x509.DNSName):
names.append(san.value)
return names
def map_fields(options, csr):
"""Set the incoming issuer options to DigiCert fields/options.
:param options:
:param csr:
    :return: dict of valid DigiCert options
"""
data = dict(
certificate={
"common_name": options["common_name"],
"csr": csr,
"signature_hash": signature_hash(options.get("signing_algorithm")),
},
organization={"id": current_app.config.get("DIGICERT_ORG_ID")},
)
data["certificate"]["dns_names"] = get_additional_names(options)
if options.get("validity_years"):
data["validity_years"] = determine_validity_years(options.get("validity_years"))
elif options.get("validity_end"):
data["custom_expiration_date"] = determine_end_date(options.get("validity_end")).format("YYYY-MM-DD")
# check if validity got truncated. If resultant validity is not equal to requested validity, it just got truncated
if data["custom_expiration_date"] != options.get("validity_end").format("YYYY-MM-DD"):
log_validity_truncation(options, f"{__name__}.{sys._getframe().f_code.co_name}")
else:
data["validity_years"] = determine_validity_years(0)
if current_app.config.get("DIGICERT_PRIVATE", False):
if "product" in data:
data["product"]["type_hint"] = "private"
else:
data["product"] = dict(type_hint="private")
return data
def map_cis_fields(options, csr):
"""
MAP issuer options to DigiCert CIS fields/options.
:param options:
:param csr:
:return: data
"""
if options.get("validity_years"):
validity_end = determine_end_date(arrow.utcnow().shift(years=options["validity_years"]))
elif options.get("validity_end"):
validity_end = determine_end_date(options.get("validity_end"))
# check if validity got truncated. If resultant validity is not equal to requested validity, it just got truncated
if validity_end != options.get("validity_end"):
log_validity_truncation(options, f"{__name__}.{sys._getframe().f_code.co_name}")
else:
validity_end = determine_end_date(False)
data = {
"profile_name": current_app.config.get("DIGICERT_CIS_PROFILE_NAMES", {}).get(options['authority'].name),
"common_name": options["common_name"],
"additional_dns_names": get_additional_names(options),
"csr": csr,
"signature_hash": signature_hash(options.get("signing_algorithm")),
"validity": {
"valid_to": validity_end.format("YYYY-MM-DDTHH:mm:ss") + "Z"
},
"organization": {
"name": options["organization"],
},
}
# possibility to default to a SIGNING_ALGORITHM for a given profile
if current_app.config.get("DIGICERT_CIS_SIGNING_ALGORITHMS", {}).get(options['authority'].name):
data["signature_hash"] = current_app.config.get("DIGICERT_CIS_SIGNING_ALGORITHMS", {}).get(
options['authority'].name)
return data
def log_validity_truncation(options, function):
log_data = {
"cn": options["common_name"],
"creator": g.user.username
}
metrics.send("digicert_validity_truncated", "counter", 1, metric_tags=log_data)
log_data["function"] = function
log_data["message"] = "Digicert Plugin truncated the validity of certificate"
current_app.logger.info(log_data)
def handle_response(response):
"""
Handle the DigiCert API response and any errors it might have experienced.
:param response:
:return:
"""
if response.status_code > 399:
raise Exception("DigiCert rejected request with the error:" + response.json()["errors"][0]["message"])
return response.json()
def handle_cis_response(response):
"""
Handle the DigiCert CIS API response and any errors it might have experienced.
:param response:
:return:
"""
if response.status_code == 404:
raise Exception("DigiCert: order not in issued state")
elif response.status_code == 406:
raise Exception("DigiCert: wrong header request format")
elif response.status_code > 399:
raise Exception("DigiCert rejected request with the error:" + response.text)
if response.url.endswith("download"):
return response.content
else:
return response.json()
@retry(stop_max_attempt_number=10, wait_fixed=1000)
def get_certificate_id(session, base_url, order_id):
"""Retrieve certificate order id from Digicert API."""
order_url = "{0}/services/v2/order/certificate/{1}".format(base_url, order_id)
response_data = handle_response(session.get(order_url))
if response_data["status"] != "issued":
raise Exception("Order not in issued state.")
return response_data["certificate"]["id"]
@retry(stop_max_attempt_number=10, wait_fixed=1000)
def get_cis_certificate(session, base_url, order_id):
"""Retrieve certificate order id from Digicert API, including the chain"""
certificate_url = "{0}/platform/cis/certificate/{1}/download".format(base_url, order_id)
session.headers.update({"Accept": "application/x-pkcs7-certificates"})
response = session.get(certificate_url)
response_content = handle_cis_response(response)
cert_chain_pem = convert_pkcs7_bytes_to_pem(response_content)
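    # The PKCS7 bundle should contain at least the end-entity certificate, an
    # intermediate and a root.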
if len(cert_chain_pem) < 3:
raise Exception("Missing the certificate chain")
return cert_chain_pem
class DigiCertSourcePlugin(SourcePlugin):
"""Wrap the Digicert Certifcate API."""
title = "DigiCert"
slug = "digicert-source"
description = "Enables the use of Digicert as a source of existing certificates."
version = digicert.VERSION
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur.git"
def __init__(self, *args, **kwargs):
"""Initialize source with appropriate details."""
required_vars = [
"DIGICERT_API_KEY",
"DIGICERT_URL",
"DIGICERT_ORG_ID",
"DIGICERT_ROOT",
]
validate_conf(current_app, required_vars)
self.session = requests.Session()
self.session.headers.update(
{
"X-DC-DEVKEY": current_app.config["DIGICERT_API_KEY"],
"Content-Type": "application/json",
}
)
self.session.hooks = dict(response=log_status_code)
super(DigiCertSourcePlugin, self).__init__(*args, **kwargs)
def get_certificates(self):
pass
class DigiCertIssuerPlugin(IssuerPlugin):
"""Wrap the Digicert Issuer API."""
title = "DigiCert"
slug = "digicert-issuer"
description = "Enables the creation of certificates by the DigiCert REST API."
version = digicert.VERSION
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur.git"
def __init__(self, *args, **kwargs):
"""Initialize the issuer with the appropriate details."""
required_vars = [
"DIGICERT_API_KEY",
"DIGICERT_URL",
"DIGICERT_ORG_ID",
"DIGICERT_ORDER_TYPE",
"DIGICERT_ROOT",
]
validate_conf(current_app, required_vars)
self.session = requests.Session()
self.session.headers.update(
{
"X-DC-DEVKEY": current_app.config["DIGICERT_API_KEY"],
"Content-Type": "application/json",
}
)
self.session.hooks = dict(response=log_status_code)
super(DigiCertIssuerPlugin, self).__init__(*args, **kwargs)
def create_certificate(self, csr, issuer_options):
"""Create a DigiCert certificate.
:param csr:
:param issuer_options:
:return: :raise Exception:
"""
base_url = current_app.config.get("DIGICERT_URL")
cert_type = current_app.config.get("DIGICERT_ORDER_TYPE")
# make certificate request
determinator_url = "{0}/services/v2/order/certificate/{1}".format(
base_url, cert_type
)
data = map_fields(issuer_options, csr)
response = self.session.post(determinator_url, data=json.dumps(data))
if response.status_code > 399:
raise Exception(response.json()["errors"][0]["message"])
order_id = response.json()["id"]
certificate_id = get_certificate_id(self.session, base_url, order_id)
# retrieve certificate
certificate_url = "{0}/services/v2/certificate/{1}/download/format/pem_all".format(
base_url, certificate_id
)
end_entity, intermediate, root = pem.parse(
self.session.get(certificate_url).content
)
return (
"\n".join(str(end_entity).splitlines()),
"\n".join(str(intermediate).splitlines()),
certificate_id,
)
def revoke_certificate(self, certificate, reason):
"""Revoke a Digicert certificate."""
base_url = current_app.config.get("DIGICERT_URL")
# make certificate revoke request
create_url = "{0}/services/v2/certificate/{1}/revoke".format(
base_url, certificate.external_id
)
comments = reason["comments"] if "comments" in reason else ''
if "crl_reason" in reason:
comments += '(' + reason["crl_reason"] + ')'
metrics.send("digicert_revoke_certificate", "counter", 1)
response = self.session.put(create_url, data=json.dumps({"comments": comments}))
return handle_response(response)
def get_ordered_certificate(self, pending_cert):
""" Retrieve a certificate via order id """
order_id = pending_cert.external_id
base_url = current_app.config.get("DIGICERT_URL")
try:
certificate_id = get_certificate_id(self.session, base_url, order_id)
        except Exception:
return None
certificate_url = "{0}/services/v2/certificate/{1}/download/format/pem_all".format(
base_url, certificate_id
)
end_entity, intermediate, root = pem.parse(
self.session.get(certificate_url).content
)
cert = {
"body": "\n".join(str(end_entity).splitlines()),
"chain": "\n".join(str(intermediate).splitlines()),
"external_id": str(certificate_id),
}
return cert
def cancel_ordered_certificate(self, pending_cert, **kwargs):
""" Set the certificate order to canceled """
base_url = current_app.config.get("DIGICERT_URL")
api_url = "{0}/services/v2/order/certificate/{1}/status".format(
base_url, pending_cert.external_id
)
payload = {"status": "CANCELED", "note": kwargs.get("note")}
response = self.session.put(api_url, data=json.dumps(payload))
if response.status_code == 404:
# not well documented by Digicert, but either the certificate does not exist or we
# don't own that order (someone else's order id!). Either way, we can just ignore it
# and have it removed from Lemur
current_app.logger.warning(
"Digicert Plugin tried to cancel pending certificate {0} but it does not exist!".format(
pending_cert.name
)
)
elif response.status_code != 204:
current_app.logger.debug(
"{0} code {1}".format(response.status_code, response.content)
)
raise Exception(
"Failed to cancel pending certificate {0}".format(pending_cert.name)
)
@staticmethod
def create_authority(options):
"""Create an authority.
Creates an authority, this authority is then used by Lemur to
allow a user to specify which Certificate Authority they want
to sign their certificate.
:param options:
:return:
"""
role = {"username": "", "password": "", "name": "digicert"}
return current_app.config.get("DIGICERT_ROOT"), "", [role]
class DigiCertCISSourcePlugin(SourcePlugin):
"""Wrap the Digicert CIS Certifcate API."""
title = "DigiCert"
slug = "digicert-cis-source"
description = "Enables the use of Digicert as a source of existing certificates."
version = digicert.VERSION
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur.git"
additional_options = []
def __init__(self, *args, **kwargs):
"""Initialize source with appropriate details."""
required_vars = [
"DIGICERT_CIS_API_KEY",
"DIGICERT_CIS_URL",
"DIGICERT_CIS_ROOTS",
"DIGICERT_CIS_PROFILE_NAMES",
]
validate_conf(current_app, required_vars)
self.session = requests.Session()
self.session.headers.update(
{
"X-DC-DEVKEY": current_app.config["DIGICERT_CIS_API_KEY"],
"Content-Type": "application/json",
}
)
self.session.hooks = dict(response=log_status_code)
a = requests.adapters.HTTPAdapter(max_retries=3)
self.session.mount("https://", a)
super(DigiCertCISSourcePlugin, self).__init__(*args, **kwargs)
def get_certificates(self, options, **kwargs):
"""Fetch all Digicert certificates."""
base_url = current_app.config.get("DIGICERT_CIS_URL")
# make request
search_url = "{0}/platform/cis/certificate/search".format(base_url)
certs = []
page = 1
while True:
response = self.session.get(
search_url, params={"status": ["issued"], "page": page}
)
data = handle_cis_response(response)
for c in data["certificates"]:
download_url = "{0}/platform/cis/certificate/{1}".format(
base_url, c["id"]
)
certificate = self.session.get(download_url)
# normalize serial
serial = str(int(c["serial_number"], 16))
cert = {
"body": certificate.content,
"serial": serial,
"external_id": c["id"],
}
certs.append(cert)
if page == data["total_pages"]:
break
page += 1
return certs
class DigiCertCISIssuerPlugin(IssuerPlugin):
"""Wrap the Digicert Certificate Issuing API."""
title = "DigiCert CIS"
slug = "digicert-cis-issuer"
description = "Enables the creation of certificates by the DigiCert CIS REST API."
version = digicert.VERSION
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur.git"
def __init__(self, *args, **kwargs):
"""Initialize the issuer with the appropriate details."""
required_vars = [
"DIGICERT_CIS_API_KEY",
"DIGICERT_CIS_URL",
"DIGICERT_CIS_ROOTS",
"DIGICERT_CIS_PROFILE_NAMES",
]
validate_conf(current_app, required_vars)
self.session = requests.Session()
self.session.headers.update(
{
"X-DC-DEVKEY": current_app.config["DIGICERT_CIS_API_KEY"],
"Content-Type": "application/json",
}
)
self.session.hooks = dict(response=log_status_code)
super(DigiCertCISIssuerPlugin, self).__init__(*args, **kwargs)
def create_certificate(self, csr, issuer_options):
"""Create a DigiCert certificate."""
base_url = current_app.config.get("DIGICERT_CIS_URL")
# make certificate request
create_url = "{0}/platform/cis/certificate".format(base_url)
data = map_cis_fields(issuer_options, csr)
response = self.session.post(create_url, data=json.dumps(data))
data = handle_cis_response(response)
# retrieve certificate
certificate_chain_pem = get_cis_certificate(self.session, base_url, data["id"])
self.session.headers.pop("Accept")
end_entity = certificate_chain_pem[0]
intermediate = certificate_chain_pem[1]
return (
"\n".join(str(end_entity).splitlines()),
"\n".join(str(intermediate).splitlines()),
data["id"],
)
def revoke_certificate(self, certificate, reason):
"""Revoke a Digicert certificate."""
base_url = current_app.config.get("DIGICERT_CIS_URL")
# make certificate revoke request
revoke_url = "{0}/platform/cis/certificate/{1}/revoke".format(
base_url, certificate.external_id
)
metrics.send("digicert_revoke_certificate_success", "counter", 1)
comments = reason["comments"] if "comments" in reason else ''
if "crl_reason" in reason:
comments += '(' + reason["crl_reason"] + ')'
response = self.session.put(revoke_url, data=json.dumps({"comments": comments}))
if response.status_code != 204:
metrics.send("digicert_revoke_certificate_failure", "counter", 1)
raise Exception("Failed to revoke certificate.")
metrics.send("digicert_revoke_certificate_success", "counter", 1)
@staticmethod
def create_authority(options):
"""Create an authority.
Creates an authority, this authority is then used by Lemur to
allow a user to specify which Certificate Authority they want
to sign their certificate.
:param options:
:return:
"""
role = {"username": "", "password": "", "name": "digicert"}
return current_app.config.get("DIGICERT_CIS_ROOTS", {}).get(options['authority'].name), "", [role]
|
import time
from six import StringIO
from stash.tests.stashtest import StashTestCase
class ThreadsTests(StashTestCase):
setup_commands = ['BIN_PATH=$STASH_ROOT/tests/system/data:$BIN_PATH']
def test_101(self):
"""
background thread clears properly
"""
self.stash('test_101_1.py &')
time.sleep(4)
cmp_str = r"""[stash]$ [stash]$ sleeping ... 0
sleeping ... 1
"""
assert self.stash.main_screen.text == cmp_str, 'output not identical'
def test_102(self):
"""
Two parallel threads with same stdout should interleave
"""
outs = StringIO()
self.stash('test_102_1.py &', final_outs=outs)
self.stash('test_102_2.py &', final_outs=outs)
time.sleep(5)
s = outs.getvalue()
# Count the number of times the output switches between threads
change_cnt = 0
prev_line = None
for cur_line in outs.getvalue().splitlines():
if prev_line is None:
prev_line = cur_line
elif prev_line != cur_line:
change_cnt += 1
prev_line = cur_line
        self.assertTrue(change_cnt > 2, 'Outputs do not interleave')
def test_103(self):
"""
Two threads in parallel with different stdout do not interfere
"""
outs1 = StringIO()
self.stash('test_102_1.py &', final_outs=outs1)
self.stash('test_102_2.py')
time.sleep(1)
cmp_str1 = r"""[stash]$ [stash]$ test_102_2.py
test_102_2.py
test_102_2.py
test_102_2.py
test_102_2.py
[stash]$ """
assert self.stash.main_screen.text == cmp_str1, 'output not identical'
cmp_str2 = r"""test_102_1.py
test_102_1.py
test_102_1.py
test_102_1.py
test_102_1.py
"""
assert outs1.getvalue() == cmp_str2, 'output not identical'
|
from homeassistant.helpers.entity import ToggleEntity
from . import DOMAIN as DAIKIN_DOMAIN
ZONE_ICON = "mdi:home-circle"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up the platform.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = hass.data[DAIKIN_DOMAIN][entry.entry_id]
zones = daikin_api.device.zones
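    # Zones reported as ("-", "0") are unused slots on the controller and are
    # skipped below.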
if zones:
async_add_entities(
[
DaikinZoneSwitch(daikin_api, zone_id)
for zone_id, zone in enumerate(zones)
if zone != ("-", "0")
]
)
class DaikinZoneSwitch(ToggleEntity):
"""Representation of a zone."""
def __init__(self, daikin_api, zone_id):
"""Initialize the zone."""
self._api = daikin_api
self._zone_id = zone_id
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.device.mac}-zone{self._zone_id}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ZONE_ICON
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._api.name} {self._api.device.zones[self._zone_id][0]}"
@property
def is_on(self):
"""Return the state of the sensor."""
return self._api.device.zones[self._zone_id][1] == "1"
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
async def async_turn_on(self, **kwargs):
"""Turn the zone on."""
await self._api.device.set_zone(self._zone_id, "1")
async def async_turn_off(self, **kwargs):
"""Turn the zone off."""
await self._api.device.set_zone(self._zone_id, "0")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from six.moves import range
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'tensorflow'
BENCHMARK_CONFIG = """
tensorflow:
description: Runs Tensorflow Benchmark.
vm_groups:
default:
os_type: ubuntu1604
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
GPU = 'gpu'
CPU = 'cpu'
NCHW = 'NCHW'
NHWC = 'NHWC'
PID_PREFIX = 'TF_PS_PID'
MODELS = ['vgg11', 'vgg16', 'vgg19', 'lenet', 'googlenet', 'overfeat',
'alexnet', 'trivial', 'inception3', 'inception4', 'resnet50',
'resnet101', 'resnet152']
FP16 = 'float16'
FP32 = 'float32'
flags.DEFINE_boolean('tf_forward_only', False, '''whether to use forward-only or
training for benchmarking''')
flags.DEFINE_list('tf_models', ['inception3', 'vgg16', 'alexnet', 'resnet50',
'resnet152'], 'name of the models to run')
flags.register_validator('tf_models',
lambda models: models and set(models).issubset(MODELS),
'Invalid models list. tf_models must be a subset of '
+ ', '.join(MODELS))
flags.DEFINE_string('tf_data_dir', None,
'Path to dataset in TFRecord format (aka Example '
'protobufs). If not specified, synthetic data will be '
'used.')
flags.DEFINE_string('tf_data_module', 'tensorflow/ILSVRC2012',
'Data path in preprovisioned data bucket.')
flags.DEFINE_integer('tf_num_files_train', 1024,
'The number of files for training')
flags.DEFINE_integer('tf_num_files_val', 128,
'The number of files for validation')
flags.DEFINE_enum('tf_data_name', 'imagenet', ['imagenet', 'flowers'],
'Name of dataset: imagenet or flowers.')
flags.DEFINE_list('tf_batch_sizes', None, 'batch sizes per compute device. '
'If not provided, the suggested batch size is used for '
'the given model')
flags.DEFINE_enum('tf_variable_update', 'parameter_server',
['parameter_server', 'replicated',
'distributed_replicated', 'independent'],
'''The method for managing variables: parameter_server,
replicated, distributed_replicated, independent''')
flags.DEFINE_enum('tf_local_parameter_device', CPU, [CPU, GPU],
'''Device to use as parameter server: cpu or gpu. For
distributed training, it can affect where caching of
variables happens.''')
flags.DEFINE_enum('tf_device', GPU, [CPU, GPU],
'Device to use for computation: cpu or gpu')
flags.DEFINE_enum('tf_data_format', NCHW, [NCHW, NHWC], '''Data layout to
use: NHWC (TF native) or NCHW (cuDNN native).''')
flags.DEFINE_boolean('tf_distortions', True,
'''Enable/disable distortions during image preprocessing.
These include bbox and color distortions.''')
flags.DEFINE_boolean('tf_distributed', False, 'Run TensorFlow distributed')
flags.DEFINE_string('tf_distributed_port', '2222',
'The port to use in TensorFlow distributed job')
flags.DEFINE_enum('tf_precision', FP32, [FP16, FP32],
'Use 16-bit floats for certain tensors instead of 32-bit '
'floats. This is currently experimental.')
flags.DEFINE_boolean('tf_use_local_data', False, 'Whether to use data from '
                     'local machine. If false, the benchmark will use data '
                     'from cloud storage (GCS, S3, etc).')
flags.DEFINE_string('tf_benchmark_args', None,
'Arguments (as a string) to pass to tf_cnn_benchmarks. '
'This can be used to run a benchmark with arbitrary '
'parameters. Arguments will be parsed and added to the '
'sample metadata. For example, '
'--tf_benchmark_args="--nodistortions --optimizer=sgd '
'will run tf_cnn_benchmarks.py '
'--nodistortions --optimizer=sgd '
'and put the following in the metadata: '
'{\'nodistortions\': \'True\', \'optimizer\': \'sgd\'}. '
'All arguments must be in the form --arg_name=value. '
'If there are GPUs on the VM and no \'num_gpus\' flag in '
'the tf_benchmarks_args flag, the num_gpus flag will '
'automatically be populated with the number of available '
'GPUs.')
def LocalParameterDeviceValidator(value):
if FLAGS.tf_device == CPU:
return value == CPU
return True
flags.register_validator('tf_local_parameter_device',
LocalParameterDeviceValidator)
NVIDIA_TESLA_P4 = nvidia_driver.NVIDIA_TESLA_P4
NVIDIA_TESLA_K80 = nvidia_driver.NVIDIA_TESLA_K80
NVIDIA_TESLA_P100 = nvidia_driver.NVIDIA_TESLA_P100
NVIDIA_TESLA_V100 = nvidia_driver.NVIDIA_TESLA_V100
DEFAULT_BATCH_SIZE = 64
DEFAULT_BATCH_SIZES = {
CPU: {
'alexnet': 512,
'inception3': 64,
'resnet50': 64,
'resnet152': 32,
'vgg16': 32,
},
NVIDIA_TESLA_K80: {
'alexnet': 512,
'inception3': 64,
'resnet50': 64,
'resnet152': 32,
'vgg16': 32,
},
NVIDIA_TESLA_P4: {
'alexnet': 512,
'inception3': 128,
'resnet50': 128,
'resnet152': 64,
'vgg16': 64,
},
NVIDIA_TESLA_P100: {
'alexnet': 512,
'inception3': 256,
'resnet50': 256,
'resnet152': 128,
'vgg16': 128,
},
NVIDIA_TESLA_V100: {
'alexnet': 512,
'inception3': 256,
'resnet50': 256,
'resnet152': 128,
'vgg16': 128,
},
}
DATA_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'imagenet')
class TFParseOutputException(Exception):
pass
class TFParsePsPidException(Exception):
pass
class TFDataDirException(Exception):
pass
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _GetDefaultBatchSizeByModel(model, gpu_type):
"""Return the default batch size for a given model and gpu / cpu type.
If gpu_type is none, it is assumed that the model will be running on the CPU.
If there is no default for the given model and gpu_type, a default batch
size will be returned as defined by DEFAULT_BATCH_SIZE.
Args:
model: name of the Tensorflow model
gpu_type: type of the GPU, or None
Returns:
default batch size for the given model / gpu_type,
or the default batch size.
"""
computation_device = gpu_type or CPU
try:
return DEFAULT_BATCH_SIZES[computation_device][model]
except KeyError:
return DEFAULT_BATCH_SIZE
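# Example: _GetDefaultBatchSizeByModel('vgg16', NVIDIA_TESLA_V100) returns 128,
# while an unlisted model such as 'lenet' falls back to DEFAULT_BATCH_SIZE (64).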
def _GetBatchSizes(model, gpu_type):
"""Return the batch_size flag if specified, or the appropriate default if not.
Args:
model: name of the Tensorflow model
gpu_type: type of the GPU, or None
Returns:
value of the batch_size flag if specified, or the default batch size for the
given model / gpu_type.
"""
return FLAGS.tf_batch_sizes or [_GetDefaultBatchSizeByModel(model, gpu_type)]
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.forward_only = FLAGS.tf_forward_only
benchmark_spec.data_name = FLAGS.tf_data_name
benchmark_spec.data_dir = (DATA_DIR if FLAGS.tf_use_local_data else
FLAGS.tf_data_dir)
benchmark_spec.use_local_data = FLAGS.tf_use_local_data
benchmark_spec.variable_update = FLAGS.tf_variable_update
benchmark_spec.distortions = FLAGS.tf_distortions
benchmark_spec.cnn_benchmarks_branch = FLAGS.tf_cnn_benchmarks_branch
benchmark_spec.tensorflow_cpu_pip_package = FLAGS.tf_cpu_pip_package
benchmark_spec.tensorflow_gpu_pip_package = FLAGS.tf_gpu_pip_package
benchmark_spec.distributed = FLAGS.tf_distributed
benchmark_spec.precision = FLAGS.tf_precision
benchmark_spec.benchmark_args = FLAGS.tf_benchmark_args
def _PrepareVm(vm):
"""Install and set up TensorFlow on the target vm.
The TensorFlow benchmarks are also installed.
A specific branch of the benchmarks cnn_tf_v1.10_compatible which works best
with TensorFlow 1.10 is used and can be overridden with the flag
tf_cnn_benchmarks_branch.
Args:
vm: virtual machine on which to install TensorFlow
"""
if FLAGS.tf_data_dir and FLAGS.tf_use_local_data:
def _DownloadData(num_files, mode):
for i in range(num_files):
filename = '{}-{:05}-of-{:05}'.format(mode, i, num_files)
vm.DownloadPreprovisionedData(DATA_DIR, FLAGS.tf_data_module, filename)
_DownloadData(FLAGS.tf_num_files_train, 'train')
_DownloadData(FLAGS.tf_num_files_val, 'validation')
vm.Install('tensorflow')
vm.InstallPackages('git')
def Prepare(benchmark_spec):
"""Install and set up TensorFlow on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vms = benchmark_spec.vms
vm_util.RunThreaded(_PrepareVm, vms)
benchmark_spec.tensorflow_version = tensorflow.GetTensorFlowVersion(vms[0])
if nvidia_driver.CheckNvidiaGpuExists(vms[0]):
benchmark_spec.gpu_type = nvidia_driver.GetGpuType(vms[0])
def _GetMetadataFromBenchmarkArgs(tf_cnn_benchmark_args):
"""Return a dictionary of arg names and values.
Only supports arguments in the following format:
--arg_name=arg_value
The above string will result in this function returning a dictionary
like so: {'arg_name': 'arg_value'}
Because this and other PKB benchmarks use the 'precision' flag to specify
fp16 or fp32, this function will convert the Tensorflow-specific precision
flag ('use_fp16') to 'precision' to keep results consistent. All other command
line arguments are extracted as is without being renamed.
Args:
tf_cnn_benchmark_args: string. The command line args to parse into a dict.
Returns:
A dictionary mapping argument names to their values.
"""
args = tf_cnn_benchmark_args.split(' ')
args_dict = {arg.split('=')[0].replace('--', ''): arg.split('=')[1]
for arg in args}
if 'use_fp16' in args_dict:
if args_dict['use_fp16'].lower() == 'true':
args_dict['precision'] = FP16
else:
args_dict['precision'] = FP32
return args_dict
def _CreateMetadataDict(benchmark_spec, model, batch_size):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: benchmark spec
model: model which was run
batch_size: batch sized used
Returns:
metadata dict
"""
vm = benchmark_spec.vms[0]
metadata = {}
if nvidia_driver.CheckNvidiaGpuExists(vm):
metadata.update(nvidia_driver.GetMetadata(vm))
metadata['command_line'] = benchmark_spec.tf_cnn_benchmark_cmd
metadata['cnn_benchmarks_branch'] = benchmark_spec.cnn_benchmarks_branch
metadata['tensorflow_version'] = benchmark_spec.tensorflow_version
metadata['tensorflow_cpu_pip_package'] = (
benchmark_spec.tensorflow_cpu_pip_package)
metadata['tensorflow_gpu_pip_package'] = (
benchmark_spec.tensorflow_gpu_pip_package)
# If we ran a custom command-line through the benchmark_args flag,
  # add the metadata from that command and return. We don't need any more
  # metadata from this function as it is likely invalid.
if getattr(benchmark_spec, 'benchmark_args', None):
metadata.update(
_GetMetadataFromBenchmarkArgs(benchmark_spec.benchmark_args))
return metadata
metadata['model'] = model
metadata['batch_size'] = batch_size
metadata['forward_only'] = benchmark_spec.forward_only
metadata['data_name'] = benchmark_spec.data_name
metadata['data_dir'] = benchmark_spec.data_dir
metadata['use_local_data'] = benchmark_spec.use_local_data
metadata['variable_update'] = benchmark_spec.variable_update
metadata['local_parameter_device'] = benchmark_spec.local_parameter_device
metadata['device'] = benchmark_spec.device
metadata['data_format'] = benchmark_spec.data_format
metadata['distortions'] = benchmark_spec.distortions
metadata['distributed'] = benchmark_spec.distributed
metadata['precision'] = benchmark_spec.precision
metadata['num_gpus'] = benchmark_spec.num_gpus
return metadata
def _ExtractThroughput(output):
"""Extract throughput from TensorFlow output.
Args:
output: TensorFlow output
Returns:
    throughput (float)
"""
regex = r'total images/sec: (\S+)'
try:
return regex_util.ExtractFloat(regex, output)
  except Exception:
raise TFParseOutputException('Unable to parse TensorFlow output')
def _ExtractTfParameterServerPid(output):
"""Extract the process identification number from TensorFlow parameter server.
Args:
output: string, Remote command output
Returns:
string, process identification number from TensorFlow parameter server
Raises:
TFParsePsPidException
"""
regex = r'{pid} (\S+)'.format(pid=PID_PREFIX)
try:
return regex_util.ExtractExactlyOneMatch(regex, output)
  except Exception:
raise TFParsePsPidException('Unable to parse process identification number '
'of TensorFlow parameter server from remote '
'command output.')
def _MakeSamplesFromOutput(benchmark_spec, output, model, batch_size):
"""Create a sample containing the measured TensorFlow throughput.
Args:
benchmark_spec: benchmark spec
output: TensorFlow output
model: model which was run
batch_size: batch sized used
Returns:
    a Sample containing the TensorFlow throughput in images per second
"""
metadata = _CreateMetadataDict(benchmark_spec, model, batch_size)
tensorflow_throughput = _ExtractThroughput(output)
return sample.Sample('Training synthetic data', tensorflow_throughput,
'images/sec', metadata)
def _GetTfCnnBenchmarkCommand(vm, model, batch_size, benchmark_spec,
args='', job_name=''):
"""Create the command used to run the tf_cnn_benchmarks script.
The command is either formulated using flag values stored on the
benchmark_spec, or is essentially provided outright through the
benchmark_args flag.
Args:
vm: the VM to run on.
model: name of the model to run.
batch_size: batch size to use for training.
benchmark_spec: the benchmark spec object.
args: string, distributed arguments
job_name: string, distributed job name
Returns:
A string that runs the tf_cnn_benchmarks.py script
with the desired arguments.
"""
num_gpus = (nvidia_driver.QueryNumberOfGpus(vm) if
nvidia_driver.CheckNvidiaGpuExists(vm) else 0)
benchmark_spec.num_gpus = num_gpus
if benchmark_spec.benchmark_args is not None:
cmd = 'python tf_cnn_benchmarks.py ' + benchmark_spec.benchmark_args
# If the user didn't specify num_gpus in the benchmark_args string,
# use all the GPUs on the system.
if '--num_gpus' not in benchmark_spec.benchmark_args and num_gpus:
cmd = '{cmd} --num_gpus={num_gpus}'.format(cmd=cmd, num_gpus=num_gpus)
return cmd
benchmark_spec.local_parameter_device = FLAGS.tf_local_parameter_device
benchmark_spec.device = FLAGS.tf_device
benchmark_spec.data_format = FLAGS.tf_data_format
if num_gpus == 0:
benchmark_spec.local_parameter_device = CPU
benchmark_spec.device = CPU
benchmark_spec.data_format = NHWC
cmd = (
'{env_vars} python tf_cnn_benchmarks.py '
'--local_parameter_device={local_parameter_device} '
'--batch_size={batch_size} '
'--model={model} '
'{data} '
'--data_name={data_name} '
'--variable_update={variable_update} '
'--distortions={distortions} '
'--device={device} '
'--data_format={data_format} '
'--forward_only={forward_only} '
'--use_fp16={use_fp16} '
'{num_gpus} '
'{job_name}'.format(
env_vars=tensorflow.GetEnvironmentVars(vm),
local_parameter_device=benchmark_spec.local_parameter_device,
batch_size=batch_size,
model=model,
data=('--data_dir={}'.format(benchmark_spec.data_dir) if
benchmark_spec.data_dir else ''),
data_name=benchmark_spec.data_name,
variable_update=benchmark_spec.variable_update,
distortions=benchmark_spec.distortions,
device=benchmark_spec.device,
data_format=benchmark_spec.data_format,
forward_only=benchmark_spec.forward_only,
use_fp16=(benchmark_spec.precision == FP16),
num_gpus='--num_gpus={}'.format(num_gpus) if num_gpus else '',
job_name='--job_name={0} {1}'.format(job_name, args) if args else ''))
return cmd
def _RunModelOnVm(vm, model, batch_size, benchmark_spec, args='', job_name=''):
"""Runs a TensorFlow benchmark on a single VM.
Args:
vm: VM to run on
model: string, the name of model to run
batch_size: int, training batch size
benchmark_spec: BenchmarkSpec object
args: string, distributed arguments
job_name: string, distributed job name
Returns:
a Sample containing the TensorFlow throughput or the process
identification number from TensorFlow parameter server.
"""
tf_cnn_benchmark_cmd = _GetTfCnnBenchmarkCommand(
vm, model, batch_size, benchmark_spec, args, job_name)
benchmark_spec.tf_cnn_benchmark_cmd = tf_cnn_benchmark_cmd
tf_cnn_benchmark_dir = 'benchmarks/scripts/tf_cnn_benchmarks'
run_command = 'cd {path} ; {cmd}'.format(path=tf_cnn_benchmark_dir,
cmd=tf_cnn_benchmark_cmd)
output, _ = vm.RobustRemoteCommand(run_command, should_log=True)
if job_name == 'ps':
return _ExtractTfParameterServerPid(output)
else:
return _MakeSamplesFromOutput(benchmark_spec, output, model, batch_size)
def _RunOnVm(vm, benchmark_spec):
"""Runs a TensorFlow benchmark on a single VM.
Args:
vm: VM to run on
benchmark_spec: benchmark_spec object
Returns:
A list of samples containing the TensorFlow throughput from different models
"""
samples = []
if FLAGS.tf_benchmark_args:
return [_RunModelOnVm(vm, None, None, benchmark_spec)]
gpu_type = getattr(benchmark_spec, 'gpu_type', None)
for model in FLAGS.tf_models:
for batch_size in _GetBatchSizes(model, gpu_type):
samples.append(_RunModelOnVm(vm, model, batch_size, benchmark_spec))
return samples
def _GetHostsArgs(hosts):
return ','.join('{ip}:{port}'.format(ip=vm.internal_ip,
port=FLAGS.tf_distributed_port)
for vm in hosts)
def _RunDistributedTf(benchmark_spec):
"""Run distributed TensorFlow for each model specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
ps_hosts = benchmark_spec.vm_groups['parameter_server_hosts']
worker_hosts = benchmark_spec.vm_groups['worker_hosts']
dist_args = '--ps_hosts={ps_args} --worker_hosts={worker_args}'.format(
ps_args=_GetHostsArgs(ps_hosts), worker_args=_GetHostsArgs(worker_hosts))
flattened_results = []
vm_pid = collections.namedtuple('vm_pid', 'vm pid')
gpu_type = getattr(benchmark_spec, 'gpu_type', None)
for model in FLAGS.tf_models:
for batch_size in _GetBatchSizes(model, gpu_type):
ps_pids = []
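      # Launch each parameter server in the background and record its PID so it
      # can be killed once the worker runs for this model/batch size finish.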
for task_index, vm in enumerate(ps_hosts):
dist_ps_args = ('{args} --task_index={index} &\n'
'echo {pid} $!').format(args=dist_args,
index=task_index,
pid=PID_PREFIX)
pid = _RunModelOnVm(vm, model, batch_size, benchmark_spec, dist_ps_args,
'ps')
ps_pids.append(vm_pid(vm=vm, pid=pid))
args = []
for task_index, vm in enumerate(worker_hosts):
dist_worker_args = ('{args} --job_name=worker '
'--task_index={index}').format(args=dist_args,
index=task_index)
args.append(((vm, model, batch_size, benchmark_spec, dist_worker_args,
'worker'), {}))
result = vm_util.RunThreaded(_RunModelOnVm, args)
for ps_pid in ps_pids:
ps_pid.vm.RemoteCommand('kill -9 %s' % ps_pid.pid)
flattened_results.extend(vm_result for vm_result in result)
return flattened_results
def _RunTf(benchmark_spec):
"""Run TensorFlow for each model specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
args = [((vm, benchmark_spec), {}) for vm in vms]
run_results = vm_util.RunThreaded(_RunOnVm, args)
# Add vm index to results metadata
for idx, vm_result in enumerate(run_results):
for result_sample in vm_result:
result_sample.metadata['vm_index'] = idx
# Flatten the list
return [samples for vm_results in run_results for samples in vm_results]
def Run(benchmark_spec):
"""Run TensorFlow on the cluster for each model specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
if benchmark_spec.distributed:
return _RunDistributedTf(benchmark_spec)
else:
return _RunTf(benchmark_spec)
def Cleanup(unused_benchmark_spec):
"""Cleanup TensorFlow on the cluster."""
pass
|
from elephas.java import java_classes
from elephas.dl4j import ParameterAveragingModel
from elephas.utils import rdd_utils
import keras
from keras.utils import np_utils
def main():
# Set Java Spark context
conf = java_classes.SparkConf().setMaster('local[*]').setAppName("elephas_dl4j")
jsc = java_classes.JavaSparkContext(conf)
# Define Keras model
model = keras.models.Sequential()
model.add(keras.layers.Dense(128, input_dim=784))
model.add(keras.layers.Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# Define DL4J Elephas model
spark_model = ParameterAveragingModel(java_spark_context=jsc, model=model, num_workers=4, batch_size=32)
# Load data and build DL4J DataSet RDD under the hood
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float64")
x_test = x_test.astype("float64")
# Convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
y_train = y_train.astype("float64")
y_test = y_test.astype("float64")
x_train /= 255
x_test /= 255
java_rdd = rdd_utils.to_java_rdd(jsc, x_train, y_train, 32)
import timeit
start = timeit.default_timer()
# Fit model
spark_model.fit_rdd(java_rdd, 2)
stop = timeit.default_timer()
print('Time: ', stop - start)
# Retrieve resulting weights from training, set to original Keras model, evaluate.
keras_model = spark_model.get_keras_model()
score = keras_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
import os
if os.path.exists("temp.h5"):
os.remove("temp.h5")
if __name__ == '__main__':
main()
|
import chainer
from chainer.backends import cuda
from chainercv import transforms
class YOLOBase(chainer.Chain):
"""Base class for YOLOv2 and YOLOv3.
A subclass of this class should have :obj:`extractor`,
:meth:`forward`, and :meth:`_decode`.
"""
@property
def insize(self):
return self.extractor.insize
def use_preset(self, preset):
"""Use the given preset during prediction.
This method changes values of :obj:`nms_thresh` and
:obj:`score_thresh`. These values are a threshold value
used for non-maximum suppression and a threshold value
to discard low-confidence proposals in :meth:`predict`,
respectively.
If the attributes need to be changed to something
other than the values provided in the presets, please modify
them by directly accessing the public attributes.
Args:
preset ({'visualize', 'evaluate'}): A string to determine the
preset to use.
"""
if preset == 'visualize':
self.nms_thresh = 0.45
self.score_thresh = 0.5
elif preset == 'evaluate':
self.nms_thresh = 0.45
self.score_thresh = 0.005
else:
raise ValueError('preset must be visualize or evaluate')
def predict(self, imgs):
"""Detect objects from images.
This method predicts objects for each image.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images.
All images are in CHW and RGB format
and the range of their value is :math:`[0, 255]`.
Returns:
tuple of lists:
This method returns a tuple of three lists,
:obj:`(bboxes, labels, scores)`.
* **bboxes**: A list of float arrays of shape :math:`(R, 4)`, \
where :math:`R` is the number of bounding boxes in an image. \
Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **labels** : A list of integer arrays of shape :math:`(R,)`. \
Each value indicates the class of the bounding box. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
x = []
params = []
for img in imgs:
_, H, W = img.shape
img, param = transforms.resize_contain(
img / 255, (self.insize, self.insize), fill=0.5,
return_param=True)
x.append(self.xp.array(img))
param['size'] = (H, W)
params.append(param)
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
locs, objs, confs = self.forward(self.xp.stack(x))
locs = locs.array
objs = objs.array
confs = confs.array
bboxes = []
labels = []
scores = []
for loc, obj, conf, param in zip(locs, objs, confs, params):
bbox, label, score = self._decode(loc, obj, conf)
bbox = cuda.to_cpu(bbox)
label = cuda.to_cpu(label)
score = cuda.to_cpu(score)
bbox = transforms.translate_bbox(
bbox, -self.insize / 2, -self.insize / 2)
bbox = transforms.resize_bbox(
bbox, param['scaled_size'], param['size'])
bbox = transforms.translate_bbox(
bbox, param['size'][0] / 2, param['size'][1] / 2)
bboxes.append(bbox)
labels.append(label)
scores.append(score)
return bboxes, labels, scores
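# Hedged usage sketch (added for illustration; YOLOv2 and read_image live elsewhere in
# chainercv, and the image path is an assumption, not something defined in this file):
#
#     from chainercv.links import YOLOv2
#     from chainercv.utils import read_image
#
#     model = YOLOv2(pretrained_model='voc0712')
#     model.use_preset('visualize')
#     img = read_image('sample.jpg')  # CHW, RGB, values in [0, 255]
#     bboxes, labels, scores = model.predict([img])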
|
import paramiko
import StringIO
def is_log_success(log):
for x in ['fatal', 'fail', 'error']:
if log.startswith(x) or log.endswith(x):
return False
return True
# ssh to exec cmd
def do_ssh_cmd(ip, port, account, pkey, shell, push_data='', timeout=300):
try:
port = int(port)
except:
port = 22
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
# First, try to log in with the SSH private key
pkey_file = StringIO.StringIO(pkey.strip() + '\n')  # note: a trailing newline is required
private_key = paramiko.RSAKey.from_private_key(pkey_file)
s.connect(ip, port, account, pkey=private_key, timeout=5)
pkey_file.close()
except:
# If that raises an exception, fall back to username/password login
s.connect(ip, port, account, password=pkey, timeout=5)
# if push_data:
# shell = shell + (" '%s'" % push_data)
shell = shell.split('\n')
shell = [sh for sh in shell if sh.strip()]
shell = ' && '.join(shell)
stdin, stdout, stderr = s.exec_command(shell, timeout=timeout)
log = stdout.read()
err = stderr.read()
success = True
if not log and err:
success = False
log = err
s.close()
if success:
success = is_log_success(log)
return success, log
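# Hedged usage sketch (added for illustration; the host, account, key material and the
# shell snippet below are assumptions, not values used by this module). The same `pkey`
# argument doubles as the password when key-based login fails:
#
#     success, log = do_ssh_cmd('192.0.2.10', 22, 'deploy', PRIVATE_KEY_OR_PASSWORD,
#                               'cd /srv/app\ngit pull\nservice app restart')
#     if not success:
#         print('remote command failed: %s' % log)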
|
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.lock import LockEntity
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
ST_STATE_LOCKED = "locked"
ST_LOCK_ATTR_MAP = {
"codeId": "code_id",
"codeName": "code_name",
"lockName": "lock_name",
"method": "method",
"timeout": "timeout",
"usedCode": "used_code",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add locks for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsLock(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "lock")
]
)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
if Capability.lock in capabilities:
return [Capability.lock]
return None
class SmartThingsLock(SmartThingsEntity, LockEntity):
"""Define a SmartThings lock."""
async def async_lock(self, **kwargs):
"""Lock the device."""
await self._device.lock(set_status=True)
self.async_write_ha_state()
async def async_unlock(self, **kwargs):
"""Unlock the device."""
await self._device.unlock(set_status=True)
self.async_write_ha_state()
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._device.status.lock == ST_STATE_LOCKED
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
state_attrs = {}
status = self._device.status.attributes[Attribute.lock]
if status.value:
state_attrs["lock_state"] = status.value
if isinstance(status.data, dict):
for st_attr, ha_attr in ST_LOCK_ATTR_MAP.items():
data_val = status.data.get(st_attr)
if data_val is not None:
state_attrs[ha_attr] = data_val
return state_attrs
|
from sklearn.pipeline import Pipeline
class ScikitSupervisedCompactor(object):
def __init__(self, pipeline):
'''
Parameters
----------
pipeline : Pipeline
sklearn.pipeline.Pipeline instance
'''
self.pipeline = pipeline
def compact(self, term_doc_matrix, non_text=False):
'''
Parameters
----------
term_doc_matrix : TermDocMatrix
Term document matrix object to compact
non_text : bool
Use non-text features instead of terms
Returns
-------
New term doc matrix
'''
return term_doc_matrix.remove_terms_by_indices(self._indices_to_compact(term_doc_matrix, non_text), non_text)
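# Note (added for clarity): compact() delegates to self._indices_to_compact(term_doc_matrix,
# non_text), which is not defined in this excerpt; it is expected to be supplied elsewhere
# (for example by the full class or a subclass) and to return the indices of the terms that
# the fitted sklearn pipeline marks for removal. Any sketch of that hook would fit
# self.pipeline on the matrix's term counts and category labels (accessor names depend on
# the TermDocMatrix API) and return the indices whose features are not selected.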
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.flags import _validators
from absl.testing import absltest
import mock
class NumericFlagBoundsTest(absltest.TestCase):
def setUp(self):
super(NumericFlagBoundsTest, self).setUp()
self.flag_values = flags.FlagValues()
def test_no_validator_if_no_bounds(self):
"""Validator is not registered if lower and upper bound are None."""
with mock.patch.object(_validators, 'register_validator'
) as register_validator:
flags.DEFINE_integer('positive_flag', None, 'positive int',
lower_bound=0, flag_values=self.flag_values)
register_validator.assert_called_once_with(
'positive_flag', mock.ANY, flag_values=self.flag_values)
with mock.patch.object(_validators, 'register_validator'
) as register_validator:
flags.DEFINE_integer('int_flag', None, 'just int',
flag_values=self.flag_values)
register_validator.assert_not_called()
def test_success(self):
flags.DEFINE_integer('int_flag', 5, 'Just integer',
flag_values=self.flag_values)
argv = ('./program', '--int_flag=13')
self.flag_values(argv)
self.assertEqual(13, self.flag_values.int_flag)
self.flag_values.int_flag = 25
self.assertEqual(25, self.flag_values.int_flag)
def test_success_if_none(self):
flags.DEFINE_integer('int_flag', None, '',
lower_bound=0, upper_bound=5,
flag_values=self.flag_values)
argv = ('./program',)
self.flag_values(argv)
self.assertIsNone(self.flag_values.int_flag)
def test_success_if_exactly_equals(self):
flags.DEFINE_float('float_flag', None, '',
lower_bound=1, upper_bound=1,
flag_values=self.flag_values)
argv = ('./program', '--float_flag=1')
self.flag_values(argv)
self.assertEqual(1, self.flag_values.float_flag)
def test_exception_if_smaller(self):
flags.DEFINE_integer('int_flag', None, '',
lower_bound=0, upper_bound=5,
flag_values=self.flag_values)
argv = ('./program', '--int_flag=-1')
try:
self.flag_values(argv)
except flags.IllegalFlagValueError as e:
text = 'flag --int_flag=-1: -1 is not an integer in the range [0, 5]'
self.assertEqual(text, str(e))
class SettingFlagAfterStartTest(absltest.TestCase):
def setUp(self):
self.flag_values = flags.FlagValues()
def test_success(self):
flags.DEFINE_integer('int_flag', None, 'Just integer',
flag_values=self.flag_values)
argv = ('./program', '--int_flag=13')
self.flag_values(argv)
self.assertEqual(13, self.flag_values.int_flag)
self.flag_values.int_flag = 25
self.assertEqual(25, self.flag_values.int_flag)
def test_exception_if_setting_integer_flag_outside_bounds(self):
flags.DEFINE_integer('int_flag', None, 'Just integer', lower_bound=0,
flag_values=self.flag_values)
argv = ('./program', '--int_flag=13')
self.flag_values(argv)
self.assertEqual(13, self.flag_values.int_flag)
with self.assertRaises(flags.IllegalFlagValueError):
self.flag_values.int_flag = -2
if __name__ == '__main__':
absltest.main()
|
import configparser
import logging
import os
import click
from twtxt.models import Source
logger = logging.getLogger(__name__)
class Config:
""":class:`Config` interacts with the configuration file.
:param str config_file: full path to the loaded config file
:param ~configparser.ConfigParser cfg: a :class:`~configparser.ConfigParser` object with config loaded
"""
config_dir = click.get_app_dir("twtxt")
config_name = "config"
def __init__(self, config_file, cfg):
self.config_file = config_file
self.cfg = cfg
@classmethod
def from_file(cls, file):
"""Try loading given config file.
:param str file: full path to the config file to load
"""
if not os.path.exists(file):
raise ValueError("Config file not found.")
try:
config_parser = configparser.ConfigParser()
config_parser.read(file)
configuration = cls(file, config_parser)
if not configuration.check_config_sanity():
raise ValueError("Error in config file.")
else:
return configuration
except configparser.Error:
raise ValueError("Config file is invalid.")
@classmethod
def discover(cls):
"""Make a guess about the config file location an try loading it."""
file = os.path.join(Config.config_dir, Config.config_name)
return cls.from_file(file)
@classmethod
def create_config(cls, cfgfile, nick, twtfile, twturl, disclose_identity, add_news):
"""Create a new config file at the default location.
:param str cfgfile: path to the config file
:param str nick: nickname to use for own tweets
:param str twtfile: path to the local twtxt file
:param str twturl: URL to the remote twtxt file
:param bool disclose_identity: if true, the user's identity will be disclosed
:param bool add_news: if true, follow the twtxt news feed
"""
cfgfile_dir = os.path.dirname(cfgfile)
if not os.path.exists(cfgfile_dir):
os.makedirs(cfgfile_dir)
cfg = configparser.ConfigParser()
cfg.add_section("twtxt")
cfg.set("twtxt", "nick", nick)
cfg.set("twtxt", "twtfile", twtfile)
cfg.set("twtxt", "twturl", twturl)
cfg.set("twtxt", "disclose_identity", str(disclose_identity))
cfg.set("twtxt", "character_limit", "140")
cfg.set("twtxt", "character_warning", "140")
cfg.add_section("following")
if add_news:
cfg.set("following", "twtxt", "https://buckket.org/twtxt_news.txt")
conf = cls(cfgfile, cfg)
conf.write_config()
return conf
def write_config(self):
"""Writes `self.cfg` to `self.config_file`."""
with open(self.config_file, "w") as config_file:
self.cfg.write(config_file)
@property
def following(self):
"""A :class:`list` of all :class:`Source` objects."""
following = []
try:
for (nick, url) in self.cfg.items("following"):
source = Source(nick, url)
following.append(source)
except configparser.NoSectionError as e:
logger.debug(e)
return following
@property
def options(self):
"""A :class:`dict` of all config options."""
try:
return dict(self.cfg.items("twtxt"))
except configparser.NoSectionError as e:
logger.debug(e)
return {}
@property
def nick(self):
return self.cfg.get("twtxt", "nick", fallback=os.environ.get("USER", "").lower())
@property
def twtfile(self):
return os.path.expanduser(self.cfg.get("twtxt", "twtfile", fallback="twtxt.txt"))
@property
def twturl(self):
return self.cfg.get("twtxt", "twturl", fallback=None)
@property
def check_following(self):
return self.cfg.getboolean("twtxt", "check_following", fallback=True)
@property
def use_pager(self):
return self.cfg.getboolean("twtxt", "use_pager", fallback=False)
@property
def use_cache(self):
return self.cfg.getboolean("twtxt", "use_cache", fallback=True)
@property
def porcelain(self):
return self.cfg.getboolean("twtxt", "porcelain", fallback=False)
@property
def disclose_identity(self):
return self.cfg.getboolean("twtxt", "disclose_identity", fallback=False)
@property
def character_limit(self):
return self.cfg.getint("twtxt", "character_limit", fallback=None)
@property
def character_warning(self):
return self.cfg.getint("twtxt", "character_warning", fallback=None)
@property
def limit_timeline(self):
return self.cfg.getint("twtxt", "limit_timeline", fallback=20)
@property
def timeline_update_interval(self):
return self.cfg.getint("twtxt", "timeline_update_interval", fallback=10)
@property
def use_abs_time(self):
return self.cfg.getboolean("twtxt", "use_abs_time", fallback=False)
@property
def timeout(self):
return self.cfg.getfloat("twtxt", "timeout", fallback=5.0)
@property
def sorting(self):
return self.cfg.get("twtxt", "sorting", fallback="descending")
@property
def source(self):
return Source(self.nick, self.twturl)
@property
def pre_tweet_hook(self):
return self.cfg.get("twtxt", "pre_tweet_hook", fallback=None)
@property
def post_tweet_hook(self):
return self.cfg.get("twtxt", "post_tweet_hook", fallback=None)
def add_source(self, source):
"""Adds a new :class:`Source` to the config’s following section."""
if not self.cfg.has_section("following"):
self.cfg.add_section("following")
self.cfg.set("following", source.nick, source.url)
self.write_config()
def get_source_by_nick(self, nick):
"""Returns the :class:`Source` of the given nick.
:param str nick: nickname for which will be searched in the config
"""
url = self.cfg.get("following", nick, fallback=None)
return Source(nick, url) if url else None
def remove_source_by_nick(self, nick):
"""Removes a :class:`Source` form the config’s following section.
:param str nick: nickname for which will be searched in the config
"""
if not self.cfg.has_section("following"):
return False
ret_val = self.cfg.remove_option("following", nick)
self.write_config()
return ret_val
def build_default_map(self):
"""Maps config options to the default values used by click, returns :class:`dict`."""
default_map = {
"following": {
"check": self.check_following,
"timeout": self.timeout,
"porcelain": self.porcelain,
},
"tweet": {
"twtfile": self.twtfile,
},
"timeline": {
"pager": self.use_pager,
"cache": self.use_cache,
"limit": self.limit_timeline,
"timeout": self.timeout,
"sorting": self.sorting,
"porcelain": self.porcelain,
"twtfile": self.twtfile,
"update_interval": self.timeline_update_interval,
},
"view": {
"pager": self.use_pager,
"cache": self.use_cache,
"limit": self.limit_timeline,
"timeout": self.timeout,
"sorting": self.sorting,
"porcelain": self.porcelain,
"update_interval": self.timeline_update_interval,
}
}
return default_map
def check_config_sanity(self):
"""Checks if the given values in the config file are sane."""
is_sane = True
# This also picks up some properties that cannot meaningfully be checked,
# like "nick", but it is still better than hard-coding the property names
# as string literals.
properties = [property_name for property_name, obj
in self.__class__.__dict__.items()
if isinstance(obj, property)]
for property_name in properties:
try:
getattr(self, property_name)
except ValueError as e:
click.echo("✗ Config error on {0} - {1}".format(property_name, e))
is_sane = False
return is_sane
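# Hedged usage sketch (added for illustration; the nick, paths and URL are assumptions):
#
#     conf = Config.create_config('/home/alice/.config/twtxt/config', 'alice',
#                                 '~/twtxt.txt', 'https://example.org/twtxt.txt',
#                                 disclose_identity=False, add_news=True)
#     conf = Config.discover()  # or load the file from the default location
#     for source in conf.following:
#         print(source.nick, source.url)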
|
import os
from perfkitbenchmarker import linux_packages
URL = 'https://www.kernel.org/pub/linux/kernel/v4.x/linux-4.4.25.tar.gz'
TARBALL = 'linux-4.4.25.tar.gz'
UNTAR_DIR = 'linux-4.4.25'
KERNEL_TARBALL = os.path.join(linux_packages.INSTALL_DIR, TARBALL)
def _Install(vm):
vm.Install('build_tools')
vm.Install('wget')
vm.InstallPackages('bc')
vm.RemoteCommand('mkdir -p {0} && '
'cd {0} && wget {1}'.format(linux_packages.INSTALL_DIR, URL))
def AptInstall(vm):
_Install(vm)
def YumInstall(vm):
_Install(vm)
def Cleanup(vm):
vm.RemoteCommand('cd {} && rm -f {}'.format(linux_packages.INSTALL_DIR,
TARBALL))
|
from math import pi
from pygal.adapters import none_to_zero, positive
from pygal.graph.graph import Graph
from pygal.util import alter, decorate
class Pie(Graph):
"""Pie graph class"""
_adapters = [positive, none_to_zero]
def slice(self, serie, start_angle, total):
"""Make a serie slice"""
serie_node = self.svg.serie(serie)
dual = self._len > 1 and not self._order == 1
slices = self.svg.node(serie_node['plot'], class_="slices")
serie_angle = 0
original_start_angle = start_angle
if self.half_pie:
center = ((self.width - self.margin_box.x) / 2.,
(self.height - self.margin_box.y) / 1.25)
else:
center = ((self.width - self.margin_box.x) / 2.,
(self.height - self.margin_box.y) / 2.)
radius = min(center)
for i, val in enumerate(serie.values):
perc = val / total
if self.half_pie:
angle = 2 * pi * perc / 2
else:
angle = 2 * pi * perc
serie_angle += angle
val = self._format(serie, i)
metadata = serie.metadata.get(i)
slice_ = decorate(
self.svg, self.svg.node(slices, class_="slice"), metadata
)
if dual:
small_radius = radius * .9
big_radius = radius
else:
big_radius = radius * .9
small_radius = radius * serie.inner_radius
alter(
self.svg.slice(
serie_node, slice_, big_radius, small_radius, angle,
start_angle, center, val, i, metadata
), metadata
)
start_angle += angle
if dual:
val = self._serie_format(serie, sum(serie.values))
self.svg.slice(
serie_node, self.svg.node(slices,
class_="big_slice"), radius * .9, 0,
serie_angle, original_start_angle, center, val, i, metadata
)
return serie_angle
def _compute_x_labels(self):
pass
def _compute_y_labels(self):
pass
def _plot(self):
"""Draw all the serie slices"""
total = sum(map(sum, map(lambda x: x.values, self.series)))
if total == 0:
return
if self.half_pie:
current_angle = 3 * pi / 2
else:
current_angle = 0
for index, serie in enumerate(self.series):
angle = self.slice(serie, current_angle, total)
current_angle += angle
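# Hedged usage sketch (added for illustration; the values are made up, and the public
# pygal.Pie entry point shown below ultimately renders through the class above):
#
#     import pygal
#     chart = pygal.Pie(inner_radius=.4)  # inner_radius > 0 draws a donut
#     chart.title = 'Browser usage'
#     chart.add('Chrome', 63.1)
#     chart.add('Firefox', 16.2)
#     chart.add('Other', 20.7)
#     chart.render_to_file('pie.svg')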
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from sidekiqweb import SidekiqWebCollector
##########################################################################
class TestSidekiqWebCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SidekiqWebCollector', {
'interval': 10
})
self.collector = SidekiqWebCollector(config, None)
def test_import(self):
self.assertTrue(SidekiqWebCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'redis.connected_clients': 22,
'redis.uptime_in_days': 62,
'redis.used_memory_human_byte': 1426063.36,
'redis.used_memory_peak_human_byte': 8598323.2,
'sidekiq.busy': 0,
'sidekiq.default_latency': 0,
'sidekiq.enqueued': 0,
'sidekiq.failed': 22,
'sidekiq.processed': 4622701,
'sidekiq.retries': 0,
'sidekiq.scheduled': 30,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats_blank')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from datetime import datetime as dt
import pandas as pd
import pytest
import six
from pandas import DataFrame, MultiIndex
from pandas.util.testing import assert_frame_equal
from arctic.chunkstore.date_chunker import DateChunker
from arctic.date import DateRange
def test_date_filter():
c = DateChunker()
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
# OPEN - CLOSED
assert_frame_equal(c.filter(df, DateRange(None, dt(2016, 1, 3))), df)
# CLOSED - OPEN
assert_frame_equal(c.filter(df, DateRange(dt(2016, 1, 1), None)), df)
# OPEN - OPEN
assert_frame_equal(c.filter(df, DateRange(None, None)), df)
# CLOSED - OPEN (far before data range)
assert_frame_equal(c.filter(df, DateRange(dt(2000, 1, 1), None)), df)
# CLOSED - OPEN (far after range)
assert(c.filter(df, DateRange(dt(2020, 1, 2), None)).empty)
# OPEN - CLOSED
assert_frame_equal(c.filter(df, DateRange(None, dt(2020, 1, 1))), df)
# CLOSED - CLOSED (after range)
assert(c.filter(df, DateRange(dt(2017, 1, 1), dt(2018, 1, 1))).empty)
def test_date_filter_no_index():
c = DateChunker()
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]
}
)
# OPEN - CLOSED
assert_frame_equal(c.filter(df, DateRange(None, dt(2016, 1, 3))), df)
# CLOSED - OPEN
assert_frame_equal(c.filter(df, DateRange(dt(2016, 1, 1), None)), df)
# OPEN - OPEN
assert_frame_equal(c.filter(df, DateRange(None, None)), df)
# CLOSED - OPEN (far before data range)
assert_frame_equal(c.filter(df, DateRange(dt(2000, 1, 1), None)), df)
# CLOSED - OPEN (far after range)
assert(c.filter(df, DateRange(dt(2020, 1, 2), None)).empty)
# OPEN - CLOSED
assert_frame_equal(c.filter(df, DateRange(None, dt(2020, 1, 1))), df)
# CLOSED - CLOSED (after range)
assert(c.filter(df, DateRange(dt(2017, 1, 1), dt(2018, 1, 1))).empty)
def test_date_filter_with_pd_date_range():
c = DateChunker()
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
assert(c.filter(df, pd.date_range(dt(2017, 1, 1), dt(2018, 1, 1))).empty)
assert_frame_equal(c.filter(df, pd.date_range(dt(2016, 1, 1), dt(2017, 1, 1))), df)
def test_to_chunks_exceptions():
df = DataFrame(data={'data': [1, 2, 3]})
c = DateChunker()
with pytest.raises(Exception) as e:
six.next(c.to_chunks(df, 'D'))
assert('datetime indexed' in str(e.value))
df.columns = ['date']
with pytest.raises(Exception) as e:
six.next(c.to_chunks(df, 'ZSDFG'))
assert('Unknown freqstr' in str(e.value) or 'Invalid frequency' in str(e.value))
def test_exclude():
c = DateChunker()
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
df2 = DataFrame(data={'data': [1, 2, 3]})
assert(c.exclude(df, DateRange(dt(2016, 1, 1), dt(2016, 1, 1))).equals(c.exclude(df, pd.date_range(dt(2016, 1, 1), dt(2016, 1, 1)))))
assert(c.exclude(df2, None).equals(df2))
def test_exclude_no_index():
c = DateChunker()
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]
}
)
df2 = DataFrame(data={'data': [1, 2, 3]})
assert(c.exclude(df, DateRange(dt(2016, 1, 1), dt(2016, 1, 1))).equals(c.exclude(df, pd.date_range(dt(2016, 1, 1), dt(2016, 1, 1)))))
assert(c.exclude(df2, None).equals(df2))
def test_with_tuples():
c = DateChunker()
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]
}
)
# OPEN - CLOSED
assert_frame_equal(c.filter(df, (None, dt(2016, 1, 3))), df)
# CLOSED - OPEN
assert_frame_equal(c.filter(df, (dt(2016, 1, 1), None)), df)
# OPEN - OPEN
assert_frame_equal(c.filter(df, (None, None)), df)
# CLOSED - OPEN (far before data range)
assert_frame_equal(c.filter(df, (dt(2000, 1, 1), None)), df)
# CLOSED - OPEN (far after range)
assert(c.filter(df, (dt(2020, 1, 2), None)).empty)
# OPEN - CLOSED
assert_frame_equal(c.filter(df, (None, dt(2020, 1, 1))), df)
# CLOSED - CLOSED (after range)
assert(c.filter(df, (dt(2017, 1, 1), dt(2018, 1, 1))).empty)
|
from io import BytesIO
from urllib.error import HTTPError
from urllib.parse import urlsplit
from xmlrpc.client import ServerProxy
from bs4 import BeautifulSoup
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
import django_comments as comments
from zinnia import url_shortener as shortener_settings
from zinnia.flags import PINGBACK
from zinnia.flags import get_user_flagger
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.signals import connect_discussion_signals
from zinnia.signals import disconnect_discussion_signals
from zinnia.signals import disconnect_entry_signals
from zinnia.tests.utils import TestTransport
from zinnia.tests.utils import datetime
from zinnia.tests.utils import skip_if_custom_user
from zinnia.xmlrpc.pingback import generate_pingback_content
@skip_if_custom_user
@override_settings(
ROOT_URLCONF='zinnia.tests.implementations.urls.default',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
'zinnia.tests.utils.EntryDetailLoader',
]
}
}
],
)
class PingBackTestCase(TestCase):
"""Test cases for pingbacks"""
def fake_urlopen(self, url):
"""Fake urlopen using client if domain
correspond to current_site else HTTPError"""
scheme, netloc, path, query, fragment = urlsplit(url)
if not netloc:
raise
if self.site.domain == netloc:
response = BytesIO(self.client.get(url).content)
return response
raise HTTPError(url, 404, 'unavailable url', {}, None)
def setUp(self):
disconnect_entry_signals()
disconnect_discussion_signals()
# Clear the cache of user flagger to avoid error on MySQL
get_user_flagger.cache_clear()
# Use default URL shortener backend, to avoid network errors
self.original_shortener = shortener_settings.URL_SHORTENER_BACKEND
shortener_settings.URL_SHORTENER_BACKEND = 'zinnia.url_shortener.'\
'backends.default'
# Set up a stub around urlopen
import zinnia.xmlrpc.pingback
self.original_urlopen = zinnia.xmlrpc.pingback.urlopen
zinnia.xmlrpc.pingback.urlopen = self.fake_urlopen
# Set up a stub around zinnia.spam_checker
import zinnia.spam_checker
self.original_scb = zinnia.spam_checker.SPAM_CHECKER_BACKENDS
zinnia.spam_checker.SPAM_CHECKER_BACKENDS = []
# Preparing site
self.site = Site.objects.get_current()
# Creating test entries
self.author = Author.objects.create_user(username='webmaster',
email='[email protected]')
self.category = Category.objects.create(title='test', slug='test')
params = {'title': 'My first entry',
'content': 'My first content',
'slug': 'my-first-entry',
'publication_date': datetime(2010, 1, 1, 12),
'status': PUBLISHED}
self.first_entry = Entry.objects.create(**params)
self.first_entry.sites.add(self.site)
self.first_entry.categories.add(self.category)
self.first_entry.authors.add(self.author)
params = {'title': 'My second entry',
'content': 'My second content with link '
'to <a href="http://%s%s">first entry</a>'
' and other links : %s %s.' % (
self.site.domain,
self.first_entry.get_absolute_url(),
'http://example.com/error-404/',
'http://external/'),
'slug': 'my-second-entry',
'publication_date': datetime(2010, 1, 1, 12),
'status': PUBLISHED}
self.second_entry = Entry.objects.create(**params)
self.second_entry.sites.add(self.site)
self.second_entry.categories.add(self.category)
self.second_entry.authors.add(self.author)
# Instantiating the server proxy
self.server = ServerProxy('http://example.com/xmlrpc/',
transport=TestTransport())
def tearDown(self):
import zinnia.xmlrpc.pingback
zinnia.xmlrpc.pingback.urlopen = self.original_urlopen
shortener_settings.URL_SHORTENER_BACKEND = self.original_shortener
import zinnia.spam_checker
zinnia.spam_checker.SPAM_CHECKER_BACKENDS = self.original_scb
def test_generate_pingback_content(self):
soup = BeautifulSoup(self.second_entry.content, 'html.parser')
target = 'http://%s%s' % (self.site.domain,
self.first_entry.get_absolute_url())
self.assertEqual(
generate_pingback_content(soup, target, 1000),
'My second content with link to first entry and other links : '
'http://example.com/error-404/ http://external/.')
self.assertEqual(
generate_pingback_content(soup, target, 50),
'...ond content with link to first entry and other lin...')
soup = BeautifulSoup('<a href="%s">test link</a>' % target,
'html.parser')
self.assertEqual(
generate_pingback_content(soup, target, 6), 'test l...')
soup = BeautifulSoup('test <a href="%s">link</a>' % target,
'html.parser')
self.assertEqual(
generate_pingback_content(soup, target, 8), '...est link')
self.assertEqual(
generate_pingback_content(soup, target, 9), 'test link')
def test_pingback_ping(self):
target = 'http://%s%s' % (
self.site.domain, self.first_entry.get_absolute_url())
source = 'http://%s%s' % (
self.site.domain, self.second_entry.get_absolute_url())
# Error code 0 : A generic fault code
response = self.server.pingback.ping('toto', 'titi')
self.assertEqual(response, 0)
response = self.server.pingback.ping('http://%s/' % self.site.domain,
'http://%s/' % self.site.domain)
self.assertEqual(response, 0)
# Error code 16 : The source URI does not exist.
response = self.server.pingback.ping('http://external/', target)
self.assertEqual(response, 16)
# Error code 17 : The source URI does not contain a link to
# the target URI and so cannot be used as a source.
response = self.server.pingback.ping(source, 'toto')
self.assertEqual(response, 17)
# Error code 32 : The target URI does not exist.
response = self.server.pingback.ping(
source, 'http://example.com/error-404/')
self.assertEqual(response, 32)
response = self.server.pingback.ping(source, 'http://external/')
self.assertEqual(response, 32)
# Error code 33 : The target URI cannot be used as a target.
response = self.server.pingback.ping(source, 'http://example.com/')
self.assertEqual(response, 33)
self.first_entry.pingback_enabled = False
self.first_entry.save()
response = self.server.pingback.ping(source, target)
self.assertEqual(response, 33)
# Validate pingback
self.assertEqual(self.first_entry.pingback_count, 0)
self.first_entry.pingback_enabled = True
self.first_entry.save()
connect_discussion_signals()
response = self.server.pingback.ping(source, target)
disconnect_discussion_signals()
self.assertEqual(
response,
'Pingback from %s to %s registered.' % (source, target))
first_entry_reloaded = Entry.objects.get(pk=self.first_entry.pk)
self.assertEqual(first_entry_reloaded.pingback_count, 1)
self.assertTrue(self.second_entry.title in
self.first_entry.pingbacks[0].user_name)
# Error code 48 : The pingback has already been registered.
response = self.server.pingback.ping(source, target)
self.assertEqual(response, 48)
def test_pingback_ping_on_entry_without_author(self):
target = 'http://%s%s' % (
self.site.domain, self.first_entry.get_absolute_url())
source = 'http://%s%s' % (
self.site.domain, self.second_entry.get_absolute_url())
self.first_entry.pingback_enabled = True
self.first_entry.save()
self.first_entry.authors.clear()
connect_discussion_signals()
response = self.server.pingback.ping(source, target)
disconnect_discussion_signals()
self.assertEqual(
response,
'Pingback from %s to %s registered.' % (source, target))
first_entry_reloaded = Entry.objects.get(pk=self.first_entry.pk)
self.assertEqual(first_entry_reloaded.pingback_count, 1)
self.assertTrue(self.second_entry.title in
self.first_entry.pingbacks[0].user_name)
def test_pingback_ping_spam_checker(self):
import zinnia.spam_checker
original_scb = zinnia.spam_checker.SPAM_CHECKER_BACKENDS
zinnia.spam_checker.SPAM_CHECKER_BACKENDS = (
'zinnia.spam_checker.backends.all_is_spam',
)
target = 'http://%s%s' % (
self.site.domain, self.first_entry.get_absolute_url())
source = 'http://%s%s' % (
self.site.domain, self.second_entry.get_absolute_url())
self.first_entry.pingback_enabled = True
self.first_entry.save()
response = self.server.pingback.ping(source, target)
self.assertEqual(response, 51)
zinnia.spam_checker.SPAM_CHECKER_BACKENDS = original_scb
def test_pingback_extensions_get_pingbacks(self):
target = 'http://%s%s' % (
self.site.domain, self.first_entry.get_absolute_url())
source = 'http://%s%s' % (
self.site.domain, self.second_entry.get_absolute_url())
response = self.server.pingback.ping(source, target)
self.assertEqual(
response, 'Pingback from %s to %s registered.' % (source, target))
response = self.server.pingback.extensions.getPingbacks(
'http://external/')
self.assertEqual(response, 32)
response = self.server.pingback.extensions.getPingbacks(
'http://example.com/error-404/')
self.assertEqual(response, 32)
response = self.server.pingback.extensions.getPingbacks(
'http://example.com/2010/')
self.assertEqual(response, 33)
response = self.server.pingback.extensions.getPingbacks(source)
self.assertEqual(response, [])
response = self.server.pingback.extensions.getPingbacks(target)
self.assertEqual(response, [
'http://example.com/2010/01/01/my-second-entry/'])
comment = comments.get_model().objects.create(
content_type=ContentType.objects.get_for_model(Entry),
object_pk=self.first_entry.pk,
site=self.site, submit_date=timezone.now(),
comment='Test pingback',
user_url='http://external/blog/1/',
user_name='Test pingback')
comment.flags.create(user=self.author, flag=PINGBACK)
response = self.server.pingback.extensions.getPingbacks(target)
self.assertEqual(response, [
'http://example.com/2010/01/01/my-second-entry/',
'http://external/blog/1/'])
|
import logging
import typing
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
CONF_ICON,
CONF_ID,
CONF_NAME,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
from homeassistant.loader import bind_hass
DOMAIN = "input_boolean"
_LOGGER = logging.getLogger(__name__)
CONF_INITIAL = "initial"
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_INITIAL): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INITIAL): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(vol.Any(UPDATE_FIELDS, None))},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
class InputBooleanStorageCollection(collection.StorageCollection):
"""Input boolean collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
"""Validate the config is valid."""
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: typing.Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return {**data, **update_data}
@bind_hass
def is_on(hass, entity_id):
"""Test if input_boolean is True."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up an input boolean."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.attach_entity_component_collection(
component, yaml_collection, lambda conf: InputBoolean(conf, from_yaml=True)
)
storage_collection = InputBooleanStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.attach_entity_component_collection(
component, storage_collection, InputBoolean
)
await yaml_collection.async_load(
[{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection)
async def reload_service_handler(service_call: ServiceCallType) -> None:
"""Remove all input booleans and load new ones from config."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
return
await yaml_collection.async_load(
[
{CONF_ID: id_, **(conf or {})}
for id_, conf in conf.get(DOMAIN, {}).items()
]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
return True
class InputBoolean(ToggleEntity, RestoreEntity):
"""Representation of a boolean input."""
def __init__(self, config: typing.Optional[dict], from_yaml: bool = False):
"""Initialize a boolean input."""
self._config = config
self._editable = True
self._state = config.get(CONF_INITIAL)
if from_yaml:
self._editable = False
self.entity_id = f"{DOMAIN}.{self.unique_id}"
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return name of the boolean input."""
return self._config.get(CONF_NAME)
@property
def state_attributes(self):
"""Return the state attributes of the entity."""
return {ATTR_EDITABLE: self._editable}
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def is_on(self):
"""Return true if entity is on."""
return self._state
@property
def unique_id(self):
"""Return a unique ID for the person."""
return self._config[CONF_ID]
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
# If not None, we got an initial value.
await super().async_added_to_hass()
if self._state is not None:
return
state = await self.async_get_last_state()
self._state = state and state.state == STATE_ON
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._state = False
self.async_write_ha_state()
async def async_update_config(self, config: typing.Dict) -> None:
"""Handle when the config is updated."""
self._config = config
self.async_write_ha_state()
|
import os
import inspect
from google.auth import credentials
from google.auth.exceptions import RefreshError
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud import bigquery
from google.cloud.exceptions import Forbidden
from google.cloud.bigquery._http import Connection
from kaggle_secrets import GcpTarget, UserSecretsClient
from log import Log
KAGGLE_GCP_CLIENT_USER_AGENT="kaggle-gcp-client/1.0"
def get_integrations():
kernel_integrations_var = os.getenv("KAGGLE_KERNEL_INTEGRATIONS")
kernel_integrations = KernelIntegrations()
if kernel_integrations_var is None:
return kernel_integrations
for integration in kernel_integrations_var.split(':'):
try:
target = GcpTarget[integration.upper()]
kernel_integrations.add_integration(target)
except KeyError as e:
Log.error(f"Unknown integration target: {e}")
return kernel_integrations
class KernelIntegrations():
def __init__(self):
self.integrations = {}
def add_integration(self, target):
self.integrations[target] = True
def has_integration(self, target):
return target in self.integrations
def has_bigquery(self):
return GcpTarget.BIGQUERY in self.integrations
def has_gcs(self):
return GcpTarget.GCS in self.integrations
def has_automl(self):
return GcpTarget.AUTOML in self.integrations
class KaggleKernelCredentials(credentials.Credentials):
"""Custom Credentials used to authenticate using the Kernel's connected OAuth account.
Example usage:
client = bigquery.Client(project='ANOTHER_PROJECT',
credentials=KaggleKernelCredentials())
"""
def __init__(self, target=GcpTarget.BIGQUERY):
super().__init__()
self.target = target
def refresh(self, request):
try:
client = UserSecretsClient()
if self.target == GcpTarget.BIGQUERY:
self.token, self.expiry = client.get_bigquery_access_token()
elif self.target == GcpTarget.GCS:
self.token, self.expiry = client._get_gcs_access_token()
elif self.target == GcpTarget.AUTOML:
self.token, self.expiry = client._get_automl_access_token()
except ConnectionError as e:
Log.error(f"Connection error trying to refresh access token: {e}")
print("There was a connection error trying to fetch the access token. "
f"Please ensure internet is on in order to use the {self.target.service} Integration.")
raise RefreshError('Unable to refresh access token due to connection error.') from e
except Exception as e:
Log.error(f"Error trying to refresh access token: {e}")
if (not get_integrations().has_integration(self.target)):
Log.error(f"No {self.target.service} integration found.")
print(
f"Please ensure you have selected a {self.target.service} account in the Notebook Add-ons menu.")
raise RefreshError('Unable to refresh access token.') from e
class _DataProxyConnection(Connection):
"""Custom Connection class used to proxy the BigQuery client to Kaggle's data proxy."""
API_BASE_URL = os.getenv("KAGGLE_DATA_PROXY_URL")
def __init__(self, client):
super().__init__(client)
self.extra_headers["X-KAGGLE-PROXY-DATA"] = os.getenv(
"KAGGLE_DATA_PROXY_TOKEN")
def api_request(self, *args, **kwargs):
"""Wrap Connection.api_request in order to handle errors gracefully.
"""
try:
return super().api_request(*args, **kwargs)
except Forbidden as e:
msg = ("Permission denied using Kaggle's public BigQuery integration. "
"Did you mean to select a BigQuery account in the Notebook Add-ons menu?")
print(msg)
Log.info(msg)
raise e
class PublicBigqueryClient(bigquery.client.Client):
"""A modified BigQuery client that routes requests using Kaggle's Data Proxy to provide free access to Public Datasets.
Example usage:
from kaggle import PublicBigqueryClient
client = PublicBigqueryClient()
"""
def __init__(self, *args, **kwargs):
data_proxy_project = os.getenv("KAGGLE_DATA_PROXY_PROJECT")
anon_credentials = credentials.AnonymousCredentials()
anon_credentials.refresh = lambda *args: None
super().__init__(
project=data_proxy_project, credentials=anon_credentials, *args, **kwargs
)
# TODO: Remove this once https://github.com/googleapis/google-cloud-python/issues/7122 is implemented.
self._connection = _DataProxyConnection(self)
def has_been_monkeypatched(method):
return "kaggle_gcp" in inspect.getsourcefile(method)
def init_bigquery():
from google.auth import environment_vars
from google.cloud import bigquery
is_proxy_token_set = "KAGGLE_DATA_PROXY_TOKEN" in os.environ
is_user_secrets_token_set = "KAGGLE_USER_SECRETS_TOKEN" in os.environ
if not (is_proxy_token_set or is_user_secrets_token_set):
return bigquery
# If this Notebook has bigquery integration on startup, preload the Kaggle Credentials
# object for magics to work.
if get_integrations().has_bigquery():
from google.cloud.bigquery import magics
magics.context.credentials = KaggleKernelCredentials()
def monkeypatch_bq(bq_client, *args, **kwargs):
from kaggle_gcp import get_integrations, PublicBigqueryClient, KaggleKernelCredentials
specified_credentials = kwargs.get('credentials')
has_bigquery = get_integrations().has_bigquery()
# Prioritize passed in project id, but if it is missing look for env var.
arg_project = kwargs.get('project')
explicit_project_id = arg_project or os.environ.get(environment_vars.PROJECT)
# This is a hack to get around the bug in google-cloud library.
# Remove these two lines once this is resolved:
# https://github.com/googleapis/google-cloud-python/issues/8108
if explicit_project_id:
Log.info(f"Explicit project set to {explicit_project_id}")
kwargs['project'] = explicit_project_id
if explicit_project_id is None and specified_credentials is None and not has_bigquery:
msg = "Using Kaggle's public dataset BigQuery integration."
Log.info(msg)
print(msg)
return PublicBigqueryClient(*args, **kwargs)
else:
if specified_credentials is None:
Log.info("No credentials specified, using KaggleKernelCredentials.")
kwargs['credentials'] = KaggleKernelCredentials()
if (not has_bigquery):
Log.info("No bigquery integration found, creating client anyways.")
print('Please ensure you have selected a BigQuery '
'account in the Notebook Add-ons menu.')
if explicit_project_id is None:
Log.info("No project specified while using the unmodified client.")
print('Please ensure you specify a project id when creating the client'
' in order to use your BigQuery account.')
kwargs['client_info'] = set_kaggle_user_agent(kwargs.get('client_info'))
return bq_client(*args, **kwargs)
# Monkey patches BigQuery client creation to use proxy or user-connected GCP account.
# Deprecated in favor of Kaggle.DataProxyClient().
# TODO: Remove this once users have migrated to that new interface.
bq_client = bigquery.Client
if (not has_been_monkeypatched(bigquery.Client)):
bigquery.Client = lambda *args, **kwargs: monkeypatch_bq(
bq_client, *args, **kwargs)
return bigquery
def monkeypatch_client(client_klass, kaggle_kernel_credentials):
client_init = client_klass.__init__
def patched_init(self, *args, **kwargs):
specified_credentials = kwargs.get('credentials')
if specified_credentials is None:
Log.info("No credentials specified, using KaggleKernelCredentials.")
kwargs['credentials'] = kaggle_kernel_credentials
kwargs['client_info'] = set_kaggle_user_agent(kwargs.get('client_info'))
return client_init(self, *args, **kwargs)
if (not has_been_monkeypatched(client_klass.__init__)):
client_klass.__init__ = patched_init
def set_kaggle_user_agent(client_info: ClientInfo):
# Add kaggle client user agent in order to attribute usage.
if client_info is None:
client_info = ClientInfo(user_agent=KAGGLE_GCP_CLIENT_USER_AGENT)
else:
client_info.user_agent = KAGGLE_GCP_CLIENT_USER_AGENT
return client_info
def init_gcs():
is_user_secrets_token_set = "KAGGLE_USER_SECRETS_TOKEN" in os.environ
from google.cloud import storage
if not is_user_secrets_token_set:
return storage
from kaggle_gcp import get_integrations
if not get_integrations().has_gcs():
return storage
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
monkeypatch_client(
storage.Client,
KaggleKernelCredentials(target=GcpTarget.GCS))
return storage
def init_automl():
is_user_secrets_token_set = "KAGGLE_USER_SECRETS_TOKEN" in os.environ
from google.cloud import automl, automl_v1beta1
if not is_user_secrets_token_set:
return
from kaggle_gcp import get_integrations
if not get_integrations().has_automl():
return
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
kaggle_kernel_credentials = KaggleKernelCredentials(target=GcpTarget.AUTOML)
# Patch the 2 GA clients: AutoMlClient and PredictionServiceClient
monkeypatch_client(automl.AutoMlClient, kaggle_kernel_credentials)
monkeypatch_client(automl.PredictionServiceClient, kaggle_kernel_credentials)
# The AutoML client library exposes 3 different client classes (AutoMlClient,
# TablesClient, PredictionServiceClient), so patch each of them.
# The same KaggleKernelCredentials are passed to all of them.
# The GcsClient class is only used internally by TablesClient.
# The beta version of the clients that are now GA are included here for now.
# They are deprecated and will be removed by 1 May 2020.
monkeypatch_client(automl_v1beta1.AutoMlClient, kaggle_kernel_credentials)
monkeypatch_client(automl_v1beta1.PredictionServiceClient, kaggle_kernel_credentials)
# The TablesClient is still in beta, so this will not be deprecated until
# the TablesClient is GA.
monkeypatch_client(automl_v1beta1.TablesClient, kaggle_kernel_credentials)
def init():
init_bigquery()
init_gcs()
init_automl()
# We need to initialize the monkeypatching of the client libraries
# here since there is a circular dependency between our import hook version
# google.cloud.* and kaggle_gcp. By calling init here, we guarantee
# that regardless of the original import that caused google.cloud.* to be
# loaded, the monkeypatching will be done.
init()
|
import logging
import yaml
from ansible import context
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.common.collections import ImmutableDict
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.vars.manager import VariableManager
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Ansible_playbook(NeuronModule):
def __init__(self, **kwargs):
super(Ansible_playbook, self).__init__(**kwargs)
self.task_file = kwargs.get('task_file', None)
self.sudo = kwargs.get('sudo', False)
self.sudo_user = kwargs.get('sudo_user', False)
self.sudo_password = kwargs.get('sudo_password', False)
# check if parameters have been provided
if self._is_parameters_ok():
# since the API is constructed for CLI it expects certain options to always be set in the context object
context.CLIARGS = self._get_options()
# initialize needed objects
loader = DataLoader()
passwords = {'become_pass': self.sudo_password}
inventory = InventoryManager(loader=loader, sources="localhost,")
# variable manager takes care of merging all the different sources to give you a unified
# view of variables available in each context
variable_manager = VariableManager(loader=loader, inventory=inventory)
variable_manager.set_inventory(inventory)
playbooks = None
with open(self.task_file, 'r') as stream:
try:
playbooks = yaml.full_load(stream)
except yaml.YAMLError as exc:
logger.debug("Ansibe playbook error: {}".format(exc))
if playbooks is not None:
# force usage of python 3 interpreter
playbooks[0].setdefault("vars", {})
playbooks[0]["vars"]["ansible_python_interpreter"] = "/usr/bin/python3"
play = Play().load(playbooks[0], variable_manager=variable_manager, loader=loader)
# Run it - instantiate task queue manager, which takes care of forking and setting up all objects
# to iterate over host list and tasks
tqm = None
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=passwords,
stdout_callback='default',
# the 'default' callback plugin prints task results to stdout
)
tqm.run(play) # most interesting data for a play is actually sent to the callback's methods
finally:
# we always need to clean up child procs and the structures we use to communicate with them
if tqm is not None:
tqm.cleanup()
def _is_parameters_ok(self):
if self.task_file is None:
raise MissingParameterException("task_file parameter required")
# check if the user wants to use sudo for root privileges
if self.sudo:
# the user must set a login and password
if not self.sudo_user:
raise MissingParameterException("sudo_user parameter required with sudo True")
if not self.sudo_password:
raise MissingParameterException("sudo_password parameter required with sudo True")
return True
def _get_options(self):
"""
Return a valid dict of option usable by Ansible depending on the sudo value if set
:return: ImmutableDict
"""
if self.sudo:
options = ImmutableDict(connection='local', forks=100, become=True, become_method="sudo",
become_user=self.sudo_user, check=False, listhosts=False, listtasks=False, listtags=False,
syntax=False, module_path="", diff=False)
else:
options = ImmutableDict(connection='local', forks=100, become=None, become_method=None, become_user=None,
check=False, listhosts=False, listtasks=False, listtags=False, syntax=False,
module_path="", diff=False)
logger.debug("Ansible options: %s" % str(options))
return options
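# Hedged usage sketch (added for illustration; the playbook path and credentials are
# assumptions, and in a real brain file the neuron would be declared in YAML rather
# than instantiated directly):
#
#     Ansible_playbook(task_file='/home/pi/playbook.yml')
#     Ansible_playbook(task_file='/home/pi/playbook.yml',
#                      sudo=True, sudo_user='pi', sudo_password='secret')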
|
import re
from django.utils.translation import gettext_lazy as _
from weblate.checks.format import BaseFormatCheck
RUBY_FORMAT_MATCH = re.compile(
r"""
%( # initial %
(?: # classic printf style
(?:(?P<ord>\d+)\$)? # variable order, like %1$s
(?P<fullvar>
[ +#*-]* # flags
(?:\d+)? # width
(?:\.\d+)? # precision
(?P<type>[a-zA-Z%]) # type (%s, %d, etc.)
)
)|(?: # template style
(?P<t_fullvar>
[ +#*-]* # flags
(?:\d+)? # width
(?:\.\d+)? # precision
(?:
(?:
<(?P<t_field>[^> ]+)> # named printf reference
(?P<t_type>[a-zA-Z]) # type (%s, %d, etc.)
)
|
(?:\{(?P<tt_field>[^} ]+)\}) # named reference (implicit %s)
)
)
)
)
""",
re.VERBOSE,
)
class RubyFormatCheck(BaseFormatCheck):
"""Check for Ruby format string.
Ruby supports various format strings (excluding string interpolation):
- printf syntax: %s, %1$s
- named printf syntax: %<variable>s
- template style (implicit %s): %{variable}
"""
check_id = "ruby_format"
name = _("Ruby format")
description = _("Ruby format string does not match source")
regexp = RUBY_FORMAT_MATCH
def is_position_based(self, string):
return string != "%" and not re.search(r"[$<{]", string)
|
from copy import deepcopy
import pytest
from homeassistant.components import numato
from . import numato_mock
from .common import NUMATO_CFG
@pytest.fixture
def config():
"""Provide a copy of the numato domain's test configuration.
This helps to quickly change certain aspects of the configuration scoped
to each individual test.
"""
return deepcopy(NUMATO_CFG)
@pytest.fixture
def numato_fixture(monkeypatch):
"""Inject the numato mockup into numato homeassistant module."""
module_mock = numato_mock.NumatoModuleMock()
monkeypatch.setattr(numato, "gpio", module_mock)
return module_mock
|
import vcr
from urllib.request import urlopen
def test_recorded_request_uri_with_redirected_request(tmpdir, httpbin):
with vcr.use_cassette(str(tmpdir.join("test.yml"))) as cass:
assert len(cass) == 0
urlopen(httpbin.url + "/redirect/3")
assert cass.requests[0].uri == httpbin.url + "/redirect/3"
assert cass.requests[3].uri == httpbin.url + "/get"
assert len(cass) == 4
def test_records_multiple_header_values(tmpdir, httpbin):
with vcr.use_cassette(str(tmpdir.join("test.yml"))) as cass:
assert len(cass) == 0
urlopen(httpbin.url + "/response-headers?foo=bar&foo=baz")
assert len(cass) == 1
assert cass.responses[0]["headers"]["foo"] == ["bar", "baz"]
|
from __future__ import absolute_import
from __future__ import print_function
import pytest
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils
from elephas.spark_model import SparkModel
from elephas.utils.rdd_utils import to_simple_rdd
# Define basic parameters
batch_size = 64
nb_classes = 10
epochs = 1
# Request the Spark context fixture for every test in this module
pytestmark = pytest.mark.usefixtures("spark_context")
# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(128, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(optimizer="sgd",
loss="categorical_crossentropy", metrics=["acc"])
def test_spark_model_end_to_end(spark_context):
rdd = to_simple_rdd(spark_context, x_train, y_train)
# sync epoch
spark_model = SparkModel(model, frequency='epoch',
mode='synchronous', num_workers=2)
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size,
verbose=2, validation_split=0.1)
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
# sync batch
spark_model = SparkModel(model, frequency='batch',
mode='synchronous', num_workers=2)
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size,
verbose=2, validation_split=0.1)
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
# async epoch
spark_model = SparkModel(model, frequency='epoch', mode='asynchronous')
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size,
verbose=2, validation_split=0.1)
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
# hog wild epoch
spark_model = SparkModel(model, frequency='epoch', mode='hogwild')
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size,
verbose=2, validation_split=0.1)
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
|
import gc
import sys
import weakref
from flexx.util.testing import run_tests_if_main, skipif, skip, raises
from flexx.event.both_tester import run_in_both, this_is_js
from flexx.util.logging import capture_log
from flexx import event
loop = event.loop
logger = event.logger
## Order
class MyObject1(event.Component):
@event.reaction('!a')
def r1(self, *events):
print('r1:' + ' '.join([ev.type for ev in events]))
@event.reaction('!a', '!b')
def r2(self, *events):
print('r2:' + ' '.join([ev.type for ev in events]))
@event.reaction('!c')
def r3(self, *events):
pass
@run_in_both(MyObject1)
def test_reaction_order1():
"""
r1:a a
r2:a a
r1:a a
r2:a a
"""
m = MyObject1()
# since there is a reaction for c, the a events cannot join
with loop:
m.emit('a', {})
m.emit('a', {})
m.emit('c', {})
m.emit('c', {})
m.emit('a', {})
m.emit('a', {})
@run_in_both(MyObject1)
def test_reaction_order2():
"""
r1:a a
r2:a a b b a a
r1:a a
r1:a
r2:a
"""
m = MyObject1()
# for r1 the a events cannot join (the b events break them up), but they all join for r2
with loop:
m.emit('a', {})
m.emit('a', {})
m.emit('b', {})
m.emit('b', {})
m.emit('a', {})
m.emit('a', {})
m.emit('c', {}) # but this breaks it
m.emit('a', {})
@run_in_both(MyObject1)
def test_reaction_order3():
"""
r2:b a a
r1:a a
"""
m = MyObject1()
# in all of the above r1 went first, because of its name.
# now r2 is "triggered" first
with loop:
m.emit('b', {})
m.emit('a', {})
m.emit('a', {})
@run_in_both(MyObject1)
def test_reaction_order4():
"""
r2:b a a
r1:a a
"""
m = MyObject1()
# in all of the above r1 went first, because of its name.
# now r2 is "triggered" first
with loop:
m.emit('b', {})
m.emit('a', {})
m.emit('a', {})
## Labels
class MyObject_labeled(event.Component):
@event.reaction('!a')
def r1(self, *events):
print('r1 ' + ' '.join([ev.type for ev in events]))
@event.reaction('!a:b')
def r2(self, *events):
print('r2 ' + ' '.join([ev.type for ev in events]))
@event.reaction('!a:a')
def r3(self, *events):
print('r3 ' + ' '.join([ev.type for ev in events]))
@run_in_both(MyObject_labeled)
def test_reaction_labels1():
"""
r3 a a
r2 a a
r1 a a
"""
m = MyObject_labeled()
# with labels, reactions are invoked in order of their label names:
# 'a' (r3), then 'b' (r2), then the default label 'r1' (r1)
with loop:
m.emit('a', {})
m.emit('a', {})
## Init order
class MyObject_init(event.Component):
foo = event.IntProp(settable=True)
bar = event.IntProp(7, settable=True)
spam = event.IntProp(settable=False)
@event.reaction('foo', 'bar')
def _report(self, *events):
print('r ' + ', '.join(['%s:%i->%i' % (ev.type, ev.old_value, ev.new_value) for ev in events]))
@run_in_both(MyObject_init)
def test_reaction_init1():
"""
0 7
iter
r bar:7->7, foo:0->0
0 7
end
"""
# order bar foo is because of sorted prop names
m = MyObject_init()
print(m.foo, m.bar)
print('iter')
loop.iter()
print(m.foo, m.bar)
print('end')
@skipif(sys.version_info < (3,6), reason='need ordered kwargs')
@run_in_both(MyObject_init)
def test_reaction_init2():
"""
4 4
iter
r foo:4->4, bar:4->4
4 4
end
"""
# Order is determined by order of kwargs.
m = MyObject_init(foo=4, bar=4)
print(m.foo, m.bar)
print('iter')
loop.iter()
print(m.foo, m.bar)
print('end')
@run_in_both(MyObject_init)
def test_reaction_init3():
"""
0 7
iter
r bar:7->7, foo:0->0, foo:0->2, bar:7->2
2 2
end
"""
# first two ordered by sorted prop names, last two by the order of calling the setters
m = MyObject_init()
m.set_foo(2)
m.set_bar(2)
print(m.foo, m.bar)
print('iter')
loop.iter()
print(m.foo, m.bar)
print('end')
@skipif(sys.version_info < (3,6), reason='need ordered kwargs')
@run_in_both(MyObject_init)
def test_reaction_init4():
"""
4 4
iter
r foo:4->4, bar:4->4, foo:4->2, bar:4->2
2 2
end
"""
# Order of the first two is determined by the order of keyword args in the
# constructor, the last two by the order in which the setters were called.
m = MyObject_init(foo=4, bar=4)
m.set_foo(2)
m.set_bar(2)
print(m.foo, m.bar)
print('iter')
loop.iter()
print(m.foo, m.bar)
print('end')
@run_in_both(MyObject_init)
def test_reaction_init_fail1():
"""
? AttributeError
end
"""
try:
m = MyObject_init(blabla=1)
except AttributeError as err:
logger.exception(err)
try:
m = MyObject_init(spam=1)
except TypeError as err:
logger.exception(err)
print('end')
## Inheritance, overloading, and super()
class MyObjectSub(MyObject1):
@event.reaction('!a', '!b')
def r2(self, *events):
super().r2(*events)
print('-- r2 sub')
@run_in_both(MyObjectSub)
def test_reaction_overloading1():
"""
r1:a a
r2:a a
-- r2 sub
r2:b b
-- r2 sub
"""
m = MyObjectSub()
with loop:
m.emit('a', {})
m.emit('a', {})
with loop:
m.emit('b', {})
m.emit('b', {})
## Reactions used not as decorators
class MyObject2(event.Component):
foo = event.IntProp(settable=True)
bar = event.IntProp(7, settable=True)
@run_in_both(MyObject2)
def test_reaction_using_react_func1():
"""
r bar:7->7, foo:0->0, foo:0->2, bar:7->2
r bar:7->7, foo:0->0, foo:0->3, bar:7->3
"""
def foo(*events):
print('r ' + ', '.join(['%s:%i->%i' % (ev.type, ev.old_value, ev.new_value) for ev in events]))
m = MyObject2()
m.reaction(foo, 'foo', 'bar')
m.set_foo(2)
m.set_bar(2)
loop.iter()
# Again, but watch order of args
m = MyObject2()
m.reaction('foo', 'bar', foo)
m.set_foo(3)
m.set_bar(3)
loop.iter()
@run_in_both(MyObject2)
def test_reaction_using_react_func2():
"""
r foo:0->2, bar:7->2
r foo:0->3, bar:7->3
"""
def foo(*events):
print('r ' + ', '.join(['%s:%i->%i' % (ev.type, ev.old_value, ev.new_value) for ev in events]))
m = MyObject2()
loop.iter() # this is extra
m.reaction(foo, 'foo', 'bar')
m.set_foo(2)
m.set_bar(2)
loop.iter()
# Again, but watch order of args
m = MyObject2()
loop.iter() # this is extra
m.reaction('foo', 'bar', foo)
m.set_foo(3)
m.set_bar(3)
loop.iter()
@run_in_both(MyObject2)
def test_reaction_using_react_func3():
"""
r foo:0->2, bar:7->2
"""
class Foo:
def foo(self, *events):
print('r ' + ', '.join(['%s:%i->%i' % (ev.type, ev.old_value, ev.new_value) for ev in events]))
f = Foo()
m = MyObject2()
loop.iter() # this is extra
m.reaction(f.foo, 'foo', 'bar')
m.set_foo(2)
m.set_bar(2)
loop.iter()
@run_in_both(MyObject2, js=False) # not an issue in JS - no decorators there
def test_reaction_using_react_func4():
"""
r bar:7->7, foo:0->0, foo:0->2, bar:7->2
"""
m = MyObject2()
@m.reaction('foo', 'bar')
def foo(*events):
print('r ' + ', '.join(['%s:%i->%i' % (ev.type, ev.old_value, ev.new_value) for ev in events]))
m.set_foo(2)
m.set_bar(2)
loop.iter()
# not in both
def test_reaction_builtin_function():
class Foo(event.Component):
pass
foo = Foo()
foo.reaction('!bar', print) # this should not error
## Reactions as decorators on other components
# not in both
def test_reaction_as_decorator_of_other_cls():
class C1(event.Component):
foo = event.AnyProp(settable=True)
c1 = C1()
class C2(event.Component):
@c1.reaction('foo')
def on_foo(self, *events):
print('x')
self.xx = events[-1].new_value
c2 = C2()
loop.iter()
c1.set_foo(3)
loop.iter()
assert c2.xx == 3
## Misc
@run_in_both(MyObject1)
def test_reaction_calling():
"""
r1:
r2:
end
"""
m = MyObject1()
m.r1()
m.r2()
loop.iter()
print('end')
def test_reaction_exceptions1():
m = event.Component()
@m.reaction('!foo')
def handle_foo(*events):
1/0
m.emit('foo', {})
sys.last_traceback = None
assert sys.last_traceback is None
# No exception should be thrown here
loop.iter()
loop.iter()
# But we should have prepared for PM debugging
if sys.version_info[0] >= 3: # not sure why
assert sys.last_traceback
# It's different for a direct call
with raises(ZeroDivisionError):
handle_foo()
def test_reaction_exceptions2():
class Foo(event.Component):
def __init__(self):
super().__init__()
self.bar = event.Component()
self.bars = [self.bar]
f = Foo()
# ok
@f.reaction('bars*.spam')
def handle_foo(*events):
pass
# not ok
with raises(RuntimeError) as err:
@f.reaction('bar*.spam')
def handle_foo(*events):
pass
assert 'not a tuple' in str(err)
def test_reaction_decorator_fails():
class Foo:
def foo(self, *events):
pass
f = Foo()
def foo(*events):
pass
# Needs at least one argument
with raises(TypeError):
event.reaction()
# Need a function
with raises(TypeError):
event.reaction('!foo')(3)
# Need self argument
with raises(TypeError):
event.reaction('!foo')(foo)
# Cannot be bound method
with raises(TypeError):
event.reaction('!foo')(f.foo)
def test_reaction_descriptor_has_local_connection_strings():
m = MyObject1()
assert m.__class__.r1.local_connection_strings == ['!a']
## Meta-ish tests that are similar for property/emitter/action/reaction
@run_in_both(MyObject1)
def test_reaction_meta():
"""
True
r1
[['!a', ['a:r1']]]
[['!a', ['a:r2']], ['!b', ['b:r2']]]
"""
m = MyObject1()
print(hasattr(m.r1, 'dispose'))
print(m.r1.get_name())
print([list(x) for x in m.r1.get_connection_info()]) # tuple-> list
print([list(x) for x in m.r2.get_connection_info()])
@run_in_both(MyObject1)
def test_reaction_not_settable():
"""
fail AttributeError
"""
m = MyObject1()
try:
m.r1 = 3
except AttributeError:
print('fail AttributeError')
# We cannot prevent deletion in JS, otherwise we cannot overload
def test_reaction_python_only():
m = MyObject1()
# Reaction decorator needs proper callable and connection strings
with raises(TypeError):
event.reaction(3)
with raises(TypeError):
event.reaction(isinstance)
# Check type of the instance attribute
assert isinstance(m.r1, event._reaction.Reaction)
# Cannot set or delete a reaction
with raises(AttributeError):
m.r1 = 3
with raises(AttributeError):
del m.r1
# Repr and docs
assert 'reaction' in repr(m.__class__.r1).lower()
assert 'reaction' in repr(m.r1).lower()
assert 'r1' in repr(m.r1)
run_tests_if_main()
|
import abc
import socket
from threading import Thread
import six.moves.cPickle as pickle
from flask import Flask, request
from multiprocessing import Process
from elephas.utils.sockets import determine_master
from elephas.utils.sockets import receive, send
from elephas.utils.serialization import dict_to_model
from elephas.utils.rwlock import RWLock as Lock
from elephas.utils.notebook_utils import is_running_in_notebook
from elephas.utils import subtract_params
class BaseParameterServer(object):
"""BaseParameterServer
Parameter servers can be started and stopped. Server implementations have
to cater to the needs of their respective BaseParameterClient instances.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
raise NotImplementedError
@abc.abstractmethod
def start(self):
"""Start the parameter server instance.
"""
raise NotImplementedError
@abc.abstractmethod
def stop(self):
"""Terminate the parameter server instance.
"""
raise NotImplementedError
class HttpServer(BaseParameterServer):
"""HttpServer
Flask HTTP server. Defines two routes, `/parameters` to GET current
parameters held by this server, and `/update` which can be used to
POST updates.
"""
def __init__(self, model, mode, port=4000, debug=True,
threaded=True, use_reloader=False):
"""Initializes and HTTP server from a serialized Keras model
a parallelisation mode and a port to run the Flask application on. In
hogwild mode no read- or write-locks will be acquired; in asynchronous
mode they are.
:param model: Serialized Keras model
:param mode: parallelization mode, either `asynchronous` or `hogwild`
:param port: int, port to run the application on
:param debug: boolean, Flask debug mode
:param threaded: boolean, Flask threaded application mode
:param use_reloader: boolean, Flask `use_reloader` argument
"""
self.master_network = dict_to_model(model)
self.mode = mode
self.master_url = None
self.port = port
if is_running_in_notebook():
self.threaded = False
self.use_reloader = False
self.debug = False
else:
self.debug = debug
self.threaded = threaded
self.use_reloader = use_reloader
self.lock = Lock()
self.pickled_weights = None
self.weights = self.master_network.get_weights()
self.server = Process(target=self.start_flask_service)
def start(self):
self.server.start()
self.master_url = determine_master(self.port)
def stop(self):
self.server.terminate()
self.server.join()
def start_flask_service(self):
"""Define Flask parameter server service.
This HTTP server can do two things: get the current model
parameters and update model parameters. After registering
the `parameters` and `update` routes, the service will
get started.
"""
app = Flask(__name__)
self.app = app
@app.route('/')
def home():
return 'Elephas'
@app.route('/parameters', methods=['GET'])
def handle_get_parameters():
if self.mode == 'asynchronous':
self.lock.acquire_read()
self.pickled_weights = pickle.dumps(self.weights, -1)
pickled_weights = self.pickled_weights
if self.mode == 'asynchronous':
self.lock.release()
return pickled_weights
@app.route('/update', methods=['POST'])
def handle_update_parameters():
delta = pickle.loads(request.data)
if self.mode == 'asynchronous':
self.lock.acquire_write()
if not self.master_network.built:
self.master_network.build()
# Just apply the gradient
weights_before = self.weights
self.weights = subtract_params(weights_before, delta)
if self.mode == 'asynchronous':
self.lock.release()
return 'Update done'
master_url = determine_master(self.port)
host = master_url.split(':')[0]
self.app.run(host=host, debug=self.debug, port=self.port,
threaded=self.threaded, use_reloader=self.use_reloader)
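# Hedged usage sketch (assumption, not part of elephas): how a worker could
# exercise the two routes registered by HttpServer.start_flask_service above.
# ``master_url`` is hypothetical here and must include the scheme, e.g.
# "http://192.168.0.10:4000".
def _example_http_client_roundtrip(master_url):
    from six.moves.urllib.request import Request, urlopen
    # GET the current weights as a pickled payload
    weights = pickle.loads(urlopen(master_url + "/parameters").read())
    # POST a (here: all-zero) delta back to the server
    delta = [w * 0 for w in weights]
    urlopen(Request(master_url + "/update", data=pickle.dumps(delta, -1)))
    return weights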
class SocketServer(BaseParameterServer):
"""SocketServer
A basic Python socket server
"""
def __init__(self, model, port=4000):
"""Initializes a Socket server instance from a serializer Keras model
and a port to listen to.
:param model: Serialized Keras model
:param port: int, port to run the socket on
"""
self.model = dict_to_model(model)
self.port = port
self.socket = None
self.runs = False
self.connections = []
self.lock = Lock()
self.thread = None
def start(self):
if self.thread is not None:
self.stop()
self.thread = Thread(target=self.start_server)
self.thread.start()
def stop(self):
self.stop_server()
self.thread.join()
self.thread = None
def start_server(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.bind(('0.0.0.0', self.port))
sock.listen(5)
self.socket = sock
self.runs = True
self.run()
def stop_server(self):
self.runs = False
if self.socket:
for thread in self.connections:
thread.join()
del thread
self.socket.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(("localhost", self.port))
sock.close()
except Exception:
pass
self.socket = None
self.connections = []
def update_parameters(self, conn):
data = receive(conn)
delta = data['delta']
with self.lock:
# add the delta element-wise; ``+`` on the plain Python lists would concatenate them
weights = [w + d for w, d in zip(self.model.get_weights(), delta)]
self.model.set_weights(weights)
def get_parameters(self, conn):
with self.lock:
weights = self.model.get_weights()
send(conn, weights)
def action_listener(self, conn):
while self.runs:
get_or_update = conn.recv(1).decode()
if get_or_update == 'u':
self.update_parameters(conn)
elif get_or_update == 'g':
self.get_parameters(conn)
else:
raise ValueError('Received invalid action')
def run(self):
while self.runs:
try:
conn, addr = self.socket.accept()
thread = Thread(target=self.action_listener, args=(conn,))
thread.start()
self.connections.append(thread)
except Exception:
print("Failed to set up socket connection.")
|
import inspect
import discord
from discord.ext import commands
__all__ = [
"ConversionFailure",
"BotMissingPermissions",
"UserFeedbackCheckFailure",
"ArgParserFailure",
]
class ConversionFailure(commands.BadArgument):
"""Raised when converting an argument fails."""
def __init__(self, converter, argument: str, param: inspect.Parameter, *args):
self.converter = converter
self.argument = argument
self.param = param
super().__init__(*args)
class BotMissingPermissions(commands.CheckFailure):
"""Raised if the bot is missing permissions required to run a command."""
def __init__(self, missing: discord.Permissions, *args):
self.missing: discord.Permissions = missing
super().__init__(*args)
class UserFeedbackCheckFailure(commands.CheckFailure):
"""A version of CheckFailure which isn't suppressed."""
def __init__(self, message=None, *args):
self.message = message
super().__init__(message, *args)
class ArgParserFailure(UserFeedbackCheckFailure):
"""Raised when parsing an argument fails."""
def __init__(
self, cmd: str, user_input: str, custom_help: str = None, ctx_send_help: bool = False
):
self.cmd = cmd
self.user_input = user_input
self.send_cmd_help = ctx_send_help
self.custom_help_msg = custom_help
super().__init__()
|
from ... import event
from . import Widget
class ColorSelectWidget(Widget):
""" A widget used to select a color.
The ``node`` of this widget is an
`<input> <https://developer.mozilla.org/docs/Web/HTML/Element/input>`_
element of type ``color``. This is supported at least
on Firefox and Chrome, but not on IE.
"""
DEFAULT_MIN_SIZE = 28, 28
color = event.ColorProp('#000000', settable=True, doc="""
The currently selected color.
""")
disabled = event.BoolProp(False, settable=True, doc="""
Whether the color select is disabled.
""")
def _create_dom(self):
global window
node = window.document.createElement('input')
try:
node.type = 'color'
except Exception: # This widget simply does not work on IE
node = window.document.createElement('div')
node.innerHTML = 'Not supported'
self._addEventListener(node, 'input', self._color_changed_from_dom, 0)
return node
@event.emitter
def user_color(self, color):
""" Event emitted when the user changes the color. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.color, 'new_value': color}
self.set_color(color)
return d
@event.reaction('color')
def _color_changed(self, *events):
self.node.value = self.color.hex # hex is html-compatible, color.css is not
def _color_changed_from_dom(self, e):
self.user_color(self.node.value)
@event.reaction('disabled')
def __disabled_changed(self, *events):
if self.disabled:
self.node.setAttribute("disabled", "disabled")
else:
self.node.removeAttribute("disabled")
|
from flexx import app, event, ui
class MyWidget(ui.Label):
""" A Widget that reacts to key presses.
"""
CSS = """
.flx-MyWidget {
min-width: 10px;
min-height: 10px;
padding: 5px;
border: 2px solid black;
border-radius: 5px;
}
"""
def init(self):
index = self.__class__._count or 1
self.__class__._count = index + 1
self._base_text = str(index) + ' ' + self.text
color = '77f', '7f7', 'f77', 'ff5', 'f5f', '5ff', '800', '080', '008'
self.apply_style('background:#' + color[index-1])
self.set_wrap(1)
self.set_flex(1)
@event.reaction('key_down')
def _on_key(self, *events):
for ev in events:
if ev.key == 'ArrowUp':
self.set_flex(self.flex[0] + 1)
elif ev.key == 'ArrowDown':
self.set_flex(max(0, self.flex[0] - 1))
elif ev.key == 'b':
self.parent.set_mode('box')
elif ev.key == 's':
self.parent.set_mode('split')
elif ev.key == 'f':
self.parent.set_mode('fix')
elif ev.key == 'o':
ori = {'h': 'v', 'v': 'h'}.get(self.parent.orientation, 'h')
self.parent.set_orientation(ori)
elif ev.key == ']':
with self.parent:
MyWidget()
elif ev.key == '[':
self.dispose()
@event.reaction('parent.mode', 'flex')
def _update_text(self, *events):
text = self._base_text + '<br>\n'
text += 'widget with flex (%s) ' % self.flex
text += 'in %s %s layout.' % (self.parent.orientation, self.parent.mode)
self.set_html(text)
class MyLayout(ui.HVLayout):
""" A layout with some good initial values.
"""
def init(self, ori):
self.set_flex(1)
self.set_orientation(ori)
self.set_padding(8) # so we can better see the structure
@event.reaction
def _track_orientation(self):
if 'h' in self.orientation:
self.apply_style('background:#faa;')
else:
self.apply_style('background:#afa;')
text = """
This is a hv layout test app. Click a widget and then hit a key to change
the layout:<br>
* Arrow up/down: increase or decrease the flex value<br>
* o: toggle the layout orientation<br>
* b, f, s: set the layout to box, fix, or split mode<br>
"""
class TestApp(ui.Widget):
def init(self):
with MyLayout('v') as self.s:
self.w1 = MyWidget(text=text)
with MyLayout('h') as self.s:
self.w2 = MyWidget(text='hello world!')
with MyLayout('v'):
self.w3 = MyWidget(text='hi')
self.w4 = MyWidget(text='hello world! ' * 4)
with MyLayout('v'):
self.w5 = MyWidget(text='min-size: 50',
style='min-width:50px; min-height:50px')
self.w6 = MyWidget(text='min-size: 100',
style='min-width:100px; min-height:100px')
self.w7 = MyWidget(text='min-size: 150',
style='min-width:150px; min-height:150px')
with ui.Widget(flex=1):
with MyLayout('h'):
self.w8 = MyWidget()
self.w9 = MyWidget(style='min-width:250px;')
self.w8 = MyWidget()
if __name__ == '__main__':
m = app.launch(TestApp)
app.run()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import absltest
from absl.testing import xml_reporter
import mock
import six
class MockTestResult(xml_reporter._TextAndXMLTestResult):
def __init__(self):
super(MockTestResult, self).__init__(six.StringIO(), six.StringIO(),
'description', False)
self.subtest_success = []
self.subtest_failure = []
def addSubTest(self, test, subtest, err): # pylint: disable=invalid-name
super(MockTestResult, self).addSubTest(test, subtest, err)
if six.PY2:
params = {}
for param in subtest.params:
for param_name, param_value in param.items():
params[param_name] = param_value
else:
params = dict(subtest.params)
if err is not None:
self.addSubTestFailure(params)
else:
self.addSubTestSuccess(params)
def addSubTestFailure(self, params): # pylint: disable=invalid-name
self.subtest_failure.append(params)
def addSubTestSuccess(self, params): # pylint: disable=invalid-name
self.subtest_success.append(params)
class MockTestResultWithoutSubTest(xml_reporter._TextAndXMLTestResult):
# hasattr(MockTestResultWithoutSubTest, 'addSubTest') returns False
def __init__(self):
super(MockTestResultWithoutSubTest, self).__init__(six.StringIO(),
six.StringIO(),
'description',
False)
@property
def addSubTest(self): # pylint: disable=invalid-name
raise AttributeError
class Unittest3BackportTest(absltest.TestCase):
def test_subtest_pass(self):
class Foo(absltest.TestCase):
def runTest(self):
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
result = MockTestResult()
Foo().run(result)
expected_success = [{'i': 1, 'j': 2}, {'i': 1, 'j': 3}, {'i': 1},
{'i': 2, 'j': 2}, {'i': 2, 'j': 3}, {'i': 2}]
self.assertListEqual(result.subtest_success, expected_success)
def test_subtest_fail(self):
class Foo(absltest.TestCase):
def runTest(self):
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
if j == 2:
self.fail('failure')
result = MockTestResult()
Foo().run(result)
# The first layer subtest result is only added to the output when it is a
# success
expected_success = [{'i': 1, 'j': 3}, {'i': 2, 'j': 3}]
expected_failure = [{'i': 1, 'j': 2}, {'i': 2, 'j': 2}]
self.assertListEqual(expected_success, result.subtest_success)
self.assertListEqual(expected_failure, result.subtest_failure)
def test_subtest_expected_failure(self):
class Foo(absltest.TestCase):
@unittest.expectedFailure
def runTest(self):
for i in [1, 2, 3]:
with self.subTest(i=i):
self.assertEqual(i, 2)
foo = Foo()
with mock.patch.object(foo, '_addExpectedFailure',
autospec=True) as mock_subtest_expected_failure:
result = MockTestResult()
foo.run(result)
self.assertEqual(mock_subtest_expected_failure.call_count, 1)
def test_subtest_unexpected_success(self):
class Foo(absltest.TestCase):
@unittest.expectedFailure
def runTest(self):
for i in [1, 2, 3]:
with self.subTest(i=i):
self.assertEqual(i, i)
foo = Foo()
with mock.patch.object(foo, '_addUnexpectedSuccess',
autospec=True) as mock_subtest_unexpected_success:
result = MockTestResult()
foo.run(result)
self.assertEqual(mock_subtest_unexpected_success.call_count, 1)
def test_subtest_fail_fast(self):
# Ensure failfast works with subtest
class Foo(absltest.TestCase):
def runTest(self):
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
result = MockTestResult()
result.failfast = True
Foo().run(result)
expected_failure = [{'i': 1}]
self.assertListEqual(expected_failure, result.subtest_failure)
def test_subtest_skip(self):
# When a test case is skipped, addSubTest should not be called
class Foo(absltest.TestCase):
@unittest.skip('no reason')
def runTest(self):
for i in [1, 2, 3]:
with self.subTest(i=i):
self.assertEqual(i, i)
foo = Foo()
result = MockTestResult()
with mock.patch.object(foo, '_addSkip', autospec=True) as mock_test_skip:
with mock.patch.object(result, 'addSubTestSuccess',
autospec=True) as mock_subtest_success:
foo.run(result)
self.assertEqual(mock_test_skip.call_count, 1)
self.assertEqual(mock_subtest_success.call_count, 0)
@mock.patch.object(MockTestResultWithoutSubTest, 'addFailure', autospec=True)
def test_subtest_legacy(self, mock_test_fail):
# When the result object does not have an addSubTest method,
# test execution stops after the first subtest failure.
class Foo(absltest.TestCase):
def runTest(self):
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
result = MockTestResultWithoutSubTest()
Foo().run(result)
self.assertEqual(mock_test_fail.call_count, 1)
if __name__ == '__main__':
absltest.main()
|
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN, STATE_CLEANING, STATE_DOCKED, STATES
TRIGGER_TYPES = {"cleaning", "docked"}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Vacuum devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integration's entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "cleaning",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "docked",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "cleaning":
from_state = [state for state in STATES if state != STATE_CLEANING]
to_state = STATE_CLEANING
else:
from_state = [state for state in STATES if state != STATE_DOCKED]
to_state = STATE_DOCKED
state_config = {
CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_FROM: from_state,
state_trigger.CONF_TO: to_state,
}
state_config = state_trigger.TRIGGER_SCHEMA(state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
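# Hedged example (assumption): a trigger configuration that TRIGGER_SCHEMA above
# accepts, as produced by async_get_triggers for a hypothetical vacuum entity.
#
#     {
#         "platform": "device",
#         "domain": DOMAIN,
#         "device_id": "abc123",          # hypothetical device id
#         "entity_id": "vacuum.cleaner",  # hypothetical entity id
#         "type": "docked",               # or "cleaning"
#     }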
|
import argparse
import platform
import sys
import os
import numpy
import scipy
import gensim
def package_info():
"""Get the versions of Gensim and its dependencies,
the location where Gensim is installed, and the platform on which the system is running.
Returns
-------
dict of (str, str)
Dictionary containing the versions of Gensim, Python, NumPy, SciPy and platform information.
"""
return {
"Platform": platform.platform(),
"Python": sys.version.replace("\n", ', '),
"NumPy": numpy.__version__,
"SciPy": scipy.__version__,
"Gensim": gensim.__version__,
"Location": os.path.abspath(__file__),
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__[:-65], formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--info", help="Information about Gensim package", action="store_true")
args = parser.parse_args()
if args.info:
print("Gensim installation information\n")
for (k, v) in sorted(package_info().items()):
print("{}: {}".format(k, v))
|
import re
import inspect
from PyQt5.QtWidgets import QLineEdit, QApplication
import pytest
from qutebrowser.components import readlinecommands
# Some functions aren't 100% readline compatible:
# https://github.com/qutebrowser/qutebrowser/issues/678
# Those are marked with fixme and have another value marked with '# wrong'
# which marks the current behavior.
fixme = pytest.mark.xfail(reason='readline compatibility - see #678')
class LineEdit(QLineEdit):
"""QLineEdit with some methods to make testing easier."""
def _get_index(self, haystack, needle):
"""Get the index of a char (needle) in a string (haystack).
Return:
The position where needle was found, or None if it wasn't found.
"""
try:
return haystack.index(needle)
except ValueError:
return None
def set_aug_text(self, text):
"""Set a text with </> markers for selected text and | as cursor."""
real_text = re.sub('[<>|]', '', text)
self.setText(real_text)
cursor_pos = self._get_index(text, '|')
sel_start_pos = self._get_index(text, '<')
sel_end_pos = self._get_index(text, '>')
if sel_start_pos is not None and sel_end_pos is None:
raise ValueError("< given without >!")
if sel_start_pos is None and sel_end_pos is not None:
raise ValueError("> given without <!")
if cursor_pos is not None:
if sel_start_pos is not None or sel_end_pos is not None:
raise ValueError("Can't mix | and </>!")
self.setCursorPosition(cursor_pos)
elif sel_start_pos is not None:
if sel_start_pos > sel_end_pos:
raise ValueError("< given after >!")
sel_len = sel_end_pos - sel_start_pos - 1
self.setSelection(sel_start_pos, sel_len)
def aug_text(self):
"""Get a text with </> markers for selected text and | as cursor."""
text = self.text()
chars = list(text)
cur_pos = self.cursorPosition()
assert cur_pos >= 0
chars.insert(cur_pos, '|')
if self.hasSelectedText():
selected_text = self.selectedText()
sel_start = self.selectionStart()
sel_end = sel_start + len(selected_text)
assert sel_start > 0
assert sel_end > 0
assert sel_end > sel_start
assert cur_pos == sel_end
assert text[sel_start:sel_end] == selected_text
chars.insert(sel_start, '<')
chars.insert(sel_end + 1, '>')
return ''.join(chars)
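# Hedged illustration (not part of the original tests): the augmented-text
# convention used by set_aug_text() above and in the parametrized strings below.
#
#     'foo|bar'  -> text "foobar", no selection, cursor between "foo" and "bar"
#     'f<oo>bar' -> text "foobar" with "oo" selected (| and <> cannot be mixed as input)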
def _validate_deletion(lineedit, method, text, deleted, rest):
"""Run and validate a text deletion method on the ReadLine bridge.
Args:
lineedit: The LineEdit instance.
method: Reference to the method on the bridge to test.
text: The starting 'augmented' text (see LineEdit.set_aug_text)
deleted: The text that should be deleted when the method is invoked.
rest: The augmented text that should remain after method is invoked.
"""
lineedit.set_aug_text(text)
method()
assert readlinecommands.bridge._deleted[lineedit] == deleted
assert lineedit.aug_text() == rest
lineedit.clear()
readlinecommands.rl_yank()
assert lineedit.aug_text() == deleted + '|'
@pytest.fixture
def lineedit(qtbot, monkeypatch):
"""Fixture providing a LineEdit."""
le = LineEdit()
qtbot.add_widget(le)
monkeypatch.setattr(QApplication.instance(), 'focusWidget', lambda: le)
return le
def test_none(qtbot):
"""Call each rl_* method with a None focusWidget."""
assert QApplication.instance().focusWidget() is None
for name, method in inspect.getmembers(readlinecommands,
inspect.isfunction):
if name.startswith('rl_'):
method()
@pytest.mark.parametrize('text, expected', [('f<oo>bar', 'fo|obar'),
('|foobar', '|foobar')])
def test_rl_backward_char(text, expected, lineedit):
"""Test rl_backward_char."""
lineedit.set_aug_text(text)
readlinecommands.rl_backward_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [('f<oo>bar', 'foob|ar'),
('foobar|', 'foobar|')])
def test_rl_forward_char(text, expected, lineedit):
"""Test rl_forward_char."""
lineedit.set_aug_text(text)
readlinecommands.rl_forward_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [('one <tw>o', 'one |two'),
('<one >two', '|one two'),
('|one two', '|one two')])
def test_rl_backward_word(text, expected, lineedit):
"""Test rl_backward_word."""
lineedit.set_aug_text(text)
readlinecommands.rl_backward_word()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [
pytest.param('<o>ne two', 'one| two', marks=fixme),
('<o>ne two', 'one |two'), # wrong
pytest.param('<one> two', 'one two|', marks=fixme),
('<one> two', 'one |two'), # wrong
('one t<wo>', 'one two|')
])
def test_rl_forward_word(text, expected, lineedit):
"""Test rl_forward_word."""
lineedit.set_aug_text(text)
readlinecommands.rl_forward_word()
assert lineedit.aug_text() == expected
def test_rl_beginning_of_line(lineedit):
"""Test rl_beginning_of_line."""
lineedit.set_aug_text('f<oo>bar')
readlinecommands.rl_beginning_of_line()
assert lineedit.aug_text() == '|foobar'
def test_rl_end_of_line(lineedit):
"""Test rl_end_of_line."""
lineedit.set_aug_text('f<oo>bar')
readlinecommands.rl_end_of_line()
assert lineedit.aug_text() == 'foobar|'
@pytest.mark.parametrize('text, expected', [('foo|bar', 'foo|ar'),
('foobar|', 'foobar|'),
('|foobar', '|oobar'),
('f<oo>bar', 'f|bar')])
def test_rl_delete_char(text, expected, lineedit):
"""Test rl_delete_char."""
lineedit.set_aug_text(text)
readlinecommands.rl_delete_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, expected', [('foo|bar', 'fo|bar'),
('foobar|', 'fooba|'),
('|foobar', '|foobar'),
('f<oo>bar', 'f|bar')])
def test_rl_backward_delete_char(text, expected, lineedit):
"""Test rl_backward_delete_char."""
lineedit.set_aug_text(text)
readlinecommands.rl_backward_delete_char()
assert lineedit.aug_text() == expected
@pytest.mark.parametrize('text, deleted, rest', [
('delete this| test', 'delete this', '| test'),
pytest.param('delete <this> test', 'delete this', '| test', marks=fixme),
('delete <this> test', 'delete ', '|this test'), # wrong
pytest.param('f<oo>bar', 'foo', '|bar', marks=fixme),
('f<oo>bar', 'f', '|oobar'), # wrong
])
def test_rl_unix_line_discard(lineedit, text, deleted, rest):
"""Delete from the cursor to the beginning of the line and yank back."""
_validate_deletion(lineedit, readlinecommands.rl_unix_line_discard,
text, deleted, rest)
@pytest.mark.parametrize('text, deleted, rest', [
('test |delete this', 'delete this', 'test |'),
pytest.param('<test >delete this', 'test delete this', 'test |',
marks=fixme),
('<test >delete this', 'test delete this', '|'), # wrong
])
def test_rl_kill_line(lineedit, text, deleted, rest):
"""Delete from the cursor to the end of line and yank back."""
_validate_deletion(lineedit, readlinecommands.rl_kill_line,
text, deleted, rest)
@pytest.mark.parametrize('text, deleted, rest', [
('test delete|foobar', 'delete', 'test |foobar'),
('test delete |foobar', 'delete ', 'test |foobar'),
('open -t github.com/foo/bar |', 'github.com/foo/bar ', 'open -t |'),
('open -t |github.com/foo/bar', '-t ', 'open |github.com/foo/bar'),
pytest.param('test del<ete>foobar', 'delete', 'test |foobar',
marks=fixme),
('test del<ete >foobar', 'del', 'test |ete foobar'), # wrong
])
def test_rl_unix_word_rubout(lineedit, text, deleted, rest):
"""Delete to word beginning and see if it comes back with yank."""
_validate_deletion(lineedit, readlinecommands.rl_unix_word_rubout,
text, deleted, rest)
@pytest.mark.parametrize('text, deleted, rest', [
('test delete|foobar', 'delete', 'test |foobar'),
('test delete |foobar', 'delete ', 'test |foobar'),
('open -t github.com/foo/bar |', 'bar ', 'open -t github.com/foo/|'),
('open -t |github.com/foo/bar', '-t ', 'open |github.com/foo/bar'),
('open foo/bar.baz|', 'bar.baz', 'open foo/|'),
])
def test_rl_unix_filename_rubout(lineedit, text, deleted, rest):
"""Delete filename segment and see if it comes back with yank."""
_validate_deletion(lineedit, readlinecommands.rl_unix_filename_rubout,
text, deleted, rest)
@pytest.mark.parametrize('text, deleted, rest', [
pytest.param('test foobar| delete', ' delete', 'test foobar|',
marks=fixme),
('test foobar| delete', ' ', 'test foobar|delete'), # wrong
pytest.param('test foo|delete bar', 'delete', 'test foo| bar',
marks=fixme),
('test foo|delete bar', 'delete ', 'test foo|bar'), # wrong
pytest.param('test foo<bar> delete', ' delete', 'test foobar|',
marks=fixme),
('test foo<bar>delete', 'bardelete', 'test foo|'), # wrong
])
def test_rl_kill_word(lineedit, text, deleted, rest):
"""Delete to word end and see if it comes back with yank."""
_validate_deletion(lineedit, readlinecommands.rl_kill_word,
text, deleted, rest)
@pytest.mark.parametrize('text, deleted, rest', [
('test delete|foobar', 'delete', 'test |foobar'),
('test delete |foobar', 'delete ', 'test |foobar'),
('open -t github.com/foo/bar |', 'bar ', 'open -t github.com/foo/|'),
('open -t |github.com/foo/bar', 't ', 'open -|github.com/foo/bar'),
pytest.param('test del<ete>foobar', 'delete', 'test |foobar', marks=fixme),
('test del<ete >foobar', 'del', 'test |ete foobar'), # wrong
('open foo/bar.baz|', 'baz', 'open foo/bar.|'),
])
def test_rl_backward_kill_word(lineedit, text, deleted, rest):
"""Delete to word beginning and see if it comes back with yank."""
_validate_deletion(lineedit, readlinecommands.rl_backward_kill_word,
text, deleted, rest)
def test_rl_yank_no_text(lineedit):
"""Test yank without having deleted anything."""
lineedit.clear()
readlinecommands.rl_yank()
assert lineedit.aug_text() == '|'
|
import logging
import string
from aiohttp import web
import prometheus_client
import voluptuous as vol
from homeassistant import core as hacore
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_ACTION,
CURRENT_HVAC_ACTIONS,
)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.humidifier.const import (
ATTR_AVAILABLE_MODES,
ATTR_HUMIDITY,
ATTR_MODE,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
CONTENT_TYPE_TEXT_PLAIN,
EVENT_STATE_CHANGED,
PERCENTAGE,
STATE_ON,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import entityfilter, state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.util.temperature import fahrenheit_to_celsius
_LOGGER = logging.getLogger(__name__)
API_ENDPOINT = "/api/prometheus"
DOMAIN = "prometheus"
CONF_FILTER = "filter"
CONF_PROM_NAMESPACE = "namespace"
CONF_COMPONENT_CONFIG = "component_config"
CONF_COMPONENT_CONFIG_GLOB = "component_config_glob"
CONF_COMPONENT_CONFIG_DOMAIN = "component_config_domain"
CONF_DEFAULT_METRIC = "default_metric"
CONF_OVERRIDE_METRIC = "override_metric"
COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema(
{vol.Optional(CONF_OVERRIDE_METRIC): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
{
vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
vol.Optional(CONF_PROM_NAMESPACE): cv.string,
vol.Optional(CONF_DEFAULT_METRIC): cv.string,
vol.Optional(CONF_OVERRIDE_METRIC): cv.string,
vol.Optional(CONF_COMPONENT_CONFIG, default={}): vol.Schema(
{cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}): vol.Schema(
{cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}): vol.Schema(
{cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
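# Hedged example (assumption): a minimal configuration.yaml entry accepted by
# CONFIG_SCHEMA above.
#
#     prometheus:
#       namespace: hass
#       filter:
#         include_domains:
#           - sensor
#           - switch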
def setup(hass, config):
"""Activate Prometheus component."""
hass.http.register_view(PrometheusView(prometheus_client))
conf = config[DOMAIN]
entity_filter = conf[CONF_FILTER]
namespace = conf.get(CONF_PROM_NAMESPACE)
climate_units = hass.config.units.temperature_unit
override_metric = conf.get(CONF_OVERRIDE_METRIC)
default_metric = conf.get(CONF_DEFAULT_METRIC)
component_config = EntityValues(
conf[CONF_COMPONENT_CONFIG],
conf[CONF_COMPONENT_CONFIG_DOMAIN],
conf[CONF_COMPONENT_CONFIG_GLOB],
)
metrics = PrometheusMetrics(
prometheus_client,
entity_filter,
namespace,
climate_units,
component_config,
override_metric,
default_metric,
)
hass.bus.listen(EVENT_STATE_CHANGED, metrics.handle_event)
return True
class PrometheusMetrics:
"""Model all of the metrics which should be exposed to Prometheus."""
def __init__(
self,
prometheus_cli,
entity_filter,
namespace,
climate_units,
component_config,
override_metric,
default_metric,
):
"""Initialize Prometheus Metrics."""
self.prometheus_cli = prometheus_cli
self._component_config = component_config
self._override_metric = override_metric
self._default_metric = default_metric
self._filter = entity_filter
self._sensor_metric_handlers = [
self._sensor_override_component_metric,
self._sensor_override_metric,
self._sensor_attribute_metric,
self._sensor_default_metric,
self._sensor_fallback_metric,
]
if namespace:
self.metrics_prefix = f"{namespace}_"
else:
self.metrics_prefix = ""
self._metrics = {}
self._climate_units = climate_units
def handle_event(self, event):
"""Listen for new messages on the bus, and add them to Prometheus."""
state = event.data.get("new_state")
if state is None:
return
entity_id = state.entity_id
_LOGGER.debug("Handling state update for %s", entity_id)
domain, _ = hacore.split_entity_id(entity_id)
if not self._filter(state.entity_id):
return
handler = f"_handle_{domain}"
if hasattr(self, handler) and state.state != STATE_UNAVAILABLE:
getattr(self, handler)(state)
labels = self._labels(state)
state_change = self._metric(
"state_change", self.prometheus_cli.Counter, "The number of state changes"
)
state_change.labels(**labels).inc()
entity_available = self._metric(
"entity_available",
self.prometheus_cli.Gauge,
"Entity is available (not in the unavailable state)",
)
entity_available.labels(**labels).set(float(state.state != STATE_UNAVAILABLE))
last_updated_time_seconds = self._metric(
"last_updated_time_seconds",
self.prometheus_cli.Gauge,
"The last_updated timestamp",
)
last_updated_time_seconds.labels(**labels).set(state.last_updated.timestamp())
def _handle_attributes(self, state):
for key, value in state.attributes.items():
metric = self._metric(
f"{state.domain}_attr_{key.lower()}",
self.prometheus_cli.Gauge,
f"{key} attribute of {state.domain} entity",
)
try:
value = float(value)
metric.labels(**self._labels(state)).set(value)
except (ValueError, TypeError):
pass
def _metric(self, metric, factory, documentation, extra_labels=None):
labels = ["entity", "friendly_name", "domain"]
if extra_labels is not None:
labels.extend(extra_labels)
try:
return self._metrics[metric]
except KeyError:
full_metric_name = self._sanitize_metric_name(
f"{self.metrics_prefix}{metric}"
)
self._metrics[metric] = factory(full_metric_name, documentation, labels)
return self._metrics[metric]
@staticmethod
def _sanitize_metric_name(metric: str) -> str:
return "".join(
[
c
if c in string.ascii_letters
or c in string.digits
or c == "_"
or c == ":"
else f"u{hex(ord(c))}"
for c in metric
]
)
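    # Example (assumption): "temperature_°c" -> "temperature_u0xb0c"; letters,
    # digits, "_" and ":" pass through unchanged, every other character is
    # replaced by "u" plus its hex codepoint.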
@staticmethod
def state_as_number(state):
"""Return a state casted to a float."""
try:
value = state_helper.state_as_number(state)
except ValueError:
_LOGGER.debug("Could not convert %s to float", state)
value = 0
return value
@staticmethod
def _labels(state):
return {
"entity": state.entity_id,
"domain": state.domain,
"friendly_name": state.attributes.get(ATTR_FRIENDLY_NAME),
}
def _battery(self, state):
if "battery_level" in state.attributes:
metric = self._metric(
"battery_level_percent",
self.prometheus_cli.Gauge,
"Battery level as a percentage of its capacity",
)
try:
value = float(state.attributes[ATTR_BATTERY_LEVEL])
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_binary_sensor(self, state):
metric = self._metric(
"binary_sensor_state",
self.prometheus_cli.Gauge,
"State of the binary sensor (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_input_boolean(self, state):
metric = self._metric(
"input_boolean_state",
self.prometheus_cli.Gauge,
"State of the input boolean (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_device_tracker(self, state):
metric = self._metric(
"device_tracker_state",
self.prometheus_cli.Gauge,
"State of the device tracker (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_person(self, state):
metric = self._metric(
"person_state", self.prometheus_cli.Gauge, "State of the person (0/1)"
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_light(self, state):
metric = self._metric(
"light_state", self.prometheus_cli.Gauge, "Load level of a light (0..1)"
)
try:
if "brightness" in state.attributes and state.state == STATE_ON:
value = state.attributes["brightness"] / 255.0
else:
value = self.state_as_number(state)
value = value * 100
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_lock(self, state):
metric = self._metric(
"lock_state", self.prometheus_cli.Gauge, "State of the lock (0/1)"
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_climate(self, state):
temp = state.attributes.get(ATTR_TEMPERATURE)
if temp:
if self._climate_units == TEMP_FAHRENHEIT:
temp = fahrenheit_to_celsius(temp)
metric = self._metric(
"temperature_c",
self.prometheus_cli.Gauge,
"Temperature in degrees Celsius",
)
metric.labels(**self._labels(state)).set(temp)
current_temp = state.attributes.get(ATTR_CURRENT_TEMPERATURE)
if current_temp:
if self._climate_units == TEMP_FAHRENHEIT:
current_temp = fahrenheit_to_celsius(current_temp)
metric = self._metric(
"current_temperature_c",
self.prometheus_cli.Gauge,
"Current Temperature in degrees Celsius",
)
metric.labels(**self._labels(state)).set(current_temp)
current_action = state.attributes.get(ATTR_HVAC_ACTION)
if current_action:
metric = self._metric(
"climate_action",
self.prometheus_cli.Gauge,
"HVAC action",
["action"],
)
for action in CURRENT_HVAC_ACTIONS:
metric.labels(**dict(self._labels(state), action=action)).set(
float(action == current_action)
)
def _handle_humidifier(self, state):
humidifier_target_humidity_percent = state.attributes.get(ATTR_HUMIDITY)
if humidifier_target_humidity_percent:
metric = self._metric(
"humidifier_target_humidity_percent",
self.prometheus_cli.Gauge,
"Target Relative Humidity",
)
metric.labels(**self._labels(state)).set(humidifier_target_humidity_percent)
metric = self._metric(
"humidifier_state",
self.prometheus_cli.Gauge,
"State of the humidifier (0/1)",
)
try:
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
current_mode = state.attributes.get(ATTR_MODE)
available_modes = state.attributes.get(ATTR_AVAILABLE_MODES)
if current_mode and available_modes:
metric = self._metric(
"humidifier_mode",
self.prometheus_cli.Gauge,
"Humidifier Mode",
["mode"],
)
for mode in available_modes:
metric.labels(**dict(self._labels(state), mode=mode)).set(
float(mode == current_mode)
)
def _handle_sensor(self, state):
unit = self._unit_string(state.attributes.get(ATTR_UNIT_OF_MEASUREMENT))
for metric_handler in self._sensor_metric_handlers:
metric = metric_handler(state, unit)
if metric is not None:
break
if metric is not None:
_metric = self._metric(
metric, self.prometheus_cli.Gauge, f"Sensor data measured in {unit}"
)
try:
value = self.state_as_number(state)
if unit == TEMP_FAHRENHEIT:
value = fahrenheit_to_celsius(value)
_metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._battery(state)
def _sensor_default_metric(self, state, unit):
"""Get default metric."""
return self._default_metric
@staticmethod
def _sensor_attribute_metric(state, unit):
"""Get metric based on device class attribute."""
metric = state.attributes.get(ATTR_DEVICE_CLASS)
if metric is not None:
return f"{metric}_{unit}"
return None
def _sensor_override_metric(self, state, unit):
"""Get metric from override in configuration."""
if self._override_metric:
return self._override_metric
return None
def _sensor_override_component_metric(self, state, unit):
"""Get metric from override in component confioguration."""
return self._component_config.get(state.entity_id).get(CONF_OVERRIDE_METRIC)
@staticmethod
def _sensor_fallback_metric(state, unit):
"""Get metric from fallback logic for compatibility."""
if unit in (None, ""):
_LOGGER.debug("Unsupported sensor: %s", state.entity_id)
return None
return f"sensor_unit_{unit}"
@staticmethod
def _unit_string(unit):
"""Get a formatted string of the unit."""
if unit is None:
return
units = {
TEMP_CELSIUS: "c",
TEMP_FAHRENHEIT: "c", # F should go into C metric
PERCENTAGE: "percent",
}
default = unit.replace("/", "_per_")
default = default.lower()
return units.get(unit, default)
def _handle_switch(self, state):
metric = self._metric(
"switch_state", self.prometheus_cli.Gauge, "State of the switch (0/1)"
)
try:
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._handle_attributes(state)
def _handle_zwave(self, state):
self._battery(state)
def _handle_automation(self, state):
metric = self._metric(
"automation_triggered_count",
self.prometheus_cli.Counter,
"Count of times an automation has been triggered",
)
metric.labels(**self._labels(state)).inc()
class PrometheusView(HomeAssistantView):
"""Handle Prometheus requests."""
url = API_ENDPOINT
name = "api:prometheus"
def __init__(self, prometheus_cli):
"""Initialize Prometheus view."""
self.prometheus_cli = prometheus_cli
async def get(self, request):
"""Handle request for Prometheus metrics."""
_LOGGER.debug("Received Prometheus metrics request")
return web.Response(
body=self.prometheus_cli.generate_latest(),
content_type=CONTENT_TYPE_TEXT_PLAIN,
)
|
import urllib2
import datetime
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
class DseOpsCenterCollector(diamond.collector.Collector):
last_run_time = 0
column_families = None
last_schema_sync_time = 0
def get_default_config_help(self):
config_help = super(DseOpsCenterCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'cluster_id': "Set cluster ID/name.\n",
'metrics': "You can list explicit metrics if you like,\n"
" by default all know metrics are included.\n",
'node_group': "Set node group name, any by default\n",
'default_tail_opts': "Chaning these is not recommended.",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
'cf-bf-false-positives',
'cf-bf-false-ratio',
'cf-bf-space-used',
'cf-keycache-hit-rate',
'cf-keycache-hits',
'cf-keycache-requests',
'cf-live-disk-used',
'cf-live-sstables',
'cf-pending-tasks',
'cf-read-latency-op',
'cf-read-ops',
'cf-rowcache-hit-rate',
'cf-rowcache-hits',
'cf-rowcache-requests',
'cf-total-disk-used',
'cf-write-latency-op',
'cf-write-ops',
'cms-collection-count',
'cms-collection-time',
'data-load',
'heap-committed',
'heap-max',
'heap-used',
'key-cache-hit-rate',
'key-cache-hits',
'key-cache-requests',
'nonheap-committed',
'nonheap-max',
'nonheap-used',
'pending-compaction-tasks',
'pending-flush-sorter-tasks',
'pending-flushes',
'pending-gossip-tasks',
'pending-hinted-handoff',
'pending-internal-responses',
'pending-memtable-post-flushers',
'pending-migrations',
'pending-misc-tasks',
'pending-read-ops',
'pending-read-repair-tasks',
'pending-repair-tasks',
'pending-repl-on-write-tasks',
'pending-request-responses',
'pending-streams',
'pending-write-ops',
'read-latency-op',
'read-ops',
'row-cache-hit-rate',
'row-cache-hits',
'row-cache-requests',
'solr-avg-time-per-req',
'solr-errors',
'solr-requests',
'solr-timeouts',
'total-bytes-compacted',
'total-compactions-completed',
'write-latency-op',
'write-ops',
]
config.update({
'host': '127.0.0.1',
'port': 8888,
'path': 'cassandra',
'node_group': '*',
'metrics': ','.join(metrics),
'default_tail_opts': '&forecast=0&node_aggregation=1',
})
return config
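    # Hedged example (assumption): a diamond collector config enabling this
    # collector with the defaults above. Hostname and cluster name are
    # hypothetical.
    #
    #     [[DseOpsCenterCollector]]
    #     enabled = True
    #     host = opscenter.example.com
    #     port = 8888
    #     cluster_id = MyCluster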
def _get_schema(self):
time_now = int(datetime.datetime.utcnow().strftime('%s'))
# skip re-fetching when we already have the schema and it is less than an hour old
if (self.column_families is not None and
(time_now - self.last_schema_sync_time < 3600)):
return False
url = 'http://%s:%i/%s/keyspaces' % (self.config['host'],
int(self.config['port']),
self.config['cluster_id'])
try:
response = urllib2.urlopen(url)
except Exception as err:
self.log.error('%s: %s', url, err)
return False
try:
result = json.load(response)
column_families = []
for ks in result:
i = []
for cf in result[ks]['column_families']:
i.append("%s.%s" % (ks, cf))
column_families.append(i)
self.column_families = ','.join(sum(column_families, []))
self.log.debug('DseOpsCenterCollector columnfamilies = %s',
self.column_families)
self.last_schema_sync_time = time_now
return True
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def _get(self, start, end, step=60):
self._get_schema()
url = ('http://%s:%i/%s/new-metrics?node_group=%s&columnfamilies=%s'
'&metrics=%s&start=%i&end=%i&step=%i%s') % (
self.config['host'],
int(self.config['port']),
self.config['cluster_id'],
self.config['node_group'],
self.column_families,
self.config['metrics'],
start, end, step,
self.config['default_tail_opts'])
try:
response = urllib2.urlopen(url)
except Exception as err:
self.log.error('%s: %s', url, err)
return False
self.log.debug('DseOpsCenterCollector metrics url = %s', url)
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def collect(self):
metrics = {}
if json is None:
self.log.error('Unable to import json')
return None
time_now = int(datetime.datetime.utcnow().strftime('%s'))
self.log.debug('DseOpsCenterCollector last_run_time = %i',
self.last_run_time)
if self.last_run_time == 0:
self.last_run_time = time_now - 60
if time_now - self.last_run_time >= 60:
result = self._get(self.last_run_time, time_now)
self.last_run_time = time_now
if not result:
return None
self.log.debug('DseOpsCenterCollector result = %s', result)
for data in result['data'][self.config['node_group']]:
if data['data-points'][0][0] is not None:
if 'columnfamily' in data:
k = '.'.join([data['metric'],
data['columnfamily']])
metrics[k] = data['data-points'][0][0]
else:
metrics[data['metric']] = data['data-points'][0][0]
self.log.debug('DseOpsCenterCollector metrics = %s', metrics)
for key in metrics:
self.publish(key, metrics[key])
else:
self.log.debug(
"DseOpsCenterCollector can only run once every minute")
return None
|
import numpy as np
from scipy.stats import rankdata, pearsonr
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.metrics.pairwise import cosine_distances
from scattertext.Scalers import stretch_0_to_1, dense_rank
from scattertext.termcompaction.AssociationCompactor import AssociationCompactor
from scattertext.termscoring.RankDifference import RankDifference
from scattertext.viz import ScatterplotStructure, VizDataAdapter
from scattertext.viz.PairPlotFromScattertextStructure import PairPlotFromScatterplotStructure
from scattertext.ScatterChartExplorer import ScatterChartExplorer
from scattertext.categoryprojector.CategoryProjector import CategoryProjector
from scattertext.viz.BasicHTMLFromScatterplotStructure import D3URLs
from scattertext.Scalers import scale_neg_1_to_1_with_zero_mean
from scattertext.termranking.AbsoluteFrequencyRanker import AbsoluteFrequencyRanker
def produce_category_focused_pairplot(corpus,
category,
category_projector=CategoryProjector(projector=TruncatedSVD(20)),
category_projection=None,
**kwargs):
'''
Produces a pair-plot which is focused on a single category.
:param corpus: TermDocMatrix
:param category: str, name of a category in the corpus
:param category_projector: CategoryProjector, a factor analysis of the category/feature vector
:param category_projection: CategoryProjection, None by default. If present, overrides category projector
:param kwargs: remaining kwargs for produce_pairplot
:return: str, HTML
'''
category_num = corpus.get_categories().index(category)
uncorrelated_components_projection = (category_projector.project(corpus)
if category_projection is None
else category_projection)
distances = cosine_distances(uncorrelated_components_projection.get_category_embeddings().T)
similarity_to_category_scores = -2 * (rankdata(distances[category_num]) - 0.5)
uncorrelated_components = uncorrelated_components_projection.get_projection()
least_correlated_dimension = min(
(np.abs(pearsonr(similarity_to_category_scores,
uncorrelated_components.T[i])[0]), i)
for i in range(uncorrelated_components.shape[1])
)[1]
projection_to_plot = np.array([uncorrelated_components.T[least_correlated_dimension],
similarity_to_category_scores]).T
return produce_pairplot(
corpus,
initial_category=category,
category_projection=uncorrelated_components_projection.use_alternate_projection(projection_to_plot),
category_focused=True,
**kwargs
)
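# Minimal usage sketch (assumes `corpus` is a scattertext TermDocMatrix built
# elsewhere and that 'democrat' is one of its category names):
#
#   html = produce_category_focused_pairplot(corpus, 'democrat')
#   with open('pairplot.html', 'w') as f:
#       f.write(html)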
def produce_pairplot(corpus,
asian_mode=False,
category_width_in_pixels=500,
category_height_in_pixels=700,
term_width_in_pixels=500,
term_height_in_pixels=700,
terms_to_show=3000,
scaler=scale_neg_1_to_1_with_zero_mean,
term_ranker=AbsoluteFrequencyRanker,
use_metadata=False,
category_projector=CategoryProjector(),
category_projection=None,
topic_model_term_lists=None,
topic_model_preview_size=10,
metadata_descriptions=None,
initial_category=None,
x_dim=0,
y_dim=1,
show_halo=True,
num_terms_in_halo=5,
category_color_func='(function(x) {return "#5555FF"})',
protocol='https',
d3_url_struct=D3URLs(),
category_focused=False,
verbose=False,
use_full_doc=True,
default_to_term_comparison=True,
category_x_label='',
category_y_label='',
category_show_axes_and_cross_hairs=False,
highlight_selected_category=True,
term_x_label=None, # used if default_to_term_comparison
term_y_label=None, # used if default_to_term_comparison
wordfish_style=False,
**kwargs):
if category_projection is None:
if use_metadata:
category_projection = category_projector.project_with_metadata(corpus, x_dim=x_dim, y_dim=y_dim)
else:
category_projection = category_projector.project(corpus, x_dim=x_dim, y_dim=y_dim)
if initial_category is None:
initial_category = corpus.get_categories()[0]
category_scatter_chart_explorer = _get_category_scatter_chart_explorer(
category_projection, scaler, term_ranker, verbose
)
category_scatter_chart_data = category_scatter_chart_explorer.to_dict(
category=initial_category,
max_docs_per_category=0,
)
category_tooltip_func = '(function(d) {return d.term})'
initial_category_idx = corpus.get_categories().index(initial_category)
term_plot_change_func = _get_term_plot_change_js_func(wordfish_style, category_focused, initial_category_idx)
category_scatterplot_structure = ScatterplotStructure(
VizDataAdapter(category_scatter_chart_data),
width_in_pixels=category_width_in_pixels,
height_in_pixels=category_height_in_pixels,
asian_mode=asian_mode,
use_non_text_features=True,
show_characteristic=False,
x_label=category_x_label,
y_label=category_y_label,
show_axes_and_cross_hairs=category_show_axes_and_cross_hairs,
full_data='getCategoryDataAndInfo()',
show_top_terms=False,
get_tooltip_content=category_tooltip_func,
color_func=category_color_func,
show_axes=False,
horizontal_line_y_position=0,
vertical_line_x_position=0,
unified_context=True,
show_category_headings=False,
show_cross_axes=True,
div_name='cat-plot',
alternative_term_func=term_plot_change_func,
highlight_selected_category=highlight_selected_category
)
compacted_corpus = AssociationCompactor(terms_to_show,
use_non_text_features=use_metadata).compact(corpus)
terms_to_hide = set(corpus.get_terms()) - set(compacted_corpus.get_terms())
if verbose:
print('num terms to hide', len(terms_to_hide))
print('num terms to show', compacted_corpus.get_num_terms())
term_scatter_chart_explorer = ScatterChartExplorer(
category_projection.get_corpus(),
minimum_term_frequency=0,
minimum_not_category_term_frequency=0,
pmi_threshold_coefficient=0,
term_ranker=term_ranker,
use_non_text_features=use_metadata,
score_transform=stretch_0_to_1,
verbose=verbose
).hide_terms(terms_to_hide)
if default_to_term_comparison:
if topic_model_term_lists is not None:
term_scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if metadata_descriptions is not None:
term_scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
if use_metadata:
tdf = corpus.get_metadata_freq_df('')
else:
tdf = corpus.get_term_freq_df('')
scores = RankDifference().get_scores(
tdf[initial_category], tdf[[c for c in corpus.get_categories() if c != initial_category]].sum(axis=1)
)
term_scatter_chart_data = term_scatter_chart_explorer.to_dict(
category=initial_category,
scores=scores,
include_term_category_counts=True,
transform=dense_rank,
**kwargs
)
y_label = initial_category
x_label = 'Not ' + initial_category
color_func = None
show_top_terms = True
show_axes = False
else:
term_projection = category_projection.get_term_projection()
original_x = term_projection['x']
original_y = term_projection['y']
x_coords = scaler(term_projection['x'])
y_coords = scaler(term_projection['y'])
x_label = term_x_label if term_x_label is not None else ''
y_label = term_y_label if term_y_label is not None else ''
show_axes = True
horizontal_line_y_position = 0
vertical_line_x_position = 0
term_scatter_chart_explorer.inject_coordinates(x_coords,
y_coords,
original_x=original_x,
original_y=original_y)
if topic_model_term_lists is not None:
term_scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if metadata_descriptions is not None:
term_scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
term_scatter_chart_data = term_scatter_chart_explorer.to_dict(
category=initial_category,
category_name=initial_category,
include_term_category_counts=True,
# transform=dense_rank,
)
color_func = '(function(x) {return "#5555FF"})'
show_top_terms = False
term_scatterplot_structure = ScatterplotStructure(
VizDataAdapter(term_scatter_chart_data),
width_in_pixels=term_width_in_pixels,
height_in_pixels=term_height_in_pixels,
use_full_doc=use_metadata or use_full_doc,
asian_mode=asian_mode,
use_non_text_features=use_metadata,
show_characteristic=False,
x_label=x_label,
y_label=y_label,
full_data='getTermDataAndInfo()',
show_top_terms=show_top_terms,
get_tooltip_content=None,
color_func=color_func,
# horizontal_line_y_position=0,
# vertical_line_x_position=0,
show_axes=show_axes,
topic_model_preview_size=topic_model_preview_size,
show_category_headings=False,
div_name='d3-div-1',
unified_context=True,
highlight_selected_category=highlight_selected_category
)
return PairPlotFromScatterplotStructure(
category_scatterplot_structure,
term_scatterplot_structure,
category_projection,
category_width_in_pixels,
category_height_in_pixels,
num_terms=num_terms_in_halo,
show_halo=show_halo,
d3_url_struct=d3_url_struct,
x_dim=x_dim,
y_dim=y_dim,
protocol=protocol
).to_html()
def _get_category_scatter_chart_explorer(category_projection, scaler, term_ranker, verbose):
category_scatter_chart_explorer = ScatterChartExplorer(
category_projection.get_corpus(),
minimum_term_frequency=0,
minimum_not_category_term_frequency=0,
pmi_threshold_coefficient=0,
filter_unigrams=False,
jitter=0,
max_terms=None,
# term_ranker=term_ranker,
use_non_text_features=True,
term_significance=None,
terms_to_include=None,
verbose=verbose
)
proj_df = category_projection.get_pandas_projection()
category_scatter_chart_explorer.inject_coordinates(
x_coords=scaler(proj_df['x']),
y_coords=scaler(proj_df['y']),
original_x=proj_df['x'],
original_y=proj_df['y']
)
return category_scatter_chart_explorer
def _get_term_plot_change_js_func(wordfish_style, category_focused, initial_category_idx):
if wordfish_style:
return '(function (termInfo) {termPlotInterface.yAxisLogCounts(termInfo.term); return false;})'
if category_focused:
return '(function (termInfo) {termPlotInterface.drawCategoryAssociation(%s, termInfo.i); return false;})' % (
initial_category_idx
)
return '(function (termInfo) {termPlotInterface.drawCategoryAssociation(termInfo.i); return false;})'
|
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import decorator
from .const import CHILD_CALLBACK, MYSENSORS_GATEWAY_READY, NODE_CALLBACK
from .device import get_mysensors_devices
from .helpers import discover_mysensors_platform, validate_set_msg
HANDLERS = decorator.Registry()
@HANDLERS.register("set")
async def handle_set(hass, hass_config, msg):
"""Handle a mysensors set message."""
validated = validate_set_msg(msg)
_handle_child_update(hass, hass_config, validated)
@HANDLERS.register("internal")
async def handle_internal(hass, hass_config, msg):
"""Handle a mysensors internal message."""
internal = msg.gateway.const.Internal(msg.sub_type)
handler = HANDLERS.get(internal.name)
if handler is None:
return
await handler(hass, hass_config, msg)
@HANDLERS.register("I_BATTERY_LEVEL")
async def handle_battery_level(hass, hass_config, msg):
"""Handle an internal battery level message."""
_handle_node_update(hass, msg)
@HANDLERS.register("I_HEARTBEAT_RESPONSE")
async def handle_heartbeat(hass, hass_config, msg):
"""Handle an heartbeat."""
_handle_node_update(hass, msg)
@HANDLERS.register("I_SKETCH_NAME")
async def handle_sketch_name(hass, hass_config, msg):
"""Handle an internal sketch name message."""
_handle_node_update(hass, msg)
@HANDLERS.register("I_SKETCH_VERSION")
async def handle_sketch_version(hass, hass_config, msg):
"""Handle an internal sketch version message."""
_handle_node_update(hass, msg)
@HANDLERS.register("I_GATEWAY_READY")
async def handle_gateway_ready(hass, hass_config, msg):
"""Handle an internal gateway ready message.
Set asyncio future result if gateway is ready.
"""
gateway_ready = hass.data.get(MYSENSORS_GATEWAY_READY.format(id(msg.gateway)))
if gateway_ready is None or gateway_ready.cancelled():
return
gateway_ready.set_result(True)
@callback
def _handle_child_update(hass, hass_config, validated):
"""Handle a child update."""
signals = []
# Update all platforms for the device via dispatcher.
# Add/update entity for validated children.
for platform, dev_ids in validated.items():
devices = get_mysensors_devices(hass, platform)
new_dev_ids = []
for dev_id in dev_ids:
if dev_id in devices:
signals.append(CHILD_CALLBACK.format(*dev_id))
else:
new_dev_ids.append(dev_id)
if new_dev_ids:
discover_mysensors_platform(hass, hass_config, platform, new_dev_ids)
for signal in set(signals):
# Only one signal per device is needed.
# A device can have multiple platforms, i.e. multiple schemas.
async_dispatcher_send(hass, signal)
@callback
def _handle_node_update(hass, msg):
"""Handle a node update."""
signal = NODE_CALLBACK.format(id(msg.gateway), msg.node_id)
async_dispatcher_send(hass, signal)
|
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
def load_class(name, setting):
"""Import module and creates class given by name in string."""
try:
module, attr = name.rsplit(".", 1)
except ValueError as error:
raise ImproperlyConfigured(
f'Error importing class {name} in {setting}: "{error}"'
)
try:
mod = import_module(module)
except ImportError as error:
raise ImproperlyConfigured(
f'Error importing module {module} in {setting}: "{error}"'
)
try:
return getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured(
f'Module "{module}" does not define a "{attr}" class in {setting}'
)
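# Example (illustrative; the dotted path and setting name are placeholders):
#
#   encoder_cls = load_class("json.JSONEncoder", "MY_ENCODER_SETTING")
#   encoder_cls  # -> <class 'json.JSONEncoder'>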
class ClassLoader:
"""Dict like object to lazy load list of classes."""
def __init__(self, name, construct=True):
self.name = name
self.construct = construct
def load_data(self):
result = {}
value = getattr(settings, self.name)
if value:
if not isinstance(value, (list, tuple)):
raise ImproperlyConfigured(
f"Setting {self.name} must be list or tuple!"
)
for path in value:
obj = load_class(path, self.name)
if self.construct:
obj = obj()
result[obj.get_identifier()] = obj
return result
@cached_property
def data(self):
return self.load_data()
def __getitem__(self, key):
return self.data.__getitem__(key)
def __setitem__(self, key, value):
self.data.__setitem__(key, value)
def get(self, key):
return self.data.get(key)
def items(self):
return self.data.items()
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def __iter__(self):
return self.data.__iter__()
def __len__(self):
return self.data.__len__()
def __contains__(self, item):
return self.data.__contains__(item)
def exists(self):
return bool(self.data)
def get_choices(self, empty=False, exclude=(), cond=lambda x: True):
result = [
(x, self[x].name)
for x in sorted(self)
if x not in exclude and cond(self[x])
]
if empty:
result.insert(0, ("", ""))
return result
|
import json
from coverage import env
from coverage.backward import byte_to_int, bytes_to_ints, binary_bytes, zip_longest
from coverage.misc import contract, new_contract
if env.PY3:
def _to_blob(b):
"""Convert a bytestring into a type SQLite will accept for a blob."""
return b
new_contract('blob', lambda v: isinstance(v, bytes))
else:
def _to_blob(b):
"""Convert a bytestring into a type SQLite will accept for a blob."""
return buffer(b) # pylint: disable=undefined-variable
new_contract('blob', lambda v: isinstance(v, buffer)) # pylint: disable=undefined-variable
@contract(nums='Iterable', returns='blob')
def nums_to_numbits(nums):
"""Convert `nums` into a numbits.
Arguments:
nums: a reusable iterable of integers, the line numbers to store.
Returns:
A binary blob.
"""
try:
nbytes = max(nums) // 8 + 1
except ValueError:
# nums was empty.
return _to_blob(b'')
b = bytearray(nbytes)
for num in nums:
b[num//8] |= 1 << num % 8
return _to_blob(bytes(b))
@contract(numbits='blob', returns='list[int]')
def numbits_to_nums(numbits):
"""Convert a numbits into a list of numbers.
Arguments:
numbits: a binary blob, the packed number set.
Returns:
A list of ints.
When registered as a SQLite function by :func:`register_sqlite_functions`,
this returns a string, a JSON-encoded list of ints.
"""
nums = []
for byte_i, byte in enumerate(bytes_to_ints(numbits)):
for bit_i in range(8):
if (byte & (1 << bit_i)):
nums.append(byte_i * 8 + bit_i)
return nums
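# Worked example (a sketch): line numbers are packed one bit per number,
# eight numbers per byte, so the round trip preserves the set:
#
#   blob = nums_to_numbits([1, 9])    # bit 1 of byte 0 and bit 1 of byte 1
#   blob                              # -> b'\x02\x02' on Python 3
#   numbits_to_nums(blob)             # -> [1, 9]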
@contract(numbits1='blob', numbits2='blob', returns='blob')
def numbits_union(numbits1, numbits2):
"""Compute the union of two numbits.
Returns:
A new numbits, the union of `numbits1` and `numbits2`.
"""
byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
return _to_blob(binary_bytes(b1 | b2 for b1, b2 in byte_pairs))
@contract(numbits1='blob', numbits2='blob', returns='blob')
def numbits_intersection(numbits1, numbits2):
"""Compute the intersection of two numbits.
Returns:
A new numbits, the intersection `numbits1` and `numbits2`.
"""
byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
intersection_bytes = binary_bytes(b1 & b2 for b1, b2 in byte_pairs)
return _to_blob(intersection_bytes.rstrip(b'\0'))
@contract(numbits1='blob', numbits2='blob', returns='bool')
def numbits_any_intersection(numbits1, numbits2):
"""Is there any number that appears in both numbits?
Determine whether two number sets have a non-empty intersection. This is
faster than computing the intersection.
Returns:
A bool, True if there is any number in both `numbits1` and `numbits2`.
"""
byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
return any(b1 & b2 for b1, b2 in byte_pairs)
@contract(num='int', numbits='blob', returns='bool')
def num_in_numbits(num, numbits):
"""Does the integer `num` appear in `numbits`?
Returns:
A bool, True if `num` is a member of `numbits`.
"""
nbyte, nbit = divmod(num, 8)
if nbyte >= len(numbits):
return False
return bool(byte_to_int(numbits[nbyte]) & (1 << nbit))
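# Set-style semantics, continuing the sketch above:
#
#   a = nums_to_numbits([1, 2, 3])
#   b = nums_to_numbits([3, 4])
#   numbits_to_nums(numbits_union(a, b))          # -> [1, 2, 3, 4]
#   numbits_to_nums(numbits_intersection(a, b))   # -> [3]
#   numbits_any_intersection(a, b)                # -> True
#   num_in_numbits(4, a)                          # -> False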
def register_sqlite_functions(connection):
"""
Define numbits functions in a SQLite connection.
This defines these functions for use in SQLite statements:
* :func:`numbits_union`
* :func:`numbits_intersection`
* :func:`numbits_any_intersection`
* :func:`num_in_numbits`
* :func:`numbits_to_nums`
`connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
object. After creating the connection, pass it to this function to
register the numbits functions. Then you can use numbits functions in your
queries::
import sqlite3
from coverage.numbits import register_sqlite_functions
conn = sqlite3.connect('example.db')
register_sqlite_functions(conn)
c = conn.cursor()
# Kind of a nonsense query: find all the files and contexts that
# executed line 47 in any file:
c.execute(
"select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
(47,)
)
"""
connection.create_function("numbits_union", 2, numbits_union)
connection.create_function("numbits_intersection", 2, numbits_intersection)
connection.create_function("numbits_any_intersection", 2, numbits_any_intersection)
connection.create_function("num_in_numbits", 2, num_in_numbits)
connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
|
from __future__ import division
import argparse
import multiprocessing
import numpy as np
import chainer
from chainer.training import extensions
from chainer.training.triggers import ManualScheduleTrigger
import chainermn
from chainercv.chainer_experimental.datasets.sliceable \
import TransformDataset
from chainercv.chainer_experimental.training.extensions import make_shift
from chainercv.datasets import sbd_instance_segmentation_label_names
from chainercv.datasets import SBDInstanceSegmentationDataset
from chainercv.experimental.links import FCISResNet101
from chainercv.experimental.links import FCISTrainChain
from chainercv.extensions import InstanceSegmentationVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from train_sbd import concat_examples
from train_sbd import Transform
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
try:
import cv2
cv2.setNumThreads(0)
except ImportError:
pass
def main():
parser = argparse.ArgumentParser(
description='ChainerCV training example: FCIS')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument(
'--lr', '-l', type=float, default=None,
help='Learning rate for multi GPUs')
parser.add_argument('--batchsize', type=int, default=8)
parser.add_argument('--epoch', '-e', type=int, default=42)
parser.add_argument('--cooldown-epoch', '-ce', type=int, default=28)
args = parser.parse_args()
# https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
if hasattr(multiprocessing, 'set_start_method'):
multiprocessing.set_start_method('forkserver')
p = multiprocessing.Process()
p.start()
p.join()
# chainermn
comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank
np.random.seed(args.seed)
# model
fcis = FCISResNet101(
n_fg_class=len(sbd_instance_segmentation_label_names),
pretrained_model='imagenet', iter2=False)
fcis.use_preset('evaluate')
model = FCISTrainChain(fcis)
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
# dataset
train_dataset = TransformDataset(
SBDInstanceSegmentationDataset(split='train'),
('img', 'mask', 'label', 'bbox', 'scale'),
Transform(model.fcis))
if comm.rank == 0:
indices = np.arange(len(train_dataset))
else:
indices = None
indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
train_dataset = train_dataset.slice[indices]
train_iter = chainer.iterators.SerialIterator(
train_dataset, batch_size=args.batchsize // comm.size)
if comm.rank == 0:
test_dataset = SBDInstanceSegmentationDataset(split='val')
test_iter = chainer.iterators.SerialIterator(
test_dataset, batch_size=1, repeat=False, shuffle=False)
# optimizer
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9),
comm)
optimizer.setup(model)
model.fcis.head.conv1.W.update_rule.add_hook(GradientScaling(3.0))
model.fcis.head.conv1.b.update_rule.add_hook(GradientScaling(3.0))
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
for param in model.params():
if param.name in ['beta', 'gamma']:
param.update_rule.enabled = False
model.fcis.extractor.conv1.disable_update()
model.fcis.extractor.res2.disable_update()
updater = chainer.training.updater.StandardUpdater(
train_iter, optimizer, converter=concat_examples,
device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
@make_shift('lr')
def lr_scheduler(trainer):
if args.lr is None:
base_lr = 0.0005 * args.batchsize
else:
base_lr = args.lr
epoch = trainer.updater.epoch
if epoch < args.cooldown_epoch:
rate = 1
else:
rate = 0.1
return rate * base_lr
trainer.extend(lr_scheduler)
if comm.rank == 0:
# interval
log_interval = 100, 'iteration'
plot_interval = 3000, 'iteration'
print_interval = 20, 'iteration'
# training extensions
trainer.extend(
extensions.snapshot_object(
model.fcis, filename='snapshot_model.npz'),
trigger=(args.epoch, 'epoch'))
trainer.extend(
extensions.observe_lr(),
trigger=log_interval)
trainer.extend(
extensions.LogReport(log_name='log.json', trigger=log_interval))
trainer.extend(extensions.PrintReport([
'iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'main/roi_mask_loss',
'validation/main/map',
]), trigger=print_interval)
trainer.extend(
extensions.ProgressBar(update_interval=10))
if extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(
['main/loss'],
file_name='loss.png', trigger=plot_interval),
trigger=plot_interval)
trainer.extend(
InstanceSegmentationVOCEvaluator(
test_iter, model.fcis,
iou_thresh=0.5, use_07_metric=True,
label_names=sbd_instance_segmentation_label_names),
trigger=ManualScheduleTrigger(
[len(train_dataset) * args.cooldown_epoch,
len(train_dataset) * args.epoch], 'iteration'))
trainer.extend(extensions.dump_graph('main/loss'))
trainer.run()
if __name__ == '__main__':
main()
|
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import plugin_tags_to_id_list, replace_plugin_tags
from cms.api import add_plugin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.models import CascadeElement
def deserialize_to_placeholder(placeholder, data, language):
def plugins_from_data(placeholder, parent, data):
for plugin_type, data, children_data in data:
try:
plugin_class = plugin_pool.get_plugin(plugin_type)
except Exception:
continue
kwargs = dict(data)
inlines = kwargs.pop('inlines', [])
shared_glossary = kwargs.pop('shared_glossary', None)
try:
instance = add_plugin(placeholder, plugin_class, language, target=parent, **kwargs)
except Exception:
continue
if isinstance(instance, CascadeElement):
instance.plugin_class.add_inline_elements(instance, inlines)
instance.plugin_class.add_shared_reference(instance, shared_glossary)
# For some unknown reason add_plugin sets instance.numchild to 0,
# but fixing it and save()-ing 'instance' triggers some filters in an unwanted manner.
plugins_from_data(placeholder, instance, children_data)
if isinstance(instance, Text):
# we must convert the old plugin IDs into the new ones,
# otherwise links are not displayed
id_dict = dict(zip(
plugin_tags_to_id_list(instance.body),
(t[0] for t in instance.get_children().values_list('id'))
))
instance.body = replace_plugin_tags(instance.body, id_dict)
instance.save()
plugins_from_data(placeholder, None, data['plugins'])
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import (
ATTR_FAN_SPEED,
DOMAIN,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_DOCKED,
STATE_RETURNING,
)
_LOGGER = logging.getLogger(__name__)
VALID_STATES_TOGGLE = {STATE_ON, STATE_OFF}
VALID_STATES_STATE = {
STATE_CLEANING,
STATE_DOCKED,
STATE_IDLE,
STATE_RETURNING,
STATE_PAUSED,
}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if not (state.state in VALID_STATES_TOGGLE or state.state in VALID_STATES_STATE):
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state and cur_state.attributes.get(
ATTR_FAN_SPEED
) == state.attributes.get(ATTR_FAN_SPEED):
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if cur_state.state != state.state:
# Wrong state
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
elif state.state == STATE_CLEANING:
service = SERVICE_START
elif state.state in [STATE_DOCKED, STATE_RETURNING]:
service = SERVICE_RETURN_TO_BASE
elif state.state == STATE_IDLE:
service = SERVICE_STOP
elif state.state == STATE_PAUSED:
service = SERVICE_PAUSE
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
if cur_state.attributes.get(ATTR_FAN_SPEED) != state.attributes.get(ATTR_FAN_SPEED):
# Wrong fan speed
service_data["fan_speed"] = state.attributes[ATTR_FAN_SPEED]
await hass.services.async_call(
DOMAIN, SERVICE_SET_FAN_SPEED, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Vacuum states."""
# Reproduce states in parallel.
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
|
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import reporter
import numpy as np
class PixelwiseSoftmaxClassifier(chainer.Chain):
"""A pixel-wise classifier.
It computes the loss based on a given input/label pair for
semantic segmentation.
Args:
predictor (~chainer.Link): Predictor network.
ignore_label (int): A class id that is going to be ignored in
evaluation. The default value is -1.
class_weight (array): An array
that contains constant weights that will be multiplied with the
loss values along with the channel dimension. This will be
used in :func:`chainer.functions.softmax_cross_entropy`.
"""
def __init__(self, predictor, ignore_label=-1, class_weight=None):
super(PixelwiseSoftmaxClassifier, self).__init__()
with self.init_scope():
self.predictor = predictor
self.ignore_label = ignore_label
if class_weight is not None:
self.class_weight = np.asarray(class_weight, dtype=np.float32)
else:
self.class_weight = class_weight
def to_cpu(self):
super(PixelwiseSoftmaxClassifier, self).to_cpu()
if self.class_weight is not None:
self.class_weight = cuda.to_cpu(self.class_weight)
def to_gpu(self, device=None):
super(PixelwiseSoftmaxClassifier, self).to_gpu(device)
if self.class_weight is not None:
self.class_weight = cuda.to_gpu(self.class_weight, device)
def forward(self, x, t):
"""Computes the loss value for an image and label pair.
Args:
x (~chainer.Variable): A variable with a batch of images.
t (~chainer.Variable): A variable with the ground truth
image-wise label.
Returns:
~chainer.Variable: Loss value.
"""
self.y = self.predictor(x)
self.loss = F.softmax_cross_entropy(
self.y, t, class_weight=self.class_weight,
ignore_label=self.ignore_label)
reporter.report({'loss': self.loss}, self)
return self.loss
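# Minimal usage sketch (assumes a ChainerCV segmentation predictor such as
# SegNetBasic is available; the names below are illustrative, not part of
# this module):
#
#   from chainercv.links import SegNetBasic
#   model = PixelwiseSoftmaxClassifier(SegNetBasic(n_class=11))
#   loss = model(img_batch, label_batch)  # scalar Variable, also reported as 'loss'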
|
import abc
import logging
import re
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import resource
flags.DEFINE_string('smb_tier', 'Standard', 'SMB Mode')
FLAGS = flags.FLAGS
_MOUNT_SMB_RE = re.compile(r'.*type smb \((.*?)\)', re.MULTILINE)
def GetSmbServiceClass(cloud):
"""Get the SMB service corresponding to the cloud.
Args:
cloud: The name of the cloud to supply the SMB service.
Returns:
The SMB service class for this cloud.
Raises:
NotImplementedError: No service found for this cloud.
"""
return resource.GetResourceClass(BaseSmbService, CLOUD=cloud)
class BaseSmbService(resource.BaseResource):
"""Object representing an SMB Service."""
# subclasses must override this with a list or tuple
SMB_TIERS = None
RESOURCE_TYPE = 'BaseSmbService'
DEFAULT_SMB_VERSION = None
VOLUME_NAME = ''
def __init__(self, disk_spec, zone):
super(BaseSmbService, self).__init__()
self.disk_spec = disk_spec
self.zone = zone
self.smb_tier = FLAGS.smb_tier or self.DEFAULT_TIER
if self.smb_tier and self.SMB_TIERS and self.smb_tier not in self.SMB_TIERS:
# SMB service does not have to have a list of smb_tiers nor does it have
# to be implemented by a provider
raise errors.Config.InvalidValue(
('smb_tier "%s" not in acceptable list "%s" '
'for cloud %s') % (self.smb_tier, self.SMB_TIERS, self.CLOUD))
logging.debug('%s SMB service with smb_tier %s zone %s default version %s',
self.CLOUD, self.smb_tier, self.zone,
self.DEFAULT_SMB_VERSION)
def CreateSmbDisk(self):
return disk.SmbDisk(self.disk_spec, self.GetRemoteAddress(),
self.GetStorageAccountAndKey(),
self.DEFAULT_SMB_VERSION, self.smb_tier)
@abc.abstractmethod
def _IsReady(self):
"""Boolean function to determine if disk is SMB mountable."""
pass
@abc.abstractmethod
def GetRemoteAddress(self):
"""The SMB server's address."""
pass
@abc.abstractmethod
def GetStorageAccountAndKey(self):
"""The SMB server's storage account's name and key."""
pass
|
import logging
from pmsensor import serial_pm as pm
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_BRAND = "brand"
CONF_SERIAL_DEVICE = "serial_device"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BRAND): cv.string,
vol.Required(CONF_SERIAL_DEVICE): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available PM sensors."""
try:
coll = pm.PMDataCollector(
config.get(CONF_SERIAL_DEVICE), pm.SUPPORTED_SENSORS[config.get(CONF_BRAND)]
)
except KeyError:
_LOGGER.error(
"Brand %s not supported\n supported brands: %s",
config.get(CONF_BRAND),
pm.SUPPORTED_SENSORS.keys(),
)
return
except OSError as err:
_LOGGER.error(
"Could not open serial connection to %s (%s)",
config.get(CONF_SERIAL_DEVICE),
err,
)
return
dev = []
for pmname in coll.supported_values():
if config.get(CONF_NAME) is not None:
name = "{} PM{}".format(config.get(CONF_NAME), pmname)
else:
name = f"PM{pmname}"
dev.append(ParticulateMatterSensor(coll, name, pmname))
add_entities(dev)
class ParticulateMatterSensor(Entity):
"""Representation of an Particulate matter sensor."""
def __init__(self, pmDataCollector, name, pmname):
"""Initialize a new PM sensor."""
self._name = name
self._pmname = pmname
self._state = None
self._collector = pmDataCollector
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
def update(self):
"""Read from sensor and update the state."""
_LOGGER.debug("Reading data from PM sensor")
try:
self._state = self._collector.read_data()[self._pmname]
except KeyError:
_LOGGER.error("Could not read PM%s value", self._pmname)
|
import logging
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
GIT_REPO = 'https://github.com/aerospike/aerospike-server.git'
GIT_TAG = '4.0.0.1'
AEROSPIKE_DIR = '%s/aerospike-server' % linux_packages.INSTALL_DIR
AEROSPIKE_CONF_PATH = '%s/as/etc/aerospike_dev.conf' % AEROSPIKE_DIR
AEROSPIKE_DEFAULT_TELNET_PORT = 3003
MEMORY = 'memory'
DISK = 'disk'
flags.DEFINE_enum('aerospike_storage_type', MEMORY, [MEMORY, DISK],
'The type of storage to use for Aerospike data. The type of '
'disk is controlled by the "data_disk_type" flag.')
flags.DEFINE_integer('aerospike_replication_factor', 1,
'Replication factor for aerospike server.')
flags.DEFINE_integer('aerospike_transaction_threads_per_queue', 4,
'Number of threads per transaction queue.')
flags.DEFINE_integer('aerospike_vms', 1,
'Number of vms (nodes) for aerospike server.')
def _Install(vm):
"""Installs the Aerospike server on the VM."""
vm.Install('build_tools')
vm.Install('lua5_1')
vm.Install('openssl')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, AEROSPIKE_DIR))
# Comment out the -Werror flag and compile. With newer compilers (gcc 7.x),
# compilation breaks because warnings are treated as errors.
vm.RemoteCommand(
'cd {0} && git checkout {1} && git submodule update --init '
'&& sed -i "s/COMMON_CFLAGS += -Werror/# $COMMON_CFLAGS += -Werror/" '
'{0}/make_in/Makefile.in '
'&& make'.format(AEROSPIKE_DIR, GIT_TAG))
def YumInstall(vm):
"""Installs the aerospike_server package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the aerospike_server package on the VM."""
vm.InstallPackages('netcat-openbsd zlib1g-dev')
_Install(vm)
@vm_util.Retry(poll_interval=5, timeout=300,
retryable_exceptions=(errors.Resource.RetryableCreationError))
def _WaitForServerUp(server):
"""Block until the Aerospike server is up and responsive.
Times out after 5 minutes and raises an exception. Before the timeout
expires, any exceptions are caught and the status check is retried.
We check the server status by connecting to Aerospike's out-of-band
telnet management port and issuing a 'status' command, which should
return 'ok' if the server is ready. Per the Aerospike docs this always
returns 'ok', i.e. if the server is not up the connection will fail or
we will get no response at all.
Args:
server: VirtualMachine Aerospike has been installed on.
Raises:
errors.Resource.RetryableCreationError when response is not 'ok' or if there
is an error connecting to the telnet port or otherwise running the remote
check command.
"""
address = server.internal_ip
port = AEROSPIKE_DEFAULT_TELNET_PORT
logging.info("Trying to connect to Aerospike at %s:%s" % (address, port))
try:
def _NetcatPrefix():
_, stderr = server.RemoteCommand('nc -h', ignore_failure=True)
if '-q' in stderr:
return 'nc -q 1'
else:
return 'nc -i 1'
out, _ = server.RemoteCommand(
'(echo -e "status\n" ; sleep 1)| %s %s %s' % (
_NetcatPrefix(), address, port))
if out.startswith('ok'):
logging.info("Aerospike server status is OK. Server up and running.")
return
except errors.VirtualMachine.RemoteCommandError as e:
raise errors.Resource.RetryableCreationError(
"Aerospike server not up yet: %s." % str(e))
else:
raise errors.Resource.RetryableCreationError(
"Aerospike server not up yet. Expected 'ok' but got '%s'." % out)
def ConfigureAndStart(server, seed_node_ips=None):
"""Prepare the Aerospike server on a VM.
Args:
server: VirtualMachine to install and start Aerospike on.
seed_node_ips: internal IP addresses of seed nodes in the cluster.
Leave unspecified for a single-node deployment.
"""
server.Install('aerospike_server')
seed_node_ips = seed_node_ips or [server.internal_ip]
if FLAGS.aerospike_storage_type == DISK:
devices = [scratch_disk.GetDevicePath()
for scratch_disk in server.scratch_disks]
else:
devices = []
server.RenderTemplate(
data.ResourcePath('aerospike.conf.j2'), AEROSPIKE_CONF_PATH,
{'devices': devices,
'memory_size': int(server.total_memory_kb * 0.8),
'seed_addresses': seed_node_ips,
'transaction_threads_per_queue':
FLAGS.aerospike_transaction_threads_per_queue,
'replication_factor': FLAGS.aerospike_replication_factor})
for scratch_disk in server.scratch_disks:
if scratch_disk.mount_point:
server.RemoteCommand('sudo umount %s' % scratch_disk.mount_point)
server.RemoteCommand('cd %s && make init' % AEROSPIKE_DIR)
# Persist the nohup command past the ssh session
# "sh -c 'cd /whereever; nohup ./whatever > /dev/null 2>&1 &'"
cmd = (f'sh -c \'cd {AEROSPIKE_DIR} && nohup sudo make start > '
f'~/aerospike.log 2>&1 &\'')
server.RemoteCommand(cmd)
_WaitForServerUp(server)
logging.info('Aerospike server configured and started.')
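# Typical call sequence (a sketch; `vms` would come from the benchmark spec
# and is not defined in this module):
#
#   seed_ips = [vm.internal_ip for vm in vms]
#   for vm in vms:
#       ConfigureAndStart(vm, seed_node_ips=seed_ips)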
def Uninstall(vm):
vm.RemoteCommand('rm -rf %s' % AEROSPIKE_DIR)
|
import collections
import time
import numpy as np
PERCENTILES_LIST = 0.1, 1, 5, 10, 50, 90, 95, 99, 99.9
_SAMPLE_FIELDS = 'metric', 'value', 'unit', 'metadata', 'timestamp'
def PercentileCalculator(numbers, percentiles=PERCENTILES_LIST):
"""Computes percentiles, stddev and mean on a set of numbers.
Args:
numbers: A sequence of numbers to compute percentiles for.
percentiles: If given, a list of percentiles to compute. Can be
floats, ints or longs.
Returns:
A dictionary of percentiles.
Raises:
ValueError, if numbers is empty or if a percentile is outside of
[0, 100].
"""
# 'if not numbers' will fail if numbers is an np.Array or pd.Series.
if not len(numbers):
raise ValueError("Can't compute percentiles of empty list.")
numbers_sorted = sorted(numbers)
count = len(numbers_sorted)
total = sum(numbers_sorted)
result = {}
for percentile in percentiles:
float(percentile) # verify type
if percentile < 0.0 or percentile > 100.0:
raise ValueError('Invalid percentile %s' % percentile)
percentile_string = 'p%s' % str(percentile)
index = int(count * float(percentile) / 100.0)
index = min(index, count - 1) # Correction to handle 100th percentile.
result[percentile_string] = numbers_sorted[index]
average = total / float(count)
result['average'] = average
if count > 1:
total_of_squares = sum([(i - average) ** 2 for i in numbers])
result['stddev'] = (total_of_squares / (count - 1)) ** 0.5
else:
result['stddev'] = 0
return result
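# Worked example (a sketch): for [1, 2, 3, 4, 5] the p50 index is
# int(5 * 50 / 100) = 2, so p50 == 3; average == 3.0 and
# stddev == (10 / 4) ** 0.5, roughly 1.58:
#
#   PercentileCalculator([1, 2, 3, 4, 5], percentiles=(50,))
#   # -> {'p50': 3, 'average': 3.0, 'stddev': 1.58...}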
def GeoMean(iterable):
"""Calculate the geometric mean of a collection of numbers.
Args:
iterable: A sequence of numbers.
Returns:
The geometric mean
Raises:
ValueError, if the iterable is empty.
"""
arr = np.fromiter(iterable, dtype='float')
if not arr.size:
raise ValueError("Can't compute geomean of empty list.")
return arr.prod() ** (1 / len(arr))
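# Worked example (a sketch): GeoMean([1, 4, 16]) computes
# (1 * 4 * 16) ** (1 / 3) == 64 ** (1 / 3), roughly 4.0
# (subject to floating-point rounding).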
class Sample(collections.namedtuple('Sample', _SAMPLE_FIELDS)):
"""A performance sample.
Attributes:
metric: string. Name of the metric within the benchmark.
value: float. Result for 'metric'.
unit: string. Units for 'value'.
metadata: dict. Additional metadata to include with the sample.
timestamp: float. Unix timestamp.
"""
def __new__(cls, metric, value, unit, metadata=None, timestamp=None,
**kwargs):
if timestamp is None:
timestamp = time.time()
return super(Sample, cls).__new__(cls, metric, float(value or 0.0), unit,
metadata=metadata or {},
timestamp=timestamp,
**kwargs)
def asdict(self):
"""Converts the Sample to a dictionary."""
return self._asdict()
|
import sys
import cheroot.wsgi
import cheroot.server
import cherrypy
class CPWSGIHTTPRequest(cheroot.server.HTTPRequest):
"""Wrapper for cheroot.server.HTTPRequest.
This is a compatibility layer that preserves the URI parsing mode
as it was before Cheroot v5.8.0.
"""
def __init__(self, server, conn):
"""Initialize HTTP request container instance.
Args:
server (cheroot.server.HTTPServer):
web server object receiving this request
conn (cheroot.server.HTTPConnection):
HTTP connection object for this request
"""
super(CPWSGIHTTPRequest, self).__init__(
server, conn, proxy_mode=True
)
class CPWSGIServer(cheroot.wsgi.Server):
"""Wrapper for cheroot.wsgi.Server.
cheroot has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications. Therefore,
we wrap it here, so we can set our own mount points from cherrypy.tree
and apply some attributes from config -> cherrypy.server -> wsgi.Server.
"""
fmt = 'CherryPy/{cherrypy.__version__} {cheroot.wsgi.Server.version}'
version = fmt.format(**globals())
def __init__(self, server_adapter=cherrypy.server):
"""Initialize CPWSGIServer instance.
Args:
server_adapter (cherrypy._cpserver.Server): ...
"""
self.server_adapter = server_adapter
self.max_request_header_size = (
self.server_adapter.max_request_header_size or 0
)
self.max_request_body_size = (
self.server_adapter.max_request_body_size or 0
)
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
self.wsgi_version = self.server_adapter.wsgi_version
super(CPWSGIServer, self).__init__(
server_adapter.bind_addr, cherrypy.tree,
self.server_adapter.thread_pool,
server_name,
max=self.server_adapter.thread_pool_max,
request_queue_size=self.server_adapter.socket_queue_size,
timeout=self.server_adapter.socket_timeout,
shutdown_timeout=self.server_adapter.shutdown_timeout,
accepted_queue_size=self.server_adapter.accepted_queue_size,
accepted_queue_timeout=self.server_adapter.accepted_queue_timeout,
peercreds_enabled=self.server_adapter.peercreds,
peercreds_resolve_enabled=self.server_adapter.peercreds_resolve,
)
self.ConnectionClass.RequestHandlerClass = CPWSGIHTTPRequest
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
if sys.version_info >= (3, 0):
ssl_module = self.server_adapter.ssl_module or 'builtin'
else:
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.stats['Enabled'] = getattr(
self.server_adapter, 'statistics', False)
def error_log(self, msg='', level=20, traceback=False):
"""Write given message to the error log."""
cherrypy.engine.log(msg, level, traceback)
|
from tests.async_mock import MagicMock
def _generate_mock_feed_entry(
external_id,
title,
distance_to_home,
coordinates,
attribution=None,
alert_level=None,
country=None,
duration_in_week=None,
event_name=None,
event_type_short=None,
event_type=None,
from_date=None,
to_date=None,
population=None,
severity=None,
vulnerability=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.attribution = attribution
feed_entry.alert_level = alert_level
feed_entry.country = country
feed_entry.duration_in_week = duration_in_week
feed_entry.event_name = event_name
feed_entry.event_type_short = event_type_short
feed_entry.event_type = event_type
feed_entry.from_date = from_date
feed_entry.to_date = to_date
feed_entry.population = population
feed_entry.severity = severity
feed_entry.vulnerability = vulnerability
return feed_entry
|
from datetime import datetime, timedelta, timezone
import logging
import re
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_NAME,
CONF_API_KEY,
CONF_ID,
CONF_NAME,
HTTP_OK,
TIME_MINUTES,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESS_CODE = "AccessCode"
ATTR_AVG_TIME = "AverageTime"
ATTR_CURRENT_TIME = "CurrentTime"
ATTR_DESCRIPTION = "Description"
ATTR_TIME_UPDATED = "TimeUpdated"
ATTR_TRAVEL_TIME_ID = "TravelTimeID"
ATTRIBUTION = "Data provided by WSDOT"
CONF_TRAVEL_TIMES = "travel_time"
ICON = "mdi:car"
RESOURCE = (
"http://www.wsdot.wa.gov/Traffic/api/TravelTimes/"
"TravelTimesREST.svc/GetTravelTimeAsJson"
)
SCAN_INTERVAL = timedelta(minutes=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_TRAVEL_TIMES): [
{vol.Required(CONF_ID): cv.string, vol.Optional(CONF_NAME): cv.string}
],
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the WSDOT sensor."""
sensors = []
for travel_time in config.get(CONF_TRAVEL_TIMES):
name = travel_time.get(CONF_NAME) or travel_time.get(CONF_ID)
sensors.append(
WashingtonStateTravelTimeSensor(
name, config.get(CONF_API_KEY), travel_time.get(CONF_ID)
)
)
add_entities(sensors, True)
class WashingtonStateTransportSensor(Entity):
"""
Sensor that reads the WSDOT web API.
WSDOT provides ferry schedules, toll rates, weather conditions,
mountain pass conditions, and more. Subclasses of this
can read them and make them available.
"""
def __init__(self, name, access_code):
"""Initialize the sensor."""
self._data = {}
self._access_code = access_code
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
class WashingtonStateTravelTimeSensor(WashingtonStateTransportSensor):
"""Travel time sensor from WSDOT."""
def __init__(self, name, access_code, travel_time_id):
"""Construct a travel time sensor."""
self._travel_time_id = travel_time_id
WashingtonStateTransportSensor.__init__(self, name, access_code)
def update(self):
"""Get the latest data from WSDOT."""
params = {
ATTR_ACCESS_CODE: self._access_code,
ATTR_TRAVEL_TIME_ID: self._travel_time_id,
}
response = requests.get(RESOURCE, params, timeout=10)
if response.status_code != HTTP_OK:
_LOGGER.warning("Invalid response from WSDOT API")
else:
self._data = response.json()
self._state = self._data.get(ATTR_CURRENT_TIME)
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
if self._data is not None:
attrs = {ATTR_ATTRIBUTION: ATTRIBUTION}
for key in [
ATTR_AVG_TIME,
ATTR_NAME,
ATTR_DESCRIPTION,
ATTR_TRAVEL_TIME_ID,
]:
attrs[key] = self._data.get(key)
attrs[ATTR_TIME_UPDATED] = _parse_wsdot_timestamp(
self._data.get(ATTR_TIME_UPDATED)
)
return attrs
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return TIME_MINUTES
def _parse_wsdot_timestamp(timestamp):
"""Convert WSDOT timestamp to datetime."""
if not timestamp:
return None
# ex: Date(1485040200000-0800)
milliseconds, tzone = re.search(r"Date\((\d+)([+-]\d\d)\d\d\)", timestamp).groups()
return datetime.fromtimestamp(
int(milliseconds) / 1000, tz=timezone(timedelta(hours=int(tzone)))
)
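# Example (a sketch; the timestamp matches the format shown in the comment above):
#
#   _parse_wsdot_timestamp("Date(1485040200000-0800)")
#   # -> datetime(2017, 1, 21, 15, 10, tzinfo=timezone(timedelta(hours=-8)))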
|
import pytest
import voluptuous as vol
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
ATTR_MODE,
DOMAIN,
MODE_AWAY,
MODE_ECO,
SERVICE_SET_HUMIDITY,
SERVICE_SET_MODE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
ENTITY_DEHUMIDIFIER = "humidifier.dehumidifier"
ENTITY_HYGROSTAT = "humidifier.hygrostat"
ENTITY_HUMIDIFIER = "humidifier.humidifier"
@pytest.fixture(autouse=True)
async def setup_demo_humidifier(hass):
"""Initialize setup demo humidifier."""
assert await async_setup_component(
hass, DOMAIN, {"humidifier": {"platform": "demo"}}
)
await hass.async_block_till_done()
def test_setup_params(hass):
"""Test the initial parameters."""
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_HUMIDITY) == 54
def test_default_setup_params(hass):
"""Test the setup with default parameters."""
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.attributes.get(ATTR_MIN_HUMIDITY) == 0
assert state.attributes.get(ATTR_MAX_HUMIDITY) == 100
async def test_set_target_humidity_bad_attr(hass):
"""Test setting the target humidity without required attribute."""
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.attributes.get(ATTR_HUMIDITY) == 54
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_HUMIDITY: None, ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.attributes.get(ATTR_HUMIDITY) == 54
async def test_set_target_humidity(hass):
"""Test the setting of the target humidity."""
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.attributes.get(ATTR_HUMIDITY) == 54
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_HUMIDITY: 64, ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.attributes.get(ATTR_HUMIDITY) == 64
async def test_set_hold_mode_away(hass):
"""Test setting the hold mode away."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_MODE,
{ATTR_MODE: MODE_AWAY, ATTR_ENTITY_ID: ENTITY_HYGROSTAT},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_HYGROSTAT)
assert state.attributes.get(ATTR_MODE) == MODE_AWAY
async def test_set_hold_mode_eco(hass):
"""Test setting the hold mode eco."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_MODE,
{ATTR_MODE: MODE_ECO, ATTR_ENTITY_ID: ENTITY_HYGROSTAT},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_HYGROSTAT)
assert state.attributes.get(ATTR_MODE) == MODE_ECO
async def test_turn_on(hass):
"""Test turn on device."""
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_OFF
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_ON
async def test_turn_off(hass):
"""Test turn off device."""
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_ON
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_OFF
async def test_toggle(hass):
"""Test toggle device."""
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_ON
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_OFF
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_DEHUMIDIFIER}, blocking=True
)
state = hass.states.get(ENTITY_DEHUMIDIFIER)
assert state.state == STATE_ON
|
import random
from django.test import SimpleTestCase
from weblate.checks.flags import Flags
from weblate.lang.models import Language, Plural
class MockLanguage(Language):
"""Mock language object."""
class Meta:
proxy = True
def __init__(self, code="cs"):
super().__init__(code=code)
self.plural = Plural(language=self)
class MockProject:
"""Mock project object."""
def __init__(self):
self.id = 1
self.use_shared_tm = True
self.name = "MockProject"
class MockComponent:
"""Mock component object."""
def __init__(self):
self.id = 1
self.source_language = MockLanguage("en")
self.project = MockProject()
self.name = "MockComponent"
class MockTranslation:
"""Mock translation object."""
def __init__(self, code="cs"):
self.language = MockLanguage(code)
self.component = MockComponent()
self.is_template = False
self.is_source = False
class MockUnit:
"""Mock unit object."""
def __init__(self, id_hash=None, flags="", code="cs", source="", note=""):
if id_hash is None:
id_hash = random.randint(0, 65536)
self.id_hash = id_hash
self.flags = Flags(flags)
self.translation = MockTranslation(code)
self.source = source
self.fuzzy = False
self.translated = True
self.readonly = False
self.state = 20
self.note = note
self.check_cache = {}
self.machinery = {"best": -1}
@property
def all_flags(self):
return self.flags
def get_source_plurals(self):
return [self.source]
@property
def source_string(self):
return self.source
class CheckTestCase(SimpleTestCase):
"""Generic test, also serves for testing base class."""
check = None
default_lang = "cs"
def setUp(self):
self.test_empty = ("", "", "")
self.test_good_matching = ("string", "string", "")
self.test_good_none = ("string", "string", "")
self.test_good_ignore = ()
self.test_good_flag = ()
self.test_failure_1 = ()
self.test_failure_2 = ()
self.test_failure_3 = ()
self.test_ignore_check = (
"x",
"x",
self.check.ignore_string if self.check else "",
)
self.test_highlight = ()
def do_test(self, expected, data, lang=None):
"""Perform single check if we have data to test."""
if lang is None:
lang = self.default_lang
if not data or self.check is None:
return
result = self.check.check_single(
data[0], data[1], MockUnit(None, data[2], lang, source=data[0])
)
if expected:
self.assertTrue(
result, 'Check did not fire for "{}"/"{}" ({})'.format(*data)
)
else:
self.assertFalse(result, 'Check did fire for "{}"/"{}" ({})'.format(*data))
def test_single_good_matching(self):
self.do_test(False, self.test_good_matching)
def test_single_good_none(self):
self.do_test(False, self.test_good_none)
def test_single_good_ignore(self):
self.do_test(False, self.test_good_ignore)
def test_single_empty(self):
self.do_test(False, self.test_empty)
def test_single_failure_1(self):
self.do_test(True, self.test_failure_1)
def test_single_failure_2(self):
self.do_test(True, self.test_failure_2)
def test_single_failure_3(self):
self.do_test(True, self.test_failure_3)
def test_check_good_flag(self):
if self.check is None or not self.test_good_flag:
return
self.assertFalse(
self.check.check_target(
[self.test_good_flag[0]],
[self.test_good_flag[1]],
MockUnit(
None,
self.test_good_flag[2],
self.default_lang,
source=self.test_good_flag[0],
),
)
)
def test_check_good_matching_singular(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_good_matching[0]],
[self.test_good_matching[1]],
MockUnit(
None,
self.test_good_matching[2],
self.default_lang,
source=self.test_good_matching[0],
),
)
)
def test_check_good_none_singular(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_good_none[0]],
[self.test_good_none[1]],
MockUnit(
None,
self.test_good_none[2],
self.default_lang,
source=self.test_good_none[0],
),
)
)
def test_check_good_ignore_singular(self):
if self.check is None or not self.test_good_ignore:
return
self.assertFalse(
self.check.check_target(
[self.test_good_ignore[0]],
[self.test_good_ignore[1]],
MockUnit(
None,
self.test_good_ignore[2],
self.default_lang,
source=self.test_good_ignore[0],
),
)
)
def test_check_good_matching_plural(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_good_matching[0]] * 2,
[self.test_good_matching[1]] * 3,
MockUnit(
None,
self.test_good_matching[2],
self.default_lang,
source=self.test_good_matching[0],
),
)
)
def test_check_failure_1_singular(self):
if not self.test_failure_1 or self.check is None:
return
self.assertTrue(
self.check.check_target(
[self.test_failure_1[0]],
[self.test_failure_1[1]],
MockUnit(
None,
self.test_failure_1[2],
self.default_lang,
source=self.test_failure_1[0],
),
)
)
def test_check_failure_1_plural(self):
if not self.test_failure_1 or self.check is None:
return
self.assertTrue(
self.check.check_target(
[self.test_failure_1[0]] * 2,
[self.test_failure_1[1]] * 3,
MockUnit(
None,
self.test_failure_1[2],
self.default_lang,
source=self.test_failure_1[0],
),
)
)
def test_check_failure_2_singular(self):
if not self.test_failure_2 or self.check is None:
return
self.assertTrue(
self.check.check_target(
[self.test_failure_2[0]],
[self.test_failure_2[1]],
MockUnit(
None,
self.test_failure_2[2],
self.default_lang,
source=self.test_failure_2[0],
),
)
)
def test_check_failure_3_singular(self):
if not self.test_failure_3 or self.check is None:
return
self.assertTrue(
self.check.check_target(
[self.test_failure_3[0]],
[self.test_failure_3[1]],
MockUnit(
None,
self.test_failure_3[2],
self.default_lang,
source=self.test_failure_3[0],
),
)
)
def test_check_ignore_check(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_ignore_check[0]] * 2,
[self.test_ignore_check[1]] * 3,
MockUnit(
None,
self.test_ignore_check[2],
self.default_lang,
source=self.test_ignore_check[0],
),
)
)
def test_check_highlight(self):
if self.check is None or not self.test_highlight:
return
unit = MockUnit(
None,
self.test_highlight[0],
self.default_lang,
source=self.test_highlight[1],
)
self.assertEqual(
self.check.check_highlight(self.test_highlight[1], unit),
self.test_highlight[2],
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_PADDING = 4
slim = tf.contrib.slim
def preprocess_for_train(image,
output_height,
output_width,
padding=_PADDING):
"""Preprocesses the given image for training.
  Note that the image is padded and then randomly cropped back to the
  requested output size; no resizing is performed.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
    padding: The amount of padding before and after each dimension of the image.
Returns:
A preprocessed image.
"""
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if padding > 0:
image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(image,
[output_height, output_width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
# Because these operations are not commutative, consider randomizing
  # the order of their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(distorted_image)
def preprocess_for_eval(image, output_height, output_width):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
Returns:
A preprocessed image.
"""
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
# Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                          output_height,
                                                          output_width)
tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(resized_image)
def preprocess_image(image, output_height, output_width, is_training=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width)
else:
return preprocess_for_eval(image, output_height, output_width)
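# Illustrative usage only (not part of the original module); assumes a 3-channel
# uint8 image tensor such as a 32x32 CIFAR image:
#   image = tf.zeros([32, 32, 3], dtype=tf.uint8)
#   train_image = preprocess_image(image, 24, 24, is_training=True)
#   eval_image = preprocess_image(image, 24, 24, is_training=False)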
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from interrupt import InterruptCollector
##########################################################################
class TestInterruptCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('InterruptCollector', {
'interval': 1
})
self.collector = InterruptCollector(config, None)
def test_import(self):
self.assertTrue(InterruptCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_stat(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/interrupts', 'r')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_24_core(self, publish_mock):
InterruptCollector.PROC = self.getFixturePath('interrupts_24_core_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
InterruptCollector.PROC = self.getFixturePath('interrupts_24_core_2')
self.collector.collect()
metrics = {
'IO-APIC-edge.timer.0.CPU0': 318660.000000,
'IO-APIC-edge.timer.0.total': 318660.000000,
'PCI-MSI-X.eth3-rx-1.51.CPU6': 293.000000,
'PCI-MSI-X.eth3-rx-1.51.CPU7': 330.000000,
'PCI-MSI-X.eth3-rx-1.51.CPU9': 286.000000,
'PCI-MSI-X.eth3-rx-1.51.total': 909.000000,
'PCI-MSI-X.eth3-rx-2.59.CPU21': 98790.000000,
'PCI-MSI-X.eth3-rx-2.59.total': 98790.000000,
'PCI-MSI-X.eth3-rx-3.67.CPU7': 743.000000,
'PCI-MSI-X.eth3-rx-3.67.CPU9': 378.000000,
'PCI-MSI-X.eth3-rx-3.67.total': 1121.000000,
'PCI-MSI-X.eth3-tx-0.75.CPU23': 304345.000000,
'PCI-MSI-X.eth3-tx-0.75.total': 304345.000000,
'IO-APIC-level_3w-sas.CPU6': 301014.000000,
'IO-APIC-level_3w-sas.total': 301014.000000,
'PCI-MSI-X_eth2-rx-0.CPU20': 20570.000000,
'PCI-MSI-X_eth2-rx-0.total': 20570.000000,
'PCI-MSI-X_eth2-rx-1.CPU6': 94.000000,
'PCI-MSI-X_eth2-rx-1.CPU7': 15.000000,
'PCI-MSI-X_eth2-rx-1.CPU9': 50.000000,
'PCI-MSI-X_eth2-rx-1.total': 159.000000,
'PCI-MSI-X_eth2-rx-2.CPU17': 159.000000,
'PCI-MSI-X_eth2-rx-2.total': 159.000000,
'PCI-MSI-X_eth2-rx-3.CPU8': 159.000000,
'PCI-MSI-X_eth2-rx-3.total': 159.000000,
'PCI-MSI-X_eth2-tx-0.CPU16': 159.000000,
'PCI-MSI-X_eth2-tx-0.total': 159.000000,
'PCI-MSI_eth0.CPU18': 10397.000000,
'PCI-MSI_eth0.total': 10397.000000,
'PCI-MSI-X_eth3-rx-0.CPU22': 224074.000000,
'PCI-MSI-X_eth3-rx-0.total': 224074.000000,
'PCI-MSI_eth1.CPU19': 10386.000000,
'PCI-MSI_eth1.total': 10386.000000,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_kvm(self, publish_mock):
InterruptCollector.PROC = self.getFixturePath('interrupts_kvm_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
InterruptCollector.PROC = self.getFixturePath('interrupts_kvm_2')
self.collector.collect()
self.assertPublishedMany(publish_mock, {
'IO-APIC-edge.timer.0.CPU0': 279023.000000,
'IO-APIC-edge.timer.0.total': 279023.000000,
'IO-APIC-level.virtio0-virtio1.10.CPU0': 15068.000000,
'IO-APIC-level.virtio0-virtio1.10.total': 15068.000000,
'LOC.CPU0': 278993.000000,
'LOC.CPU1': 279000.000000,
'LOC.total': 557993.000000,
})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from django.forms import fields, widgets
from django.utils.translation import gettext_lazy as _, ngettext_lazy
from django.utils.text import Truncator
from django.utils.html import format_html
from django.forms.fields import IntegerField
from django.template.loader import select_template
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.widgets import NumberInputWidget
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from shop.conf import app_settings
from shop.cascade.plugin_base import ShopPluginBase
class ProcessBarFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin):
num_children = IntegerField(
min_value=1,
initial=1,
widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em;'}),
label=_("Steps"),
help_text=_("Number of steps for this proceed bar."))
class Meta:
untangled_fields = ['num_children']
class ProcessBarPlugin(TransparentWrapper, ShopPluginBase):
name = _("Process Bar")
form = ProcessBarFormMixin
parent_classes = ('BootstrapColumnPlugin',)
direct_child_classes = ('ProcessStepPlugin',)
require_parent = True
allow_children = True
@classmethod
def get_identifier(cls, instance):
identifier = super().get_identifier(instance)
num_cols = instance.get_children().count()
content = ngettext_lazy('with {} page', 'with {} pages', num_cols).format(num_cols)
return format_html('{0}{1}', identifier, content)
def get_render_template(self, context, instance, placeholder):
template_names = [
'{}/checkout/process-bar.html'.format(app_settings.APP_LABEL),
'shop/checkout/process-bar.html',
]
return select_template(template_names)
def render(self, context, instance, placeholder):
        super().render(context, instance, placeholder)
num_children = instance.get_num_children()
if num_children > 0:
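            # Each step gets an equal share of the bar width, e.g. 4 steps -> '25.00%'.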
context['step_css_width'] = '{:3.2f}%'.format(100. / num_children)
return context
def save_model(self, request, obj, form, change):
wanted_children = int(form.cleaned_data.get('num_children'))
super().save_model(request, obj, form, change)
self.extend_children(obj, wanted_children, ProcessStepPlugin)
plugin_pool.register_plugin(ProcessBarPlugin)
class ProcessStepFormMixin(EntangledModelFormMixin):
step_title = fields.CharField(
widget=widgets.TextInput(attrs={'size': 50}),
label=_("Step Title"),
required=False,
)
class Meta:
entangled_fields = {'glossary': ['step_title']}
class ProcessStepPlugin(TransparentContainer, ShopPluginBase):
name = _("Process Step")
direct_parent_classes = parent_classes = ['ProcessBarPlugin']
require_parent = True
allow_children = True
alien_child_classes = True
form = ProcessStepFormMixin
render_template = 'cascade/generic/wrapper.html'
@classmethod
def get_identifier(cls, obj):
identifier = super().get_identifier(obj)
content = obj.glossary.get('step_title', '')
if content:
content = Truncator(content).words(3, truncate=' ...')
else:
content = obj.get_position_in_placeholder()
return format_html('{0}{1}', identifier, content)
plugin_pool.register_plugin(ProcessStepPlugin)
|
import re
import numpy as np
import pytest
import xarray as xr
from . import assert_equal, requires_dask
@pytest.fixture(params=[np.str_, np.bytes_])
def dtype(request):
return request.param
@requires_dask
def test_dask():
import dask.array as da
arr = da.from_array(["a", "b", "c"], chunks=-1)
xarr = xr.DataArray(arr)
result = xarr.str.len().compute()
expected = xr.DataArray([1, 1, 1])
assert_equal(result, expected)
def test_count(dtype):
values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype)
result = values.str.count("f[o]+")
expected = xr.DataArray([1, 2, 4])
assert_equal(result, expected)
def test_contains(dtype):
values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"]).astype(dtype)
# case insensitive using regex
result = values.str.contains("FOO|mmm", case=False)
expected = xr.DataArray([True, False, True, True])
assert_equal(result, expected)
# case insensitive without regex
result = values.str.contains("foo", regex=False, case=False)
expected = xr.DataArray([True, False, True, False])
assert_equal(result, expected)
def test_starts_ends_with(dtype):
values = xr.DataArray(["om", "foo_nom", "nom", "bar_foo", "foo"]).astype(dtype)
result = values.str.startswith("foo")
expected = xr.DataArray([False, True, False, False, True])
assert_equal(result, expected)
result = values.str.endswith("foo")
expected = xr.DataArray([False, False, False, True, True])
assert_equal(result, expected)
def test_case(dtype):
da = xr.DataArray(["SOme word"]).astype(dtype)
capitalized = xr.DataArray(["Some word"]).astype(dtype)
lowered = xr.DataArray(["some word"]).astype(dtype)
swapped = xr.DataArray(["soME WORD"]).astype(dtype)
titled = xr.DataArray(["Some Word"]).astype(dtype)
uppered = xr.DataArray(["SOME WORD"]).astype(dtype)
assert_equal(da.str.capitalize(), capitalized)
assert_equal(da.str.lower(), lowered)
assert_equal(da.str.swapcase(), swapped)
assert_equal(da.str.title(), titled)
assert_equal(da.str.upper(), uppered)
def test_replace(dtype):
values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
result = values.str.replace("BAD[_]*", "")
expected = xr.DataArray(["foobar"]).astype(dtype)
assert_equal(result, expected)
result = values.str.replace("BAD[_]*", "", n=1)
expected = xr.DataArray(["foobarBAD"]).astype(dtype)
assert_equal(result, expected)
s = xr.DataArray(["A", "B", "C", "Aaba", "Baca", "", "CABA", "dog", "cat"]).astype(
dtype
)
result = s.str.replace("A", "YYY")
expected = xr.DataArray(
["YYY", "B", "C", "YYYaba", "Baca", "", "CYYYBYYY", "dog", "cat"]
).astype(dtype)
assert_equal(result, expected)
result = s.str.replace("A", "YYY", case=False)
expected = xr.DataArray(
["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", "", "CYYYBYYY", "dog", "cYYYt"]
).astype(dtype)
assert_equal(result, expected)
result = s.str.replace("^.a|dog", "XX-XX ", case=False)
expected = xr.DataArray(
["A", "B", "C", "XX-XX ba", "XX-XX ca", "", "XX-XX BA", "XX-XX ", "XX-XX t"]
).astype(dtype)
assert_equal(result, expected)
def test_replace_callable():
values = xr.DataArray(["fooBAD__barBAD"])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace("[a-z][A-Z]{2}", repl, n=2)
exp = xr.DataArray(["foObaD__baRbaD"])
assert_equal(result, exp)
# test regex named groups
values = xr.DataArray(["Foo Bar Baz"])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group("middle").swapcase()
result = values.str.replace(pat, repl)
exp = xr.DataArray(["bAR"])
assert_equal(result, exp)
def test_replace_unicode():
# flags + unicode
values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")])
expected = xr.DataArray([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
assert_equal(result, expected)
def test_replace_compiled_regex(dtype):
values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
# test with compiled regex
pat = re.compile(dtype("BAD[_]*"))
result = values.str.replace(pat, "")
expected = xr.DataArray(["foobar"]).astype(dtype)
assert_equal(result, expected)
result = values.str.replace(pat, "", n=1)
expected = xr.DataArray(["foobarBAD"]).astype(dtype)
assert_equal(result, expected)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = xr.DataArray(["fooBAD__barBAD__bad"]).astype(dtype)
pat = re.compile(dtype("BAD[_]*"))
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", flags=re.IGNORECASE)
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", case=False)
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", case=True)
# test with callable
values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
repl = lambda m: m.group(0).swapcase()
pat = re.compile(dtype("[a-z][A-Z]{2}"))
result = values.str.replace(pat, repl, n=2)
expected = xr.DataArray(["foObaD__baRbaD"]).astype(dtype)
assert_equal(result, expected)
def test_replace_literal(dtype):
# GH16808 literal replace (regex=False vs regex=True)
values = xr.DataArray(["f.o", "foo"]).astype(dtype)
expected = xr.DataArray(["bao", "bao"]).astype(dtype)
result = values.str.replace("f.", "ba")
assert_equal(result, expected)
expected = xr.DataArray(["bao", "foo"]).astype(dtype)
result = values.str.replace("f.", "ba", regex=False)
assert_equal(result, expected)
# Cannot do a literal replace if given a callable repl or compiled
# pattern
callable_repl = lambda m: m.group(0).swapcase()
compiled_pat = re.compile("[a-z][A-Z]{2}")
msg = "Cannot use a callable replacement when regex=False"
with pytest.raises(ValueError, match=msg):
values.str.replace("abc", callable_repl, regex=False)
msg = "Cannot use a compiled regex as replacement pattern with regex=False"
with pytest.raises(ValueError, match=msg):
values.str.replace(compiled_pat, "", regex=False)
def test_repeat(dtype):
values = xr.DataArray(["a", "b", "c", "d"]).astype(dtype)
result = values.str.repeat(3)
expected = xr.DataArray(["aaa", "bbb", "ccc", "ddd"]).astype(dtype)
assert_equal(result, expected)
def test_match(dtype):
# New match behavior introduced in 0.13
values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype)
result = values.str.match(".*(BAD[_]+).*(BAD)")
expected = xr.DataArray([True, False])
assert_equal(result, expected)
values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype)
result = values.str.match(".*BAD[_]+.*BAD")
expected = xr.DataArray([True, False])
assert_equal(result, expected)
def test_empty_str_methods():
empty = xr.DataArray(np.empty(shape=(0,), dtype="U"))
empty_str = empty
empty_int = xr.DataArray(np.empty(shape=(0,), dtype=int))
empty_bool = xr.DataArray(np.empty(shape=(0,), dtype=bool))
empty_bytes = xr.DataArray(np.empty(shape=(0,), dtype="S"))
assert_equal(empty_str, empty.str.title())
assert_equal(empty_int, empty.str.count("a"))
assert_equal(empty_bool, empty.str.contains("a"))
assert_equal(empty_bool, empty.str.startswith("a"))
assert_equal(empty_bool, empty.str.endswith("a"))
assert_equal(empty_str, empty.str.lower())
assert_equal(empty_str, empty.str.upper())
assert_equal(empty_str, empty.str.replace("a", "b"))
assert_equal(empty_str, empty.str.repeat(3))
assert_equal(empty_bool, empty.str.match("^a"))
assert_equal(empty_int, empty.str.len())
assert_equal(empty_int, empty.str.find("a"))
assert_equal(empty_int, empty.str.rfind("a"))
assert_equal(empty_str, empty.str.pad(42))
assert_equal(empty_str, empty.str.center(42))
assert_equal(empty_str, empty.str.slice(stop=1))
assert_equal(empty_str, empty.str.slice(step=1))
assert_equal(empty_str, empty.str.strip())
assert_equal(empty_str, empty.str.lstrip())
assert_equal(empty_str, empty.str.rstrip())
assert_equal(empty_str, empty.str.wrap(42))
assert_equal(empty_str, empty.str.get(0))
assert_equal(empty_str, empty_bytes.str.decode("ascii"))
assert_equal(empty_bytes, empty.str.encode("ascii"))
assert_equal(empty_str, empty.str.isalnum())
assert_equal(empty_str, empty.str.isalpha())
assert_equal(empty_str, empty.str.isdigit())
assert_equal(empty_str, empty.str.isspace())
assert_equal(empty_str, empty.str.islower())
assert_equal(empty_str, empty.str.isupper())
assert_equal(empty_str, empty.str.istitle())
assert_equal(empty_str, empty.str.isnumeric())
assert_equal(empty_str, empty.str.isdecimal())
assert_equal(empty_str, empty.str.capitalize())
assert_equal(empty_str, empty.str.swapcase())
table = str.maketrans("a", "b")
assert_equal(empty_str, empty.str.translate(table))
def test_ismethods(dtype):
values = ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "]
str_s = xr.DataArray(values).astype(dtype)
alnum_e = [True, True, True, True, True, False, True, True, False, False]
alpha_e = [True, True, True, False, False, False, True, False, False, False]
digit_e = [False, False, False, True, False, False, False, True, False, False]
space_e = [False, False, False, False, False, False, False, False, False, True]
lower_e = [False, True, False, False, False, False, False, False, False, False]
upper_e = [True, False, False, False, True, False, True, False, False, False]
title_e = [True, False, True, False, True, False, False, False, False, False]
assert_equal(str_s.str.isalnum(), xr.DataArray(alnum_e))
assert_equal(str_s.str.isalpha(), xr.DataArray(alpha_e))
assert_equal(str_s.str.isdigit(), xr.DataArray(digit_e))
assert_equal(str_s.str.isspace(), xr.DataArray(space_e))
assert_equal(str_s.str.islower(), xr.DataArray(lower_e))
assert_equal(str_s.str.isupper(), xr.DataArray(upper_e))
assert_equal(str_s.str.istitle(), xr.DataArray(title_e))
def test_isnumeric():
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
    # 0xFF13: ３ Em 3 (fullwidth digit three)
    values = ["A", "3", "¼", "★", "፸", "３", "four"]
s = xr.DataArray(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
assert_equal(s.str.isnumeric(), xr.DataArray(numeric_e))
assert_equal(s.str.isdecimal(), xr.DataArray(decimal_e))
def test_len(dtype):
values = ["foo", "fooo", "fooooo", "fooooooo"]
result = xr.DataArray(values).astype(dtype).str.len()
expected = xr.DataArray([len(x) for x in values])
assert_equal(result, expected)
def test_find(dtype):
values = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"])
values = values.astype(dtype)
result = values.str.find("EF")
assert_equal(result, xr.DataArray([4, 3, 1, 0, -1]))
expected = xr.DataArray([v.find(dtype("EF")) for v in values.values])
assert_equal(result, expected)
result = values.str.rfind("EF")
assert_equal(result, xr.DataArray([4, 5, 7, 4, -1]))
expected = xr.DataArray([v.rfind(dtype("EF")) for v in values.values])
assert_equal(result, expected)
result = values.str.find("EF", 3)
assert_equal(result, xr.DataArray([4, 3, 7, 4, -1]))
expected = xr.DataArray([v.find(dtype("EF"), 3) for v in values.values])
assert_equal(result, expected)
result = values.str.rfind("EF", 3)
assert_equal(result, xr.DataArray([4, 5, 7, 4, -1]))
expected = xr.DataArray([v.rfind(dtype("EF"), 3) for v in values.values])
assert_equal(result, expected)
result = values.str.find("EF", 3, 6)
assert_equal(result, xr.DataArray([4, 3, -1, 4, -1]))
expected = xr.DataArray([v.find(dtype("EF"), 3, 6) for v in values.values])
assert_equal(result, expected)
result = values.str.rfind("EF", 3, 6)
assert_equal(result, xr.DataArray([4, 3, -1, 4, -1]))
xp = xr.DataArray([v.rfind(dtype("EF"), 3, 6) for v in values.values])
assert_equal(result, xp)
def test_index(dtype):
s = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"]).astype(dtype)
result = s.str.index("EF")
assert_equal(result, xr.DataArray([4, 3, 1, 0]))
result = s.str.rindex("EF")
assert_equal(result, xr.DataArray([4, 5, 7, 4]))
result = s.str.index("EF", 3)
assert_equal(result, xr.DataArray([4, 3, 7, 4]))
result = s.str.rindex("EF", 3)
assert_equal(result, xr.DataArray([4, 5, 7, 4]))
result = s.str.index("E", 4, 8)
assert_equal(result, xr.DataArray([4, 5, 7, 4]))
result = s.str.rindex("E", 0, 5)
assert_equal(result, xr.DataArray([4, 3, 1, 4]))
with pytest.raises(ValueError):
result = s.str.index("DE")
def test_pad(dtype):
values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
result = values.str.pad(5, side="left")
expected = xr.DataArray([" a", " b", " c", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="right")
expected = xr.DataArray(["a ", "b ", "c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="both")
expected = xr.DataArray([" a ", " b ", " c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
def test_pad_fillchar(dtype):
values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
result = values.str.pad(5, side="left", fillchar="X")
expected = xr.DataArray(["XXXXa", "XXXXb", "XXXXc", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="right", fillchar="X")
expected = xr.DataArray(["aXXXX", "bXXXX", "cXXXX", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="both", fillchar="X")
expected = xr.DataArray(["XXaXX", "XXbXX", "XXcXX", "eeeee"]).astype(dtype)
assert_equal(result, expected)
msg = "fillchar must be a character, not str"
with pytest.raises(TypeError, match=msg):
result = values.str.pad(5, fillchar="XY")
def test_translate():
values = xr.DataArray(["abcdefg", "abcc", "cdddfg", "cdefggg"])
table = str.maketrans("abc", "cde")
result = values.str.translate(table)
expected = xr.DataArray(["cdedefg", "cdee", "edddfg", "edefggg"])
assert_equal(result, expected)
def test_center_ljust_rjust(dtype):
values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
result = values.str.center(5)
expected = xr.DataArray([" a ", " b ", " c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.ljust(5)
expected = xr.DataArray(["a ", "b ", "c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.rjust(5)
expected = xr.DataArray([" a", " b", " c", "eeeee"]).astype(dtype)
assert_equal(result, expected)
def test_center_ljust_rjust_fillchar(dtype):
values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"]).astype(dtype)
result = values.str.center(5, fillchar="X")
expected = xr.DataArray(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"])
assert_equal(result, expected.astype(dtype))
result = values.str.ljust(5, fillchar="X")
expected = xr.DataArray(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"])
assert_equal(result, expected.astype(dtype))
result = values.str.rjust(5, fillchar="X")
expected = xr.DataArray(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"])
assert_equal(result, expected.astype(dtype))
    # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
template = "fillchar must be a character, not {dtype}"
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.center(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.ljust(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.rjust(5, fillchar="XY")
def test_zfill(dtype):
values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype)
result = values.str.zfill(5)
expected = xr.DataArray(["00001", "00022", "00aaa", "00333", "45678"])
assert_equal(result, expected.astype(dtype))
result = values.str.zfill(3)
expected = xr.DataArray(["001", "022", "aaa", "333", "45678"])
assert_equal(result, expected.astype(dtype))
def test_slice(dtype):
arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype)
result = arr.str.slice(2, 5)
exp = xr.DataArray(["foo", "bar", "baz"]).astype(dtype)
assert_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2), (3, 0, -1)]:
try:
result = arr.str[start:stop:step]
expected = xr.DataArray([s[start:stop:step] for s in arr.values])
assert_equal(result, expected.astype(dtype))
except IndexError:
print(f"failed on {start}:{stop}:{step}")
raise
def test_slice_replace(dtype):
da = lambda x: xr.DataArray(x).astype(dtype)
values = da(["short", "a bit longer", "evenlongerthanthat", ""])
expected = da(["shrt", "a it longer", "evnlongerthanthat", ""])
result = values.str.slice_replace(2, 3)
assert_equal(result, expected)
expected = da(["shzrt", "a zit longer", "evznlongerthanthat", "z"])
result = values.str.slice_replace(2, 3, "z")
assert_equal(result, expected)
expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"])
result = values.str.slice_replace(2, 2, "z")
assert_equal(result, expected)
expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"])
result = values.str.slice_replace(2, 1, "z")
assert_equal(result, expected)
expected = da(["shorz", "a bit longez", "evenlongerthanthaz", "z"])
result = values.str.slice_replace(-1, None, "z")
assert_equal(result, expected)
expected = da(["zrt", "zer", "zat", "z"])
result = values.str.slice_replace(None, -2, "z")
assert_equal(result, expected)
expected = da(["shortz", "a bit znger", "evenlozerthanthat", "z"])
result = values.str.slice_replace(6, 8, "z")
assert_equal(result, expected)
expected = da(["zrt", "a zit longer", "evenlongzerthanthat", "z"])
result = values.str.slice_replace(-10, 3, "z")
assert_equal(result, expected)
def test_strip_lstrip_rstrip(dtype):
values = xr.DataArray([" aa ", " bb \n", "cc "]).astype(dtype)
result = values.str.strip()
expected = xr.DataArray(["aa", "bb", "cc"]).astype(dtype)
assert_equal(result, expected)
result = values.str.lstrip()
expected = xr.DataArray(["aa ", "bb \n", "cc "]).astype(dtype)
assert_equal(result, expected)
result = values.str.rstrip()
expected = xr.DataArray([" aa", " bb", "cc"]).astype(dtype)
assert_equal(result, expected)
def test_strip_lstrip_rstrip_args(dtype):
values = xr.DataArray(["xxABCxx", "xx BNSD", "LDFJH xx"]).astype(dtype)
rs = values.str.strip("x")
xp = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype)
assert_equal(rs, xp)
rs = values.str.lstrip("x")
xp = xr.DataArray(["ABCxx", " BNSD", "LDFJH xx"]).astype(dtype)
assert_equal(rs, xp)
rs = values.str.rstrip("x")
xp = xr.DataArray(["xxABC", "xx BNSD", "LDFJH "]).astype(dtype)
assert_equal(rs, xp)
def test_wrap():
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = xr.DataArray(
[
"hello world",
"hello world!",
"hello world!!",
"abcdefabcde",
"abcdefabcdef",
"abcdefabcdefa",
"ab ab ab ab ",
"ab ab ab ab a",
"\t",
]
)
# expected values
expected = xr.DataArray(
[
"hello world",
"hello world!",
"hello\nworld!!",
"abcdefabcde",
"abcdefabcdef",
"abcdefabcdef\na",
"ab ab ab ab",
"ab ab ab ab\na",
"",
]
)
result = values.str.wrap(12, break_long_words=True)
assert_equal(result, expected)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = xr.DataArray([" pre ", "\xac\u20ac\U00008000 abadcafe"])
expected = xr.DataArray([" pre", "\xac\u20ac\U00008000 ab\nadcafe"])
result = values.str.wrap(6)
assert_equal(result, expected)
def test_wrap_kwargs_passed():
# GH4334
values = xr.DataArray(" hello world ")
result = values.str.wrap(7)
expected = xr.DataArray(" hello\nworld")
assert_equal(result, expected)
result = values.str.wrap(7, drop_whitespace=False)
expected = xr.DataArray(" hello\n world\n ")
assert_equal(result, expected)
def test_get(dtype):
values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"]).astype(dtype)
result = values.str[2]
expected = xr.DataArray(["b", "d", "g"]).astype(dtype)
assert_equal(result, expected)
# bounds testing
values = xr.DataArray(["1_2_3_4_5", "6_7_8_9_10", "11_12"]).astype(dtype)
# positive index
result = values.str[5]
expected = xr.DataArray(["_", "_", ""]).astype(dtype)
assert_equal(result, expected)
# negative index
result = values.str[-6]
expected = xr.DataArray(["_", "8", ""]).astype(dtype)
assert_equal(result, expected)
def test_get_default(dtype):
# GH4334
values = xr.DataArray(["a_b", "c", ""]).astype(dtype)
result = values.str.get(2, "default")
expected = xr.DataArray(["b", "default", "default"]).astype(dtype)
assert_equal(result, expected)
def test_encode_decode():
data = xr.DataArray(["a", "b", "a\xe4"])
encoded = data.str.encode("utf-8")
decoded = encoded.str.decode("utf-8")
assert_equal(data, decoded)
def test_encode_decode_errors():
encodeBase = xr.DataArray(["a", "b", "a\x9d"])
msg = (
r"'charmap' codec can't encode character '\\x9d' in position 1:"
" character maps to <undefined>"
)
with pytest.raises(UnicodeEncodeError, match=msg):
encodeBase.str.encode("cp1252")
f = lambda x: x.encode("cp1252", "ignore")
result = encodeBase.str.encode("cp1252", "ignore")
expected = xr.DataArray([f(x) for x in encodeBase.values.tolist()])
assert_equal(result, expected)
decodeBase = xr.DataArray([b"a", b"b", b"a\x9d"])
msg = (
"'charmap' codec can't decode byte 0x9d in position 1:"
" character maps to <undefined>"
)
with pytest.raises(UnicodeDecodeError, match=msg):
decodeBase.str.decode("cp1252")
f = lambda x: x.decode("cp1252", "ignore")
result = decodeBase.str.decode("cp1252", "ignore")
expected = xr.DataArray([f(x) for x in decodeBase.values.tolist()])
assert_equal(result, expected)
|
import os
from pylatex import Document, PageStyle, Head, Foot, MiniPage, \
StandAloneGraphic, MultiColumn, Tabu, LongTabu, LargeText, MediumText, \
LineBreak, NewPage, Tabularx, TextColor, simple_page_number
from pylatex.utils import bold, NoEscape
def generate_unique():
geometry_options = {
"head": "40pt",
"margin": "0.5in",
"bottom": "0.6in",
"includeheadfoot": True
}
doc = Document(geometry_options=geometry_options)
# Generating first page style
first_page = PageStyle("firstpage")
# Header image
with first_page.create(Head("L")) as header_left:
with header_left.create(MiniPage(width=NoEscape(r"0.49\textwidth"),
pos='c')) as logo_wrapper:
logo_file = os.path.join(os.path.dirname(__file__),
'sample-logo.png')
logo_wrapper.append(StandAloneGraphic(image_options="width=120px",
filename=logo_file))
# Add document title
with first_page.create(Head("R")) as right_header:
with right_header.create(MiniPage(width=NoEscape(r"0.49\textwidth"),
pos='c', align='r')) as title_wrapper:
title_wrapper.append(LargeText(bold("Bank Account Statement")))
title_wrapper.append(LineBreak())
title_wrapper.append(MediumText(bold("Date")))
# Add footer
with first_page.create(Foot("C")) as footer:
message = "Important message please read"
with footer.create(Tabularx(
"X X X X",
width_argument=NoEscape(r"\textwidth"))) as footer_table:
footer_table.add_row(
[MultiColumn(4, align='l', data=TextColor("blue", message))])
footer_table.add_hline(color="blue")
footer_table.add_empty_row()
branch_address = MiniPage(
width=NoEscape(r"0.25\textwidth"),
pos='t')
branch_address.append("960 - 22nd street east")
branch_address.append("\n")
branch_address.append("Saskatoon, SK")
document_details = MiniPage(width=NoEscape(r"0.25\textwidth"),
pos='t', align='r')
document_details.append("1000")
document_details.append(LineBreak())
document_details.append(simple_page_number())
footer_table.add_row([branch_address, branch_address,
branch_address, document_details])
doc.preamble.append(first_page)
# End first page style
# Add customer information
with doc.create(Tabu("X[l] X[r]")) as first_page_table:
customer = MiniPage(width=NoEscape(r"0.49\textwidth"), pos='h')
customer.append("Verna Volcano")
customer.append("\n")
customer.append("For some Person")
customer.append("\n")
customer.append("Address1")
customer.append("\n")
customer.append("Address2")
customer.append("\n")
customer.append("Address3")
# Add branch information
branch = MiniPage(width=NoEscape(r"0.49\textwidth"), pos='t!',
align='r')
branch.append("Branch no.")
branch.append(LineBreak())
branch.append(bold("1181..."))
branch.append(LineBreak())
branch.append(bold("TIB Cheque"))
first_page_table.add_row([customer, branch])
first_page_table.add_empty_row()
doc.change_document_style("firstpage")
doc.add_color(name="lightgray", model="gray", description="0.80")
# Add statement table
with doc.create(LongTabu("X[l] X[2l] X[r] X[r] X[r]",
row_height=1.5)) as data_table:
data_table.add_row(["date",
"description",
"debits($)",
"credits($)",
"balance($)"],
mapper=bold,
color="lightgray")
data_table.add_empty_row()
data_table.add_hline()
row = ["2016-JUN-01", "Test", "$100", "$1000", "-$900"]
for i in range(30):
if (i % 2) == 0:
data_table.add_row(row, color="lightgray")
else:
data_table.add_row(row)
doc.append(NewPage())
# Add cheque images
with doc.create(LongTabu("X[c] X[c]")) as cheque_table:
cheque_file = os.path.join(os.path.dirname(__file__),
'chequeexample.png')
cheque = StandAloneGraphic(cheque_file, image_options="width=200px")
for i in range(0, 20):
cheque_table.add_row([cheque, cheque])
doc.generate_pdf("complex_report", clean_tex=False)
generate_unique()
|
import os
import re
import diamond.collector
class NagiosPerfdataCollector(diamond.collector.Collector):
"""Diamond collector for Nagios performance data
"""
GENERIC_FIELDS = ['DATATYPE', 'HOSTNAME', 'TIMET']
HOST_FIELDS = ['HOSTPERFDATA']
SERVICE_FIELDS = ['SERVICEDESC', 'SERVICEPERFDATA']
TOKENIZER_RE = (
r"([^\s]+|'[^']+')=([-.\d]+)(c|s|ms|us|B|KB|MB|GB|TB|%)?" +
r"(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?")
def get_default_config_help(self):
config_help = super(NagiosPerfdataCollector,
self).get_default_config_help()
config_help.update({
'perfdata_dir': 'The directory containing Nagios perfdata files'
})
return config_help
def get_default_config(self):
config = super(NagiosPerfdataCollector, self).get_default_config()
config.update({
'path': 'nagiosperfdata',
'perfdata_dir': '/var/spool/diamond/nagiosperfdata',
})
return config
def collect(self):
"""Collect statistics from a Nagios perfdata directory.
"""
perfdata_dir = self.config['perfdata_dir']
try:
filenames = os.listdir(perfdata_dir)
except OSError:
self.log.error("Cannot read directory `{dir}'".format(
dir=perfdata_dir))
return
for filename in filenames:
self._process_file(os.path.join(perfdata_dir, filename))
def _extract_fields(self, line):
"""Extract the key/value fields from a line of performance data
"""
acc = {}
field_tokens = line.split("\t")
for field_token in field_tokens:
kv_tokens = field_token.split('::')
if len(kv_tokens) == 2:
(key, value) = kv_tokens
acc[key] = value
return acc
def _fields_valid(self, d):
"""Verify that all necessary fields are present
Determine whether the fields parsed represent a host or
service perfdata. If the perfdata is unknown, return False.
If the perfdata does not contain all fields required for that
type, return False. Otherwise, return True.
"""
if 'DATATYPE' not in d:
return False
datatype = d['DATATYPE']
if datatype == 'HOSTPERFDATA':
fields = self.GENERIC_FIELDS + self.HOST_FIELDS
elif datatype == 'SERVICEPERFDATA':
fields = self.GENERIC_FIELDS + self.SERVICE_FIELDS
else:
return False
for field in fields:
if field not in d:
return False
return True
def _normalize_to_unit(self, value, unit):
"""Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB).
"""
if unit == 'ms':
return value / 1000.0
if unit == 'us':
return value / 1000000.0
if unit == 'KB':
return value * 1024
if unit == 'MB':
return value * 1024 * 1024
if unit == 'GB':
return value * 1024 * 1024 * 1024
if unit == 'TB':
return value * 1024 * 1024 * 1024 * 1024
return value
def _parse_perfdata(self, s):
"""Parse performance data from a perfdata string
"""
metrics = []
counters = re.findall(self.TOKENIZER_RE, s)
        if not counters:
self.log.warning("Failed to parse performance data: {s}".format(
s=s))
return metrics
for (key, value, uom, warn, crit, min, max) in counters:
try:
norm_value = self._normalize_to_unit(float(value), uom)
metrics.append((key, norm_value))
except ValueError:
self.log.warning(
"Couldn't convert value '{value}' to float".format(
value=value))
return metrics
def _process_file(self, path):
"""Parse and submit the metrics from a file
"""
try:
f = open(path)
for line in f:
self._process_line(line)
os.remove(path)
except IOError as ex:
self.log.error("Could not open file `{path}': {error}".format(
path=path, error=ex.strerror))
def _process_line(self, line):
"""Parse and submit the metrics from a line of perfdata output
"""
fields = self._extract_fields(line)
        if not self._fields_valid(fields):
            self.log.warning("Missing required fields for line: {line}".format(
                line=line))
            return
metric_path_base = []
graphite_prefix = fields.get('GRAPHITEPREFIX')
graphite_postfix = fields.get('GRAPHITEPOSTFIX')
if graphite_prefix:
metric_path_base.append(graphite_prefix)
hostname = fields['HOSTNAME'].lower()
metric_path_base.append(hostname)
datatype = fields['DATATYPE']
if datatype == 'HOSTPERFDATA':
metric_path_base.append('host')
elif datatype == 'SERVICEPERFDATA':
service_desc = fields.get('SERVICEDESC')
graphite_postfix = fields.get('GRAPHITEPOSTFIX')
if graphite_postfix:
metric_path_base.append(graphite_postfix)
else:
metric_path_base.append(service_desc)
perfdata = fields[datatype]
counters = self._parse_perfdata(perfdata)
for (counter, value) in counters:
metric_path = metric_path_base + [counter]
metric_path = [self._sanitize(x) for x in metric_path]
metric_name = '.'.join(metric_path)
self.publish(metric_name, value)
def _sanitize(self, s):
"""Sanitize the name of a metric to remove unwanted chars
"""
        return re.sub(r"[^\w-]", "_", s)
|
from __future__ import print_function
import os
import sys
from functools import wraps
from six.moves import input
_stash = globals()["_stash"]
try:
import github
except ImportError:
print("Could not import 'github', installing it...")
_stash("pip install pygithub")
import github
try:
import docopt
except ImportError:
print("Could not import 'docopt', installing it...")
_stash("pip install docopt")
from docopt import docopt
from github import Github
import keychain, console, inspect
class GitHubRepoNotFoundError(Exception):
pass
def command(func):
@wraps(func)
def tmp(argv):
if len(argv) == 1:
if func.__name__ not in ['gh_list_keys']:
argv.append('--help')
try:
args = docopt(func.__doc__, argv=argv)
return func(args)
except SystemExit as e:
print(e)
return tmp
@command
def gh_fork(args):
'''Usage: gh fork <repo>
Fork a repo to your own github account.
<repo> - repo name of form user/repo
'''
console.show_activity()
g, user = setup_gh()
try:
other_repo = g.get_repo(args['<repo>'])
if other_repo:
mine = user.create_fork(other_repo)
print('fork created: {}/{}'.format(mine.owner.login, mine.name))
else:
pass
finally:
console.hide_activity()
@command
def gh_create(args):
'''Usage: gh create [options] <name>
Options:
-h, --help This message
-s <desc>, --description <desc> Repo description
-h <url>, --homepage <url> Homepage url
-p, --private private
-i, --has_issues has issues
-w, --has_wiki has wiki
-d, --has_downloads has downloads
-a, --auto_init create readme and first commit
-g <ign>, --gitignore_template <ign> create gitignore using string
'''
kwargs = {key[2:]: value for key, value in args.items() if key.startswith('--') and value}
console.show_activity()
try:
g, user = setup_gh()
r = user.create_repo(args['<name>'], **kwargs)
print('Created %s' % r.html_url)
finally:
console.hide_activity()
def parse_branch(userinput):
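    # e.g. 'ywangd:dev' -> ('ywangd', 'dev'); 'ywangd' -> ('ywangd', 'master');
    #      ':dev' -> ('', 'dev')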
if ':' in userinput:
owner, branch = userinput.split(':')
else:
owner = userinput
branch = 'master'
return owner, branch
def parent_owner(user, reponame):
return user.get_repo(reponame).parent.owner.login
@command
def gh_pull(args):
'''Usage:
gh pull <reponame> <base> [<head>]
gh pull <reponame> <base> [<head>] --title <title> [--body <body>]
gh pull <reponame> <base> [<head>] -i <issue>
Options:
-h, --help This message
-t <title>, --title <title> Title of pull request
-b <body>, --body <body> Body of pull request [default: ]
-i <issue>, --issue <issue> Issue number
Examples:
gh pull stash ywangd jsbain
gh pull stash ywangd:dev jsbain:dev
gh pull stash :dev :master
base and head should be in the format owner:branch.
if base owner is omitted, owner of parent repo is used.
if head owner is omitted, user is used
'''
console.show_activity()
try:
g, user = setup_gh()
reponame = args['<reponame>']
baseowner, basebranch = parse_branch(args['<base>'])
if not baseowner:
            baseowner = parent_owner(user, reponame)
if not args['<head>']:
args['<head>'] = ':'
headowner, headbranch = parse_branch(args['<head>'])
if not headowner:
headowner = user.login
baserepo = g.get_user(baseowner).get_repo(reponame)
kwargs = {}
if args['--issue']:
            kwargs['issue'] = baserepo.get_issue(int(args['--issue']))
elif not args['--title']:
kwargs['title'] = input('Enter pull title:')
kwargs['body'] = input('Enter pull body:')
else:
kwargs['title'] = args['--title']
kwargs['body'] = args['--body'] or ''
kwargs['base'] = basebranch
kwargs['head'] = ':'.join([headowner, headbranch])
pullreq = baserepo.create_pull(**kwargs)
print('Created pull %s' % pullreq.html_url)
print('Commits:')
print([(x.sha, x.commit.message) for x in pullreq.get_commits()])
print('Changed Files:')
print([x.filename for x in pullreq.get_files()])
finally:
console.hide_activity()
print('success')
@command
def gh_list_keys(args):
'''Usage:
gh list_keys [options]
Options:
-h, --help This message
List keys
'''
g, u = setup_gh()
for key in u.get_keys():
print('{}:\n {}\n'.format(key.title, key.key))
@command
def gh_create_key(args):
'''Usage:
gh create_key <title> [<public_key_path>]
Options:
-h, --help This message
Examples:
gh create_key ipad ~/.ssh/id_rsa.pub
gh create_key ipad (checks for ~/.ssh/id_rsa.pub, or creates new key if needed using ssh-keygen )
'''
title = args['<title>']
default_keyfile = os.path.expanduser('~/.ssh/id_rsa.pub')
if not args['<public_key_path>']:
if not os.path.exists(default_keyfile):
print('Creating a ssh key in ~/.ssh/')
cmd_string = '''
            echo ssh-keygen -trsa -b2048
ssh-keygen -trsa -b2048
'''
_stash(cmd_string)
args['<public_key_path>'] = default_keyfile
#if private key, use pub key
if not args['<public_key_path>'].endswith('.pub'):
args['<public_key_path>'] += '.pub'
if not os.path.exists(args['<public_key_path>']):
raise Exception('Public Key file not found!')
g, u = setup_gh()
with open(args['<public_key_path>']) as pubkey:
u.create_key(title, pubkey.read())
def setup_gh():
keychainservice = 'stash.git.github.com'
user = dict(keychain.get_services())[keychainservice]
pw = keychain.get_password(keychainservice, user)
g = Github(user, pw)
u = g.get_user()
return g, u
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
sys.argv.append('--help')
args = docopt(__doc__, version='0.1', options_first=True)
cmd = args['<command>']
argv = [cmd] + args['<args>']
try:
func = locals()['gh_%s' % cmd]
except KeyError:
print('No such cmd')
print(__doc__)
raise
func(argv)
|
from defusedxml import ElementTree
import pytest
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
@pytest.fixture
def mock_http_client(loop, hass, hass_client):
"""Set up test fixture."""
config = {
"rss_feed_template": {
"testfeed": {
"title": "feed title is {{states.test.test1.state}}",
"items": [
{
"title": "item title is {{states.test.test2.state}}",
"description": "desc {{states.test.test3.state}}",
}
],
}
}
}
loop.run_until_complete(async_setup_component(hass, "rss_feed_template", config))
return loop.run_until_complete(hass_client())
async def test_get_nonexistent_feed(mock_http_client):
    """Test that requesting a nonexistent feed returns HTTP 404."""
resp = await mock_http_client.get("/api/rss_template/otherfeed")
assert resp.status == HTTP_NOT_FOUND
async def test_get_rss_feed(mock_http_client, hass):
"""Test if we can retrieve the correct rss feed."""
hass.states.async_set("test.test1", "a_state_1")
hass.states.async_set("test.test2", "a_state_2")
hass.states.async_set("test.test3", "a_state_3")
resp = await mock_http_client.get("/api/rss_template/testfeed")
assert resp.status == 200
text = await resp.text()
xml = ElementTree.fromstring(text)
assert xml[0].text == "feed title is a_state_1"
assert xml[1][0].text == "item title is a_state_2"
assert xml[1][1].text == "desc a_state_3"
|
import sys
__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode']
PY3 = sys.version_info[0] == 3
if sys.version_info[0] < 3:
b = bytes = str
basestring_ = basestring
else:
def b(s):
if isinstance(s, str):
return s.encode('latin1')
return bytes(s)
basestring_ = (bytes, str)
bytes = bytes
text = str
if sys.version_info[0] < 3:
def next(obj):
return obj.next()
else:
next = next
def is_unicode(obj):
if sys.version_info[0] < 3:
return isinstance(obj, unicode)
else:
return isinstance(obj, str)
def coerce_text(v):
    if not isinstance(v, basestring_):
        if sys.version_info[0] < 3:
            if hasattr(v, '__unicode__'):
                return unicode(v)  # 'unicode' only exists on Python 2
            return bytes(v)
        if hasattr(v, '__str__'):
            return str(v)
        return bytes(v)
    return v
|
from typing import Set
from weblate.machinery.base import BatchStringMachineTranslation, get_machinery_language
from weblate.memory.models import Memory
class WeblateMemory(BatchStringMachineTranslation):
"""Translation service using strings already translated in Weblate."""
name = "Weblate Translation Memory"
rank_boost = 2
cache_translations = False
same_languages = True
def convert_language(self, language):
"""No conversion of language object."""
return get_machinery_language(language)
def is_supported(self, source, language):
"""Any language is supported."""
return True
def is_rate_limited(self):
"""This service has no rate limiting."""
return False
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Download list of possible translations from a service."""
for result in Memory.objects.lookup(
source,
language,
text,
user,
unit.translation.component.project,
unit.translation.component.project.use_shared_tm,
).iterator():
quality = self.comparer.similarity(text, result.source)
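            # Drop near-misses: anything below 10% similarity is skipped outright,
            # and results below the threshold are kept only for explicit searches.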
if quality < 10 or (quality < threshold and not search):
continue
yield {
"text": result.target,
"quality": quality,
"service": self.name,
"origin": result.get_origin_display(),
"source": result.source,
}
def download_batch_strings(
self, source, language, units, texts: Set[str], user=None, threshold: int = 75
):
project = units[0].translation.component.project
return Memory.objects.lookup(
source,
language,
texts,
user,
project,
project.use_shared_tm,
).values_list("source", "target")
|
import unittest
import six
from trashcli.restore import TrashDirectory
from integration_tests.files import require_empty_dir
from integration_tests.files import make_file
from mock import Mock
class TestTrashDirectory(unittest.TestCase):
def setUp(self):
require_empty_dir('sandbox')
self.trash_dir = TrashDirectory()
self.logger = Mock()
self.trash_dir.logger = self.logger
def test_should_list_a_trashinfo(self):
make_file('sandbox/info/foo.trashinfo')
result = self.list_trashinfos()
assert [('trashinfo', 'sandbox/info/foo.trashinfo')] == result
def test_should_list_multiple_trashinfo(self):
make_file('sandbox/info/foo.trashinfo')
make_file('sandbox/info/bar.trashinfo')
make_file('sandbox/info/baz.trashinfo')
result = self.list_trashinfos()
six.assertCountEqual(self,
[('trashinfo', 'sandbox/info/foo.trashinfo'),
('trashinfo', 'sandbox/info/baz.trashinfo'),
('trashinfo', 'sandbox/info/bar.trashinfo')],
result)
def test_non_trashinfo_should_reported_as_a_warn(self):
make_file('sandbox/info/not-a-trashinfo')
result = self.list_trashinfos()
six.assertCountEqual(self,
[('non_trashinfo',
'sandbox/info/not-a-trashinfo')],
result)
def list_trashinfos(self):
return list(self.trash_dir.all_info_files('sandbox'))
|
import os
import numpy as np
import pandas as pd
import xarray as xr
from . import randint, randn, requires_dask
try:
import dask
import dask.multiprocessing
except ImportError:
pass
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
class IOSingleNetCDF:
"""
A few examples that benchmark reading/writing a single netCDF file with
xarray
"""
timeout = 300.0
repeat = 1
number = 5
def make_ds(self):
# single Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
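        # Two chunking layouts for the dask-backed benchmarks: blocks along every
        # dimension vs. chunks along the time dimension only.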
self.block_chunks = {
"time": self.nt / 4,
"lon": self.nx / 3,
"lat": self.ny / 3,
}
self.time_chunks = {"time": int(self.nt / 36)}
times = pd.date_range("1970-01-01", periods=self.nt, freq="D")
lons = xr.DataArray(
np.linspace(0, 360, self.nx),
dims=("lon",),
attrs={"units": "degrees east", "long_name": "longitude"},
)
lats = xr.DataArray(
np.linspace(-90, 90, self.ny),
dims=("lat",),
attrs={"units": "degrees north", "long_name": "latitude"},
)
self.ds["foo"] = xr.DataArray(
randn((self.nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="foo",
encoding=None,
attrs={"units": "foo units", "description": "a description"},
)
self.ds["bar"] = xr.DataArray(
randn((self.nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="bar",
encoding=None,
attrs={"units": "bar units", "description": "a description"},
)
self.ds["baz"] = xr.DataArray(
randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),
coords={"lon": lons, "lat": lats},
dims=("lon", "lat"),
name="baz",
encoding=None,
attrs={"units": "baz units", "description": "a description"},
)
self.ds.attrs = {"history": "created for xarray benchmarking"}
self.oinds = {
"time": randint(0, self.nt, 120),
"lon": randint(0, self.nx, 20),
"lat": randint(0, self.ny, 10),
}
self.vinds = {
"time": xr.DataArray(randint(0, self.nt, 120), dims="x"),
"lon": xr.DataArray(randint(0, self.nx, 120), dims="x"),
"lat": slice(3, 20),
}
class IOWriteSingleNetCDF3(IOSingleNetCDF):
def setup(self):
self.format = "NETCDF3_64BIT"
self.make_ds()
def time_write_dataset_netcdf4(self):
self.ds.to_netcdf("test_netcdf4_write.nc", engine="netcdf4", format=self.format)
def time_write_dataset_scipy(self):
self.ds.to_netcdf("test_scipy_write.nc", engine="scipy", format=self.format)
class IOReadSingleNetCDF4(IOSingleNetCDF):
def setup(self):
self.make_ds()
self.filepath = "test_single_file.nc4.nc"
self.format = "NETCDF4"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_dataset(self.filepath, engine="netcdf4").load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4")
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4")
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF3(IOReadSingleNetCDF4):
def setup(self):
self.make_ds()
self.filepath = "test_single_file.nc3.nc"
self.format = "NETCDF3_64BIT"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy(self):
xr.open_dataset(self.filepath, engine="scipy").load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy")
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy")
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF4Dask(IOSingleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = "test_single_file.nc4.nc"
self.format = "NETCDF4"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_dataset(
self.filepath, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.time_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="netcdf4", chunks=self.time_chunks
).load()
class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = "test_single_file.nc3.nc"
self.format = "NETCDF3_64BIT"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="scipy", chunks=self.block_chunks
).load()
def time_load_dataset_scipy_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_scipy_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="scipy", chunks=self.time_chunks
).load()
class IOMultipleNetCDF:
"""
A few examples that benchmark reading/writing multiple netCDF files with
    xarray.
"""
timeout = 300.0
repeat = 1
number = 5
def make_ds(self, nfiles=10):
        # Build multiple datasets by splitting the time axis across nfiles files.
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.nfiles = nfiles
self.block_chunks = {
"time": self.nt / 4,
"lon": self.nx / 3,
"lat": self.ny / 3,
}
self.time_chunks = {"time": int(self.nt / 36)}
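        # block_chunks splits every dimension into a few dask blocks, while
        # time_chunks chunks only along the time axis; the dask benchmarks
        # below are run with both layouts.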
self.time_vars = np.split(
pd.date_range("1970-01-01", periods=self.nt, freq="D"), self.nfiles
)
self.ds_list = []
self.filenames_list = []
for i, times in enumerate(self.time_vars):
ds = xr.Dataset()
nt = len(times)
lons = xr.DataArray(
np.linspace(0, 360, self.nx),
dims=("lon",),
attrs={"units": "degrees east", "long_name": "longitude"},
)
lats = xr.DataArray(
np.linspace(-90, 90, self.ny),
dims=("lat",),
attrs={"units": "degrees north", "long_name": "latitude"},
)
ds["foo"] = xr.DataArray(
randn((nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="foo",
encoding=None,
attrs={"units": "foo units", "description": "a description"},
)
ds["bar"] = xr.DataArray(
randn((nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="bar",
encoding=None,
attrs={"units": "bar units", "description": "a description"},
)
ds["baz"] = xr.DataArray(
randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),
coords={"lon": lons, "lat": lats},
dims=("lon", "lat"),
name="baz",
encoding=None,
attrs={"units": "baz units", "description": "a description"},
)
ds.attrs = {"history": "created for xarray benchmarking"}
self.ds_list.append(ds)
self.filenames_list.append("test_netcdf_%i.nc" % i)
class IOWriteMultipleNetCDF3(IOMultipleNetCDF):
def setup(self):
self.make_ds()
self.format = "NETCDF3_64BIT"
def time_write_dataset_netcdf4(self):
xr.save_mfdataset(
self.ds_list, self.filenames_list, engine="netcdf4", format=self.format
)
def time_write_dataset_scipy(self):
xr.save_mfdataset(
self.ds_list, self.filenames_list, engine="scipy", format=self.format
)
class IOReadMultipleNetCDF4(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF4"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine="netcdf4").load()
def time_open_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine="netcdf4")
class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF3_64BIT"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine="scipy").load()
def time_open_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine="scipy")
class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF4"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
).load()
def time_open_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
)
def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
)
def time_open_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
)
def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
)
class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF3_64BIT"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.block_chunks
).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.time_chunks
).load()
def time_open_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.block_chunks
)
def time_open_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.time_chunks
)
def create_delayed_write():
import dask.array as da
vals = da.random.random(300, chunks=(1,))
ds = xr.Dataset({"vals": (["a"], vals)})
return ds.to_netcdf("file.nc", engine="netcdf4", compute=False)
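# With compute=False, to_netcdf returns a dask Delayed object; calling
# .compute() on it (in the time_write benchmarks below) performs the actual
# write, so only the write itself is timed.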
class IOWriteNetCDFDask:
timeout = 60
repeat = 1
number = 5
def setup(self):
requires_dask()
self.write = create_delayed_write()
def time_write(self):
self.write.compute()
class IOWriteNetCDFDaskDistributed:
def setup(self):
try:
import distributed
except ImportError:
raise NotImplementedError()
self.client = distributed.Client()
self.write = create_delayed_write()
def cleanup(self):
self.client.shutdown()
def time_write(self):
self.write.compute()
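# These classes follow airspeed velocity (asv) conventions: setup() runs
# before each benchmark, methods prefixed with time_ are timed, and the
# timeout/repeat/number class attributes control how asv executes them.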
|
GIT_PATH = 'https://github.com/google/multichase'
GIT_VERSION = 'ebd614435f9d510963c5568d840ff647ca8e3701'
INSTALL_PATH = 'multichase'
def _Install(vm):
"""Installs the multichase package on the VM."""
vm.Install('build_tools')
vm.RemoteCommand('rm -rf {path} && mkdir -p {path}'.format(
path=INSTALL_PATH))
vm.RemoteCommand('git clone --recursive {git_path} {dir}'.format(
dir=INSTALL_PATH, git_path=GIT_PATH))
vm.RemoteCommand('cd {dir} && git checkout {version} && make'.format(
dir=INSTALL_PATH, version=GIT_VERSION))
def YumInstall(vm):
"""Installs the multichase package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the multichase package on the VM."""
_Install(vm)
def Uninstall(vm):
  """Uninstalls the multichase package from the VM."""
  vm.RemoteCommand('rm -rf {0}'.format(INSTALL_PATH))
|
from pylatex import Document, MiniPage, LineBreak, VerticalSpace
def generate_labels():
    """Generate a PDF sheet of address labels laid out with MiniPage blocks."""
geometry_options = {"margin": "0.5in"}
doc = Document(geometry_options=geometry_options)
doc.change_document_style("empty")
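    # Lay out two labels per row: each MiniPage spans half the text width, and
    # after every second label a vertical space and line break start a new row.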
for i in range(10):
with doc.create(MiniPage(width=r"0.5\textwidth")):
doc.append("Vladimir Gorovikov")
doc.append("\n")
doc.append("Company Name")
doc.append("\n")
doc.append("Somewhere, City")
doc.append("\n")
doc.append("Country")
if (i % 2) == 1:
doc.append(VerticalSpace("20pt"))
doc.append(LineBreak())
doc.generate_pdf("minipage", clean_tex=False)
if __name__ == "__main__":
    generate_labels()
|
import collections
import re
import time
from absl import flags
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
flags.DEFINE_integer('nccl_slots', 8,
'Launch n processes per node on all allocated nodes')
flags.DEFINE_string(
'nccl_cuda_visible_devices', None, 'GPU identifiers are '
'given as integer indices or as UUID strings.')
flags.DEFINE_list(
    'nccl_extra_params', [],
    'Extra environment variables (KEY=VALUE) to export via mpirun -x; '
    'repeating a key lists alternative values to sweep over.')
flags.DEFINE_string('nccl_minbytes', '8', 'Minimum size to start with')
flags.DEFINE_string('nccl_maxbytes', '256M', 'Maximum size to end at')
flags.DEFINE_integer('nccl_stepfactor', 2,
'Multiplication factor between sizes')
flags.DEFINE_integer('nccl_ngpus', 1, 'Number of gpus per thread.')
flags.DEFINE_boolean('nccl_check', False, 'Check correctness of results.')
flags.DEFINE_integer('nccl_nthreads', 1, 'Number of threads per process')
flags.DEFINE_integer(
    'nccl_num_runs', 10, 'The number of consecutive runs.', lower_bound=1)
flags.DEFINE_integer('nccl_seconds_between_runs', 10,
                     'Seconds to sleep between consecutive runs.')
flags.DEFINE_integer('nccl_iters', 20, 'Number of iterations')
flags.DEFINE_boolean('nccl_install_mofed', False,
'Install Mellanox OpenFabrics drivers')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'nccl'
BENCHMARK_CONFIG = """
nccl:
description: Runs NCCL Benchmark. Specify the number of VMs with --num_vms.
vm_groups:
default:
vm_count: null
vm_spec:
GCP:
machine_type: n1-highmem-96
zone: us-central1-a
image_family: tf-latest-gpu-gvnic
image_project: deeplearning-platform-release
boot_disk_size: 105
gpu_type: v100
gpu_count: 8
AWS:
machine_type: p3dn.24xlarge
zone: us-east-1a
image: ami-084e787069ee27fb7
boot_disk_size: 105
Azure:
machine_type: Standard_NC24rs_v3
zone: eastus
boot_disk_size: 105
"""
HOSTFILE = 'HOSTFILE'
_SAMPLE_LINE_RE = re.compile(r'# nThread (?P<nThread>\d+) '
r'nGpus (?P<nGpus>\d+) '
r'minBytes (?P<minBytes>\d+) '
r'maxBytes (?P<maxBytes>\d+) '
r'step: (?P<step>\S+) '
r'warmup iters: (?P<warmup_iters>\d+) '
r'iters: (?P<iters>\d+) '
r'validation: (?P<validation>\d+)\s*')
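# Illustrative header line the regex above is meant to match (values made up;
# the real nccl-tests output prints this as a single line):
# "# nThread 1 nGpus 8 minBytes 8 maxBytes 268435456 step: 2(factor) warmup iters: 5 iters: 20 validation: 0"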
# Without '--mca btl_tcp_if_exclude docker0,lo', the run hangs forever.
# This is caused by the Docker network on DLVM images; btl_tcp_if_exclude
# tells Open MPI to skip the docker interfaces.
RUN_CMD = ('{mpi} '
'--hostfile {hostfile} '
'--mca pml ^cm '
'--mca btl tcp,self '
'--mca btl_tcp_if_exclude docker0,lo '
'--bind-to none '
'-N {slots} '
'{env} '
'nccl-tests/build/all_reduce_perf '
'--minbytes {minbytes} '
'--maxbytes {maxbytes} '
'--stepfactor {stepfactor} '
'--ngpus {ngpus} '
'--check {check} '
'--nthreads {nthreads} '
'--iters {iters}')
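# A rough sketch of what a rendered command might look like, assuming
# FLAGS.nccl_mpi is 'mpirun' and the remaining flags keep their defaults
# (values and paths below are illustrative, not produced by this module):
#   mpirun --hostfile HOSTFILE --mca pml ^cm --mca btl tcp,self \
#       --mca btl_tcp_if_exclude docker0,lo --bind-to none -N 8 \
#       -x NCCL_DEBUG=INFO nccl-tests/build/all_reduce_perf \
#       --minbytes 8 --maxbytes 256M --stepfactor 2 --ngpus 1 --check 0 \
#       --nthreads 1 --iters 20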
_DEFAULT = 'DEFAULT'
_METADATA_COLUMNS = ('size', 'count', 'nccl_type', 'redop', 'out_of_place_time',
'out_of_place_algbw', 'out_of_place_busbw',
'out_of_place_error', 'in_place_time', 'in_place_algbw',
'in_place_busbw', 'in_place_error')
_SAMPLE_NAMES = {
'Out of place algorithm bandwidth': 'out_of_place_algbw',
'Out of place bus bandwidth': 'out_of_place_busbw',
'In place algorithm bandwidth': 'in_place_algbw',
'In place bus bandwidth': 'in_place_busbw'
}
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def PrepareVm(vm):
"""Install and set up NCCL on the target vm.
Args:
vm: virtual machine on which to install NCCL
"""
vm.AuthenticateVm()
vm.Install('openmpi')
vm.Install('nccl')
env = ''
if FLAGS.aws_efa:
env = ('export LD_LIBRARY_PATH=/opt/amazon/efa/lib:/opt/amazon/efa/lib64:'
'$LD_LIBRARY_PATH &&')
if FLAGS.nccl_install_mofed:
vm.Install('mofed')
vm.RemoteCommand('rm -rf nccl-tests')
vm.RemoteCommand('git clone https://github.com/NVIDIA/nccl-tests.git')
vm.RemoteCommand('cd nccl-tests && {env} make MPI=1 MPI_HOME={mpi} '
'NCCL_HOME={nccl} CUDA_HOME={cuda}'.format(
env=env,
mpi=FLAGS.nccl_mpi_home,
nccl=FLAGS.nccl_home,
cuda='/usr/local/cuda-{}'.format(
FLAGS.cuda_toolkit_version)))
def Prepare(benchmark_spec):
"""Install and set up NCCL on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
benchmark_spec.always_call_cleanup = True
vm_util.RunThreaded(PrepareVm, benchmark_spec.vms)
host = benchmark_spec.vms[0]
host.RemoteCommand('rm -rf {hostfile}'.format(hostfile=HOSTFILE))
for vm in benchmark_spec.vms:
cmd = 'echo "{ip} slots={slots}" >> {hostfile}'.format(
ip=vm.internal_ip, hostfile=HOSTFILE, slots=FLAGS.nccl_slots)
host.RemoteCommand(cmd)
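# Note on Prepare above: the HOSTFILE it builds ends up with one line per VM,
# e.g. (IP and slot count illustrative): "10.0.0.2 slots=8".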
def CreateMetadataDict():
"""Create metadata dict to be used in run results.
Returns:
metadata dict
"""
metadata = {
'slots': FLAGS.nccl_slots,
'minbytes': FLAGS.nccl_minbytes,
'maxbytes': FLAGS.nccl_maxbytes,
'stepfactor': FLAGS.nccl_stepfactor,
'ngpus': FLAGS.nccl_ngpus,
'check': FLAGS.nccl_check,
'nthreads': FLAGS.nccl_nthreads,
'iters': FLAGS.nccl_iters,
'cuda_visible_devices': FLAGS.nccl_cuda_visible_devices,
'nccl_version': FLAGS.nccl_version,
'nccl_net_plugin': FLAGS.nccl_net_plugin,
'nccl_extra_params': FLAGS.nccl_extra_params,
'extra_params': FLAGS.nccl_extra_params
}
if FLAGS.nccl_install_mofed:
metadata['mofed_version'] = FLAGS.mofed_version
return metadata
def MakeSamplesFromOutput(metadata, output):
"""Create samples containing metrics.
Args:
    metadata: dict containing all the metadata to report.
output: string, command output
Example output:
perfkitbenchmarker/tests/linux_benchmarks/nccl_benchmark_test.py
Returns:
    A tuple of (list of bandwidth samples, max out-of-place algorithm bandwidth).
"""
samples = []
metadata.update(_SAMPLE_LINE_RE.match(output).groupdict())
results = regex_util.ExtractAllMatches(r'(Rank\s+\d+) (.*)', output)
for rank, device in results:
metadata[rank] = device
results = regex_util.ExtractAllMatches(
r'^\s*'
r'(\d+)\s+'
r'(\d+)\s+'
r'(\w+)\s+'
r'(\w+)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\S+)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\S+)', output, re.MULTILINE)
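  # Each match corresponds to one line of the nccl-tests results table and is
  # zipped onto _METADATA_COLUMNS in order, e.g. (illustrative values):
  #   134217728  33554432  float  sum  10000  13.4  23.5  N/A  10000  13.5  23.6  N/A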
max_out_of_place_algbw = 0
for row in results:
metadata_copy = metadata.copy()
metadata_copy.update(zip(_METADATA_COLUMNS, row))
for metric, metadata_key in sorted(_SAMPLE_NAMES.items()):
samples.append(
sample.Sample(metric, float(metadata_copy[metadata_key]), 'GB/s',
metadata_copy))
# Gbps is gigaBIT per second and GB/s is gigaBYTE per second
max_out_of_place_algbw = max(max_out_of_place_algbw,
float(metadata_copy['out_of_place_algbw']))
avg_bus_bandwidth = regex_util.ExtractExactlyOneMatch(
r'Avg bus bandwidth\s+: ([0-9\.]+)', output)
samples.append(
sample.Sample('avg_busbw', float(avg_bus_bandwidth), 'GB/s', metadata))
samples.append(
sample.Sample('max_out_of_place_algbw', max_out_of_place_algbw * 8,
'Gbps', metadata))
return samples, max_out_of_place_algbw
def TuningParameters(params):
"""Get all NCCL tuning parameters combination.
For example:
params = [
('NCCL_NSOCKS_PERTHREAD', ['DEFAULT', '2']),
('NCCL_SOCKET_NTHREADS', ['DEFAULT', '8']),
]
result = [
[],
[('NCCL_NSOCKS_PERTHREAD', '2')],
[('NCCL_SOCKET_NTHREADS', '8')],
[('NCCL_NSOCKS_PERTHREAD', '2'), ('NCCL_SOCKET_NTHREADS', '8')],
]
Args:
    params: list of (parameter name, list of parameter values) tuples.
Returns:
    a list of all NCCL tuning parameter combinations.
"""
if not params:
return [[]]
param_key, param_value_list = params.pop()
result = []
for param in TuningParameters(params):
for param_value in param_value_list:
param_args = [] if param_value == _DEFAULT else [(param_key, param_value)]
result.append(param + param_args)
return result
def Run(benchmark_spec):
"""Run NCCL on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
master = benchmark_spec.vms[0]
env = []
if FLAGS.nccl_cuda_visible_devices:
env.append(('CUDA_VISIBLE_DEVICES', FLAGS.nccl_cuda_visible_devices))
extra_params = collections.defaultdict(list)
metadata = CreateMetadataDict()
sample_results = []
for extra_param in FLAGS.nccl_extra_params:
param_key, param_value = extra_param.split('=', 1)
extra_params[param_key].append(param_value)
for extra_param in TuningParameters(list(extra_params.items())):
metadata_copy = metadata.copy()
for param_key, param_value in extra_param:
metadata_copy[param_key] = param_value
cmd = RUN_CMD.format(
mpi=FLAGS.nccl_mpi,
hostfile=HOSTFILE,
slots=FLAGS.nccl_slots,
env=' '.join('-x {key}={value}'.format(key=key, value=value)
for key, value in env + extra_param),
minbytes=FLAGS.nccl_minbytes,
maxbytes=FLAGS.nccl_maxbytes,
stepfactor=FLAGS.nccl_stepfactor,
ngpus=FLAGS.nccl_ngpus,
check=int(FLAGS.nccl_check),
nthreads=FLAGS.nccl_nthreads,
iters=FLAGS.nccl_iters)
max_out_of_place_algbw_results = []
for iteration in range(FLAGS.nccl_num_runs):
metadata_copy['run_iteration'] = iteration
stdout, _ = master.RobustRemoteCommand(cmd)
samples, max_out_of_place_algbw = MakeSamplesFromOutput(
metadata_copy, stdout)
sample_results.extend(samples)
max_out_of_place_algbw_results.append(max_out_of_place_algbw)
time.sleep(FLAGS.nccl_seconds_between_runs)
metadata_copy.pop('run_iteration')
avg_busbw = [s.value for s in sample_results if s.metric == 'avg_busbw']
sample_results.append(
sample.Sample('avg_busbw_mean', np.mean(avg_busbw), 'GB/s',
metadata_copy))
sample_results.append(
sample.Sample('avg_busbw_std', np.std(avg_busbw), 'GB/s',
metadata_copy))
sample_results.append(
sample.Sample('max_out_of_place_algbw_mean',
np.mean(max_out_of_place_algbw_results), 'Gbps',
metadata_copy))
sample_results.append(
sample.Sample('max_out_of_place_algbw_std',
np.std(max_out_of_place_algbw_results), 'Gbps',
metadata_copy))
return sample_results
def Cleanup(unused_benchmark_spec):
"""Cleanup NCCL on the cluster.
Args:
unused_benchmark_spec: The benchmark specification. Contains all data that
is required to run the benchmark.
"""
pass
|