import sys
import os.path
def error(message, *args):
if args:
message = message % args
sys.stderr.write('ERROR: %s\n' % message)
try:
import lxml.etree as et
except ImportError:
error(sys.exc_info()[1])
sys.exit(5)
try:
basestring
except NameError:
basestring = (str, bytes)
try:
unicode
except NameError:
unicode = str
SHORT_DESCRIPTION = "An XPath file finder for XML files."
__doc__ = SHORT_DESCRIPTION + '''
Evaluates an XPath expression against a series of files and prints the
matching subtrees to stdout.
Examples::
$ cat test.xml
<root>
<a num="1234" notnum="1234abc"/>
<b text="abc"/>
<c text="aBc"/>
<d xmlns="http://www.example.org/ns/example" num="2"/>
<d xmlns="http://www.example.org/ns/example" num="4"/>
</root>
# find all leaf elements:
$ SCRIPT '//*[not(*)]' test.xml
<a num="1234" notnum="1234abc"/>
<b text="abc"/>
<c text="aBc"/>
# find all elements with attribute values containing "abc" ignoring case:
$ SCRIPT '//*[@*[contains(py:lower(.), "abc")]]' test.xml
<a num="1234" notnum="1234abc"/>
<b text="abc"/>
<c text="aBc"/>
# find all numeric attribute values:
$ SCRIPT '//@*[re:match(., "^[0-9]+$")]' test.xml
1234
# find all elements with numeric attribute values:
$ SCRIPT '//*[@*[re:match(., "^[0-9]+$")]]' test.xml
<a num="1234" notnum="1234abc"/>
# find all elements with numeric attribute values in more than one file:
$ SCRIPT '//*[@*[re:match(., "^[0-9]+$")]]' test.xml test.xml test.xml
>> test.xml
<a num="1234" notnum="1234abc"/>
>> test.xml
<a num="1234" notnum="1234abc"/>
>> test.xml
<a num="1234" notnum="1234abc"/>
# find XML files that have non-empty root nodes:
$ SCRIPT -q '*' test.xml test.xml test.xml
>> test.xml
>> test.xml
>> test.xml
# find out if an XML file has at most depth three:
$ SCRIPT 'not(/*/*/*)' test.xml
True
# find all elements that belong to a specific namespace and have @num=2:
$ SCRIPT --ns e=http://www.example.org/ns/example '//e:*[@num="2"]' test.xml
<d xmlns="http://www.example.org/ns/example" num="2"/>
By default, all Python builtins and string methods are available as
XPath functions through the ``py`` prefix. There is also a string
comparison function ``py:within(x, a, b)`` that tests the string x for
being lexicographically within the interval ``a <= x <= b``.
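For example, ``py:within`` can be combined with other XPath functions; a
hypothetical query matching elements whose tag name sorts lexicographically
between "a" and "c":
$ SCRIPT '//*[py:within(name(), "a", "c")]' test.xml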
'''.replace('SCRIPT', os.path.basename(sys.argv[0]))
REGEXP_NS = "http://exslt.org/regular-expressions"
PYTHON_BUILTINS_NS = "PYTHON-BUILTINS"
def make_parser(remove_blank_text=True, **kwargs):
return et.XMLParser(remove_blank_text=remove_blank_text, **kwargs)
def print_result(result, pretty_print, encoding=None, _is_py3=sys.version_info[0] >= 3):
stdout = sys.stdout
if not stdout.isatty() and not encoding:
encoding = 'utf8'
if et.iselement(result):
result = et.tostring(result, xml_declaration=False, with_tail=False,
pretty_print=pretty_print, encoding=encoding)
if not pretty_print:
# pretty printing appends newline, otherwise we do it
if isinstance(result, unicode):
result += '\n'
else:
result += '\n'.encode('ascii')
elif isinstance(result, basestring):
result += '\n'
else:
result = '%r\n' % result # '%r' for better number formatting
if encoding and encoding != 'unicode' and isinstance(result, unicode):
result = result.encode(encoding)
if _is_py3 and not isinstance(result, unicode):
stdout.buffer.write(result)
else:
stdout.write(result)
def print_results(results, pretty_print):
if isinstance(results, list):
for result in results:
print_result(result, pretty_print)
else:
print_result(results, pretty_print)
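# Note: iter_input() below accepts either a filename or an already opened
# binary file object; with line_by_line=True every non-empty input line is
# parsed as a separate XML document (handy for grep-style pipelines).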
def iter_input(input, filename, parser, line_by_line):
if isinstance(input, basestring):
with open(input, 'rb') as f:
for tree in iter_input(f, filename, parser, line_by_line):
yield tree
else:
try:
if line_by_line:
for line in input:
if line:
yield et.ElementTree(et.fromstring(line, parser))
else:
yield et.parse(input, parser)
except IOError:
e = sys.exc_info()[1]
error("parsing %r failed: %s: %s",
filename, e.__class__.__name__, e)
def find_in_file(f, xpath, print_name=True, xinclude=False, pretty_print=True, line_by_line=False,
encoding=None, verbose=True):
try:
filename = f.name
except AttributeError:
filename = f
xml_parser = et.XMLParser(encoding=encoding)
try:
if not callable(xpath):
xpath = et.XPath(xpath)
found = False
for tree in iter_input(f, filename, xml_parser, line_by_line):
try:
if xinclude:
tree.xinclude()
except IOError:
e = sys.exc_info()[1]
error("XInclude for %r failed: %s: %s",
filename, e.__class__.__name__, e)
results = xpath(tree)
if results is not None and results != []:
found = True
if verbose:
print_results(results, pretty_print)
if not found:
return False
if not verbose and print_name:
print(filename)
return True
except Exception:
e = sys.exc_info()[1]
error("%r: %s: %s",
filename, e.__class__.__name__, e)
return False
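# A minimal (hypothetical) programmatic use of find_in_file(), assuming a file
# "test.xml" exists on disk; main() below drives the same call from the CLI:
#
#     xpath = et.XPath('//*[not(*)]')
#     find_in_file('test.xml', xpath, print_name=False)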
def register_builtins():
ns = et.FunctionNamespace(PYTHON_BUILTINS_NS)
tostring = et.tostring
def make_string(s):
if isinstance(s, list):
if not s:
return ''
s = s[0]
if not isinstance(s, unicode):
if et.iselement(s):
s = tostring(s, method="text", encoding='unicode')
else:
s = unicode(s)
return s
def wrap_builtin(b):
def wrapped_builtin(_, *args):
return b(*args)
return wrapped_builtin
for (name, builtin) in vars(__builtins__).items():
if callable(builtin):
if not name.startswith('_') and name == name.lower():
ns[name] = wrap_builtin(builtin)
def wrap_str_method(b):
def wrapped_method(_, *args):
args = tuple(map(make_string, args))
return b(*args)
return wrapped_method
for (name, method) in vars(unicode).items():
if callable(method):
if not name.startswith('_'):
ns[name] = wrap_str_method(method)
def within(_, s, a, b):
return make_string(a) <= make_string(s) <= make_string(b)
ns["within"] = within
def parse_options():
from optparse import OptionParser
usage = "usage: %prog [options] XPATH [FILE ...]"
parser = OptionParser(
usage = usage,
version = "%prog using lxml.etree " + et.__version__,
description = SHORT_DESCRIPTION)
parser.add_option("-H", "--long-help",
action="store_true", dest="long_help", default=False,
help="a longer help text including usage examples")
parser.add_option("-i", "--xinclude",
action="store_true", dest="xinclude", default=False,
help="run XInclude on the file before XPath")
parser.add_option("--no-python",
action="store_false", dest="python", default=True,
help="disable Python builtins and functions (prefix 'py')")
parser.add_option("--no-regexp",
action="store_false", dest="regexp", default=True,
help="disable regular expressions (prefix 're')")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-t", "--root-tag",
dest="root_tag", metavar="TAG",
help="surround output with <TAG>...</TAG> to produce a well-formed XML document")
parser.add_option("-p", "--plain",
action="store_false", dest="pretty_print", default=True,
help="do not pretty-print the output")
parser.add_option("-l", "--lines",
action="store_true", dest="line_by_line", default=False,
help="parse each line of input separately (e.g. grep output)")
parser.add_option("-e", "--encoding",
dest="encoding",
help="use a specific encoding for parsing (may be required with --lines)")
parser.add_option("-N", "--ns", metavar="PREFIX=NS",
action="append", dest="namespaces", default=[],
help="add a namespace declaration")
options, args = parser.parse_args()
if options.long_help:
parser.print_help()
print(__doc__[__doc__.find('\n\n')+1:])
sys.exit(0)
if len(args) < 1:
parser.error("first argument must be an XPath expression")
return options, args
def main(options, args):
namespaces = {}
if options.regexp:
namespaces["re"] = REGEXP_NS
if options.python:
register_builtins()
namespaces["py"] = PYTHON_BUILTINS_NS
for ns in options.namespaces:
prefix, NS = ns.split("=", 1)
namespaces[prefix.strip()] = NS.strip()
xpath = et.XPath(args[0], namespaces=namespaces)
files = args[1:] or [sys.stdin]
if options.root_tag and options.verbose:
print('<%s>' % options.root_tag)
found = False
print_name = len(files) > 1 and not options.root_tag
for input in files:
found |= find_in_file(
input, xpath,
print_name=print_name,
xinclude=options.xinclude,
pretty_print=options.pretty_print,
line_by_line=options.line_by_line,
encoding=options.encoding,
verbose=options.verbose,
)
if options.root_tag and options.verbose:
print('</%s>' % options.root_tag)
return found
if __name__ == "__main__":
try:
options, args = parse_options()
found = main(options, args)
if found:
sys.exit(0)
else:
sys.exit(1)
except et.XPathSyntaxError:
error(sys.exc_info()[1])
sys.exit(4)
except KeyboardInterrupt:
pass
|
from homematicip.base.enums import SmokeDetectorAlarmType, WindowState
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.components.homematicip_cloud.binary_sensor import (
ATTR_ACCELERATION_SENSOR_MODE,
ATTR_ACCELERATION_SENSOR_NEUTRAL_POSITION,
ATTR_ACCELERATION_SENSOR_SENSITIVITY,
ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE,
ATTR_MOISTURE_DETECTED,
ATTR_MOTION_DETECTED,
ATTR_POWER_MAINS_FAILURE,
ATTR_PRESENCE_DETECTED,
ATTR_WATER_LEVEL_DETECTED,
ATTR_WINDOW_STATE,
)
from homeassistant.components.homematicip_cloud.generic_entity import (
ATTR_EVENT_DELAY,
ATTR_GROUP_MEMBER_UNREACHABLE,
ATTR_LOW_BATTERY,
ATTR_RSSI_DEVICE,
ATTR_SABOTAGE,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
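# Each test below follows the same pattern: build a mocked access point that
# contains only the device or group under test, read the entity's initial
# state, then push attribute changes through async_manipulate_test_data() and
# assert the resulting Home Assistant state and attributes.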
async def test_manually_configured_platform(hass):
"""Test that we do not set up an access point."""
assert await async_setup_component(
hass,
BINARY_SENSOR_DOMAIN,
{BINARY_SENSOR_DOMAIN: {"platform": HMIPC_DOMAIN}},
)
assert not hass.data.get(HMIPC_DOMAIN)
async def test_hmip_access_point_cloud_connection_sensor(
hass, default_mock_hap_factory
):
"""Test HomematicipCloudConnectionSensor."""
entity_id = "binary_sensor.access_point_cloud_connection"
entity_name = "Access Point Cloud Connection"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "connected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_acceleration_sensor(hass, default_mock_hap_factory):
"""Test HomematicipAccelerationSensor."""
entity_id = "binary_sensor.garagentor"
entity_name = "Garagentor"
device_model = "HmIP-SAM"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_MODE] == "FLAT_DECT"
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_NEUTRAL_POSITION] == "VERTICAL"
assert (
ha_state.attributes[ATTR_ACCELERATION_SENSOR_SENSITIVITY] == "SENSOR_RANGE_4G"
)
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE] == 45
service_call_counter = len(hmip_device.mock_calls)
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", False
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert len(hmip_device.mock_calls) == service_call_counter + 1
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", True
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert len(hmip_device.mock_calls) == service_call_counter + 2
async def test_hmip_tilt_vibration_sensor(hass, default_mock_hap_factory):
"""Test HomematicipTiltVibrationSensor."""
entity_id = "binary_sensor.garage_neigungs_und_erschutterungssensor"
entity_name = "Garage Neigungs- und Erschütterungssensor"
device_model = "HmIP-STV"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_MODE] == "FLAT_DECT"
assert (
ha_state.attributes[ATTR_ACCELERATION_SENSOR_SENSITIVITY] == "SENSOR_RANGE_2G"
)
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE] == 45
service_call_counter = len(hmip_device.mock_calls)
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", False
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert len(hmip_device.mock_calls) == service_call_counter + 1
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", True
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert len(hmip_device.mock_calls) == service_call_counter + 2
async def test_hmip_contact_interface(hass, default_mock_hap_factory):
"""Test HomematicipContactInterface."""
entity_id = "binary_sensor.kontakt_schnittstelle_unterputz_1_fach"
entity_name = "Kontakt-Schnittstelle Unterputz – 1-fach"
device_model = "HmIP-FCI1"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "windowState", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_shutter_contact(hass, default_mock_hap_factory):
"""Test HomematicipShutterContact."""
entity_id = "binary_sensor.fenstergriffsensor"
entity_name = "Fenstergriffsensor"
device_model = "HmIP-SRH"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.TILTED
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.OPEN
await async_manipulate_test_data(
hass, hmip_device, "windowState", WindowState.CLOSED
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert not ha_state.attributes.get(ATTR_WINDOW_STATE)
await async_manipulate_test_data(hass, hmip_device, "windowState", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
# test common attributes
assert ha_state.attributes[ATTR_RSSI_DEVICE] == -54
assert not ha_state.attributes.get(ATTR_SABOTAGE)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
ha_state = hass.states.get(entity_id)
assert ha_state.attributes[ATTR_SABOTAGE]
async def test_hmip_shutter_contact_optical(hass, default_mock_hap_factory):
"""Test HomematicipShutterContact."""
entity_id = "binary_sensor.sitzplatzture"
entity_name = "Sitzplatzt\u00fcre"
device_model = "HmIP-SWDO-PL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "windowState", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
# test common attributes
assert ha_state.attributes[ATTR_RSSI_DEVICE] == -72
assert not ha_state.attributes.get(ATTR_SABOTAGE)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
ha_state = hass.states.get(entity_id)
assert ha_state.attributes[ATTR_SABOTAGE]
async def test_hmip_motion_detector(hass, default_mock_hap_factory):
"""Test HomematicipMotionDetector."""
entity_id = "binary_sensor.bewegungsmelder_fur_55er_rahmen_innen"
entity_name = "Bewegungsmelder für 55er Rahmen – innen"
device_model = "HmIP-SMI55"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "motionDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_presence_detector(hass, default_mock_hap_factory):
"""Test HomematicipPresenceDetector."""
entity_id = "binary_sensor.spi_1"
entity_name = "SPI_1"
device_model = "HmIP-SPI"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "presenceDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert not ha_state.attributes.get(ATTR_EVENT_DELAY)
await async_manipulate_test_data(hass, hmip_device, "eventDelay", True)
ha_state = hass.states.get(entity_id)
assert ha_state.attributes[ATTR_EVENT_DELAY]
async def test_hmip_pluggable_mains_failure_surveillance_sensor(
hass, default_mock_hap_factory
):
"""Test HomematicipPresenceDetector."""
entity_id = "binary_sensor.netzausfalluberwachung"
entity_name = "Netzausfallüberwachung"
device_model = "HmIP-PMFS"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "powerMainsFailure", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_smoke_detector(hass, default_mock_hap_factory):
"""Test HomematicipSmokeDetector."""
entity_id = "binary_sensor.rauchwarnmelder"
entity_name = "Rauchwarnmelder"
device_model = "HmIP-SWSD"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
SmokeDetectorAlarmType.PRIMARY_ALARM,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
None,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_water_detector(hass, default_mock_hap_factory):
"""Test HomematicipWaterDetector."""
entity_id = "binary_sensor.wassersensor"
entity_name = "Wassersensor"
device_model = "HmIP-SWD"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", True)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", True)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", False)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", False)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_storm_sensor(hass, default_mock_hap_factory):
"""Test HomematicipStormSensor."""
entity_id = "binary_sensor.weather_sensor_plus_storm"
entity_name = "Weather Sensor – plus Storm"
device_model = "HmIP-SWO-PL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Weather Sensor – plus"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "storm", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_rain_sensor(hass, default_mock_hap_factory):
"""Test HomematicipRainSensor."""
entity_id = "binary_sensor.wettersensor_pro_raining"
entity_name = "Wettersensor - pro Raining"
device_model = "HmIP-SWO-PR"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wettersensor - pro"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "raining", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_sunshine_sensor(hass, default_mock_hap_factory):
"""Test HomematicipSunshineSensor."""
entity_id = "binary_sensor.wettersensor_pro_sunshine"
entity_name = "Wettersensor - pro Sunshine"
device_model = "HmIP-SWO-PR"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wettersensor - pro"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes["today_sunshine_duration_in_minutes"] == 100
await async_manipulate_test_data(hass, hmip_device, "sunshine", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_battery_sensor(hass, default_mock_hap_factory):
"""Test HomematicipSunshineSensor."""
entity_id = "binary_sensor.wohnungsture_battery"
entity_name = "Wohnungstüre Battery"
device_model = "HMIP-SWDO"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wohnungstüre"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "lowBat", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_security_zone_sensor_group(hass, default_mock_hap_factory):
"""Test HomematicipSecurityZoneSensorGroup."""
entity_id = "binary_sensor.internal_securityzone"
entity_name = "INTERNAL SecurityZone"
device_model = "HmIP-SecurityZone"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_groups=["INTERNAL"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
assert not ha_state.attributes.get(ATTR_MOTION_DETECTED)
assert not ha_state.attributes.get(ATTR_PRESENCE_DETECTED)
assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
assert not ha_state.attributes.get(ATTR_SABOTAGE)
assert not ha_state.attributes.get(ATTR_WINDOW_STATE)
await async_manipulate_test_data(hass, hmip_device, "motionDetected", True)
await async_manipulate_test_data(hass, hmip_device, "presenceDetected", True)
await async_manipulate_test_data(hass, hmip_device, "unreach", True)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_MOTION_DETECTED]
assert ha_state.attributes[ATTR_PRESENCE_DETECTED]
assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
assert ha_state.attributes[ATTR_SABOTAGE]
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.OPEN
async def test_hmip_security_sensor_group(hass, default_mock_hap_factory):
"""Test HomematicipSecuritySensorGroup."""
entity_id = "binary_sensor.buro_sensors"
entity_name = "Büro Sensors"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Büro"])
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
SmokeDetectorAlarmType.PRIMARY_ALARM,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert (
ha_state.attributes["smoke_detector_alarm"]
== SmokeDetectorAlarmType.PRIMARY_ALARM
)
await async_manipulate_test_data(
hass, hmip_device, "smokeDetectorAlarmType", SmokeDetectorAlarmType.IDLE_OFF
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert not ha_state.attributes.get(ATTR_LOW_BATTERY)
assert not ha_state.attributes.get(ATTR_MOTION_DETECTED)
assert not ha_state.attributes.get(ATTR_PRESENCE_DETECTED)
assert not ha_state.attributes.get(ATTR_POWER_MAINS_FAILURE)
assert not ha_state.attributes.get(ATTR_MOISTURE_DETECTED)
assert not ha_state.attributes.get(ATTR_WATER_LEVEL_DETECTED)
assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
assert not ha_state.attributes.get(ATTR_SABOTAGE)
assert not ha_state.attributes.get(ATTR_WINDOW_STATE)
await async_manipulate_test_data(hass, hmip_device, "lowBat", True)
await async_manipulate_test_data(hass, hmip_device, "motionDetected", True)
await async_manipulate_test_data(hass, hmip_device, "presenceDetected", True)
await async_manipulate_test_data(hass, hmip_device, "powerMainsFailure", True)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", True)
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", True)
await async_manipulate_test_data(hass, hmip_device, "unreach", True)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_LOW_BATTERY]
assert ha_state.attributes[ATTR_MOTION_DETECTED]
assert ha_state.attributes[ATTR_PRESENCE_DETECTED]
assert ha_state.attributes[ATTR_POWER_MAINS_FAILURE]
assert ha_state.attributes[ATTR_MOISTURE_DETECTED]
assert ha_state.attributes[ATTR_WATER_LEVEL_DETECTED]
assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
assert ha_state.attributes[ATTR_SABOTAGE]
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.OPEN
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
SmokeDetectorAlarmType.INTRUSION_ALARM,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_wired_multi_contact_interface(hass, default_mock_hap_factory):
"""Test HomematicipMultiContactInterface."""
entity_id = "binary_sensor.wired_eingangsmodul_32_fach_channel5"
entity_name = "Wired Eingangsmodul – 32-fach Channel5"
device_model = "HmIPW-DRI32"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wired Eingangsmodul – 32-fach"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(
hass, hmip_device, "windowState", WindowState.OPEN, channel=5
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "windowState", None, channel=5)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
|
import asyncio
import datetime
import logging
from aiohomekit.exceptions import (
AccessoryDisconnectedError,
AccessoryNotFoundError,
EncryptionError,
)
from aiohomekit.model import Accessories
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_time_interval
from .const import CONTROLLER, DOMAIN, ENTITY_MAP, HOMEKIT_ACCESSORY_DISPATCH
from .device_trigger import async_fire_triggers, async_setup_triggers_for_entry
DEFAULT_SCAN_INTERVAL = datetime.timedelta(seconds=60)
RETRY_INTERVAL = 60 # seconds
_LOGGER = logging.getLogger(__name__)
def get_accessory_information(accessory):
"""Obtain the accessory information service of a HomeKit device."""
result = {}
for service in accessory["services"]:
stype = service["type"].upper()
if ServicesTypes.get_short(stype) != "accessory-information":
continue
for characteristic in service["characteristics"]:
ctype = CharacteristicsTypes.get_short(characteristic["type"])
if "value" in characteristic:
result[ctype] = characteristic["value"]
return result
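# The "accessories" data handled here is the JSON-style structure returned by
# aiohomekit's list_accessories_and_characteristics(); roughly (an illustrative
# sketch, not an exact schema):
#   [{"aid": 1, "services": [{"iid": 2, "type": "...",
#     "characteristics": [{"iid": 3, "type": "...", "value": ...}]}]}]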
def get_bridge_information(accessories):
"""Return the accessory info for the bridge."""
for accessory in accessories:
if accessory["aid"] == 1:
return get_accessory_information(accessory)
return get_accessory_information(accessories[0])
def get_accessory_name(accessory_info):
"""Return the name field of an accessory."""
for field in ("name", "model", "manufacturer"):
if field in accessory_info:
return accessory_info[field]
return None
class HKDevice:
"""HomeKit device."""
def __init__(self, hass, config_entry, pairing_data):
"""Initialise a generic HomeKit device."""
self.hass = hass
self.config_entry = config_entry
# We copy pairing_data because homekit_python may mutate it, but we
# don't want to mutate a dict owned by a config entry.
self.pairing_data = pairing_data.copy()
self.pairing = hass.data[CONTROLLER].load_pairing(
self.pairing_data["AccessoryPairingID"], self.pairing_data
)
self.accessories = None
self.config_num = 0
self.entity_map = Accessories()
# A list of callbacks that turn HK service metadata into entities
self.listeners = []
        # The platforms we have forwarded the config entry to so far. If a new
        # accessory is added to a bridge we may have to load additional
        # platforms. We don't want to load all platforms up front if it's just
        # a lightbulb, and we don't want to forward a config entry twice
        # (that triggers a "Config entry already set up" error).
self.platforms = set()
# This just tracks aid/iid pairs so we know if a HK service has been
# mapped to a HA entity.
self.entities = []
# A map of aid -> device_id
# Useful when routing events to triggers
self.devices = {}
self.available = True
self.signal_state_updated = "_".join((DOMAIN, self.unique_id, "state_updated"))
# Current values of all characteristics homekit_controller is tracking.
# Key is a (accessory_id, characteristic_id) tuple.
self.current_state = {}
self.pollable_characteristics = []
        # If this is set, polling is active and can be disabled by calling
        # the stored remover.
self._polling_interval_remover = None
# Never allow concurrent polling of the same accessory or bridge
self._polling_lock = asyncio.Lock()
self._polling_lock_warned = False
self.watchable_characteristics = []
self.pairing.dispatcher_connect(self.process_new_events)
def add_pollable_characteristics(self, characteristics):
"""Add (aid, iid) pairs that we need to poll."""
self.pollable_characteristics.extend(characteristics)
def remove_pollable_characteristics(self, accessory_id):
"""Remove all pollable characteristics by accessory id."""
self.pollable_characteristics = [
char for char in self.pollable_characteristics if char[0] != accessory_id
]
def add_watchable_characteristics(self, characteristics):
"""Add (aid, iid) pairs that we need to poll."""
self.watchable_characteristics.extend(characteristics)
self.hass.async_create_task(self.pairing.subscribe(characteristics))
def remove_watchable_characteristics(self, accessory_id):
"""Remove all pollable characteristics by accessory id."""
self.watchable_characteristics = [
char for char in self.watchable_characteristics if char[0] != accessory_id
]
@callback
def async_set_unavailable(self):
"""Mark state of all entities on this connection as unavailable."""
self.available = False
self.hass.helpers.dispatcher.async_dispatcher_send(self.signal_state_updated)
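    # Setup in short (see async_setup below): reuse the cached entity map when
    # one exists, otherwise fetch it from the accessory via
    # async_refresh_entity_map(); either way async_process_entity_map() then
    # forwards platforms, registers devices, creates entities, and
    # async_update() is polled every DEFAULT_SCAN_INTERVAL.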
async def async_setup(self):
"""Prepare to use a paired HomeKit device in Home Assistant."""
cache = self.hass.data[ENTITY_MAP].get_map(self.unique_id)
if not cache:
if await self.async_refresh_entity_map(self.config_num):
self._polling_interval_remover = async_track_time_interval(
self.hass, self.async_update, DEFAULT_SCAN_INTERVAL
)
return True
return False
self.accessories = cache["accessories"]
self.config_num = cache["config_num"]
self.entity_map = Accessories.from_list(self.accessories)
self._polling_interval_remover = async_track_time_interval(
self.hass, self.async_update, DEFAULT_SCAN_INTERVAL
)
self.hass.async_create_task(self.async_process_entity_map())
return True
async def async_create_devices(self):
"""
Build device registry entries for all accessories paired with the bridge.
        This is done in addition to the entity-level registration for two
        reasons: the bridge might not have any entities attached to it, and
        there are stateless entities like doorbells and remote controls.
"""
device_registry = await self.hass.helpers.device_registry.async_get_registry()
devices = {}
for accessory in self.entity_map.accessories:
info = accessory.services.first(
service_type=ServicesTypes.ACCESSORY_INFORMATION,
)
device_info = {
"identifiers": {
(
DOMAIN,
"serial-number",
info.value(CharacteristicsTypes.SERIAL_NUMBER),
)
},
"name": info.value(CharacteristicsTypes.NAME),
"manufacturer": info.value(CharacteristicsTypes.MANUFACTURER, ""),
"model": info.value(CharacteristicsTypes.MODEL, ""),
"sw_version": info.value(CharacteristicsTypes.FIRMWARE_REVISION, ""),
}
if accessory.aid == 1:
# Accessory 1 is the root device (sometimes the only device, sometimes a bridge)
# Link the root device to the pairing id for the connection.
device_info["identifiers"].add((DOMAIN, "accessory-id", self.unique_id))
else:
# Every pairing has an accessory 1
# It *doesn't* have a via_device, as it is the device we are connecting to
# Every other accessory should use it as its via device.
device_info["via_device"] = (
DOMAIN,
"serial-number",
self.connection_info["serial-number"],
)
device = device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
**device_info,
)
devices[accessory.aid] = device.id
self.devices = devices
async def async_process_entity_map(self):
"""
Process the entity map and load any platforms or entities that need adding.
This is idempotent and will be called at startup and when we detect metadata changes
via the c# counter on the zeroconf record.
"""
# Ensure the Pairing object has access to the latest version of the entity map. This
# is especially important for BLE, as the Pairing instance relies on the entity map
        # to map aid/iid to GATT characteristics, so push it there as well.
self.pairing.pairing_data["accessories"] = self.accessories
await self.async_load_platforms()
await self.async_create_devices()
# Load any triggers for this config entry
await async_setup_triggers_for_entry(self.hass, self.config_entry)
self.add_entities()
if self.watchable_characteristics:
await self.pairing.subscribe(self.watchable_characteristics)
await self.async_update()
return True
async def async_unload(self):
"""Stop interacting with device and prepare for removal from hass."""
if self._polling_interval_remover:
self._polling_interval_remover()
await self.pairing.unsubscribe(self.watchable_characteristics)
unloads = []
for platform in self.platforms:
unloads.append(
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, platform
)
)
results = await asyncio.gather(*unloads)
return False not in results
async def async_refresh_entity_map(self, config_num):
"""Handle setup of a HomeKit accessory."""
try:
self.accessories = await self.pairing.list_accessories_and_characteristics()
except AccessoryDisconnectedError:
# If we fail to refresh this data then we will naturally retry
# later when Bonjour spots c# is still not up to date.
return False
self.entity_map = Accessories.from_list(self.accessories)
self.hass.data[ENTITY_MAP].async_create_or_update_map(
self.unique_id, config_num, self.accessories
)
self.config_num = config_num
self.hass.async_create_task(self.async_process_entity_map())
return True
def add_listener(self, add_entities_cb):
"""Add a callback to run when discovering new entities."""
self.listeners.append(add_entities_cb)
self._add_new_entities([add_entities_cb])
def add_entities(self):
"""Process the entity map and create HA entities."""
self._add_new_entities(self.listeners)
def _add_new_entities(self, callbacks):
for accessory in self.accessories:
aid = accessory["aid"]
for service in accessory["services"]:
iid = service["iid"]
stype = ServicesTypes.get_short(service["type"].upper())
service["stype"] = stype
if (aid, iid) in self.entities:
# Don't add the same entity again
continue
for listener in callbacks:
if listener(aid, service):
self.entities.append((aid, iid))
break
async def async_load_platforms(self):
"""Load any platforms needed by this HomeKit device."""
for accessory in self.accessories:
for service in accessory["services"]:
stype = ServicesTypes.get_short(service["type"].upper())
if stype not in HOMEKIT_ACCESSORY_DISPATCH:
continue
platform = HOMEKIT_ACCESSORY_DISPATCH[stype]
if platform in self.platforms:
continue
self.platforms.add(platform)
try:
await self.hass.config_entries.async_forward_entry_setup(
self.config_entry, platform
)
except Exception:
self.platforms.remove(platform)
raise
async def async_update(self, now=None):
"""Poll state of all entities attached to this bridge/accessory."""
if not self.pollable_characteristics:
_LOGGER.debug("HomeKit connection not polling any characteristics")
return
if self._polling_lock.locked():
if not self._polling_lock_warned:
_LOGGER.warning(
"HomeKit controller update skipped as previous poll still in flight"
)
self._polling_lock_warned = True
return
if self._polling_lock_warned:
_LOGGER.info(
"HomeKit controller no longer detecting back pressure - not skipping poll"
)
self._polling_lock_warned = False
async with self._polling_lock:
_LOGGER.debug("Starting HomeKit controller update")
try:
new_values_dict = await self.get_characteristics(
self.pollable_characteristics
)
except AccessoryNotFoundError:
# Not only did the connection fail, but also the accessory is not
# visible on the network.
self.async_set_unavailable()
return
except (AccessoryDisconnectedError, EncryptionError):
# Temporary connection failure. Device is still available but our
# connection was dropped.
return
self.process_new_events(new_values_dict)
_LOGGER.debug("Finished HomeKit controller update")
def process_new_events(self, new_values_dict):
"""Process events from accessory into HA state."""
self.available = True
# Process any stateless events (via device_triggers)
async_fire_triggers(self, new_values_dict)
for (aid, cid), value in new_values_dict.items():
accessory = self.current_state.setdefault(aid, {})
accessory[cid] = value
# self.current_state will be replaced by entity_map in a future PR
# For now we update both
self.entity_map.process_changes(new_values_dict)
self.hass.helpers.dispatcher.async_dispatcher_send(self.signal_state_updated)
async def get_characteristics(self, *args, **kwargs):
"""Read latest state from homekit accessory."""
return await self.pairing.get_characteristics(*args, **kwargs)
async def put_characteristics(self, characteristics):
"""Control a HomeKit device state from Home Assistant."""
results = await self.pairing.put_characteristics(characteristics)
# Feed characteristics back into HA and update the current state
        # results will only contain failures, so anything in characteristics
# but not in results was applied successfully - we can just have HA
# reflect the change immediately.
new_entity_state = {}
for aid, iid, value in characteristics:
key = (aid, iid)
# If the key was returned by put_characteristics() then the
# change didn't work
if key in results:
continue
# Otherwise it was accepted and we can apply the change to
# our state
new_entity_state[key] = {"value": value}
self.process_new_events(new_entity_state)
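    # Example (hypothetical) call writing a single characteristic, where aid and
    # iid identify the accessory and the characteristic instance:
    #     await device.put_characteristics([(aid, iid, True)])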
@property
def unique_id(self):
"""
Return a unique id for this accessory or bridge.
This id is random and will change if a device undergoes a hard reset.
"""
return self.pairing_data["AccessoryPairingID"]
@property
def connection_info(self):
"""Return accessory information for the main accessory."""
return get_bridge_information(self.accessories)
@property
def name(self):
"""Name of the bridge accessory."""
return get_accessory_name(self.connection_info) or self.unique_id
|
import asyncio
from datetime import timedelta
import os
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_test_home_assistant
async def test_removing_while_delay_in_progress(tmpdir):
"""Test removing while delay in progress."""
loop = asyncio.get_event_loop()
hass = await async_test_home_assistant(loop)
test_dir = await hass.async_add_executor_job(tmpdir.mkdir, "storage")
with patch.object(storage, "STORAGE_DIR", test_dir):
real_store = storage.Store(hass, 1, "remove_me")
await real_store.async_save({"delay": "no"})
assert await hass.async_add_executor_job(os.path.exists, real_store.path)
real_store.async_delay_save(lambda: {"delay": "yes"}, 1)
await real_store.async_remove()
assert not await hass.async_add_executor_job(os.path.exists, real_store.path)
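        # Firing the delayed-save timer after async_remove() must not recreate
        # the file: the pending delayed save should have been cancelled.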
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert not await hass.async_add_executor_job(os.path.exists, real_store.path)
await hass.async_stop()
|
from __future__ import division
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.resnet import ResBlock
from chainercv.links import PickableSequentialChain
from chainercv import utils
# RGB order
# This is channel wise mean of mean image distributed at
# https://github.com/KaimingHe/deep-residual-networks
_imagenet_mean = np.array(
[123.15163084, 115.90288257, 103.0626238],
dtype=np.float32)[:, np.newaxis, np.newaxis]
class SEResNet(PickableSequentialChain):
"""Base class for SE-ResNet architecture.
This architecture is based on ResNet. A squeeze-and-excitation block is
    applied at the end of each non-identity branch of a residual block. Please
refer to `the original paper <https://arxiv.org/pdf/1709.01507.pdf>`_
for a detailed description of network architecture.
    Similar to :class:`chainercv.links.model.resnet.ResNet`, ImageNet
    pretrained weights are downloaded when the :obj:`pretrained_model`
    argument is :obj:`imagenet`; they were originally distributed at `the
    GitHub repository by one of the paper authors
    <https://github.com/hujie-frank/SENet>`_.
.. seealso::
:class:`chainercv.links.model.resnet.ResNet`
:class:`chainercv.links.connection.SEBlock`
Args:
n_layer (int): The number of layers.
n_class (int): The number of classes. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the number of classes used to train the pretrained model
is used. Otherwise, the number of classes in ILSVRC 2012 dataset
is used.
pretrained_model (string): The destination of the pre-trained
chainer model serialized as a :obj:`.npz` file.
If this is one of the strings described
above, it automatically loads weights stored under a directory
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
where :obj:`$CHAINER_DATASET_ROOT` is set as
:obj:`$HOME/.chainer/dataset` unless you specify another value
by modifying the environment variable.
mean (numpy.ndarray): A mean value. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the mean value used to train the pretrained model is used.
Otherwise, the mean value calculated from ILSVRC 2012 dataset
is used.
initialW (callable): Initializer for the weights of
convolution kernels.
fc_kwargs (dict): Keyword arguments passed to initialize
the :class:`chainer.links.Linear`.
"""
_blocks = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3]
}
_models = {
50: {
'imagenet': {
'param': {'n_class': 1000, 'mean': _imagenet_mean},
'overwritable': {'mean'},
'url': 'https://chainercv-models.preferred.jp/'
'se_resnet50_imagenet_converted_2018_06_25.npz'
},
},
101: {
'imagenet': {
'param': {'n_class': 1000, 'mean': _imagenet_mean},
'overwritable': {'mean'},
'url': 'https://chainercv-models.preferred.jp/'
'se_resnet101_imagenet_converted_2018_06_25.npz'
},
},
152: {
'imagenet': {
'param': {'n_class': 1000, 'mean': _imagenet_mean},
'overwritable': {'mean'},
'url': 'https://chainercv-models.preferred.jp/'
'se_resnet152_imagenet_converted_2018_06_25.npz'
},
}
}
def __init__(self, n_layer,
n_class=None,
pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
blocks = self._blocks[n_layer]
param, path = utils.prepare_pretrained_model(
{'n_class': n_class, 'mean': mean},
pretrained_model, self._models[n_layer],
{'n_class': 1000, 'mean': _imagenet_mean})
self.mean = param['mean']
if initialW is None:
initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
if 'initialW' not in fc_kwargs:
fc_kwargs['initialW'] = initializers.Normal(scale=0.01)
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
initialW = initializers.constant.Zero()
fc_kwargs['initialW'] = initializers.constant.Zero()
kwargs = {
'initialW': initialW, 'stride_first': True, 'add_seblock': True}
super(SEResNet, self).__init__()
with self.init_scope():
self.conv1 = Conv2DBNActiv(None, 64, 7, 2, 3, nobias=True,
initialW=initialW)
self.pool1 = lambda x: F.max_pooling_2d(x, ksize=3, stride=2)
self.res2 = ResBlock(blocks[0], None, 64, 256, 1, **kwargs)
self.res3 = ResBlock(blocks[1], None, 128, 512, 2, **kwargs)
self.res4 = ResBlock(blocks[2], None, 256, 1024, 2, **kwargs)
self.res5 = ResBlock(blocks[3], None, 512, 2048, 2, **kwargs)
self.pool5 = lambda x: F.average(x, axis=(2, 3))
self.fc6 = L.Linear(None, param['n_class'], **fc_kwargs)
self.prob = F.softmax
if path:
chainer.serializers.load_npz(path, self)
class SEResNet50(SEResNet):
"""SE-ResNet-50 Network.
Please consult the documentation for :class:`SEResNet`.
.. seealso::
:class:`chainercv.links.model.senet.SEResNet`
"""
def __init__(self, n_class=None, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SEResNet50, self).__init__(
50, n_class, pretrained_model,
mean, initialW, fc_kwargs)
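# A minimal usage sketch (assumes the converted ImageNet weights are available
# for download); kept as a comment so nothing runs at import time:
#
#     model = SEResNet50(pretrained_model='imagenet')
#     with chainer.using_config('train', False):
#         probs = model(imgs)  # imgs: (N, 3, H, W) float32, RGB, mean-subtracted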
class SEResNet101(SEResNet):
"""SE-ResNet-101 Network.
Please consult the documentation for :class:`SEResNet`.
.. seealso::
:class:`chainercv.links.model.senet.SEResNet`
"""
def __init__(self, n_class=None, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SEResNet101, self).__init__(
101, n_class, pretrained_model,
mean, initialW, fc_kwargs)
class SEResNet152(SEResNet):
"""SE-ResNet-152 Network.
Please consult the documentation for :class:`SEResNet`.
.. seealso::
:class:`chainercv.links.model.senet.SEResNet`
"""
def __init__(self, n_class=None, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SEResNet152, self).__init__(
152, n_class, pretrained_model,
mean, initialW, fc_kwargs)
|
import os
import platform
import pytest
from pathlib import Path
from mne.utils import (set_config, get_config, get_config_path,
set_memmap_min_size, _get_stim_channel, sys_info,
ClosingStringIO)
def test_config(tmpdir):
"""Test mne-python config file support."""
tempdir = str(tmpdir)
key = '_MNE_PYTHON_CONFIG_TESTING'
value = '123456'
value2 = '123'
value3 = Path('/foo/bar')
old_val = os.getenv(key, None)
os.environ[key] = value
assert (get_config(key) == value)
del os.environ[key]
# catch the warning about it being a non-standard config key
assert (len(get_config('')) > 10) # tuple of valid keys
with pytest.warns(RuntimeWarning, match='non-standard'):
set_config(key, None, home_dir=tempdir, set_env=False)
assert (get_config(key, home_dir=tempdir) is None)
pytest.raises(KeyError, get_config, key, raise_error=True)
assert (key not in os.environ)
with pytest.warns(RuntimeWarning, match='non-standard'):
set_config(key, value, home_dir=tempdir, set_env=True)
assert (key in os.environ)
assert (get_config(key, home_dir=tempdir) == value)
with pytest.warns(RuntimeWarning, match='non-standard'):
set_config(key, None, home_dir=tempdir, set_env=True)
assert (key not in os.environ)
with pytest.warns(RuntimeWarning, match='non-standard'):
set_config(key, None, home_dir=tempdir, set_env=True)
assert (key not in os.environ)
if old_val is not None:
os.environ[key] = old_val
# Check serialization from Path to string
with pytest.warns(RuntimeWarning, match='non-standard'):
set_config(key, value3, home_dir=tempdir)
# Check if get_config with key=None returns all config
key = 'MNE_PYTHON_TESTING_KEY'
assert key not in get_config(home_dir=tempdir)
with pytest.warns(RuntimeWarning, match='non-standard'):
set_config(key, value, home_dir=tempdir)
assert get_config(home_dir=tempdir)[key] == value
old_val = os.environ.get(key)
try: # os.environ should take precedence over config file
os.environ[key] = value2
assert get_config(home_dir=tempdir)[key] == value2
finally: # reset os.environ
if old_val is None:
os.environ.pop(key, None)
else:
os.environ[key] = old_val
# Check what happens when we use a corrupted file
json_fname = get_config_path(home_dir=tempdir)
with open(json_fname, 'w') as fid:
fid.write('foo{}')
with pytest.warns(RuntimeWarning, match='not a valid JSON'):
assert key not in get_config(home_dir=tempdir)
with pytest.warns(RuntimeWarning, match='non-standard'):
pytest.raises(RuntimeError, set_config, key, 'true', home_dir=tempdir)
# degenerate conditions
pytest.raises(ValueError, set_memmap_min_size, 1)
pytest.raises(ValueError, set_memmap_min_size, 'foo')
pytest.raises(TypeError, get_config, 1)
pytest.raises(TypeError, set_config, 1)
pytest.raises(TypeError, set_config, 'foo', 1)
pytest.raises(TypeError, _get_stim_channel, 1, None)
pytest.raises(TypeError, _get_stim_channel, [1], None)
def test_sys_info():
"""Test info-showing utility."""
out = ClosingStringIO()
sys_info(fid=out)
out = out.getvalue()
assert ('numpy:' in out)
if platform.system() == 'Darwin':
assert 'Platform: macOS-' in out
|
import unittest.mock
import pytest
from qutebrowser.misc import cmdhistory
from qutebrowser.utils import objreg
HISTORY = ['first', 'second', 'third', 'fourth', 'fifth']
@pytest.fixture
def hist():
return cmdhistory.History(history=HISTORY)
def test_no_history():
hist = cmdhistory.History()
assert hist.history == []
def test_history():
hist = cmdhistory.History(history=HISTORY)
assert hist.history == HISTORY
@pytest.mark.parametrize('tmphist, expected', [(None, False), (HISTORY, True)])
def test_is_browsing(hist, tmphist, expected):
hist._tmphist = tmphist
assert hist.is_browsing() == expected
def test_start_stop(hist):
# We can use is_browsing() because it is tested above
assert not hist.is_browsing()
hist.start('s')
assert hist.is_browsing()
hist.stop()
assert not hist.is_browsing()
def test_start_with_text(hist):
"""Test start with given 'text'."""
hist.start('f')
assert 'first' in hist._tmphist
assert 'fourth' in hist._tmphist
assert 'second' not in hist._tmphist
def test_start_no_text(hist):
"""Test start with no given text."""
hist.start('')
assert list(hist._tmphist) == HISTORY
def test_start_no_items(hist):
"""Test start with no matching text."""
with pytest.raises(cmdhistory.HistoryEmptyError):
hist.start('k')
assert not hist._tmphist
def test_getitem(hist):
"""Test __getitem__."""
assert hist[0] == HISTORY[0]
def test_setitem(hist):
"""Test __setitem__."""
with pytest.raises(TypeError, match="'History' object does not support "
"item assignment"):
hist[0] = 'foo'
def test_not_browsing_error(hist):
"""Test that next/previtem throws a ValueError."""
with pytest.raises(ValueError, match="Currently not browsing "
"history"):
hist.nextitem()
with pytest.raises(ValueError, match="Currently not browsing "
"history"):
hist.previtem()
def test_nextitem_single(hist, monkeypatch):
"""Test nextitem() with valid input."""
hist.start('f')
monkeypatch.setattr(hist._tmphist, 'nextitem', lambda: 'item')
assert hist.nextitem() == 'item'
def test_previtem_single(hist, monkeypatch):
"""Test previtem() with valid input."""
hist.start('f')
monkeypatch.setattr(hist._tmphist, 'previtem', lambda: 'item')
assert hist.previtem() == 'item'
def test_nextitem_previtem_chain(hist):
"""Test a combination of nextitem and previtem statements."""
assert hist.start('f') == 'fifth'
assert hist.previtem() == 'fourth'
assert hist.previtem() == 'first'
assert hist.nextitem() == 'fourth'
def test_nextitem_index_error(hist):
"""Test nextitem() when _tmphist raises an IndexError."""
hist.start('f')
with pytest.raises(cmdhistory.HistoryEndReachedError):
hist.nextitem()
def test_previtem_index_error(hist):
"""Test previtem() when _tmphist raises an IndexError."""
hist.start('f')
with pytest.raises(cmdhistory.HistoryEndReachedError):
for _ in range(10):
hist.previtem()
def test_append_private_mode(hist, config_stub):
"""Test append in private mode."""
hist._private = True
config_stub.val.content.private_browsing = True
hist.append('new item')
assert hist.history == HISTORY
def test_append(hist):
"""Test append outside private mode."""
hist.append('new item')
assert 'new item' in hist.history
hist.history.remove('new item')
assert hist.history == HISTORY
def test_append_empty_history(hist):
"""Test append when .history is empty."""
hist.history = []
hist.append('item')
assert hist[0] == 'item'
def test_append_double(hist):
hist.append('fifth')
# assert that the new 'fifth' is not added
assert hist.history[-2:] == ['fourth', 'fifth']
@pytest.fixture
def init_patch():
yield
objreg.delete('command-history')
def test_init(init_patch, fake_save_manager, data_tmpdir, config_stub):
cmdhistory.init()
fake_save_manager.add_saveable.assert_any_call(
'command-history', unittest.mock.ANY, unittest.mock.ANY)
|
from homeassistant.const import (
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_BAR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TIME_HOURS,
)
from . import DOMAIN, AtagEntity
SENSORS = {
"Outside Temperature": "outside_temp",
"Average Outside Temperature": "tout_avg",
"Weather Status": "weather_status",
"CH Water Pressure": "ch_water_pres",
"CH Water Temperature": "ch_water_temp",
"CH Return Temperature": "ch_return_temp",
"Burning Hours": "burning_hours",
"Flame": "rel_mod_level",
}
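# SENSORS maps a friendly Home Assistant entity name to the Atag report field
# id; AtagSensor passes that id to AtagEntity and uses it to look values up in
# coordinator.data (see the state/icon/device_class properties below).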
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Initialize sensor platform from config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities = []
for sensor in SENSORS:
entities.append(AtagSensor(coordinator, sensor))
async_add_entities(entities)
class AtagSensor(AtagEntity):
"""Representation of a AtagOne Sensor."""
def __init__(self, coordinator, sensor):
"""Initialize Atag sensor."""
super().__init__(coordinator, SENSORS[sensor])
self._name = sensor
@property
def state(self):
"""Return the state of the sensor."""
return self.coordinator.data[self._id].state
@property
def icon(self):
"""Return icon."""
return self.coordinator.data[self._id].icon
@property
def device_class(self):
"""Return deviceclass."""
if self.coordinator.data[self._id].sensorclass in [
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
]:
return self.coordinator.data[self._id].sensorclass
return None
@property
def unit_of_measurement(self):
"""Return measure."""
if self.coordinator.data[self._id].measure in [
PRESSURE_BAR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
PERCENTAGE,
TIME_HOURS,
]:
return self.coordinator.data[self._id].measure
return None
|
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
from mne import (read_cov, read_forward_solution, convert_forward_solution,
pick_types_forward, read_evokeds, pick_types, EpochsArray,
compute_covariance, compute_raw_covariance)
from mne.datasets import testing
from mne.simulation import simulate_sparse_stc, simulate_evoked, add_noise
from mne.io import read_raw_fif
from mne.io.pick import pick_channels_cov
from mne.cov import regularize, whiten_evoked
from mne.utils import run_tests_if_main, catch_logging, check_version
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
"""Test simulation of evoked data."""
raw = read_raw_fif(raw_fname)
fwd = read_forward_solution(fwd_fname)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=False)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
cov = regularize(cov, evoked_template.info)
nave = evoked_template.nave
tmin = -0.1
sfreq = 1000. # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series for 2 dipoles
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
random_state=42)
# Generate noisy evoked data
iir_filter = [1, -0.9]
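    # [1, -0.9] are IIR denominator coefficients, i.e. a first-order
    # autoregressive model, so the simulated noise is temporally correlated
    # rather than white.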
evoked = simulate_evoked(fwd, stc, evoked_template.info, cov,
iir_filter=iir_filter, nave=nave)
assert_array_almost_equal(evoked.times, stc.times)
assert len(evoked.data) == len(fwd['sol']['data'])
assert_equal(evoked.nave, nave)
assert len(evoked.info['projs']) == len(cov['projs'])
evoked_white = whiten_evoked(evoked, cov)
assert abs(evoked_white.data[:, 0].std() - 1.) < 0.1
# make a vertex that doesn't exist in fwd, should throw error
stc_bad = stc.copy()
mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
stc_bad.vertices[0][0] = mv + 1
pytest.raises(ValueError, simulate_evoked, fwd, stc_bad,
evoked_template.info, cov)
evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov,
nave=np.inf)
evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov,
nave=np.inf)
assert_array_equal(evoked_1.data, evoked_2.data)
    cov['names'] = cov.ch_names[:-2]  # Drop names so the channels no longer match.
with pytest.raises(RuntimeError, match='Not all channels present'):
simulate_evoked(fwd, stc, evoked_template.info, cov)
# We don't use an avg ref here, but let's ignore it. Also we know we have
# few samples, and that our epochs are not baseline corrected.
@pytest.mark.filterwarnings('ignore:No average EEG reference present')
@pytest.mark.filterwarnings('ignore:Too few samples')
@pytest.mark.filterwarnings('ignore:Epochs are not baseline corrected')
def test_add_noise():
"""Test noise addition."""
if check_version('numpy', '1.17'):
rng = np.random.default_rng(0)
else:
rng = np.random.RandomState(0)
raw = read_raw_fif(raw_fname)
raw.del_proj()
picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
cov = compute_raw_covariance(raw, picks=picks)
with pytest.raises(RuntimeError, match='to be loaded'):
add_noise(raw, cov)
raw.crop(0, 1).load_data()
with pytest.raises(TypeError, match='Raw, Epochs, or Evoked'):
add_noise(0., cov)
with pytest.raises(TypeError, match='Covariance'):
add_noise(raw, 0.)
# test a no-op (data preserved)
orig_data = raw[:][0]
zero_cov = cov.copy()
zero_cov['data'].fill(0)
add_noise(raw, zero_cov)
new_data = raw[:][0]
assert_allclose(orig_data, new_data, atol=1e-30)
# set to zero to make comparisons easier
raw._data[:] = 0.
epochs = EpochsArray(np.zeros((1, len(raw.ch_names), 100)),
raw.info.copy())
epochs.info['bads'] = []
evoked = epochs.average(picks=np.arange(len(raw.ch_names)))
for inst in (raw, epochs, evoked):
with catch_logging() as log:
add_noise(inst, cov, random_state=rng, verbose=True)
log = log.getvalue()
want = ('to {0}/{1} channels ({0}'
.format(len(cov['names']), len(raw.ch_names)))
assert want in log
if inst is evoked:
inst = EpochsArray(inst.data[np.newaxis], inst.info)
if inst is raw:
cov_new = compute_raw_covariance(inst, picks=picks)
else:
cov_new = compute_covariance(inst)
assert cov['names'] == cov_new['names']
r = np.corrcoef(cov['data'].ravel(), cov_new['data'].ravel())[0, 1]
assert r > 0.99
def test_rank_deficiency():
"""Test adding noise from M/EEG float32 (I/O) cov with projectors."""
# See gh-5940
evoked = read_evokeds(ave_fname, 0, baseline=(None, 0))
evoked.info['bads'] = ['MEG 2443']
evoked.info['lowpass'] = 20 # fake for decim
picks = pick_types(evoked.info, meg=True, eeg=False)
picks = picks[::16]
evoked.pick_channels([evoked.ch_names[pick] for pick in picks])
evoked.info.normalize_proj()
cov = read_cov(cov_fname)
cov['projs'] = []
cov = regularize(cov, evoked.info, rank=None)
cov = pick_channels_cov(cov, evoked.ch_names)
evoked.data[:] = 0
add_noise(evoked, cov)
cov_new = compute_covariance(
EpochsArray(evoked.data[np.newaxis], evoked.info), verbose='error')
assert cov['names'] == cov_new['names']
r = np.corrcoef(cov['data'].ravel(), cov_new['data'].ravel())[0, 1]
assert r > 0.98
run_tests_if_main()
|
import asyncio
import logging
import re
import aiohttp
from aiohttp.hdrs import REFERER, USER_AGENT
import async_timeout
from gtts_token import gtts_token
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import HTTP_OK
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
GOOGLE_SPEECH_URL = "https://translate.google.com/translate_tts"
MESSAGE_SIZE = 148
SUPPORT_LANGUAGES = [
"af",
"sq",
"ar",
"hy",
"bn",
"ca",
"zh",
"zh-cn",
"zh-tw",
"zh-yue",
"hr",
"cs",
"da",
"nl",
"en",
"en-au",
"en-uk",
"en-us",
"eo",
"fi",
"fr",
"de",
"el",
"hi",
"hu",
"is",
"id",
"it",
"ja",
"ko",
"la",
"lv",
"mk",
"no",
"pl",
"pt",
"pt-br",
"ro",
"ru",
"sr",
"sk",
"es",
"es-es",
"es-mx",
"es-us",
"sw",
"sv",
"ta",
"th",
"tr",
"vi",
"cy",
"uk",
"bg-BG",
]
DEFAULT_LANG = "en"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES)}
)
async def async_get_engine(hass, config, discovery_info=None):
"""Set up Google speech component."""
return GoogleProvider(hass, config[CONF_LANG])
class GoogleProvider(Provider):
"""The Google speech API provider."""
def __init__(self, hass, lang):
"""Init Google TTS service."""
self.hass = hass
self._lang = lang
self.headers = {
REFERER: "http://translate.google.com/",
USER_AGENT: (
"Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/47.0.2526.106 Safari/537.36"
),
}
self.name = "Google"
@property
def default_language(self):
"""Return the default language."""
return self._lang
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
async def async_get_tts_audio(self, message, language, options=None):
"""Load TTS from google."""
token = gtts_token.Token()
websession = async_get_clientsession(self.hass)
message_parts = self._split_message_to_parts(message)
data = b""
for idx, part in enumerate(message_parts):
try:
part_token = await self.hass.async_add_executor_job(
token.calculate_token, part
)
except ValueError as err:
# If token seed fetching fails.
_LOGGER.warning(err)
return None, None
url_param = {
"ie": "UTF-8",
"tl": language,
"q": part,
"tk": part_token,
"total": len(message_parts),
"idx": idx,
"client": "tw-ob",
"textlen": len(part),
}
try:
with async_timeout.timeout(10):
request = await websession.get(
GOOGLE_SPEECH_URL, params=url_param, headers=self.headers
)
if request.status != HTTP_OK:
_LOGGER.error(
"Error %d on load URL %s", request.status, request.url
)
return None, None
data += await request.read()
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout for google speech")
return None, None
return "mp3", data
@staticmethod
def _split_message_to_parts(message):
"""Split message into single parts."""
if len(message) <= MESSAGE_SIZE:
return [message]
punc = "!()[]?.,;:"
punc_list = [re.escape(c) for c in punc]
pattern = "|".join(punc_list)
parts = re.split(pattern, message)
def split_by_space(fullstring):
"""Split a string by space."""
if len(fullstring) > MESSAGE_SIZE:
idx = fullstring.rfind(" ", 0, MESSAGE_SIZE)
return [fullstring[:idx]] + split_by_space(fullstring[idx:])
return [fullstring]
msg_parts = []
for part in parts:
msg_parts += split_by_space(part)
return [msg for msg in msg_parts if len(msg) > 0]
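# A minimal sketch (not part of the integration, assumes the module's imports
# are available) showing how the splitter above behaves: long text is first
# broken on punctuation, then any piece longer than MESSAGE_SIZE is split on
# the last space before the limit. The sample sentence is made up purely for
# illustration.
if __name__ == "__main__":
    _sample = (
        "This is a fairly long sentence that easily exceeds the maximum "
        "chunk size used by the Google Translate TTS endpoint, so it has "
        "to be cut into several smaller parts before being requested."
    )
    for _part in GoogleProvider._split_message_to_parts(_sample):
        print(len(_part), repr(_part))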
|
import hangups
from common import run_example
async def query_presence(client, args):
request = hangups.hangouts_pb2.QueryPresenceRequest(
request_header=client.get_request_header(),
participant_id=[
hangups.hangouts_pb2.ParticipantId(gaia_id=args.user_id),
],
field_mask=[
hangups.hangouts_pb2.FIELD_MASK_REACHABLE,
hangups.hangouts_pb2.FIELD_MASK_AVAILABLE,
hangups.hangouts_pb2.FIELD_MASK_MOOD,
hangups.hangouts_pb2.FIELD_MASK_DEVICE,
hangups.hangouts_pb2.FIELD_MASK_LAST_SEEN,
],
)
res = await client.query_presence(request)
print(res)
if __name__ == '__main__':
run_example(query_presence, '--user-id')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = tf.contrib.slim
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,
activation_fn=None, scope='shortcut')
residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
output)
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
    spatial_squeeze: if True, logits is of shape [B, C], if False logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
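# A minimal, hypothetical usage sketch (assumes a TF 1.x / tf.contrib.slim
# environment); it is not part of the library above. It shows the
# fully-convolutional call style described in the resnet_v1() docstring.
if __name__ == '__main__':
  # Dense-prediction (FCN) mode: no classification head, no global pooling,
  # features computed at output_stride 16.
  images = tf.placeholder(tf.float32, [None, 321, 321, 3], name='images')
  with slim.arg_scope(resnet_arg_scope()):
    net, end_points = resnet_v1_50(images, num_classes=None,
                                   global_pool=False, output_stride=16,
                                   is_training=False)
  # With 321x321 inputs and output_stride=16 the feature map is
  # (321 - 1) / 16 + 1 = 21 pixels on each spatial side.
  print(net.get_shape())  # (?, 21, 21, 2048)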
|
import argparse
import json
import sys
from paasta_tools.kubernetes_tools import get_kubernetes_services_running_here_for_nerve
from paasta_tools.marathon_tools import get_marathon_services_running_here_for_nerve
from paasta_tools.marathon_tools import get_puppet_services_running_here_for_nerve
from paasta_tools.utils import DEFAULT_SOA_DIR
def parse_args(argv):
parser = argparse.ArgumentParser(
description="Dumps information about locally running services."
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
soa_dir = args.soa_dir
service_dump = (
get_marathon_services_running_here_for_nerve(cluster=None, soa_dir=soa_dir)
+ get_puppet_services_running_here_for_nerve(soa_dir=soa_dir)
+ get_kubernetes_services_running_here_for_nerve(cluster=None, soa_dir=soa_dir)
)
print(json.dumps(service_dump))
sys.exit(0)
if __name__ == "__main__":
main()
|
import unittest
import numpy as np
from chainer.testing import attr
from chainer import Variable
from chainercv.links import ResNet101
from chainercv.links import ResNet152
from chainercv.links import ResNet50
from chainercv.utils import testing
@testing.parameterize(*(
testing.product_dict(
[
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'res5',
'shapes': (1, 2048, 7, 7), 'n_class': None},
{'pick': ['res2', 'conv1'],
'shapes': ((1, 256, 56, 56), (1, 64, 112, 112)), 'n_class': None},
],
[
{'model_class': ResNet50},
{'model_class': ResNet101},
{'model_class': ResNet152},
],
[
{'arch': 'fb'},
{'arch': 'he'}
]
)
))
class TestResNetCall(unittest.TestCase):
def setUp(self):
self.link = self.model_class(
n_class=self.n_class, pretrained_model=None, arch=self.arch)
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
features = self.link(x)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
@testing.parameterize(*testing.product({
'model': [ResNet50, ResNet101, ResNet152],
'n_class': [None, 500, 1000],
'pretrained_model': ['imagenet'],
    'mean': [None, np.random.uniform(size=(3, 1, 1)).astype(np.float32)],
'arch': ['he', 'fb'],
}))
class TestResNetPretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_class': self.n_class,
'pretrained_model': self.pretrained_model,
'mean': self.mean,
'arch': self.arch,
}
if self.pretrained_model == 'imagenet':
valid = self.n_class in {None, 1000}
if valid:
self.model(**kwargs)
else:
with self.assertRaises(ValueError):
self.model(**kwargs)
testing.run_module(__name__, __file__)
|
import os
import re
import subprocess
import Queue
from Handler import Handler
#
# Constants for RRD file creation.
#
# NOTE: We default to the collectd RRD directory
# simply as a compatibility tool. Users that have
# tools that look in that location and would like
# to switch to Diamond need to make zero changes.
BASEDIR = '/var/lib/collectd/rrd'
METRIC_STEP = 10
BATCH_SIZE = 1
# NOTE: We don't really have a rigorous definition
# for metrics, particularly how often they will be
# reported, etc. Because of this, we have to guess
# at the steps and RRAs used for creation of the
# RRD files. These are fairly sensible defaults,
# and basically allow aggregation up from a single
# datapoint (because the XFF is 0.1, and each step
# aggregates roughly ten of the previous step's rows at most).
#
# Given a METRIC_STEP of 10 seconds, then these will
# represent data for up to the last full year.
RRA_SPECS = [
"RRA:AVERAGE:0.1:1:1200",
"RRA:MIN:0.1:1:1200",
"RRA:MAX:0.1:1:1200",
"RRA:AVERAGE:0.1:7:1200",
"RRA:MIN:0.1:7:1200",
"RRA:MAX:0.1:7:1200",
"RRA:AVERAGE:0.1:50:1200",
"RRA:MIN:0.1:50:1200",
"RRA:MAX:0.1:50:1200",
"RRA:AVERAGE:0.1:223:1200",
"RRA:MIN:0.1:223:1200",
"RRA:MAX:0.1:223:1200",
"RRA:AVERAGE:0.1:2635:1200",
"RRA:MIN:0.1:2635:1200",
"RRA:MAX:0.1:2635:1200",
]
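# A rough sanity check of the retention arithmetic above (assuming the
# default METRIC_STEP of 10 seconds): each RRA keeps 1200 rows, so the
# coarsest one covers roughly
#   10 s * 2635 steps/row * 1200 rows ~= 31,620,000 s ~= 366 days,
# which is where the "last full year" figure in the note above comes from.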
class RRDHandler(Handler):
# NOTE: This handler is fairly loose about locking (none),
# and the reason is because the calls are always protected
# by locking done in the _process and _flush routines.
# If this were to change at some point, we would definitely
# want to be a bit more sensible about how we lock.
#
# We would probably also want to restructure this as a
# consumer and producer so that one thread can continually
# write out data, but that really depends on the design
# at the top level.
def __init__(self, *args, **kwargs):
super(RRDHandler, self).__init__(*args, **kwargs)
self._exists_cache = dict()
self._basedir = self.config['basedir']
self._batch = self.config['batch']
self._step = self.config['step']
self._queues = {}
self._last_update = {}
def get_default_config_help(self):
config = super(RRDHandler, self).get_default_config_help()
config.update({
'basedir': 'The base directory for all RRD files.',
'batch': 'Wait for this many updates before saving to the RRD file',
'step': 'The minimum interval represented in generated RRD files.',
})
return config
def get_default_config(self):
config = super(RRDHandler, self).get_default_config()
config.update({
'basedir': BASEDIR,
'batch': BATCH_SIZE,
'step': METRIC_STEP,
})
return config
def _ensure_exists(self, filename, metric_name, metric_type):
# We're good to go!
if filename in self._exists_cache:
return True
# Does the file already exist?
if os.path.exists(filename):
self._exists_cache[filename] = True
return True
# Attempt the creation.
self._create(filename, metric_name, metric_type)
self._exists_cache[filename] = True
return True
def _create(self, filename, metric_name, metric_type):
# Sanity check the metric name.
if not re.match("^[a-zA-Z0-9_]+$", metric_name):
raise Exception("Invalid metric name: %s" % metric_name)
# Sanity check the metric type.
if metric_type not in ("GAUGE", "COUNTER"):
raise Exception("Unknown metric type: %s" % metric_type)
# Try to create the directory.
# NOTE: If we aren't successful, the check_call()
# will fail anyways so we can do this optimistically.
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
ds_spec = "DS:%s:%s:%d:U:U" % (
metric_name, metric_type, self._step * 2)
rrd_create_cmd = [
"rrdtool", "create", filename,
"--no-overwrite",
"--step", str(self._step),
ds_spec
]
rrd_create_cmd.extend(RRA_SPECS)
subprocess.check_call(rrd_create_cmd, close_fds=True)
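        # For illustration (hypothetical metric name and path), the command
        # built above ends up looking roughly like:
        #   rrdtool create /var/lib/collectd/rrd/host/cpu/user.rrd \
        #       --no-overwrite --step 10 DS:user:GAUGE:20:U:U \
        #       RRA:AVERAGE:0.1:1:1200 ...
        # i.e. a single data source whose heartbeat is twice the step,
        # followed by the fixed list of RRAs.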
def process(self, metric):
# Extract the filename given the metric.
# NOTE: We have to tweak the metric name and limit
# the length to 19 characters for the RRD file format.
collector = metric.getCollectorPath()
metric_name = metric.getMetricPath().replace(".", "_")[:19]
dirname = os.path.join(self._basedir, metric.host, collector)
filename = os.path.join(dirname, metric_name + ".rrd")
# Ensure that there is an RRD file for this metric.
# This is done inline because it's quickly cached and
# we would like to have exceptions related to creating
# the RRD file raised in the main thread.
self._ensure_exists(filename, metric_name, metric.metric_type)
if self._queue(filename, metric.timestamp, metric.value) >= self._batch:
self._flush_queue(filename)
def _queue(self, filename, timestamp, value):
if filename not in self._queues:
queue = Queue.Queue()
self._queues[filename] = queue
else:
queue = self._queues[filename]
queue.put((timestamp, value))
return queue.qsize()
def flush(self):
# Grab all current queues.
for filename in self._queues.keys():
self._flush_queue(filename)
def _flush_queue(self, filename):
queue = self._queues[filename]
# Collect all pending updates.
updates = {}
max_timestamp = 0
while True:
try:
(timestamp, value) = queue.get(block=False)
# RRD only supports granularity at a
# per-second level (not milliseconds, etc.).
timestamp = int(timestamp)
# Remember the latest update done.
last_update = self._last_update.get(filename, 0)
if last_update >= timestamp:
# Yikes. RRDtool won't let us do this.
# We need to drop this update and log a warning.
self.log.warning(
"Dropping update to %s. Too frequent!" % filename)
continue
max_timestamp = max(timestamp, max_timestamp)
# Add this update.
if timestamp not in updates:
updates[timestamp] = []
updates[timestamp].append(value)
except Queue.Empty:
break
# Save the last update time.
self._last_update[filename] = max_timestamp
if len(updates) > 0:
# Construct our command line.
# This will look like <time>:<value1>[:<value2>...]
            # The timestamps must be sorted, and each of the
            # <time> values must be unique (like a snowflake).
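            # For example (hypothetical values), two batched updates would be
            # rendered as ["1500000000:0.5", "1500000010:0.7:0.9"], where the
            # second entry carries two values collected for the same second.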
data_points = map(
lambda (timestamp, values): "%d:%s" %
(timestamp, ":".join(map(str, values))),
sorted(updates.items()))
            # Optimistically update.
# Nothing can really be done if we fail.
rrd_update_cmd = ["rrdupdate", filename, "--"]
rrd_update_cmd.extend(data_points)
self.log.info("update: %s" % str(rrd_update_cmd))
subprocess.call(rrd_update_cmd)
|
import asyncio
from functools import wraps
import logging
import aiohttp
import async_timeout
import attr
from hass_nabucasa import Cloud, auth, thingtalk
from hass_nabucasa.const import STATE_DISCONNECTED
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.alexa import (
entities as alexa_entities,
errors as alexa_errors,
)
from homeassistant.components.google_assistant import helpers as google_helpers
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.websocket_api import const as ws_const
from homeassistant.const import (
HTTP_BAD_GATEWAY,
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import callback
from .const import (
DOMAIN,
PREF_ALEXA_DEFAULT_EXPOSE,
PREF_ALEXA_REPORT_STATE,
PREF_ENABLE_ALEXA,
PREF_ENABLE_GOOGLE,
PREF_GOOGLE_DEFAULT_EXPOSE,
PREF_GOOGLE_REPORT_STATE,
PREF_GOOGLE_SECURE_DEVICES_PIN,
REQUEST_TIMEOUT,
InvalidTrustedNetworks,
InvalidTrustedProxies,
RequireRelink,
)
_LOGGER = logging.getLogger(__name__)
WS_TYPE_STATUS = "cloud/status"
SCHEMA_WS_STATUS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_STATUS}
)
WS_TYPE_SUBSCRIPTION = "cloud/subscription"
SCHEMA_WS_SUBSCRIPTION = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_SUBSCRIPTION}
)
WS_TYPE_HOOK_CREATE = "cloud/cloudhook/create"
SCHEMA_WS_HOOK_CREATE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_HOOK_CREATE, vol.Required("webhook_id"): str}
)
WS_TYPE_HOOK_DELETE = "cloud/cloudhook/delete"
SCHEMA_WS_HOOK_DELETE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_HOOK_DELETE, vol.Required("webhook_id"): str}
)
_CLOUD_ERRORS = {
InvalidTrustedNetworks: (
HTTP_INTERNAL_SERVER_ERROR,
"Remote UI not compatible with 127.0.0.1/::1 as a trusted network.",
),
InvalidTrustedProxies: (
HTTP_INTERNAL_SERVER_ERROR,
"Remote UI not compatible with 127.0.0.1/::1 as trusted proxies.",
),
asyncio.TimeoutError: (
HTTP_BAD_GATEWAY,
"Unable to reach the Home Assistant cloud.",
),
aiohttp.ClientError: (
HTTP_INTERNAL_SERVER_ERROR,
"Error making internal request",
),
}
async def async_setup(hass):
"""Initialize the HTTP API."""
async_register_command = hass.components.websocket_api.async_register_command
async_register_command(WS_TYPE_STATUS, websocket_cloud_status, SCHEMA_WS_STATUS)
async_register_command(
WS_TYPE_SUBSCRIPTION, websocket_subscription, SCHEMA_WS_SUBSCRIPTION
)
async_register_command(websocket_update_prefs)
async_register_command(
WS_TYPE_HOOK_CREATE, websocket_hook_create, SCHEMA_WS_HOOK_CREATE
)
async_register_command(
WS_TYPE_HOOK_DELETE, websocket_hook_delete, SCHEMA_WS_HOOK_DELETE
)
async_register_command(websocket_remote_connect)
async_register_command(websocket_remote_disconnect)
async_register_command(google_assistant_list)
async_register_command(google_assistant_update)
async_register_command(alexa_list)
async_register_command(alexa_update)
async_register_command(alexa_sync)
async_register_command(thingtalk_convert)
hass.http.register_view(GoogleActionsSyncView)
hass.http.register_view(CloudLoginView)
hass.http.register_view(CloudLogoutView)
hass.http.register_view(CloudRegisterView)
hass.http.register_view(CloudResendConfirmView)
hass.http.register_view(CloudForgotPasswordView)
_CLOUD_ERRORS.update(
{
auth.UserNotFound: (HTTP_BAD_REQUEST, "User does not exist."),
auth.UserNotConfirmed: (HTTP_BAD_REQUEST, "Email not confirmed."),
auth.UserExists: (
HTTP_BAD_REQUEST,
"An account with the given email already exists.",
),
auth.Unauthenticated: (HTTP_UNAUTHORIZED, "Authentication failed."),
auth.PasswordChangeRequired: (
HTTP_BAD_REQUEST,
"Password change required.",
),
}
)
def _handle_cloud_errors(handler):
"""Webview decorator to handle auth errors."""
@wraps(handler)
async def error_handler(view, request, *args, **kwargs):
"""Handle exceptions that raise from the wrapped request handler."""
try:
result = await handler(view, request, *args, **kwargs)
return result
except Exception as err: # pylint: disable=broad-except
status, msg = _process_cloud_exception(err, request.path)
return view.json_message(
msg, status_code=status, message_code=err.__class__.__name__.lower()
)
return error_handler
def _ws_handle_cloud_errors(handler):
"""Websocket decorator to handle auth errors."""
@wraps(handler)
async def error_handler(hass, connection, msg):
"""Handle exceptions that raise from the wrapped handler."""
try:
return await handler(hass, connection, msg)
except Exception as err: # pylint: disable=broad-except
err_status, err_msg = _process_cloud_exception(err, msg["type"])
connection.send_error(msg["id"], err_status, err_msg)
return error_handler
def _process_cloud_exception(exc, where):
"""Process a cloud exception."""
err_info = None
for err, value_info in _CLOUD_ERRORS.items():
if isinstance(exc, err):
err_info = value_info
break
if err_info is None:
_LOGGER.exception("Unexpected error processing request for %s", where)
err_info = (HTTP_BAD_GATEWAY, f"Unexpected error: {exc}")
return err_info
class GoogleActionsSyncView(HomeAssistantView):
"""Trigger a Google Actions Smart Home Sync."""
url = "/api/cloud/google_actions/sync"
name = "api:cloud:google_actions/sync"
@_handle_cloud_errors
async def post(self, request):
"""Trigger a Google Actions sync."""
hass = request.app["hass"]
cloud: Cloud = hass.data[DOMAIN]
gconf = await cloud.client.get_google_config()
status = await gconf.async_sync_entities(gconf.agent_user_id)
return self.json({}, status_code=status)
class CloudLoginView(HomeAssistantView):
"""Login to Home Assistant cloud."""
url = "/api/cloud/login"
name = "api:cloud:login"
@_handle_cloud_errors
@RequestDataValidator(
vol.Schema({vol.Required("email"): str, vol.Required("password"): str})
)
async def post(self, request, data):
"""Handle login request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
await cloud.login(data["email"], data["password"])
return self.json({"success": True})
class CloudLogoutView(HomeAssistantView):
"""Log out of the Home Assistant cloud."""
url = "/api/cloud/logout"
name = "api:cloud:logout"
@_handle_cloud_errors
async def post(self, request):
"""Handle logout request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await cloud.logout()
return self.json_message("ok")
class CloudRegisterView(HomeAssistantView):
"""Register on the Home Assistant cloud."""
url = "/api/cloud/register"
name = "api:cloud:register"
@_handle_cloud_errors
@RequestDataValidator(
vol.Schema(
{
vol.Required("email"): str,
vol.Required("password"): vol.All(str, vol.Length(min=6)),
}
)
)
async def post(self, request, data):
"""Handle registration request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await cloud.auth.async_register(data["email"], data["password"])
return self.json_message("ok")
class CloudResendConfirmView(HomeAssistantView):
"""Resend email confirmation code."""
url = "/api/cloud/resend_confirm"
name = "api:cloud:resend_confirm"
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({vol.Required("email"): str}))
async def post(self, request, data):
"""Handle resending confirm email code request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await cloud.auth.async_resend_email_confirm(data["email"])
return self.json_message("ok")
class CloudForgotPasswordView(HomeAssistantView):
"""View to start Forgot Password flow.."""
url = "/api/cloud/forgot_password"
name = "api:cloud:forgot_password"
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({vol.Required("email"): str}))
async def post(self, request, data):
"""Handle forgot password request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await cloud.auth.async_forgot_password(data["email"])
return self.json_message("ok")
@callback
def websocket_cloud_status(hass, connection, msg):
"""Handle request for account info.
Async friendly.
"""
cloud = hass.data[DOMAIN]
connection.send_message(
websocket_api.result_message(msg["id"], _account_data(cloud))
)
def _require_cloud_login(handler):
"""Websocket decorator that requires cloud to be logged in."""
@wraps(handler)
def with_cloud_auth(hass, connection, msg):
"""Require to be logged into the cloud."""
cloud = hass.data[DOMAIN]
if not cloud.is_logged_in:
connection.send_message(
websocket_api.error_message(
msg["id"], "not_logged_in", "You need to be logged in to the cloud."
)
)
return
handler(hass, connection, msg)
return with_cloud_auth
@_require_cloud_login
@websocket_api.async_response
async def websocket_subscription(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
response = await cloud.fetch_subscription_info()
    if response.status != HTTP_OK:
        connection.send_message(
            websocket_api.error_message(
                msg["id"], "request_failed", "Failed to request subscription"
            )
        )
        return
data = await response.json()
# Check if a user is subscribed but local info is outdated
# In that case, let's refresh and reconnect
if data.get("provider") and not cloud.is_connected:
_LOGGER.debug("Found disconnected account with valid subscriotion, connecting")
await cloud.auth.async_renew_access_token()
# Cancel reconnect in progress
if cloud.iot.state != STATE_DISCONNECTED:
await cloud.iot.disconnect()
hass.async_create_task(cloud.iot.connect())
connection.send_message(websocket_api.result_message(msg["id"], data))
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "cloud/update_prefs",
vol.Optional(PREF_ENABLE_GOOGLE): bool,
vol.Optional(PREF_ENABLE_ALEXA): bool,
vol.Optional(PREF_ALEXA_REPORT_STATE): bool,
vol.Optional(PREF_GOOGLE_REPORT_STATE): bool,
vol.Optional(PREF_ALEXA_DEFAULT_EXPOSE): [str],
vol.Optional(PREF_GOOGLE_DEFAULT_EXPOSE): [str],
vol.Optional(PREF_GOOGLE_SECURE_DEVICES_PIN): vol.Any(None, str),
}
)
async def websocket_update_prefs(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop("id")
changes.pop("type")
# If we turn alexa linking on, validate that we can fetch access token
if changes.get(PREF_ALEXA_REPORT_STATE):
try:
with async_timeout.timeout(10):
await cloud.client.alexa_config.async_get_access_token()
except asyncio.TimeoutError:
connection.send_error(
msg["id"], "alexa_timeout", "Timeout validating Alexa access token."
)
return
except (alexa_errors.NoTokenAvailable, RequireRelink):
connection.send_error(
msg["id"],
"alexa_relink",
"Please go to the Alexa app and re-link the Home Assistant "
"skill and then try to enable state reporting.",
)
return
await cloud.client.prefs.async_update(**changes)
connection.send_message(websocket_api.result_message(msg["id"]))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_create(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
hook = await cloud.cloudhooks.async_create(msg["webhook_id"], False)
connection.send_message(websocket_api.result_message(msg["id"], hook))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_delete(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
await cloud.cloudhooks.async_delete(msg["webhook_id"])
connection.send_message(websocket_api.result_message(msg["id"]))
def _account_data(cloud):
"""Generate the auth data JSON response."""
if not cloud.is_logged_in:
return {"logged_in": False, "cloud": STATE_DISCONNECTED}
claims = cloud.claims
client = cloud.client
remote = cloud.remote
# Load remote certificate
if remote.certificate:
certificate = attr.asdict(remote.certificate)
else:
certificate = None
return {
"logged_in": True,
"email": claims["email"],
"cloud": cloud.iot.state,
"prefs": client.prefs.as_dict(),
"google_entities": client.google_user_config["filter"].config,
"alexa_entities": client.alexa_user_config["filter"].config,
"remote_domain": remote.instance_domain,
"remote_connected": remote.is_connected,
"remote_certificate": certificate,
}
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/remote/connect"})
async def websocket_remote_connect(hass, connection, msg):
"""Handle request for connect remote."""
cloud = hass.data[DOMAIN]
await cloud.client.prefs.async_update(remote_enabled=True)
await cloud.remote.connect()
connection.send_result(msg["id"], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/remote/disconnect"})
async def websocket_remote_disconnect(hass, connection, msg):
"""Handle request for disconnect remote."""
cloud = hass.data[DOMAIN]
await cloud.client.prefs.async_update(remote_enabled=False)
await cloud.remote.disconnect()
connection.send_result(msg["id"], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/google_assistant/entities"})
async def google_assistant_list(hass, connection, msg):
"""List all google assistant entities."""
cloud = hass.data[DOMAIN]
gconf = await cloud.client.get_google_config()
entities = google_helpers.async_get_entities(hass, gconf)
result = []
for entity in entities:
result.append(
{
"entity_id": entity.entity_id,
"traits": [trait.name for trait in entity.traits()],
"might_2fa": entity.might_2fa_traits(),
}
)
connection.send_result(msg["id"], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command(
{
"type": "cloud/google_assistant/entities/update",
"entity_id": str,
vol.Optional("should_expose"): vol.Any(None, bool),
vol.Optional("override_name"): str,
vol.Optional("aliases"): [str],
vol.Optional("disable_2fa"): bool,
}
)
async def google_assistant_update(hass, connection, msg):
"""Update google assistant config."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop("type")
changes.pop("id")
await cloud.client.prefs.async_update_google_entity_config(**changes)
connection.send_result(
msg["id"], cloud.client.prefs.google_entity_configs.get(msg["entity_id"])
)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/alexa/entities"})
async def alexa_list(hass, connection, msg):
"""List all alexa entities."""
cloud = hass.data[DOMAIN]
entities = alexa_entities.async_get_entities(hass, cloud.client.alexa_config)
result = []
for entity in entities:
result.append(
{
"entity_id": entity.entity_id,
"display_categories": entity.default_display_categories(),
"interfaces": [ifc.name() for ifc in entity.interfaces()],
}
)
connection.send_result(msg["id"], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command(
{
"type": "cloud/alexa/entities/update",
"entity_id": str,
vol.Optional("should_expose"): vol.Any(None, bool),
}
)
async def alexa_update(hass, connection, msg):
"""Update alexa entity config."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop("type")
changes.pop("id")
await cloud.client.prefs.async_update_alexa_entity_config(**changes)
connection.send_result(
msg["id"], cloud.client.prefs.alexa_entity_configs.get(msg["entity_id"])
)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command({"type": "cloud/alexa/sync"})
async def alexa_sync(hass, connection, msg):
"""Sync with Alexa."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(10):
try:
success = await cloud.client.alexa_config.async_sync_entities()
except alexa_errors.NoTokenAvailable:
connection.send_error(
msg["id"],
"alexa_relink",
"Please go to the Alexa app and re-link the Home Assistant skill.",
)
return
if success:
connection.send_result(msg["id"])
else:
connection.send_error(msg["id"], ws_const.ERR_UNKNOWN_ERROR, "Unknown error")
@websocket_api.async_response
@websocket_api.websocket_command({"type": "cloud/thingtalk/convert", "query": str})
async def thingtalk_convert(hass, connection, msg):
"""Convert a query."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(10):
try:
connection.send_result(
msg["id"], await thingtalk.async_convert(cloud, msg["query"])
)
except thingtalk.ThingTalkConversionError as err:
connection.send_error(msg["id"], ws_const.ERR_UNKNOWN_ERROR, str(err))
|
import io
import logging
import os
import sys
import time
from PIL import Image, ImageDraw, UnidentifiedImageError
import numpy as np
import tensorflow as tf # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
DOMAIN = "tensorflow"
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = "matches"
ATTR_SUMMARY = "summary"
ATTR_TOTAL_MATCHES = "total_matches"
ATTR_PROCESS_TIME = "process_time"
CONF_AREA = "area"
CONF_BOTTOM = "bottom"
CONF_CATEGORIES = "categories"
CONF_CATEGORY = "category"
CONF_FILE_OUT = "file_out"
CONF_GRAPH = "graph"
CONF_LABELS = "labels"
CONF_LABEL_OFFSET = "label_offset"
CONF_LEFT = "left"
CONF_MODEL = "model"
CONF_MODEL_DIR = "model_dir"
CONF_RIGHT = "right"
CONF_TOP = "top"
AREA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_BOTTOM, default=1): cv.small_float,
vol.Optional(CONF_LEFT, default=0): cv.small_float,
vol.Optional(CONF_RIGHT, default=1): cv.small_float,
vol.Optional(CONF_TOP, default=0): cv.small_float,
}
)
CATEGORY_SCHEMA = vol.Schema(
{vol.Required(CONF_CATEGORY): cv.string, vol.Optional(CONF_AREA): AREA_SCHEMA}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_FILE_OUT, default=[]): vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema(
{
vol.Required(CONF_GRAPH): cv.isdir,
vol.Optional(CONF_AREA): AREA_SCHEMA,
vol.Optional(CONF_CATEGORIES, default=[]): vol.All(
cv.ensure_list, [vol.Any(cv.string, CATEGORY_SCHEMA)]
),
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_LABEL_OFFSET, default=1): int,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
}
),
}
)
def get_model_detection_function(model):
"""Get a tf.function for detection."""
@tf.function
def detect_fn(image):
"""Detect objects in image."""
image, shapes = model.preprocess(image)
prediction_dict = model.predict(image, shapes)
detections = model.postprocess(prediction_dict, shapes)
return detections
return detect_fn
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the TensorFlow image processing platform."""
model_config = config[CONF_MODEL]
model_dir = model_config.get(CONF_MODEL_DIR) or hass.config.path("tensorflow")
labels = model_config.get(CONF_LABELS) or hass.config.path(
"tensorflow", "object_detection", "data", "mscoco_label_map.pbtxt"
)
checkpoint = os.path.join(model_config[CONF_GRAPH], "checkpoint")
pipeline_config = os.path.join(model_config[CONF_GRAPH], "pipeline.config")
# Make sure locations exist
if (
not os.path.isdir(model_dir)
or not os.path.isdir(checkpoint)
or not os.path.exists(pipeline_config)
or not os.path.exists(labels)
):
_LOGGER.error("Unable to locate tensorflow model or label map")
return
# append custom model path to sys.path
sys.path.append(model_dir)
try:
# Verify that the TensorFlow Object Detection API is pre-installed
# These imports shouldn't be moved to the top, because they depend on code from the model_dir.
# (The model_dir is created during the manual setup process. See integration docs.)
# pylint: disable=import-outside-toplevel
from object_detection.builders import model_builder
from object_detection.utils import config_util, label_map_util
except ImportError:
_LOGGER.error(
"No TensorFlow Object Detection library found! Install or compile "
"for your system following instructions here: "
"https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2.md#installation"
)
return
try:
# Display warning that PIL will be used if no OpenCV is found.
import cv2 # noqa: F401 pylint: disable=unused-import, import-outside-toplevel
except ImportError:
_LOGGER.warning(
"No OpenCV library found. TensorFlow will process image with "
"PIL at reduced resolution"
)
hass.data[DOMAIN] = {CONF_MODEL: None}
def tensorflow_hass_start(_event):
"""Set up TensorFlow model on hass start."""
start = time.perf_counter()
# Load pipeline config and build a detection model
pipeline_configs = config_util.get_configs_from_pipeline_file(pipeline_config)
detection_model = model_builder.build(
model_config=pipeline_configs["model"], is_training=False
)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(checkpoint, "ckpt-0")).expect_partial()
_LOGGER.debug(
"Model checkpoint restore took %d seconds", time.perf_counter() - start
)
model = get_model_detection_function(detection_model)
# Preload model cache with empty image tensor
inp = np.zeros([2160, 3840, 3], dtype=np.uint8)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(inp, dtype=tf.float32)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]
# Run inference
model(input_tensor)
_LOGGER.debug("Model load took %d seconds", time.perf_counter() - start)
hass.data[DOMAIN][CONF_MODEL] = model
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, tensorflow_hass_start)
category_index = label_map_util.create_category_index_from_labelmap(
labels, use_display_name=True
)
entities = []
for camera in config[CONF_SOURCE]:
entities.append(
TensorFlowImageProcessor(
hass,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
category_index,
config,
)
)
add_entities(entities)
class TensorFlowImageProcessor(ImageProcessingEntity):
"""Representation of an TensorFlow image processor."""
def __init__(
self,
hass,
camera_entity,
name,
category_index,
config,
):
"""Initialize the TensorFlow entity."""
model_config = config.get(CONF_MODEL)
self.hass = hass
self._camera_entity = camera_entity
if name:
self._name = name
else:
self._name = "TensorFlow {}".format(split_entity_id(camera_entity)[1])
self._category_index = category_index
self._min_confidence = config.get(CONF_CONFIDENCE)
self._file_out = config.get(CONF_FILE_OUT)
# handle categories and specific detection areas
self._label_id_offset = model_config.get(CONF_LABEL_OFFSET)
categories = model_config.get(CONF_CATEGORIES)
self._include_categories = []
self._category_areas = {}
for category in categories:
if isinstance(category, dict):
category_name = category.get(CONF_CATEGORY)
category_area = category.get(CONF_AREA)
self._include_categories.append(category_name)
self._category_areas[category_name] = [0, 0, 1, 1]
if category_area:
self._category_areas[category_name] = [
category_area.get(CONF_TOP),
category_area.get(CONF_LEFT),
category_area.get(CONF_BOTTOM),
category_area.get(CONF_RIGHT),
]
else:
self._include_categories.append(category)
self._category_areas[category] = [0, 0, 1, 1]
# Handle global detection area
self._area = [0, 0, 1, 1]
area_config = model_config.get(CONF_AREA)
if area_config:
self._area = [
area_config.get(CONF_TOP),
area_config.get(CONF_LEFT),
area_config.get(CONF_BOTTOM),
area_config.get(CONF_RIGHT),
]
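        # Areas are stored as [top, left, bottom, right] in relative image
        # coordinates, e.g. a hypothetical [0.1, 0.1, 0.9, 0.9] keeps only
        # detections whose boxes fall entirely inside the central 80% of the
        # frame; [0, 0, 1, 1] means the whole image.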
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
self._process_time = 0
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
ATTR_SUMMARY: {
category: len(values) for category, values in self._matches.items()
},
ATTR_TOTAL_MATCHES: self._total_matches,
ATTR_PROCESS_TIME: self._process_time,
}
def _save_image(self, image, matches, paths):
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
img_width, img_height = img.size
draw = ImageDraw.Draw(img)
# Draw custom global region/area
if self._area != [0, 0, 1, 1]:
draw_box(
draw, self._area, img_width, img_height, "Detection Area", (0, 255, 255)
)
for category, values in matches.items():
# Draw custom category regions/areas
if category in self._category_areas and self._category_areas[category] != [
0,
0,
1,
1,
]:
label = f"{category.capitalize()} Detection Area"
draw_box(
draw,
self._category_areas[category],
img_width,
img_height,
label,
(0, 255, 0),
)
# Draw detected objects
for instance in values:
label = "{} {:.1f}%".format(category, instance["score"])
draw_box(
draw, instance["box"], img_width, img_height, label, (255, 255, 0)
)
for path in paths:
_LOGGER.info("Saving results image to %s", path)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path), exist_ok=True)
img.save(path)
def process_image(self, image):
"""Process the image."""
model = self.hass.data[DOMAIN][CONF_MODEL]
if not model:
_LOGGER.debug("Model not yet ready.")
return
start = time.perf_counter()
try:
import cv2 # pylint: disable=import-error, import-outside-toplevel
# pylint: disable=no-member
img = cv2.imdecode(np.asarray(bytearray(image)), cv2.IMREAD_UNCHANGED)
inp = img[:, :, [2, 1, 0]] # BGR->RGB
inp_expanded = inp.reshape(1, inp.shape[0], inp.shape[1], 3)
except ImportError:
try:
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Unable to process image, bad data")
return
img.thumbnail((460, 460), Image.ANTIALIAS)
img_width, img_height = img.size
inp = (
np.array(img.getdata())
.reshape((img_height, img_width, 3))
.astype(np.uint8)
)
inp_expanded = np.expand_dims(inp, axis=0)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(inp_expanded, dtype=tf.float32)
detections = model(input_tensor)
boxes = detections["detection_boxes"][0].numpy()
scores = detections["detection_scores"][0].numpy()
classes = (
detections["detection_classes"][0].numpy() + self._label_id_offset
).astype(int)
matches = {}
total_matches = 0
for box, score, obj_class in zip(boxes, scores, classes):
score = score * 100
boxes = box.tolist()
# Exclude matches below min confidence value
if score < self._min_confidence:
continue
# Exclude matches outside global area definition
if (
boxes[0] < self._area[0]
or boxes[1] < self._area[1]
or boxes[2] > self._area[2]
or boxes[3] > self._area[3]
):
continue
category = self._category_index[obj_class]["name"]
# Exclude unlisted categories
if self._include_categories and category not in self._include_categories:
continue
# Exclude matches outside category specific area definition
if self._category_areas and (
boxes[0] < self._category_areas[category][0]
or boxes[1] < self._category_areas[category][1]
or boxes[2] > self._category_areas[category][2]
or boxes[3] > self._category_areas[category][3]
):
continue
# If we got here, we should include it
if category not in matches:
matches[category] = []
matches[category].append({"score": float(score), "box": boxes})
total_matches += 1
# Save Images
if total_matches and self._file_out:
paths = []
for path_template in self._file_out:
if isinstance(path_template, template.Template):
paths.append(
path_template.render(camera_entity=self._camera_entity)
)
else:
paths.append(path_template)
self._save_image(image, matches, paths)
self._matches = matches
self._total_matches = total_matches
self._process_time = time.perf_counter() - start
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.kodi import DOMAIN
from homeassistant.components.media_player.const import DOMAIN as MP_DOMAIN
from homeassistant.setup import async_setup_component
from . import init_integration
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture
async def kodi_media_player(hass):
"""Get a kodi media player."""
await init_integration(hass)
return f"{MP_DOMAIN}.name"
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a kodi."""
config_entry = MockConfigEntry(domain=DOMAIN, data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, "host", 1234)},
)
entity_reg.async_get_or_create(MP_DOMAIN, DOMAIN, "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": f"{MP_DOMAIN}.kodi_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": f"{MP_DOMAIN}.kodi_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls, kodi_media_player):
"""Test for turn_on and turn_off triggers firing."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": kodi_media_player,
"type": "turn_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": ("turn_on - {{ trigger.entity_id }}")
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": kodi_media_player,
"type": "turn_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": ("turn_off - {{ trigger.entity_id }}")
},
},
},
]
},
)
await hass.async_block_till_done()
await hass.services.async_call(
MP_DOMAIN,
"turn_on",
{"entity_id": kodi_media_player},
blocking=True,
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == f"turn_on - {kodi_media_player}"
await hass.services.async_call(
MP_DOMAIN,
"turn_off",
{"entity_id": kodi_media_player},
blocking=True,
)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == f"turn_off - {kodi_media_player}"
|
import datetime
import json
import numpy as np
from qstrader import settings
import qstrader.statistics.performance as perf
class JSONStatistics(object):
"""
Standalone class to output basic backtesting statistics
into a JSON file format.
Parameters
----------
    equity_curve : `pd.DataFrame`
        The equity curve DataFrame indexed by date-time.
    target_allocations : `pd.DataFrame`
        The target allocations DataFrame indexed by date-time, with one
        column of target weights per asset.
strategy_id : `str`, optional
The optional ID string for the strategy to pass to
the statistics dict.
strategy_name : `str`, optional
The optional name string for the strategy to pass to
the statistics dict.
benchmark_curve : `pd.DataFrame`, optional
The (optional) equity curve DataFrame for the benchmark
indexed by time.
benchmark_id : `str`, optional
The optional ID string for the benchmark to pass to
the statistics dict.
benchmark_name : `str`, optional
The optional name string for the benchmark to pass to
the statistics dict.
periods : `int`, optional
The number of periods to use for Sharpe ratio calculation.
output_filename : `str`
The filename to output the JSON statistics dictionary to.
"""
def __init__(
self,
equity_curve,
target_allocations,
strategy_id=None,
strategy_name=None,
benchmark_curve=None,
benchmark_id=None,
benchmark_name=None,
periods=252,
output_filename='statistics.json'
):
self.equity_curve = equity_curve
self.target_allocations = target_allocations
self.strategy_id = strategy_id
self.strategy_name = strategy_name
self.benchmark_curve = benchmark_curve
self.benchmark_id = benchmark_id
self.benchmark_name = benchmark_name
self.periods = periods
self.output_filename = output_filename
self.statistics = self._create_full_statistics()
@staticmethod
def _series_to_tuple_list(series):
"""
Converts Pandas Series indexed by date-time into
list of tuples indexed by milliseconds since epoch.
Parameters
----------
series : `pd.Series`
The Pandas Series to be converted.
Returns
-------
`list[tuple]`
The list of epoch-indexed tuple values.
"""
return [
(
int(
datetime.datetime.combine(
k, datetime.datetime.min.time()
).timestamp() * 1000.0
), v if not np.isnan(v) else 0.0
)
for k, v in series.to_dict().items()
]
@staticmethod
def _dataframe_to_column_list(df):
"""
        Converts a Pandas DataFrame indexed by date-time into a list of
        per-column dicts, each containing the column name and its values
        as tuples indexed by milliseconds since epoch.
Parameters
----------
df : `pd.DataFrame`
The Pandas DataFrame to be converted.
Returns
-------
        `list[dict]`
            The list of column dicts of epoch-indexed tuple values.
"""
col_list = []
for k, v in df.to_dict().items():
name = k.replace('EQ:', '')
date_val_tups = [
(
int(
datetime.datetime.combine(
date_key, datetime.datetime.min.time()
).timestamp() * 1000.0
), date_val if not np.isnan(date_val) else 0.0
)
for date_key, date_val in v.items()
]
col_list.append({'name': name, 'data': date_val_tups})
return col_list
@staticmethod
def _calculate_returns(curve):
"""
Appends returns and cumulative returns to the supplied equity
curve DataFrame.
Parameters
----------
curve : `pd.DataFrame`
The equity curve DataFrame.
"""
curve['Returns'] = curve['Equity'].pct_change().fillna(0.0)
curve['CumReturns'] = np.exp(np.log(1 + curve['Returns']).cumsum())
def _calculate_monthly_aggregated_returns(self, returns):
"""
Calculate the monthly aggregated returns as a list of tuples,
with the first entry a further tuple of (year, month) and the
second entry the returns. 0% -> 0.0, 100% -> 1.0
Parameters
----------
returns : `pd.Series`
The Series of daily returns values.
Returns
-------
`list[tuple]`
The list of tuple-based returns: [((year, month), return)]
"""
month_returns = perf.aggregate_returns(returns, 'monthly')
return list(zip(month_returns.index, month_returns))
def _calculate_monthly_aggregated_returns_hc(self, returns):
"""
Calculate the monthly aggregated returns in the format
utilised by Highcharts. 0% -> 0.0, 100% -> 100.0
Parameters
----------
returns : `pd.Series`
The Series of daily returns values.
Returns
-------
`list[list]`
The list of list-based returns: [[month, year, return]]
"""
month_returns = perf.aggregate_returns(returns, 'monthly')
data = []
years = month_returns.index.levels[0].tolist()
years_range = range(0, len(years))
months_range = range(0, 12)
for month in months_range:
for year in years_range:
try:
data.append([month, year, 100.0 * month_returns.loc[(years[year], month + 1)]])
except KeyError: # Truncated year, so no data available
pass
return data
def _calculate_yearly_aggregated_returns(self, returns):
"""
Calculate the yearly aggregated returns as a list of tuples,
with the first entry being the year integer and the
second entry the returns. 0% -> 0.0, 100% -> 1.0
Parameters
----------
returns : `pd.Series`
The Series of daily returns values.
Returns
-------
`list[tuple]`
The list of tuple-based returns: [(year, return)]
"""
year_returns = perf.aggregate_returns(returns, 'yearly')
return list(zip(year_returns.index, year_returns))
def _calculate_yearly_aggregated_returns_hc(self, returns):
"""
Calculate the yearly aggregated returns in the format
utilised by Highcharts. 0% -> 0.0, 100% -> 100.0
Parameters
----------
        returns : `pd.Series`
            The Series of daily returns values.
Returns
-------
`list[float]`
The list of returns.
"""
year_returns = self._calculate_yearly_aggregated_returns(returns)
return [year[1] * 100.0 for year in year_returns]
def _calculate_returns_quantiles_dict(self, returns):
"""
Creates a dictionary with quantiles for the
provided returns series.
Parameters
----------
returns : `pd.Series` or `list[float]`
The Series/list of returns values.
Returns
-------
`dict{str: float}`
The quantiles of the provided returns series.
"""
return {
'min': np.min(returns),
'lq': np.percentile(returns, 25),
'med': np.median(returns),
'uq': np.percentile(returns, 75),
'max': np.max(returns)
}
def _calculate_returns_quantiles(self, daily_returns):
"""
Creates a dict-of-dicts with quantiles for the
daily, monthly and yearly returns series.
Parameters
----------
daily_returns : `pd.Series`
The Series of daily returns values.
Returns
-------
`dict{str: dict{str: float}}`
The quantiles of the daily, monthly and yearly returns.
"""
monthly_returns = [m[1] for m in self._calculate_monthly_aggregated_returns(daily_returns)]
yearly_returns = [y[1] for y in self._calculate_yearly_aggregated_returns(daily_returns)]
return {
'daily': self._calculate_returns_quantiles_dict(daily_returns),
'monthly': self._calculate_returns_quantiles_dict(monthly_returns),
'yearly': self._calculate_returns_quantiles_dict(yearly_returns)
}
def _calculate_returns_quantiles_hc(self, returns_quantiles):
"""
Convert the returns quantiles dict-of-dicts into
a format suitable for Highcharts boxplots.
Parameters
----------
        returns_quantiles : `dict{str: dict{str: float}}`
The quantiles of the daily, monthly and yearly returns.
Returns
-------
`list[list[float]]`
The list-of-lists of return quantiles (in 0-100 percent terms).
"""
percentiles = ['min', 'lq', 'med', 'uq', 'max']
return [
[returns_quantiles['daily'][stat] * 100.0 for stat in percentiles],
[returns_quantiles['monthly'][stat] * 100.0 for stat in percentiles],
[returns_quantiles['yearly'][stat] * 100.0 for stat in percentiles]
]
def _calculate_statistics(self, curve):
"""
Creates a dictionary of various statistics associated with
the backtest of a trading strategy via a supplied equity curve.
All Pandas Series indexed by date-time are converted into
milliseconds since epoch representation.
Parameters
----------
curve : `pd.DataFrame`
The equity curve DataFrame.
Returns
-------
`dict`
The statistics dictionary.
"""
stats = {}
# Drawdown, max drawdown, max drawdown duration
dd_s, max_dd, dd_dur = perf.create_drawdowns(curve['CumReturns'])
# Equity curve and returns
stats['equity_curve'] = JSONStatistics._series_to_tuple_list(curve['Equity'])
stats['returns'] = JSONStatistics._series_to_tuple_list(curve['Returns'])
stats['cum_returns'] = JSONStatistics._series_to_tuple_list(curve['CumReturns'])
# Month/year aggregated returns
stats['monthly_agg_returns'] = self._calculate_monthly_aggregated_returns(curve['Returns'])
stats['monthly_agg_returns_hc'] = self._calculate_monthly_aggregated_returns_hc(curve['Returns'])
stats['yearly_agg_returns'] = self._calculate_yearly_aggregated_returns(curve['Returns'])
stats['yearly_agg_returns_hc'] = self._calculate_yearly_aggregated_returns_hc(curve['Returns'])
# Returns quantiles
stats['returns_quantiles'] = self._calculate_returns_quantiles(curve['Returns'])
stats['returns_quantiles_hc'] = self._calculate_returns_quantiles_hc(stats['returns_quantiles'])
# Drawdown statistics
stats['drawdowns'] = JSONStatistics._series_to_tuple_list(dd_s)
stats['max_drawdown'] = max_dd
stats['max_drawdown_duration'] = dd_dur
# Performance
stats['mean_returns'] = np.mean(curve['Returns'])
stats['stdev_returns'] = np.std(curve['Returns'])
stats['cagr'] = perf.create_cagr(curve['CumReturns'], self.periods)
stats['annualised_vol'] = np.std(curve['Returns']) * np.sqrt(self.periods)
stats['sharpe'] = perf.create_sharpe_ratio(curve['Returns'], self.periods)
stats['sortino'] = perf.create_sortino_ratio(curve['Returns'], self.periods)
return stats
def _calculate_allocations(self, allocations):
"""
"""
return JSONStatistics._dataframe_to_column_list(allocations)
def _create_full_statistics(self):
"""
Create the 'full' statistics dictionary, which has an entry for the
strategy and an optional entry for any supplied benchmark.
Returns
-------
`dict`
The strategy and (optional) benchmark statistics dictionary.
"""
full_stats = {}
JSONStatistics._calculate_returns(self.equity_curve)
full_stats['strategy'] = self._calculate_statistics(self.equity_curve)
full_stats['strategy']['target_allocations'] = self._calculate_allocations(
self.target_allocations
)
if self.benchmark_curve is not None:
JSONStatistics._calculate_returns(self.benchmark_curve)
full_stats['benchmark'] = self._calculate_statistics(self.benchmark_curve)
if self.strategy_id is not None:
full_stats['strategy_id'] = self.strategy_id
if self.strategy_name is not None:
full_stats['strategy_name'] = self.strategy_name
if self.benchmark_id is not None:
full_stats['benchmark_id'] = self.benchmark_id
if self.benchmark_name is not None:
full_stats['benchmark_name'] = self.benchmark_name
return full_stats
def to_file(self):
"""
Outputs the statistics dictionary to a JSON file.
"""
if settings.PRINT_EVENTS:
print('Outputting JSON results to "%s"...' % self.output_filename)
with open(self.output_filename, 'w') as outfile:
json.dump(self.statistics, outfile)
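# A minimal usage sketch (``strategy_equity`` and ``strategy_allocations`` stand
# in for the equity curve and target allocation DataFrames produced by a
# backtest session):
#
#   stats = JSONStatistics(
#       equity_curve=strategy_equity,
#       target_allocations=strategy_allocations,
#       strategy_name='Sixty-Forty',
#       output_filename='statistics.json'
#   )
#   stats.to_file()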
|
import pandas as pd
from urllib.request import urlopen
import scattertext as st
def main():
    shisei = _parse_gutenberg('http://www.gutenberg.org/files/31617/31617-0.txt')
    horadanshaku = _parse_gutenberg('http://www.gutenberg.org/files/34084/34084-0.txt')
df = pd.DataFrame({'text': [shisei, horadanshaku],
'title': ['Shisei', 'Horadanshaku tabimiyage'],
'author': ['Akutagawa Ryunosuke', 'Kuni Sasaki']})
df['text'] = df['text'].apply(st.japanese_nlp)
corpus = st.CorpusFromParsedDocuments(df,
category_col='title',
parsed_col='text').build()
html = st.produce_scattertext_explorer(corpus,
category='Shisei',
category_name='Shisei',
not_category_name='Horadanshaku tabimiyage',
minimum_term_frequency=5,
width_in_pixels=1000,
metadata=df['title'] + ' by ' + df['author'],
asian_mode=True)
open('./demo_japanese.html', 'w').write(html)
print('Open ./demo_japanese.html in Chrome or Firefox.')
def _parse_gutenberg(url):
return (urlopen(url)
.read()
.decode('utf-8')
.split("Transcriber's Notes")[0]
.split('-------------------------------------------------------')[-1])
if __name__ == '__main__':
main()
|
from __future__ import print_function
import argparse
import os
import sys
import io
import six
_stash = globals()["_stash"]
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('file', nargs='?', help='the file to be pasted')
ns = ap.parse_args(args)
status = 0
if not hasattr(_stash, "libdist"):
print(_stash.text_color("Error: libdist not loaded.", "red"))
sys.exit(1)
content = _stash.libdist.clipboard_get()
if ns.file:
if os.path.exists(ns.file):
print(_stash.text_color("pbpaste: {}: file exists".format(ns.file), "red"), file=sys.stderr)
status = 1
else:
try:
if isinstance(content, six.binary_type):
with io.open(ns.file, 'wb') as f:
f.write(content)
else:
with io.open(ns.file, "w", encoding="utf-8") as f:
f.write(content)
except Exception as err:
print("pbpaste: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
status = 1
else:
print(content, end="")
sys.exit(status)
if __name__ == "__main__":
main(sys.argv[1:])
|
import diamond.collector
try:
import psycopg2
    import psycopg2.extensions
    import psycopg2.extras
    psycopg2  # workaround for pyflakes issue #13
except ImportError:
psycopg2 = None
class SlonyCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(SlonyCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname',
'user': 'Username',
'password': 'Password',
'port': 'Port number',
'slony_node_string': 'Regex for SQL SUBSTRING to extract ' +
'the hostname from sl_node.no_comment',
'instances': 'Subcategory of slony instances that includes the ' +
'slony database, and slony schema to be monitored. ' +
'Optionally, user, password and slony_node_string ' +
                         'may be overridden per instance (see example).'
})
return config_help
def get_default_config(self):
"""
Return default config.
"""
config = super(SlonyCollector, self).get_default_config()
config.update({
'path': 'postgres',
'host': 'localhost',
'user': 'postgres',
'password': 'postgres',
'port': 5432,
'slony_node_string': 'Node [0-9]+ - postgres@localhost',
'method': 'Threaded',
'instances': {},
})
return config
def collect(self):
if psycopg2 is None:
self.log.error('Unable to import module psycopg2')
return {}
instances = self.config['instances']
# HACK: setting default with subcategory messes up merging of configs,
# so we only set the default if one wasn't provided.
if not instances:
instances = {
'default': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
}
}
for name, instance in instances.iteritems():
host = self.config['host']
port = self.config['port']
user = instance.get('user') or self.config['user']
password = instance.get('password') or self.config['password']
slony_node_string = instance.get('slony_node_string') or \
self.config['slony_node_string']
slony_db = instance['slony_db']
slony_schema = instance['slony_schema']
stats = self._get_stats_by_database(
host, port, user, password, slony_db,
slony_schema, slony_node_string
)
[self.publish(metric, value) for metric, value in stats]
def _get_stats_by_database(self, host, port, user,
password, db, schema, node_string):
path = "slony.%(datname)s.%(metric)s.lag_events"
conn = psycopg2.connect(
host=host,
user=user,
password=password,
port=port,
database=db)
# Avoid using transactions, set isolation level to autocommit
conn.set_isolation_level(0)
query = """
SELECT SUBSTRING(sl.no_comment FROM %(node_extractor)s) AS node,
st.st_lag_num_events AS lag_events
FROM %(schema)s.sl_status AS st, %(schema)s.sl_node AS sl
WHERE sl.no_id = st.st_received
"""
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(query, {
'node_extractor': node_string,
'schema': psycopg2.extensions.AsIs(schema),
})
metrics = []
for row in cursor.fetchall():
stats = row.copy()
metrics.append((
path % {'datname': db, 'metric': stats.get('node')},
stats.get('lag_events')
))
return metrics
|
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .device import ONVIFDevice
from .models import Profile
class ONVIFBaseEntity(Entity):
"""Base class common to all ONVIF entities."""
def __init__(self, device: ONVIFDevice, profile: Profile = None) -> None:
"""Initialize the ONVIF entity."""
self.device: ONVIFDevice = device
self.profile: Profile = profile
@property
def available(self):
"""Return True if device is available."""
return self.device.available
@property
def device_info(self):
"""Return a device description for device registry."""
device_info = {
"manufacturer": self.device.info.manufacturer,
"model": self.device.info.model,
"name": self.device.name,
"sw_version": self.device.info.fw_version,
"identifiers": {
# MAC address is not always available, and given the number
# of non-conformant ONVIF devices we have historically supported,
# we can not guarantee serial number either. Due to this, we have
# adopted an either/or approach in the config entry setup, and can
# guarantee that one or the other will be populated.
# See: https://github.com/home-assistant/core/issues/35883
(DOMAIN, self.device.info.mac or self.device.info.serial_number)
},
}
if self.device.info.mac:
device_info["connections"] = {
(CONNECTION_NETWORK_MAC, self.device.info.mac)
}
return device_info
|
from docutils import nodes
from docutils.parsers.rst import roles
from nikola.utils import split_explicit_title, LOGGER, slugify
from nikola.plugin_categories import RestExtension
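# The ``doc`` role links to another post or page by its slug, e.g.
# ``:doc:`creating-a-theme```; an explicit link text can be given with the
# ``:doc:`Link text <slug>``` form handled by ``split_explicit_title`` below.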
class Plugin(RestExtension):
"""Plugin for doc role."""
name = 'rest_doc'
def set_site(self, site):
"""Set Nikola site."""
self.site = site
roles.register_canonical_role('doc', doc_role)
self.site.register_shortcode('doc', doc_shortcode)
doc_role.site = site
return super().set_site(site)
def _find_post(slug):
"""Find a post with the given slug in posts or pages."""
twin_slugs = False
post = None
for p in doc_role.site.timeline:
if p.meta('slug') == slug:
if post is None:
post = p
else:
twin_slugs = True
break
return post, twin_slugs
def _doc_link(rawtext, text, options={}, content=[]):
"""Handle the doc role."""
# split link's text and post's slug in role content
has_explicit_title, title, slug = split_explicit_title(text)
if '#' in slug:
slug, fragment = slug.split('#', 1)
else:
fragment = None
# Look for the unslugified input first, then try to slugify (Issue #3450)
post, twin_slugs = _find_post(slug)
if post is None:
slug = slugify(slug)
post, twin_slugs = _find_post(slug)
try:
if post is None:
raise ValueError("No post with matching slug found.")
except ValueError:
return False, False, None, None, slug
if not has_explicit_title:
# use post's title as link's text
title = post.title()
permalink = post.permalink()
if fragment:
permalink += '#' + fragment
return True, twin_slugs, title, permalink, slug
def doc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Handle the doc role."""
success, twin_slugs, title, permalink, slug = _doc_link(rawtext, text, options, content)
if success:
if twin_slugs:
inliner.reporter.warning(
'More than one post with the same slug. Using "{0}"'.format(permalink))
LOGGER.warning(
'More than one post with the same slug. Using "{0}" for doc role'.format(permalink))
node = make_link_node(rawtext, title, permalink, options)
return [node], []
else:
msg = inliner.reporter.error(
'"{0}" slug doesn\'t exist.'.format(slug),
line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
def doc_shortcode(*args, **kwargs):
"""Implement the doc shortcode."""
text = kwargs['data']
    success, twin_slugs, title, permalink, slug = _doc_link(text, text)
if success:
if twin_slugs:
LOGGER.warning(
'More than one post with the same slug. Using "{0}" for doc shortcode'.format(permalink))
return '<a href="{0}">{1}</a>'.format(permalink, title)
else:
LOGGER.error(
'"{0}" slug doesn\'t exist.'.format(slug))
return '<span class="error text-error" style="color: red;">Invalid link: {0}</span>'.format(text)
def make_link_node(rawtext, text, url, options):
"""Make a reST link node."""
    node = nodes.reference(rawtext, text, refuri=url, **options)
return node
|
from collections import defaultdict
import socket
import diamond.collector
from diamond.metric import Metric
def parse_slab_stats(slab_stats):
"""Convert output from memcached's `stats slabs` into a Python dict.
Newlines are returned by memcached along with carriage returns
(i.e. '\r\n').
>>> parse_slab_stats(
"STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\nSTAT "
"active_slabs 1\r\nSTAT total_malloced 1048512\r\nEND\r\n")
{
'slabs': {
1: {
'chunk_size': 96,
'chunks_per_page': 10922,
# ...
},
},
'active_slabs': 1,
'total_malloced': 1048512,
}
"""
stats_dict = {'slabs': defaultdict(lambda: {})}
for line in slab_stats.splitlines():
if line == 'END':
break
# e.g.: "STAT 1:chunks_per_page 10922"
cmd, key, value = line.split(' ')
if cmd != 'STAT':
continue
# e.g.: "STAT active_slabs 1"
if ":" not in key:
stats_dict[key] = int(value)
continue
slab, key = key.split(':')
stats_dict['slabs'][int(slab)][key] = int(value)
return stats_dict
def dict_to_paths(dict_):
"""Convert a dict to metric paths.
>>> dict_to_paths({'foo': {'bar': 1}, 'baz': 2})
{
'foo.bar': 1,
'baz': 2,
}
"""
metrics = {}
for k, v in dict_.iteritems():
if isinstance(v, dict):
submetrics = dict_to_paths(v)
for subk, subv in submetrics.iteritems():
metrics['.'.join([str(k), str(subk)])] = subv
else:
metrics[k] = v
return metrics
class MemcachedSlabCollector(diamond.collector.Collector):
def process_config(self):
super(MemcachedSlabCollector, self).process_config()
self.host = self.config['host']
self.port = int(self.config['port'])
def get_default_config(self):
config = super(MemcachedSlabCollector, self).get_default_config()
# Output stats in the format:
# 'servers.cache-main-01.memcached_slab.slabs.1.chunk_size'
config.update({
'interval': 60,
'path_prefix': 'servers',
'path': 'memcached_slab',
'host': 'localhost',
'port': 11211,
})
return config
def get_slab_stats(self):
"""Retrieve slab stats from memcached."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
s.send("stats slabs\n")
try:
data = ""
while True:
data += s.recv(4096)
if data.endswith('END\r\n'):
break
return data
finally:
s.close()
def collect(self):
unparsed_slab_stats = self.get_slab_stats()
slab_stats = parse_slab_stats(unparsed_slab_stats)
paths = dict_to_paths(slab_stats)
for path, value in paths.iteritems():
# Add path and prefix to metric (e.g.
            # 'servers.cache-main-01.memcached_slab')
full_path = self.get_metric_path(path)
metric = Metric(full_path, value)
self.publish_metric(metric)
|
import json
import ntpath
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
TEST_PORT = 5000
PSPING_OUTPUT_FILE = 'psping_out'
PSPING_DIR = 'PSTools'
PSPING_ZIP = PSPING_DIR + '.zip'
PSPING_URL = 'https://download.sysinternals.com/files/' + PSPING_ZIP
flags.DEFINE_integer('psping_packet_size', 1,
'The size of the packet to test the ping with.')
flags.DEFINE_integer('psping_bucket_count', 100,
'For the results histogram, number of columns')
flags.DEFINE_integer('psping_rr_count', 1000,
'The number of pings to attempt')
flags.DEFINE_integer('psping_timeout', 10,
'The time to allow psping to run')
def Install(vm):
"""Installs the psping package on the VM."""
zip_path = ntpath.join(vm.temp_dir, PSPING_ZIP)
vm.DownloadFile(PSPING_URL, zip_path)
vm.UnzipFile(zip_path, vm.temp_dir)
vm.AllowPort(TEST_PORT)
vm.SetProcessPriorityToHighByFlag('psping.exe')
def StartPspingServer(vm):
server_command = (
'Start-Job -ScriptBlock {{'
'{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};'
'}}').format(
psping_exec_dir=vm.temp_dir,
port=TEST_PORT)
vm.RemoteCommand(server_command)
def _RunPsping(vm, command):
try:
vm.RemoteCommand(command, timeout=FLAGS.psping_timeout)
except errors.VirtualMachine.RemoteCommandError:
# We expect psping to return an error here because the psping server has
# to be killed with a CTRL+C.
pass
@vm_util.Retry(max_retries=3)
def RunLatencyTest(sending_vm, receiving_vm, use_internal_ip=True):
"""Run the psping latency test.
Uses a TCP request-response time to measure latency.
Args:
sending_vm: the vm to send the tcp request.
receiving_vm: the vm acting as the server.
use_internal_ip: whether or not to use the private IP or the public IP.
Returns:
list of samples representing latency between the two VMs.
"""
server_ip = (receiving_vm.internal_ip if use_internal_ip
else receiving_vm.ip_address)
client_command = (
'cd {psping_exec_dir}; '
'sleep 2;' # sleep to make sure the server starts first.
'.\\psping.exe /accepteula -l {packet_size} -i 0 -q '
'-n {rr_count} -h {bucket_count} {ip}:{port}'
' > {out_file}').format(
psping_exec_dir=sending_vm.temp_dir,
packet_size=FLAGS.psping_packet_size,
rr_count=FLAGS.psping_rr_count,
bucket_count=FLAGS.psping_bucket_count,
ip=server_ip,
port=TEST_PORT,
out_file=PSPING_OUTPUT_FILE)
# PSPing does not have a configurable timeout. To get around this, start the
# server as a background job, then kill it after 10 seconds
server_command = (
'{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};').format(
psping_exec_dir=receiving_vm.temp_dir,
port=TEST_PORT)
process_args = [(_RunPsping, (receiving_vm, server_command), {}),
(_RunPsping, (sending_vm, client_command), {})]
background_tasks.RunParallelProcesses(process_args, 200, 1)
cat_command = 'cd {psping_exec_dir}; cat {out_file}'.format(
psping_exec_dir=sending_vm.temp_dir,
out_file=PSPING_OUTPUT_FILE)
output, _ = sending_vm.RemoteCommand(cat_command)
return ParsePspingResults(output, sending_vm, receiving_vm, use_internal_ip)
# example output
# PsPing v2.10 - PsPing - ping, latency, bandwidth measurement utility
# Copyright (C) 2012-2016 Mark Russinovich
# Sysinternals - www.sysinternals.com
#
# TCP latency test connecting to 10.138.0.2:47001: Connected
# 15 iterations (warmup 5) sending 8192 bytes TCP latency test: 0%
# Connected
# 15 iterations (warmup 5) sending 8192 bytes TCP latency test: 100%
#
# TCP roundtrip latency statistics (post warmup):
# Sent = 10, Size = 8192, Total Bytes: 81920,
# Minimum = 0.19ms, Maxiumum = 0.58ms, Average = 0.27ms
#
# Latency\tCount
# 0.30\t688
# 0.51\t292
# 0.71\t15
# 0.92\t2
# 1.13\t0
# 1.33\t2
# 1.54\t0
# 1.75\t0
# 1.95\t0
# 2.16\t1
def ParsePspingResults(results, client_vm, server_vm, internal_ip_used):
"""Turn psping output into a list of samples.
Args:
results: string of the psping output
client_vm: the VM performing the latency test
server_vm: the VM serving the latency test
internal_ip_used: whether or not the private IP was used.
Returns:
list of samples reflecting the psping results
"""
output_list = [val.rstrip('\r') for val in results.split('\n')]
# There should be exactly one line like this.
data_line = [line for line in output_list if 'Minimum' in line][0]
# split the line up by spaces
data_line = [val for val in data_line.split(' ') if val]
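  # data_line now looks like ['Minimum', '=', '0.19ms,', 'Maxiumum', '=',
  # '0.58ms,', 'Average', '=', '0.27ms'], so values sit at indices 2, 5 and 8.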
minimum = float(data_line[2].rstrip('ms,'))
maximum = float(data_line[5].rstrip('ms,'))
average = float(data_line[8].rstrip('ms,'))
metadata = {
'internal_ip_used': internal_ip_used,
'sending_zone': client_vm.zone,
'sending_machine_type': client_vm.machine_type,
'receiving_zone': server_vm.zone,
'receiving_machine_type': server_vm.machine_type,
}
samples = [
sample.Sample('latency', average, 'ms', metadata),
sample.Sample('latency:maximum', maximum, 'ms', metadata),
sample.Sample('latency:minimum', minimum, 'ms', metadata),
]
histogram = []
index = 1
for line in output_list:
line_data = [val for val in line.split(' ') if val]
# the line should look like ['entry\tvalue']
    if len(line_data) != 1:
continue
entry_data = line_data[0].split('\t')
    if len(entry_data) != 2:
continue
if 'Latency' in entry_data:
continue
# This is a histogram data line
latency = float(entry_data[0])
count = int(entry_data[1])
histogram.append({'latency': latency,
'count': count,
'bucket_number': index})
index += 1
histogram_metadata = metadata.copy()
histogram_metadata.update({'histogram': json.dumps(histogram)})
samples.append(
sample.Sample('latency:histogram', 0, 'ms', histogram_metadata))
return samples
|
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron shades."""
devs = []
for (area_name, device) in hass.data[LUTRON_DEVICES]["cover"]:
dev = LutronCover(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
add_entities(devs, True)
return True
class LutronCover(LutronDevice, CoverEntity):
"""Representation of a Lutron shade."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._lutron_device.last_level() < 1
@property
def current_cover_position(self):
"""Return the current position of cover."""
return self._lutron_device.last_level()
def close_cover(self, **kwargs):
"""Close the cover."""
self._lutron_device.level = 0
def open_cover(self, **kwargs):
"""Open the cover."""
self._lutron_device.level = 100
def set_cover_position(self, **kwargs):
"""Move the shade to a specific position."""
if ATTR_POSITION in kwargs:
position = kwargs[ATTR_POSITION]
self._lutron_device.level = position
def update(self):
"""Call when forcing a refresh of the device."""
# Reading the property (rather than last_level()) fetches value
level = self._lutron_device.level
_LOGGER.debug("Lutron ID: %d updated to %f", self._lutron_device.id, level)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"Lutron Integration ID": self._lutron_device.id}
|
import os.path
from django import template
from django.template.defaulttags import do_for, do_if
register = template.Library()
@register.simple_tag()
def replace(value, char, replace_char):
return value.replace(char, replace_char)
@register.filter
def dirname(value):
return os.path.dirname(value)
@register.filter
def stripext(value):
return os.path.splitext(value)[0]
@register.filter
def parentdir(value):
return value.split("/", 1)[-1]
register.tag("if")(do_if)
register.tag("for")(do_for)
|
import pytest
from qstrader.broker.fee_model.percent_fee_model import PercentFeeModel
class AssetMock(object):
def __init__(self):
pass
class BrokerMock(object):
def __init__(self):
pass
@pytest.mark.parametrize(
"commission_pct,tax_pct,quantity,consideration,"
"expected_commission,expected_tax,expected_total", [
(0.0, 0.0, 100, 1000.0, 0.0, 0.0, 0.0),
(0.002, 0.0, 100, 1000.0, 2.0, 0.0, 2.0),
(0.0, 0.005, 100, 1000.0, 0.0, 5.0, 5.0),
(0.001, 0.005, 100, 1000.0, 1.0, 5.0, 6.0),
(0.001, 0.005, -100, -1000.0, 1.0, 5.0, 6.0),
(0.002, 0.0025, -50, -8542.0, 17.084, 21.355, 38.439),
]
)
def test_percent_commission(
commission_pct, tax_pct, quantity, consideration,
expected_commission, expected_tax, expected_total
):
"""
Tests that each method returns the appropriate
    percentage tax/commission.
"""
pfm = PercentFeeModel(commission_pct=commission_pct, tax_pct=tax_pct)
asset = AssetMock()
broker = BrokerMock()
assert pfm._calc_commission(asset, quantity, consideration, broker=broker) == expected_commission
assert pfm._calc_tax(asset, quantity, consideration, broker=broker) == expected_tax
assert pfm.calc_total_cost(asset, quantity, consideration, broker=broker) == expected_total
|
from concurrent import futures
from datetime import timedelta
import logging
from pytfiac import Tfiac
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.const import ATTR_TEMPERATURE, CONF_HOST, TEMP_FAHRENHEIT
import homeassistant.helpers.config_validation as cv
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_HOST): cv.string})
_LOGGER = logging.getLogger(__name__)
MIN_TEMP = 61
MAX_TEMP = 88
HVAC_MAP = {
HVAC_MODE_HEAT: "heat",
HVAC_MODE_AUTO: "selfFeel",
HVAC_MODE_DRY: "dehumi",
HVAC_MODE_FAN_ONLY: "fan",
HVAC_MODE_COOL: "cool",
HVAC_MODE_OFF: "off",
}
HVAC_MAP_REV = {v: k for k, v in HVAC_MAP.items()}
SUPPORT_FAN = [FAN_AUTO, FAN_HIGH, FAN_MEDIUM, FAN_LOW]
SUPPORT_SWING = [SWING_OFF, SWING_HORIZONTAL, SWING_VERTICAL, SWING_BOTH]
SUPPORT_FLAGS = SUPPORT_FAN_MODE | SUPPORT_SWING_MODE | SUPPORT_TARGET_TEMPERATURE
CURR_TEMP = "current_temp"
TARGET_TEMP = "target_temp"
OPERATION_MODE = "operation"
FAN_MODE = "fan_mode"
SWING_MODE = "swing_mode"
ON_MODE = "is_on"
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the TFIAC climate device."""
tfiac_client = Tfiac(config[CONF_HOST])
try:
await tfiac_client.update()
except futures.TimeoutError:
_LOGGER.error("Unable to connect to %s", config[CONF_HOST])
return
async_add_devices([TfiacClimate(hass, tfiac_client)])
class TfiacClimate(ClimateEntity):
"""TFIAC class."""
def __init__(self, hass, client):
"""Init class."""
self._client = client
self._available = True
@property
def available(self):
"""Return if the device is available."""
return self._available
async def async_update(self):
"""Update status via socket polling."""
try:
await self._client.update()
self._available = True
except futures.TimeoutError:
self._available = False
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def min_temp(self):
"""Return the minimum temperature."""
return MIN_TEMP
@property
def max_temp(self):
"""Return the maximum temperature."""
return MAX_TEMP
@property
def name(self):
"""Return the name of the climate device."""
return self._client.name
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._client.status["target_temp"]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self._client.status["current_temp"]
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if self._client.status[ON_MODE] != "on":
return HVAC_MODE_OFF
state = self._client.status["operation"]
return HVAC_MAP_REV.get(state)
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return list(HVAC_MAP)
@property
def fan_mode(self):
"""Return the fan setting."""
return self._client.status["fan_mode"].lower()
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return SUPPORT_FAN
@property
def swing_mode(self):
"""Return the swing setting."""
return self._client.status["swing_mode"].lower()
@property
def swing_modes(self):
"""List of available swing modes."""
return SUPPORT_SWING
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is not None:
await self._client.set_state(TARGET_TEMP, temp)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._client.set_state(ON_MODE, "off")
else:
await self._client.set_state(OPERATION_MODE, HVAC_MAP[hvac_mode])
async def async_set_fan_mode(self, fan_mode):
"""Set new fan mode."""
await self._client.set_state(FAN_MODE, fan_mode.capitalize())
async def async_set_swing_mode(self, swing_mode):
"""Set new swing mode."""
await self._client.set_swing(swing_mode.capitalize())
async def async_turn_on(self):
"""Turn device on."""
await self._client.set_state(OPERATION_MODE)
async def async_turn_off(self):
"""Turn device off."""
await self._client.set_state(ON_MODE, "off")
|
import contextlib
import fcntl
import itertools
import os
import signal
import sys
import time
import mock
import pytest
from paasta_tools import mac_address
skip_if_osx = pytest.mark.skipif(
sys.platform == "darwin", reason="Flock is not present on OS X"
)
def test_simple(tmpdir):
mac, lock_file = mac_address.reserve_unique_mac_address(str(tmpdir))
with contextlib.closing(lock_file):
assert lock_file is not None
assert mac == "02:52:00:00:00:00"
assert tmpdir.join(mac).check()
def test_dir_not_exist(tmpdir):
with pytest.raises(IOError):
mac_address.reserve_unique_mac_address(str(tmpdir.join("nonexistent")))
def test_file_exists_no_flock(tmpdir):
tmpdir.join("02:52:00:00:00:00").ensure()
mac, lock_file = mac_address.reserve_unique_mac_address(str(tmpdir))
with contextlib.closing(lock_file):
assert lock_file is not None
assert mac == "02:52:00:00:00:00"
def _flock_process(path):
# fork a subprocess that holds an flock.
r, w = os.pipe()
child_pid = os.fork()
if child_pid == 0: # pragma: no cover
os.close(r)
fd = os.open(path, os.O_CREAT | os.O_RDWR)
fcntl.flock(fd, fcntl.LOCK_EX)
os.write(w, b"ok")
os.close(w)
time.sleep(60 * 60 * 24) # sleep for some approximation of infinity
sys.exit(0) # never returns
os.close(w)
# wait for something to be printed so we know the flock has occurred
assert os.read(r, 2) == b"ok"
os.close(r)
return child_pid
@skip_if_osx
def test_file_exists_flock(tmpdir):
# it doesn't count if this process has the flock, so we need to spawn a different one to hold it
flock_process = _flock_process(str(tmpdir.join("02:52:00:00:00:00")))
try:
mac, lock_file = mac_address.reserve_unique_mac_address(str(tmpdir))
with contextlib.closing(lock_file):
assert lock_file is not None
assert mac == "02:52:00:00:00:01"
finally:
os.kill(flock_process, signal.SIGKILL)
@pytest.yield_fixture(autouse=True)
def mock_randbits():
# make getrandbits() reliably return an incrementing counter starting at 0
class counter(itertools.count):
def __call__(self, _):
return next(self)
with mock.patch.object(mac_address.random, "getrandbits", side_effect=counter()):
yield
|
import asyncio
from datetime import timedelta
from typing import Optional
from twentemilieu import TwenteMilieu
import voluptuous as vol
from homeassistant.components.twentemilieu.const import (
CONF_HOUSE_LETTER,
CONF_HOUSE_NUMBER,
CONF_POST_CODE,
DATA_UPDATE,
DOMAIN,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
SCAN_INTERVAL = timedelta(seconds=3600)
SERVICE_UPDATE = "update"
SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_ID): cv.string})
async def _update_twentemilieu(
hass: HomeAssistantType, unique_id: Optional[str]
) -> None:
"""Update Twente Milieu."""
if unique_id is not None:
twentemilieu = hass.data[DOMAIN].get(unique_id)
if twentemilieu is not None:
await twentemilieu.update()
async_dispatcher_send(hass, DATA_UPDATE, unique_id)
else:
tasks = []
for twentemilieu in hass.data[DOMAIN].values():
tasks.append(twentemilieu.update())
await asyncio.wait(tasks)
for uid in hass.data[DOMAIN]:
async_dispatcher_send(hass, DATA_UPDATE, uid)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the Twente Milieu components."""
async def update(call) -> None:
"""Service call to manually update the data."""
unique_id = call.data.get(CONF_ID)
await _update_twentemilieu(hass, unique_id)
hass.services.async_register(DOMAIN, SERVICE_UPDATE, update, schema=SERVICE_SCHEMA)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up Twente Milieu from a config entry."""
session = async_get_clientsession(hass)
twentemilieu = TwenteMilieu(
post_code=entry.data[CONF_POST_CODE],
house_number=entry.data[CONF_HOUSE_NUMBER],
house_letter=entry.data[CONF_HOUSE_LETTER],
session=session,
)
unique_id = entry.data[CONF_ID]
hass.data.setdefault(DOMAIN, {})[unique_id] = twentemilieu
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
async def _interval_update(now=None) -> None:
"""Update Twente Milieu data."""
await _update_twentemilieu(hass, unique_id)
async_track_time_interval(hass, _interval_update, SCAN_INTERVAL)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload Twente Milieu config entry."""
await hass.config_entries.async_forward_entry_unload(entry, "sensor")
del hass.data[DOMAIN][entry.data[CONF_ID]]
return True
|
import functools
from http import server
import logging
import multiprocessing
import os
import subprocess
import sys
import threading
import time
# Amount of time in seconds to attempt calling a client VM if the VM is calling in.
MAX_TIME_SECONDS = 30
# Amount of time in seconds to attempt calling a client VM if the VM is not calling in.
MAX_TIME_SECONDS_NO_CALLING = 1200
# entry to stop processing from the timing queue
_STOP_QUEUE_ENTRY = 'stop'
# Tag for undefined hostname, should be synced with large_scale_boot_benchmark.
UNDEFINED_HOSTNAME = 'UNDEFINED'
# Tag for sequential hostname, should be synced with large_scale_boot_benchmark.
SEQUENTIAL_IP = 'SEQUENTIAL_IP'
# Multiplier for nanoseconds
NANO = 1e9
def ConfirmIPAccessible(client_host, port, timeout=MAX_TIME_SECONDS):
"""Confirm the given host's port is accessible and return the access time."""
netcat_command = 'nc -zv -w 1 {client} {port}'.format(
client=client_host,
port=port)
start_time = time.time()
while time.time() <= (start_time + timeout):
p = subprocess.Popen(netcat_command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = p.communicate()
# different versions of netcat uses different stderr strings.
if any(word in stderr.decode('utf-8') for word in ['open', 'succeeded']):
# return the system time in nanoseconds
return 'Pass:%s:%d' % (client_host, time.time() * NANO)
logging.warning('Could not netcat to port %s on client vm %s.',
port, client_host)
return 'Fail:%s:%d' % (client_host, time.time() * NANO)
def WaitForRunningStatus(client_host, timeout=MAX_TIME_SECONDS):
"""Wait for the VM to report running status.
Status command generated from data/large_scale_boot/vm_status.sh.jinja2.
Args:
client_host: client host to check for running status.
timeout: Max timeout to wait before declaring failure.
Returns:
host status string.
"""
with open('/tmp/pkb/vm_status.sh', 'r') as reader:
command = reader.read()
start_time = time.time()
while time.time() <= (start_time + timeout):
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
universal_newlines=True, stderr=subprocess.PIPE)
status, _ = p.communicate()
if 'running' in status.lower():
return 'Running:%s:%d' % (client_host, time.time() * NANO)
logging.warning('Client vm %s not running yet.', client_host)
return 'Fail:%s:%d' % (client_host, time.time() * NANO)
def StoreResult(result_str, queue):
"""Stores a given result string to results queue."""
if result_str:
queue.put(result_str)
def WriteResultsToFile(results_path, queue):
"""Write anything in results queue to a file."""
with open(results_path, 'a') as writer:
while True:
result = queue.get()
if result == _STOP_QUEUE_ENTRY:
logging.info('Told to stop writing to file %s from queue', results_path)
return
writer.write('{}\n'.format(result))
writer.flush()
def BuildHostNames(name_pattern, count, use_public_ip):
"""Derieve host names from either name pattern or boot logs.
See large_scale_boot benchmark for name_pattern. For example, SEQUENTIAL_IP
name pattern is in the form of 'SEQUENTIAL_IP_{public_dns}_{start_index}'.
Args:
name_pattern: Name pattern to build host names with.
count: count of vms.
use_public_ip: hostnames should be public ip.
Returns:
hostnames or host ips to access.
"""
if name_pattern == UNDEFINED_HOSTNAME:
return WaitForHostNames(use_public_ip)
elif SEQUENTIAL_IP in name_pattern:
public_dns = name_pattern.split('_')[-2]
start_vm_index = int(name_pattern.split('_')[-1])
if public_dns:
return [public_dns.replace('VMID', str(vm_id))
for vm_id in range(start_vm_index, count + start_vm_index)]
else:
return GenerateHostIPs(start_vm_index, count)
else:
return [name_pattern.replace('VM_ID', str(vm_id))
for vm_id in range(1, count + 1)]
def WaitForHostNames(use_public_ip, timeout=MAX_TIME_SECONDS_NO_CALLING):
"""Wait for boot logs to complete and grep the newly created ips.
After boot_script.sh completes, it will print out [completed].
In boot_script.sh output, outputs will be of the following formats:
GCP:
networkInterfaces[0].accessConfigs[0].natIP: 34.94.81.165
AWS:
PRIVATEIPADDRESSES True ip-10-0-0-143.ec2.internal 10.0.0.143
ASSOCIATION amazon ec2-100-24-107-67.compute-1.amazonaws.com 100.24.107.67
Args:
use_public_ip: whether to use public_ip hostname.
timeout: Amount of time in seconds to wait for boot.
Returns:
hosts to netcat.
"""
start_time = time.time()
while time.time() <= (start_time + timeout):
if os.system('grep completed log') != 0:
time.sleep(1)
continue
with open('log', 'r') as f:
hostnames = []
for line in f:
# look for GCP public ip
if 'natIP' in line:
hostnames.append(line.split()[1])
# look for amazon public ip if set
if use_public_ip and 'ASSOCIATION' in line:
hostnames.append(line.split()[3])
# look for amazon private ip if public ip is not set
if not use_public_ip and 'PRIVATEIPADDRESSES' in line:
hostnames.append(line.split()[2])
return set(hostnames)
raise ValueError('Boot did not complete successfully before timeout of %s '
'seconds.' % MAX_TIME_SECONDS_NO_CALLING)
def GenerateHostIPs(boot_vm_index, count):
"""Logic must be aligned with large_scale_boot/boot_script.sh."""
hostnames = []
for vm_id in range(boot_vm_index, boot_vm_index + count):
hostnames.append('10.0.{octet3}.{octet4}'.format(
octet3=vm_id // 256,
octet4=vm_id % 256))
return hostnames
def ActAsClient(pool, queue, port, name_pattern, vms_count, use_public_ip):
"""Use as a client."""
store_results = functools.partial(StoreResult, queue=queue)
all_jobs = []
for host_name in BuildHostNames(name_pattern, vms_count, use_public_ip):
job = pool.apply_async(
ConfirmIPAccessible,
args=(host_name, port, MAX_TIME_SECONDS_NO_CALLING,),
callback=store_results)
all_jobs.append(job)
if vms_count == 1:
status_job = pool.apply_async(
WaitForRunningStatus,
args=(host_name, MAX_TIME_SECONDS_NO_CALLING,),
callback=store_results)
all_jobs.append(status_job)
logging.info([async_job.get() for async_job in all_jobs])
queue.put(_STOP_QUEUE_ENTRY)
def ActAsServer(pool, queue, port, host_name, listening_server):
"""Use as a server."""
handler = functools.partial(RequestHandler, pool, host_name, queue, port)
listener = server.HTTPServer(listening_server, handler)
logging.info('Starting httpserver...\n')
try:
listener.serve_forever()
except KeyboardInterrupt:
logging.info('^C received, shutting down server')
listener.server_close()
queue.put(_STOP_QUEUE_ENTRY)
class RequestHandler(server.BaseHTTPRequestHandler):
"""Request handler for incoming curl requests from booted vms."""
def __init__(self, pool, launcher, queue, access_port, *args, **kwargs):
"""Creates a RequestHandler for a http request received by the server.
Args:
pool: multiprocessing process pool object.
launcher: name string of the launcher vm that the server is on.
queue: multiprocessing queue object.
access_port: port number to call on the booted vms.
*args: Other argments to apply to the request handler.
**kwargs: Keyword arguments to apply to the request handler.
"""
self.process_pool = pool
self.launcher = launcher
self.timing_queue = queue
self.access_port = access_port
# BaseHTTPRequestHandler calls do_GET inside __init__
# So we have to call super().__init__ after setting attributes.
super(RequestHandler, self).__init__(*args, **kwargs)
def do_GET(self): # pylint: disable=g-bad-name
"""Process GET requests."""
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes('OK', 'UTF-8'))
# Check that we are not getting random curls on the internet.
client_host = self.client_address[0]
client_check_str = self.headers.get('X-Header', None)
if client_check_str != self.launcher:
logging.error('Got curl with unknown X-Header: %s', client_check_str)
self.shutdown()
return
# Process this client
logging.info(client_host)
store_results_func = functools.partial(StoreResult, queue=self.timing_queue)
self.process_pool.apply_async(ConfirmIPAccessible,
args=(client_host, self.access_port,),
callback=store_results_func)
def shutdown(self):
"""Shut down the server."""
t = threading.Thread(target=self.server.shutdown)
logging.info('Server shut down.')
t.start()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
if len(sys.argv) != 9:
    raise ValueError('Got unexpected number of command-line arguments. '
                     'There should be exactly 8 command-line arguments: '
'1. name of the server vm, '
'2. server port, '
'3. results file, '
'4. port to access the boot VMs, '
'5. whether to use the listening server, '
'6. launched vm naming pattern, '
                     '7. number of launched vms, '
'8. whether to use public ip address.')
hostname = sys.argv[1]
server_address = ('', int(sys.argv[2]))
results_file_path = sys.argv[3]
clients_port = sys.argv[4]
use_listening_server = sys.argv[5] == 'True'
vms_name_pattern = sys.argv[6]
num_vms = int(sys.argv[7])
using_public_ip = sys.argv[8] == 'True'
process_pool = multiprocessing.Pool()
multiprocessing_manager = multiprocessing.Manager()
timing_queue = multiprocessing_manager.Queue()
# Start the worker to move results from queue to file first.
process_pool.apply_async(WriteResultsToFile,
args=(results_file_path, timing_queue,))
  if use_listening_server:
    # Start the server to listen and put results on the queue.
    ActAsServer(process_pool, timing_queue, clients_port, hostname,
                server_address)
  else:
ActAsClient(process_pool, timing_queue, clients_port,
vms_name_pattern, num_vms, using_public_ip)
|
from homeassistant.components.device_tracker import DeviceScanner
from homeassistant.components.opnsense import CONF_TRACKER_INTERFACE, OPNSENSE_DATA
async def async_get_scanner(hass, config, discovery_info=None):
"""Configure the OPNSense device_tracker."""
interface_client = hass.data[OPNSENSE_DATA]["interfaces"]
scanner = OPNSenseDeviceScanner(
interface_client, hass.data[OPNSENSE_DATA][CONF_TRACKER_INTERFACE]
)
return scanner
class OPNSenseDeviceScanner(DeviceScanner):
"""This class queries a router running OPNsense."""
def __init__(self, client, interfaces):
"""Initialize the scanner."""
self.last_results = {}
self.client = client
self.interfaces = interfaces
def _get_mac_addrs(self, devices):
"""Create dict with mac address keys from list of devices."""
out_devices = {}
for device in devices:
if not self.interfaces:
out_devices[device["mac"]] = device
elif device["intf_description"] in self.interfaces:
out_devices[device["mac"]] = device
return out_devices
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update_info()
return list(self.last_results)
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if device not in self.last_results:
return None
hostname = self.last_results[device].get("hostname") or None
return hostname
def update_info(self):
"""Ensure the information from the OPNSense router is up to date.
Return boolean if scanning successful.
"""
devices = self.client.get_arp()
self.last_results = self._get_mac_addrs(devices)
def get_extra_attributes(self, device):
"""Return the extra attrs of the given device."""
if device not in self.last_results:
return None
mfg = self.last_results[device].get("manufacturer")
if not mfg:
return {}
return {"manufacturer": mfg}
|
import sys
from .compat3 import basestring_
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
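# A small usage sketch; each iteration yields a ``loop_pos`` helper and the item:
#
#   for loop, item in looper(['a', 'b', 'c']):
#       print(loop.number, item, loop.first, loop.last)
#   # -> 1 a True False / 2 b False False / 3 c False True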
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
if sys.version < "3":
next = __next__
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
if sys.version < "3":
next = __next__
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
        Returns true if this item is the end of a group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, basestring_)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
|
INLINESTYLES = False
STYLE = "fruity"
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES, style=STYLE)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
class Pygments(Directive):
""" Source code syntax highlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
        formatter = VARIANTS[list(self.options)[0]] if self.options else DEFAULT
# print >>open('ui/default/pygments.css', 'w'), formatter.get_style_defs('.highlight')
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
|
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import LENGTH_KILOMETERS, TEMP_CELSIUS
from homeassistant.core import callback
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
ATTRIBUTION,
CONDITION_CLASSES,
DEFAULT_NAME,
DOMAIN,
METOFFICE_COORDINATOR,
METOFFICE_DATA,
METOFFICE_NAME,
VISIBILITY_CLASSES,
VISIBILITY_DISTANCE_CLASSES,
)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigType, async_add_entities
) -> None:
"""Set up the Met Office weather sensor platform."""
hass_data = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
MetOfficeWeather(
entry.data,
hass_data,
)
],
False,
)
class MetOfficeWeather(WeatherEntity):
"""Implementation of a Met Office weather condition."""
def __init__(self, entry_data, hass_data):
"""Initialise the platform with a data instance."""
self._data = hass_data[METOFFICE_DATA]
self._coordinator = hass_data[METOFFICE_COORDINATOR]
self._name = f"{DEFAULT_NAME} {hass_data[METOFFICE_NAME]}"
self._unique_id = f"{self._data.latitude}_{self._data.longitude}"
self.metoffice_now = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique of the sensor."""
return self._unique_id
@property
def condition(self):
"""Return the current condition."""
return (
[
k
for k, v in CONDITION_CLASSES.items()
if self.metoffice_now.weather.value in v
][0]
if self.metoffice_now
else None
)
@property
def temperature(self):
"""Return the platform temperature."""
return (
self.metoffice_now.temperature.value
if self.metoffice_now and self.metoffice_now.temperature
else None
)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def visibility(self):
"""Return the platform visibility."""
_visibility = None
if hasattr(self.metoffice_now, "visibility"):
_visibility = f"{VISIBILITY_CLASSES.get(self.metoffice_now.visibility.value)} - {VISIBILITY_DISTANCE_CLASSES.get(self.metoffice_now.visibility.value)}"
return _visibility
@property
def visibility_unit(self):
"""Return the unit of measurement."""
return LENGTH_KILOMETERS
@property
def pressure(self):
"""Return the mean sea-level pressure."""
return (
self.metoffice_now.pressure.value
if self.metoffice_now and self.metoffice_now.pressure
else None
)
@property
def humidity(self):
"""Return the relative humidity."""
return (
self.metoffice_now.humidity.value
if self.metoffice_now and self.metoffice_now.humidity
else None
)
@property
def wind_speed(self):
"""Return the wind speed."""
return (
self.metoffice_now.wind_speed.value
if self.metoffice_now and self.metoffice_now.wind_speed
else None
)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return (
self.metoffice_now.wind_direction.value
if self.metoffice_now and self.metoffice_now.wind_direction
else None
)
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
async def async_added_to_hass(self) -> None:
"""Set up a listener and load data."""
self.async_on_remove(
self._coordinator.async_add_listener(self._update_callback)
)
self._update_callback()
@callback
def _update_callback(self) -> None:
"""Load data from integration."""
self.metoffice_now = self._data.now
self.async_write_ha_state()
@property
def should_poll(self) -> bool:
"""Entities do not individually poll."""
return False
@property
def available(self):
"""Return if state is available."""
return self.metoffice_now is not None
|
import logging
from pydanfossair.commands import ReadCommand
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
from . import DOMAIN as DANFOSS_AIR_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Danfoss Air sensors etc."""
data = hass.data[DANFOSS_AIR_DOMAIN]
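    # Each sensor spec below is [name, unit of measurement, pydanfossair
    # ReadCommand, device class]; the order matches DanfossAir.__init__.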
sensors = [
[
"Danfoss Air Exhaust Temperature",
TEMP_CELSIUS,
ReadCommand.exhaustTemperature,
DEVICE_CLASS_TEMPERATURE,
],
[
"Danfoss Air Outdoor Temperature",
TEMP_CELSIUS,
ReadCommand.outdoorTemperature,
DEVICE_CLASS_TEMPERATURE,
],
[
"Danfoss Air Supply Temperature",
TEMP_CELSIUS,
ReadCommand.supplyTemperature,
DEVICE_CLASS_TEMPERATURE,
],
[
"Danfoss Air Extract Temperature",
TEMP_CELSIUS,
ReadCommand.extractTemperature,
DEVICE_CLASS_TEMPERATURE,
],
[
"Danfoss Air Remaining Filter",
PERCENTAGE,
ReadCommand.filterPercent,
None,
],
[
"Danfoss Air Humidity",
PERCENTAGE,
ReadCommand.humidity,
DEVICE_CLASS_HUMIDITY,
],
["Danfoss Air Fan Step", PERCENTAGE, ReadCommand.fan_step, None],
["Danfoss Air Exhaust Fan Speed", "RPM", ReadCommand.exhaust_fan_speed, None],
["Danfoss Air Supply Fan Speed", "RPM", ReadCommand.supply_fan_speed, None],
[
"Danfoss Air Dial Battery",
PERCENTAGE,
ReadCommand.battery_percent,
DEVICE_CLASS_BATTERY,
],
]
dev = []
for sensor in sensors:
dev.append(DanfossAir(data, sensor[0], sensor[1], sensor[2], sensor[3]))
add_entities(dev, True)
class DanfossAir(Entity):
"""Representation of a Sensor."""
def __init__(self, data, name, sensor_unit, sensor_type, device_class):
"""Initialize the sensor."""
self._data = data
self._name = name
self._state = None
self._type = sensor_type
self._unit = sensor_unit
self._device_class = device_class
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
def update(self):
"""Update the new state of the sensor.
This is done through the DanfossAir object that does the actual
communication with the Air CCM.
"""
self._data.update()
self._state = self._data.get_value(self._type)
if self._state is None:
_LOGGER.debug("Could not get data for %s", self._type)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import managed_memory_store
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ycsb
FLAGS = flags.FLAGS
flags.DEFINE_string('redis_region',
'us-central1',
'The region to spin up cloud redis in.')
BENCHMARK_NAME = 'cloud_redis_ycsb'
BENCHMARK_CONFIG = """
cloud_redis_ycsb:
description: Run YCSB against cloud redis
cloud_redis:
redis_version: redis_3_2
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: 2
"""
CLOUD_REDIS_CLASS_NAME = 'CloudRedis'
def GetConfig(user_config):
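  """Load the benchmark config and apply the relevant flag overrides."""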
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms
if FLAGS['managed_memory_store_version'].present:
config['cloud_redis']['redis_version'] = FLAGS.managed_memory_store_version
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
    benchmark_config: The benchmark configuration to verify.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
# TODO(ruwa): This CheckPrerequisites call checks the prerequisites
# on the resource. Ideally, the benchmark is not responsible for this task.
# Instead, BaseResource should check prerequisites as part of creation and
# child resources can override CheckPrerequisites and benefit from it.
cloud_redis_class = (
managed_memory_store.GetManagedMemoryStoreClass(
FLAGS.cloud,
managed_memory_store.REDIS))
cloud_redis_class.CheckPrerequisites(benchmark_config)
def Prepare(benchmark_spec):
"""Prepare the cloud redis instance to YCSB tasks.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.always_call_cleanup = True
ycsb_vms = benchmark_spec.vm_groups['clients']
vm_util.RunThreaded(_Install, ycsb_vms)
cloud_redis_class = (
managed_memory_store.GetManagedMemoryStoreClass(
FLAGS.cloud,
managed_memory_store.REDIS))
benchmark_spec.cloud_redis_instance = (cloud_redis_class(benchmark_spec))
benchmark_spec.cloud_redis_instance.Create()
redis_args = {
'shardkeyspace': True,
'redis.host': benchmark_spec.cloud_redis_instance.GetMemoryStoreIp(),
'redis.port': benchmark_spec.cloud_redis_instance.GetMemoryStorePort(),
}
password = benchmark_spec.cloud_redis_instance.GetMemoryStorePassword()
if password:
redis_args['redis.password'] = password
benchmark_spec.executor = ycsb.YCSBExecutor('redis', **redis_args)
def Run(benchmark_spec):
"""Doc will be updated when implemented.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
ycsb_vms = benchmark_spec.vm_groups['clients']
samples = benchmark_spec.executor.LoadAndRun(ycsb_vms)
for sample in samples:
sample.metadata.update(
benchmark_spec.cloud_redis_instance.GetResourceMetadata())
return samples
def Cleanup(benchmark_spec):
"""Cleanup.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.cloud_redis_instance.Delete()
logging.info('Instance %s deleted successfully',
benchmark_spec.cloud_redis_instance.name)
def _Install(vm):
vm.Install('ycsb')
|
from django import template
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from weblate.utils.checks import check_doc_link
register = template.Library()
@register.simple_tag
def check_link(check):
fallback = None
if check.hint and check.hint.startswith("https:"):
fallback = check.hint
return configuration_error_link(check.id, fallback=fallback)
@register.simple_tag
def configuration_error_link(check, fallback=None):
url = check_doc_link(check) or fallback
if url:
return mark_safe(
'<a class="btn btn-info" href="{}">{}</a>'.format(url, _("Documentation"))
)
return ""
|
import re
from coverage.templite import Templite, TempliteSyntaxError, TempliteValueError
from tests.coveragetest import CoverageTest
# pylint: disable=possibly-unused-variable
class AnyOldObject(object):
"""Simple testing object.
Use keyword arguments in the constructor to set attributes on the object.
"""
def __init__(self, **attrs):
for n, v in attrs.items():
setattr(self, n, v)
class TempliteTest(CoverageTest):
"""Tests for Templite."""
run_in_temp_dir = False
def try_render(self, text, ctx=None, result=None):
"""Render `text` through `ctx`, and it had better be `result`.
Result defaults to None so we can shorten the calls where we expect
an exception and never get to the result comparison.
"""
actual = Templite(text).render(ctx or {})
# If result is None, then an exception should have prevented us getting
# to here.
assert result is not None
self.assertEqual(actual, result)
def assertSynErr(self, msg):
"""Assert that a `TempliteSyntaxError` will happen.
A context manager, and the message should be `msg`.
"""
pat = "^" + re.escape(msg) + "$"
return self.assertRaisesRegex(TempliteSyntaxError, pat)
def test_passthrough(self):
# Strings without variables are passed through unchanged.
self.assertEqual(Templite("Hello").render(), "Hello")
self.assertEqual(
Templite("Hello, 20% fun time!").render(),
"Hello, 20% fun time!"
)
def test_variables(self):
# Variables use {{var}} syntax.
self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!")
def test_undefined_variables(self):
# Using undefined names is an error.
with self.assertRaisesRegex(Exception, "'name'"):
self.try_render("Hi, {{name}}!")
def test_pipes(self):
# Variables can be filtered with pipes.
data = {
'name': 'Ned',
'upper': lambda x: x.upper(),
'second': lambda x: x[1],
}
self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!")
# Pipes can be concatenated.
self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!")
def test_reusability(self):
# A single Templite can be used more than once with different data.
globs = {
'upper': lambda x: x.upper(),
'punct': '!',
}
template = Templite("This is {{name|upper}}{{punct}}", globs)
self.assertEqual(template.render({'name':'Ned'}), "This is NED!")
self.assertEqual(template.render({'name':'Ben'}), "This is BEN!")
def test_attribute(self):
# Variables' attributes can be accessed with dots.
obj = AnyOldObject(a="Ay")
self.try_render("{{obj.a}}", locals(), "Ay")
obj2 = AnyOldObject(obj=obj, b="Bee")
self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee")
def test_member_function(self):
# Variables' member functions can be used, as long as they are nullary.
class WithMemberFns(AnyOldObject):
"""A class to try out member function access."""
def ditto(self):
"""Return twice the .txt attribute."""
return self.txt + self.txt
obj = WithMemberFns(txt="Once")
self.try_render("{{obj.ditto}}", locals(), "OnceOnce")
def test_item_access(self):
# Variables' items can be used.
d = {'a':17, 'b':23}
self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23")
def test_loops(self):
# Loops work like in Django.
nums = [1,2,3,4]
self.try_render(
"Look: {% for n in nums %}{{n}}, {% endfor %}done.",
locals(),
"Look: 1, 2, 3, 4, done."
)
# Loop iterables can be filtered.
def rev(l):
"""Return the reverse of `l`."""
l = l[:]
l.reverse()
return l
self.try_render(
"Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.",
locals(),
"Look: 4, 3, 2, 1, done."
)
def test_empty_loops(self):
self.try_render(
"Empty: {% for n in nums %}{{n}}, {% endfor %}done.",
{'nums':[]},
"Empty: done."
)
def test_multiline_loops(self):
self.try_render(
"Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.",
{'nums':[1,2,3]},
"Look: \n\n1, \n\n2, \n\n3, \ndone."
)
def test_multiple_loops(self):
self.try_render(
"{% for n in nums %}{{n}}{% endfor %} and "
"{% for n in nums %}{{n}}{% endfor %}",
{'nums': [1,2,3]},
"123 and 123"
)
def test_comments(self):
# Single-line comments work:
self.try_render(
"Hello, {# Name goes here: #}{{name}}!",
{'name':'Ned'}, "Hello, Ned!"
)
# and so do multi-line comments:
self.try_render(
"Hello, {# Name\ngoes\nhere: #}{{name}}!",
{'name':'Ned'}, "Hello, Ned!"
)
def test_if(self):
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
self.try_render(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 0, 'ben': 1},
"Hi, BEN!"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 0, 'ben': 0},
"Hi, !"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
self.try_render(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 1},
"Hi, NEDBEN!"
)
def test_complex_if(self):
class Complex(AnyOldObject):
"""A class to try out complex data access."""
def getit(self):
"""Return it."""
return self.it
obj = Complex(it={'x':"Hello", 'y': 0})
self.try_render(
"@"
"{% if obj.getit.x %}X{% endif %}"
"{% if obj.getit.y %}Y{% endif %}"
"{% if obj.getit.y|str %}S{% endif %}"
"!",
{ 'obj': obj, 'str': str },
"@XS!"
)
def test_loop_if(self):
self.try_render(
"@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!",
{'nums': [0,1,2]},
"@0Z1Z2!"
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': [0,1,2]},
"X@012!"
)
self.try_render(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': []},
"X!"
)
def test_nested_loops(self):
self.try_render(
"@"
"{% for n in nums %}"
"{% for a in abc %}{{a}}{{n}}{% endfor %}"
"{% endfor %}"
"!",
{'nums': [0,1,2], 'abc': ['a', 'b', 'c']},
"@a0b0c0a1b1c1a2b2c2!"
)
def test_whitespace_handling(self):
self.try_render(
"@{% for n in nums %}\n"
" {% for a in abc %}{{a}}{{n}}{% endfor %}\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n"
)
self.try_render(
"@{% for n in nums -%}\n"
" {% for a in abc -%}\n"
" {# this disappears completely -#}\n"
" {{a-}}\n"
" {{n -}}\n"
" {{n -}}\n"
" {% endfor %}\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@a00b00c00\na11b11c11\na22b22c22\n!\n"
)
self.try_render(
"@{% for n in nums -%}\n"
" {{n -}}\n"
" x\n"
"{% endfor %}!\n",
{'nums': [0, 1, 2]},
"@0x\n1x\n2x\n!\n"
)
self.try_render(" hello ", {}, " hello ")
def test_eat_whitespace(self):
self.try_render(
"Hey!\n"
"{% joined %}\n"
"@{% for n in nums %}\n"
" {% for a in abc %}\n"
" {# this disappears completely #}\n"
" X\n"
" Y\n"
" {{a}}\n"
" {{n }}\n"
" {% endfor %}\n"
"{% endfor %}!\n"
"{% endjoined %}\n",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n"
)
def test_non_ascii(self):
self.try_render(
u"{{where}} ollǝɥ",
{ 'where': u'ǝɹǝɥʇ' },
u"ǝɹǝɥʇ ollǝɥ"
)
def test_exception_during_evaluation(self):
# TypeError: Couldn't evaluate {{ foo.bar.baz }}:
regex = "^Couldn't evaluate None.bar$"
with self.assertRaisesRegex(TempliteValueError, regex):
self.try_render(
"Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there"
)
def test_bad_names(self):
with self.assertSynErr("Not a valid name: 'var%&!@'"):
self.try_render("Wat: {{ var%&!@ }}")
with self.assertSynErr("Not a valid name: 'filter%&!@'"):
self.try_render("Wat: {{ foo|filter%&!@ }}")
with self.assertSynErr("Not a valid name: '@'"):
self.try_render("Wat: {% for @ in x %}{% endfor %}")
def test_bogus_tag_syntax(self):
with self.assertSynErr("Don't understand tag: 'bogus'"):
self.try_render("Huh: {% bogus %}!!{% endbogus %}??")
def test_malformed_if(self):
with self.assertSynErr("Don't understand if: '{% if %}'"):
self.try_render("Buh? {% if %}hi!{% endif %}")
with self.assertSynErr("Don't understand if: '{% if this or that %}'"):
self.try_render("Buh? {% if this or that %}hi!{% endif %}")
def test_malformed_for(self):
with self.assertSynErr("Don't understand for: '{% for %}'"):
self.try_render("Weird: {% for %}loop{% endfor %}")
with self.assertSynErr("Don't understand for: '{% for x from y %}'"):
self.try_render("Weird: {% for x from y %}loop{% endfor %}")
with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"):
self.try_render("Weird: {% for x, y in z %}loop{% endfor %}")
def test_bad_nesting(self):
with self.assertSynErr("Unmatched action tag: 'if'"):
self.try_render("{% if x %}X")
with self.assertSynErr("Mismatched end tag: 'for'"):
self.try_render("{% if x %}X{% endfor %}")
with self.assertSynErr("Too many ends: '{% endif %}'"):
self.try_render("{% if x %}{% endif %}{% endif %}")
def test_malformed_end(self):
with self.assertSynErr("Don't understand end: '{% end if %}'"):
self.try_render("{% if x %}X{% end if %}")
with self.assertSynErr("Don't understand end: '{% endif now %}'"):
self.try_render("{% if x %}X{% endif now %}")
|
import time
import mock
from behave import then
from behave import when
from paasta_tools.cli.cmds import logs
@when("we tail paasta logs and let threads run")
def tail_paasta_logs_let_threads_be_threads(context):
"""This test lets tail_paasta_logs() fire off processes to do work. We
verify that the work was done, basically irrespective of how it was done.
"""
service = "fake_service"
context.levels = ["fake_level1", "fake_level2"]
context.components = ["deploy", "monitoring"]
context.clusters = ["fake_cluster1", "fake_cluster2"]
context.instances = ["fake_instance"]
with mock.patch(
"paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
autospec=True,
) as context.determine_scribereader_envs_patch, mock.patch(
"paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail", autospec=True
) as scribe_tail_patch, mock.patch(
"paasta_tools.cli.cmds.logs.log", autospec=True
), mock.patch(
"paasta_tools.cli.cmds.logs.print_log", autospec=True
) as context.print_log_patch, mock.patch(
"paasta_tools.cli.cmds.logs.scribereader", autospec=True
):
context.determine_scribereader_envs_patch.return_value = ["env1", "env2"]
def scribe_tail_side_effect(
self,
scribe_env,
stream_name,
service,
levels,
components,
clusters,
instances,
queue,
filter_fn,
parse_fn=None,
):
# The print here is just for debugging
print("fake log line added for %s" % scribe_env)
queue.put("fake log line added for %s" % scribe_env)
# This sleep() was the straw that broke the camel's back
# and forced me to move this test into the integration
# suite. The test is flaky without the sleep, and the
            # sleep makes it a lousy unit test.
time.sleep(0.05)
scribe_tail_patch.side_effect = scribe_tail_side_effect
context.scribe_log_reader = logs.ScribeLogReader(
cluster_map={"env1": "env1", "env2": "env2"}
)
context.scribe_log_reader.tail_logs(
service,
context.levels,
context.components,
context.clusters,
context.instances,
)
@then("one message is displayed from each scribe env")
def step_impl(context):
for cluster in context.clusters:
context.determine_scribereader_envs_patch.assert_any_call(
context.scribe_log_reader, context.components, cluster
)
# NOTE: Assertions about scribe_tail_patch break under multiprocessing.
# We think this is because the patched scribe_tail's attributes
# (call_count, call_args, etc.) don't get updated here in the main
# thread where we can inspect them. (The patched-in code does run,
# however, since it prints debugging messages.)
#
# Instead, we'll rely on what we can see, which is the result of the
# thread's work deposited in the shared queue.
assert context.print_log_patch.call_count == 2
context.print_log_patch.assert_any_call(
"fake log line added for env1", context.levels, False, False
)
context.print_log_patch.assert_any_call(
"fake log line added for env2", context.levels, False, False
)
|
import logging
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_state_v2 import DysonEnvironmentalSensorV2State
from homeassistant.components.air_quality import DOMAIN, AirQualityEntity
from . import DYSON_DEVICES
ATTRIBUTION = "Dyson purifier air quality sensor"
_LOGGER = logging.getLogger(__name__)
DYSON_AIQ_DEVICES = "dyson_aiq_devices"
ATTR_VOC = "volatile_organic_compounds"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson Sensors."""
if discovery_info is None:
return
hass.data.setdefault(DYSON_AIQ_DEVICES, [])
# Get Dyson Devices from parent component
device_ids = [device.unique_id for device in hass.data[DYSON_AIQ_DEVICES]]
new_entities = []
for device in hass.data[DYSON_DEVICES]:
if isinstance(device, DysonPureCool) and device.serial not in device_ids:
new_entities.append(DysonAirSensor(device))
if not new_entities:
return
hass.data[DYSON_AIQ_DEVICES].extend(new_entities)
add_entities(hass.data[DYSON_AIQ_DEVICES])
class DysonAirSensor(AirQualityEntity):
"""Representation of a generic Dyson air quality sensor."""
def __init__(self, device):
"""Create a new generic air quality Dyson sensor."""
self._device = device
self._old_value = None
self._name = device.name
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._device.add_message_listener(self.on_message)
def on_message(self, message):
"""Handle new messages which are received from the fan."""
_LOGGER.debug(
"%s: Message received for %s device: %s", DOMAIN, self.name, message
)
if (
self._old_value is None
or self._old_value != self._device.environmental_state
) and isinstance(message, DysonEnvironmentalSensorV2State):
self._old_value = self._device.environmental_state
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the Dyson sensor."""
return self._name
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
return max(
self.particulate_matter_2_5,
self.particulate_matter_10,
self.nitrogen_dioxide,
self.volatile_organic_compounds,
)
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
if self._device.environmental_state:
return int(self._device.environmental_state.particulate_matter_25)
return None
@property
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
if self._device.environmental_state:
return int(self._device.environmental_state.particulate_matter_10)
return None
@property
def nitrogen_dioxide(self):
"""Return the NO2 (nitrogen dioxide) level."""
if self._device.environmental_state:
return int(self._device.environmental_state.nitrogen_dioxide)
return None
@property
def volatile_organic_compounds(self):
"""Return the VOC (Volatile Organic Compounds) level."""
if self._device.environmental_state:
return int(self._device.environmental_state.volatile_organic_compounds)
return None
@property
def unique_id(self):
"""Return the sensor's unique id."""
return self._device.serial
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
voc = self.volatile_organic_compounds
if voc is not None:
data[ATTR_VOC] = voc
return data
|
import unittest
import numpy as np
from hep_ml.preprocessing import BinTransformer
class TestHepML(unittest.TestCase):
def test_preprocessing(self):
X = np.array([[1.1, 1.2, 1.3],[5.1, 6.4, 10.5]])
transformer = BinTransformer().fit(X)
new_X = transformer.transform(X)
self.assertEqual((2, 3), new_X.shape)
|
import argparse
from PyQt5.QtCore import QUrl
from qutebrowser.commands import cmdexc
from qutebrowser.utils import utils, objreg, log
SUPPRESS = argparse.SUPPRESS
class ArgumentParserError(Exception):
"""Exception raised when the ArgumentParser signals an error."""
class ArgumentParserExit(Exception):
"""Exception raised when the argument parser exited.
Attributes:
status: The exit status.
"""
def __init__(self, status, msg):
self.status = status
super().__init__(msg)
class HelpAction(argparse.Action):
"""Argparse action to open the help page in the browser.
This is horrible encapsulation, but I can't think of a good way to do this
better...
"""
def __call__(self, parser, _namespace, _values, _option_string=None):
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.tabopen(
QUrl('qute://help/commands.html#{}'.format(parser.name)))
parser.exit()
class ArgumentParser(argparse.ArgumentParser):
"""Subclass ArgumentParser to be more suitable for runtime parsing.
Attributes:
name: The command name.
"""
def __init__(self, name, **kwargs):
self.name = name
super().__init__(add_help=False, prog=name, **kwargs)
def exit(self, status=0, message=None):
raise ArgumentParserExit(status, message)
def error(self, message):
raise ArgumentParserError(message.capitalize())
def arg_name(name):
"""Get the name an argument should have based on its Python name."""
return name.rstrip('_').replace('_', '-')
def _check_choices(param, value, choices):
if value not in choices:
expected_values = ', '.join(arg_name(val) for val in choices)
raise cmdexc.ArgumentTypeError("{}: Invalid value {} - expected "
"one of: {}".format(
param.name, value, expected_values))
def type_conv(param, typ, value, *, str_choices=None):
"""Convert a value based on a type.
Args:
        param: The inspect.Parameter we're checking
        typ: The allowed type
value: The value to convert
str_choices: The allowed choices if the type ends up being a string
Return:
The converted value
"""
if isinstance(typ, str):
raise TypeError("{}: Legacy string type!".format(param.name))
if value is param.default:
return value
assert isinstance(value, str), repr(value)
if utils.is_enum(typ):
_check_choices(param, value, [arg_name(e.name) for e in typ])
return typ[value.replace('-', '_')]
elif typ is str:
if str_choices is not None:
_check_choices(param, value, str_choices)
return value
elif callable(typ):
# int, float, etc.
try:
return typ(value)
except (TypeError, ValueError):
msg = '{}: Invalid {} value {}'.format(
param.name, typ.__name__, value)
raise cmdexc.ArgumentTypeError(msg)
else:
raise ValueError("{}: Unknown type {!r}!".format(param.name, typ))
def multitype_conv(param, types, value, *, str_choices=None):
"""Convert a value based on a choice of types.
Args:
param: The inspect.Parameter we're checking
types: The allowed types ("overloads")
value: The value to convert
str_choices: The allowed choices if the type ends up being a string
Return:
The converted value
"""
types = list(set(types))
if str in types:
# Make sure str is always the last type in the list, so e.g. '23' gets
# returned as 23 if we have typing.Union[str, int]
types.remove(str)
types.append(str)
for typ in types:
log.commands.debug("Trying to parse {!r} as {}".format(value, typ))
try:
return type_conv(param, typ, value, str_choices=str_choices)
except cmdexc.ArgumentTypeError as e:
log.commands.debug("Got {} for {}".format(e, typ))
raise cmdexc.ArgumentTypeError('{}: Invalid value {}'.format(
param.name, value))
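# --- Illustrative sketch (not part of the original module) -------------------
# Rough demonstration of the overload ordering described in multitype_conv:
# with the equivalent of typing.Union[str, int], the int conversion is tried
# first and the string overload acts as the fallback. ``fake_command`` below
# is a hypothetical function used only to obtain an inspect.Parameter.
def _demo_multitype_conv():  # pragma: no cover - documentation helper
    import inspect

    def fake_command(count: int = 1):
        """Hypothetical command signature."""

    param = inspect.signature(fake_command).parameters['count']
    # '23' parses as an int before the string fallback is reached.
    assert multitype_conv(param, [str, int], '23') == 23
    # 'abc' fails the int conversion and falls back to the string overload.
    assert multitype_conv(param, [str, int], 'abc') == 'abc'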
|
import contextlib
import asynctest
import mock
import pytest
from paasta_tools import drain_lib
def test_register_drain_method():
with mock.patch.dict(drain_lib._drain_methods):
@drain_lib.register_drain_method("FAKEDRAINMETHOD")
class FakeDrainMethod(drain_lib.DrainMethod):
pass
assert (
type(drain_lib.get_drain_method("FAKEDRAINMETHOD", "srv", "inst", "ns"))
== FakeDrainMethod
)
@contextlib.contextmanager
def mock_ClientSession(**fake_session_kwargs):
fake_session = asynctest.MagicMock(name="session", **fake_session_kwargs)
class FakeClientSession:
def __init__(self, *args, **kwargs):
...
async def __aenter__(*args):
return fake_session
async def __aexit__(*args):
pass
with mock.patch("aiohttp.ClientSession", new=FakeClientSession, autospec=False):
yield
class TestHacheckDrainMethod:
drain_method = drain_lib.HacheckDrainMethod(
service="srv",
instance="inst",
registrations=["ns_one", "ns_two"],
hacheck_port=12345,
)
async def _async_id(self, x):
return x
def test_spool_urls(self):
fake_task = mock.Mock(host="fake_host", ports=[54321])
actual = self.drain_method.spool_urls(fake_task)
# Nerve hits /{mode}/{service}.{namespace}/{port}/status
expected = [
f"http://fake_host:12345/spool/{ns}/54321/status"
for ns in self.drain_method.registrations
]
assert actual == expected
@pytest.mark.asyncio
async def test_for_each_registration_with_no_ports(self):
fake_task = mock.Mock(host="fake_host", ports=[])
actual = await self.drain_method.for_each_registration(
task=fake_task, func=self._async_id
)
assert actual is None
@pytest.mark.asyncio
async def test_for_each_registration(self):
fake_task = mock.Mock(host="fake_host", ports=[54321])
actual = await self.drain_method.for_each_registration(
task=fake_task, func=self._async_id
)
assert actual == self.drain_method.spool_urls(fake_task)
@pytest.mark.asyncio
async def test_is_draining_yes(self):
fake_response = mock.Mock(
status=503,
text=asynctest.CoroutineMock(
return_value="Service service in down state since 1435694078.778886 "
"until 1435694178.780000: Drained by Paasta"
),
)
fake_task = mock.Mock(host="fake_host", ports=[54321])
with mock_ClientSession(
get=mock.Mock(
return_value=asynctest.MagicMock(
__aenter__=asynctest.CoroutineMock(return_value=fake_response)
)
)
):
assert await self.drain_method.is_draining(fake_task) is True
@pytest.mark.asyncio
async def test_is_draining_no(self):
fake_response = mock.Mock(
status=200, text=asynctest.CoroutineMock(return_value="")
)
fake_task = mock.Mock(host="fake_host", ports=[54321])
with mock_ClientSession(
get=mock.Mock(
return_value=asynctest.MagicMock(
__aenter__=asynctest.CoroutineMock(return_value=fake_response)
)
)
):
assert await self.drain_method.is_draining(fake_task) is False
class TestHTTPDrainMethod:
def test_get_format_params(self):
fake_task = mock.Mock(host="fake_host", ports=[54321])
drain_method = drain_lib.HTTPDrainMethod(
"fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
)
assert drain_method.get_format_params(fake_task) == [
{
"host": "fake_host",
"port": 54321,
"service": "fake_service",
"instance": "fake_instance",
"nerve_ns": "fake_nerve_ns",
}
]
def test_format_url(self):
drain_method = drain_lib.HTTPDrainMethod(
"fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
)
url_format = "foo_{host}"
format_params = {"host": "fake_host"}
assert drain_method.format_url(url_format, format_params) == "foo_fake_host"
def test_parse_success_codes(self):
drain_method = drain_lib.HTTPDrainMethod(
"fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
)
assert drain_method.parse_success_codes("200") == {200}
assert drain_method.parse_success_codes("200-203") == {200, 201, 202, 203}
assert drain_method.parse_success_codes("200-202,302,305-306") == {
200,
201,
202,
302,
305,
305,
306,
}
assert drain_method.parse_success_codes(200) == {200}
def test_check_response_code(self):
drain_method = drain_lib.HTTPDrainMethod(
"fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
)
# Happy case
assert drain_method.check_response_code(200, "200-299") is True
# Sad case
assert drain_method.check_response_code(500, "200-299") is False
@pytest.mark.asyncio
async def test_issue_request(self):
drain_method = drain_lib.HTTPDrainMethod(
"fake_service", "fake_instance", ["fake_nerve_ns"], {}, {}, {}, {}
)
fake_task = mock.Mock(host="fake_host", ports=[54321])
url_spec = {
"url_format": "http://localhost:654321/fake/{host}",
"method": "get",
"success_codes": "1234",
}
fake_resp = mock.Mock(status=1234)
mock_request = mock.Mock(
return_value=asynctest.CoroutineMock(return_value=fake_resp)()
)
with mock_ClientSession(request=mock_request):
await drain_method.issue_request(url_spec=url_spec, task=fake_task)
mock_request.assert_called_once_with(
method="GET",
url="http://localhost:654321/fake/fake_host",
headers=mock.ANY,
timeout=15,
)
|
from os import path
from homeassistant import config as hass_config, setup
from homeassistant.components.ping import DOMAIN
from homeassistant.const import SERVICE_RELOAD
from tests.async_mock import patch
async def test_reload(hass):
"""Verify we can reload trend sensors."""
await setup.async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "ping",
"name": "test",
"host": "127.0.0.1",
"count": 1,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.test")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"ping/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.test") is None
assert hass.states.get("binary_sensor.test2")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
from .visitor import NodeVisitor
VAR_LOAD_PARAMETER = "param"
VAR_LOAD_RESOLVE = "resolve"
VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"
def find_symbols(nodes, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
visitor.visit(node)
return sym
def symbols_for_node(node, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
class Symbols:
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level = level
self.parent = parent
self.refs = {}
self.loads = {}
self.stores = set()
def analyze_node(self, node, **kwargs):
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
ident = f"l_{self.level}_{name}"
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target):
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
def find_ref(self, name):
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
raise AssertionError(
"Tried to resolve a name to a reference that was"
f" unknown to the frame ({name!r})"
)
return rv
def copy(self):
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name):
self.stores.add(name)
        # If we have not seen the name referenced yet, we need to figure
# out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name):
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name):
target = self.find_ref(name)
if target is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols):
stores = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
continue
stores[target] = stores.get(target, 0) + 1
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name, branch_count in stores.items():
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
assert target is not None, "should not happen"
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self):
rv = {}
node = self
while node is not None:
for name in node.stores:
if name not in rv:
rv[name] = self.find_ref(name)
node = node.parent
return rv
def dump_param_targets(self):
rv = set()
node = self
while node is not None:
for target, (instr, _) in self.loads.items():
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
class RootVisitor(NodeVisitor):
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = (
visit_Block
) = (
visit_Macro
) = (
visit_FilterBlock
) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(self, node, for_branch="body", **kwargs):
if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == "else":
branch = node.else_
elif for_branch == "test":
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError("Unknown for branch")
for item in branch or ():
self.sym_visitor.visit(item)
def visit_With(self, node, **kwargs):
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
raise NotImplementedError(
f"Cannot find symbols for {node.__class__.__name__!r}"
)
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, symbols):
self.symbols = symbols
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
if store_as_param or node.ctx == "param":
self.symbols.declare_parameter(node.name)
elif node.ctx == "store":
self.symbols.store(node.name)
elif node.ctx == "load":
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
self.symbols.load(node.name)
def visit_If(self, node, **kwargs):
self.visit(node.test, **kwargs)
original_symbols = self.symbols
def inner_visit(nodes):
self.symbols = rv = original_symbols.copy()
for subnode in nodes:
self.visit(subnode, **kwargs)
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
def visit_Macro(self, node, **kwargs):
self.symbols.store(node.name)
def visit_Import(self, node, **kwargs):
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
def visit_FromImport(self, node, **kwargs):
self.generic_visit(node, **kwargs)
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
def visit_Assign(self, node, **kwargs):
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
def visit_For(self, node, **kwargs):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
def visit_CallBlock(self, node, **kwargs):
self.visit(node.call, **kwargs)
def visit_FilterBlock(self, node, **kwargs):
self.visit(node.filter, **kwargs)
def visit_With(self, node, **kwargs):
for target in node.values:
self.visit(target)
def visit_AssignBlock(self, node, **kwargs):
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
def visit_Scope(self, node, **kwargs):
"""Stop visiting at scopes."""
def visit_Block(self, node, **kwargs):
"""Stop visiting at blocks."""
def visit_OverlayScope(self, node, **kwargs):
"""Do not visit into overlay scopes."""
|
import logging
from cheroot.test import webtest
import pytest
import requests  # FIXME: Temporarily using requests directly; better to switch
import cherrypy
from cherrypy.test.logtest import LogCase
# Some unicode strings.
tartaros = u'\u03a4\u1f71\u03c1\u03c4\u03b1\u03c1\u03bf\u03c2'
erebos = u'\u0388\u03c1\u03b5\u03b2\u03bf\u03c2.com'
@pytest.fixture
def access_log_file(tmp_path_factory):
return tmp_path_factory.mktemp('logs') / 'access.log'
@pytest.fixture
def error_log_file(tmp_path_factory):
    return tmp_path_factory.mktemp('logs') / 'error.log'
@pytest.fixture
def server(configure_server):
cherrypy.engine.start()
cherrypy.engine.wait(cherrypy.engine.states.STARTED)
yield
shutdown_server()
def shutdown_server():
cherrypy.engine.exit()
cherrypy.engine.block()
for name, server in getattr(cherrypy, 'servers', {}).copy().items():
server.unsubscribe()
del cherrypy.servers[name]
@pytest.fixture
def configure_server(access_log_file, error_log_file):
class Root:
@cherrypy.expose
def index(self):
return 'hello'
@cherrypy.expose
def uni_code(self):
cherrypy.request.login = tartaros
cherrypy.request.remote.name = erebos
@cherrypy.expose
def slashes(self):
cherrypy.request.request_line = r'GET /slashed\path HTTP/1.1'
@cherrypy.expose
def whitespace(self):
# User-Agent = "User-Agent" ":" 1*( product | comment )
# comment = "(" *( ctext | quoted-pair | comment ) ")"
# ctext = <any TEXT excluding "(" and ")">
# TEXT = <any OCTET except CTLs, but including LWS>
# LWS = [CRLF] 1*( SP | HT )
cherrypy.request.headers['User-Agent'] = 'Browzuh (1.0\r\n\t\t.3)'
@cherrypy.expose
def as_string(self):
return 'content'
@cherrypy.expose
def as_yield(self):
yield 'content'
@cherrypy.expose
@cherrypy.config(**{'tools.log_tracebacks.on': True})
def error(self):
raise ValueError()
root = Root()
cherrypy.config.reset()
cherrypy.config.update({
'server.socket_host': webtest.WebCase.HOST,
'server.socket_port': webtest.WebCase.PORT,
'server.protocol_version': webtest.WebCase.PROTOCOL,
'environment': 'test_suite',
})
cherrypy.config.update({
'log.error_file': str(error_log_file),
'log.access_file': str(access_log_file),
})
cherrypy.tree.mount(root)
@pytest.fixture
def log_tracker(access_log_file):
class LogTracker(LogCase):
logfile = str(access_log_file)
return LogTracker()
def test_normal_return(log_tracker, server):
log_tracker.markLog()
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
resp = requests.get(
'http://%s:%s/as_string' % (host, port),
headers={
'Referer': 'http://www.cherrypy.org/',
'User-Agent': 'Mozilla/5.0',
},
)
expected_body = 'content'
assert resp.text == expected_body
assert resp.status_code == 200
intro = '%s - - [' % host
log_tracker.assertLog(-1, intro)
content_length = len(expected_body)
if not any(
k for k, v in resp.headers.items()
if k.lower() == 'content-length'
):
content_length = '-'
log_tracker.assertLog(
-1,
'] "GET /as_string HTTP/1.1" 200 %s '
'"http://www.cherrypy.org/" "Mozilla/5.0"'
% content_length,
)
def test_normal_yield(log_tracker, server):
log_tracker.markLog()
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
resp = requests.get(
'http://%s:%s/as_yield' % (host, port),
headers={
'User-Agent': '',
},
)
expected_body = 'content'
assert resp.text == expected_body
assert resp.status_code == 200
intro = '%s - - [' % host
log_tracker.assertLog(-1, intro)
content_length = len(expected_body)
if not any(
k for k, v in resp.headers.items()
if k.lower() == 'content-length'
):
content_length = '-'
log_tracker.assertLog(
-1,
'] "GET /as_yield HTTP/1.1" 200 %s "" ""'
% content_length,
)
def test_custom_log_format(log_tracker, monkeypatch, server):
"""Test a customized access_log_format string, which is a
feature of _cplogging.LogManager.access()."""
monkeypatch.setattr(
'cherrypy._cplogging.LogManager.access_log_format',
'{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}" {o}',
)
log_tracker.markLog()
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
requests.get(
'http://%s:%s/as_string' % (host, port),
headers={
'Referer': 'REFERER',
'User-Agent': 'USERAGENT',
'Host': 'HOST',
},
)
log_tracker.assertLog(-1, '%s - - [' % host)
log_tracker.assertLog(
-1,
'] "GET /as_string HTTP/1.1" '
'200 7 "REFERER" "USERAGENT" HOST',
)
def test_timez_log_format(log_tracker, monkeypatch, server):
"""Test a customized access_log_format string, which is a
feature of _cplogging.LogManager.access()."""
monkeypatch.setattr(
'cherrypy._cplogging.LogManager.access_log_format',
'{h} {l} {u} {z} "{r}" {s} {b} "{f}" "{a}" {o}',
)
log_tracker.markLog()
expected_time = str(cherrypy._cplogging.LazyRfc3339UtcTime())
monkeypatch.setattr(
'cherrypy._cplogging.LazyRfc3339UtcTime',
lambda: expected_time,
)
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
requests.get(
'http://%s:%s/as_string' % (host, port),
headers={
'Referer': 'REFERER',
'User-Agent': 'USERAGENT',
'Host': 'HOST',
},
)
log_tracker.assertLog(-1, '%s - - ' % host)
log_tracker.assertLog(-1, expected_time)
log_tracker.assertLog(
-1,
' "GET /as_string HTTP/1.1" '
'200 7 "REFERER" "USERAGENT" HOST',
)
def test_UUIDv4_parameter_log_format(log_tracker, monkeypatch, server):
"""Test rendering of UUID4 within access log."""
monkeypatch.setattr(
'cherrypy._cplogging.LogManager.access_log_format',
'{i}',
)
log_tracker.markLog()
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
requests.get('http://%s:%s/as_string' % (host, port))
log_tracker.assertValidUUIDv4()
def test_escaped_output(log_tracker, server):
# Test unicode in access log pieces.
log_tracker.markLog()
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
resp = requests.get('http://%s:%s/uni_code' % (host, port))
assert resp.status_code == 200
# The repr of a bytestring includes a b'' prefix
log_tracker.assertLog(-1, repr(tartaros.encode('utf8'))[2:-1])
# Test the erebos value. Included inline for your enlightenment.
# Note the 'r' prefix--those backslashes are literals.
log_tracker.assertLog(
-1,
r'\xce\x88\xcf\x81\xce\xb5\xce\xb2\xce\xbf\xcf\x82',
)
# Test backslashes in output.
log_tracker.markLog()
resp = requests.get('http://%s:%s/slashes' % (host, port))
assert resp.status_code == 200
log_tracker.assertLog(-1, b'"GET /slashed\\path HTTP/1.1"')
# Test whitespace in output.
log_tracker.markLog()
resp = requests.get('http://%s:%s/whitespace' % (host, port))
assert resp.status_code == 200
# Again, note the 'r' prefix.
log_tracker.assertLog(-1, r'"Browzuh (1.0\r\n\t\t.3)"')
def test_tracebacks(server, caplog):
host = webtest.interface(webtest.WebCase.HOST)
port = webtest.WebCase.PORT
with caplog.at_level(logging.ERROR, logger='cherrypy.error'):
resp = requests.get('http://%s:%s/error' % (host, port))
rec = caplog.records[0]
exc_cls, exc_msg = rec.exc_info[0], rec.message
assert 'raise ValueError()' in resp.text
assert 'HTTP' in exc_msg
assert exc_cls is ValueError
|
from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Any, Dict, List, Optional
import pytest
from pytest import raises
from pyvizio.api.apps import AppConfig
from pyvizio.const import (
APPS,
DEVICE_CLASS_SPEAKER as VIZIO_DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV as VIZIO_DEVICE_CLASS_TV,
INPUT_APPS,
MAX_VOLUME,
UNKNOWN_APP,
)
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV,
DOMAIN as MP_DOMAIN,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.components.vizio import validate_apps
from homeassistant.components.vizio.const import (
CONF_ADDITIONAL_CONFIGS,
CONF_APPS,
CONF_VOLUME_STEP,
DOMAIN,
SERVICE_UPDATE_SETTING,
VIZIO_SCHEMA,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from .const import (
ADDITIONAL_APP_CONFIG,
APP_LIST,
APP_NAME_LIST,
CURRENT_APP,
CURRENT_APP_CONFIG,
CURRENT_EQ,
CURRENT_INPUT,
CUSTOM_CONFIG,
ENTITY_ID,
EQ_LIST,
INPUT_LIST,
INPUT_LIST_WITH_APPS,
MOCK_SPEAKER_APPS_FAILURE,
MOCK_SPEAKER_CONFIG,
MOCK_TV_APPS_FAILURE,
MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG,
MOCK_TV_WITH_EXCLUDE_CONFIG,
MOCK_TV_WITH_INCLUDE_CONFIG,
MOCK_USER_VALID_TV_CONFIG,
NAME,
UNIQUE_ID,
UNKNOWN_APP_CONFIG,
VOLUME_STEP,
)
from tests.async_mock import call, patch
from tests.common import MockConfigEntry, async_fire_time_changed
async def _add_config_entry_to_hass(
hass: HomeAssistantType, config_entry: MockConfigEntry
) -> None:
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
def _get_ha_power_state(vizio_power_state: Optional[bool]) -> str:
"""Return HA power state given Vizio power state."""
if vizio_power_state:
return STATE_ON
if vizio_power_state is False:
return STATE_OFF
return STATE_UNAVAILABLE
def _assert_sources_and_volume(attr: Dict[str, Any], vizio_device_class: str) -> None:
"""Assert source list, source, and volume level based on attr dict and device class."""
assert attr["source_list"] == INPUT_LIST
assert attr["source"] == CURRENT_INPUT
assert (
attr["volume_level"]
== float(int(MAX_VOLUME[vizio_device_class] / 2))
/ MAX_VOLUME[vizio_device_class]
)
def _get_attr_and_assert_base_attr(
hass: HomeAssistantType, device_class: str, power_state: str
) -> Dict[str, Any]:
"""Return entity attributes after asserting name, device class, and power state."""
attr = hass.states.get(ENTITY_ID).attributes
assert attr["friendly_name"] == NAME
assert attr["device_class"] == device_class
assert hass.states.get(ENTITY_ID).state == power_state
return attr
@asynccontextmanager
async def _cm_for_test_setup_without_apps(
all_settings: Dict[str, Any], vizio_power_state: Optional[bool]
) -> None:
"""Context manager to setup test for Vizio devices without including app specific patches."""
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_all_settings",
return_value=all_settings,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_setting_options",
return_value=EQ_LIST,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
return_value=vizio_power_state,
):
yield
async def _test_setup_tv(
hass: HomeAssistantType, vizio_power_state: Optional[bool]
) -> None:
"""Test Vizio TV entity setup."""
ha_power_state = _get_ha_power_state(vizio_power_state)
config_entry = MockConfigEntry(
domain=DOMAIN,
data=vol.Schema(VIZIO_SCHEMA)(MOCK_USER_VALID_TV_CONFIG),
unique_id=UNIQUE_ID,
)
async with _cm_for_test_setup_without_apps(
{"volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2), "mute": "Off"},
vizio_power_state,
):
await _add_config_entry_to_hass(hass, config_entry)
attr = _get_attr_and_assert_base_attr(hass, DEVICE_CLASS_TV, ha_power_state)
if ha_power_state == STATE_ON:
_assert_sources_and_volume(attr, VIZIO_DEVICE_CLASS_TV)
assert "sound_mode" not in attr
async def _test_setup_speaker(
hass: HomeAssistantType, vizio_power_state: Optional[bool]
) -> None:
"""Test Vizio Speaker entity setup."""
ha_power_state = _get_ha_power_state(vizio_power_state)
config_entry = MockConfigEntry(
domain=DOMAIN,
data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
unique_id=UNIQUE_ID,
)
audio_settings = {
"volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_SPEAKER] / 2),
"mute": "Off",
"eq": CURRENT_EQ,
}
async with _cm_for_test_setup_without_apps(
audio_settings,
vizio_power_state,
):
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_current_app_config",
) as service_call:
await _add_config_entry_to_hass(hass, config_entry)
attr = _get_attr_and_assert_base_attr(
hass, DEVICE_CLASS_SPEAKER, ha_power_state
)
if ha_power_state == STATE_ON:
_assert_sources_and_volume(attr, VIZIO_DEVICE_CLASS_SPEAKER)
assert not service_call.called
assert "sound_mode" in attr
@asynccontextmanager
async def _cm_for_test_setup_tv_with_apps(
hass: HomeAssistantType, device_config: Dict[str, Any], app_config: Dict[str, Any]
) -> None:
"""Context manager to setup test for Vizio TV with support for apps."""
config_entry = MockConfigEntry(
domain=DOMAIN, data=vol.Schema(VIZIO_SCHEMA)(device_config), unique_id=UNIQUE_ID
)
async with _cm_for_test_setup_without_apps(
{"volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2), "mute": "Off"},
True,
):
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_current_app_config",
return_value=AppConfig(**app_config),
):
await _add_config_entry_to_hass(hass, config_entry)
attr = _get_attr_and_assert_base_attr(hass, DEVICE_CLASS_TV, STATE_ON)
assert (
attr["volume_level"]
== float(int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2))
/ MAX_VOLUME[VIZIO_DEVICE_CLASS_TV]
)
yield
def _assert_source_list_with_apps(
list_to_test: List[str], attr: Dict[str, Any]
) -> None:
"""Assert source list matches list_to_test after removing INPUT_APPS from list."""
for app_to_remove in INPUT_APPS:
if app_to_remove in list_to_test:
list_to_test.remove(app_to_remove)
assert attr["source_list"] == list_to_test
async def _test_setup_failure(hass: HomeAssistantType, config: Dict[str, Any]) -> None:
"""Test generic Vizio entity setup failure."""
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.can_connect_with_auth_check",
return_value=False,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=config, unique_id=UNIQUE_ID)
await _add_config_entry_to_hass(hass, config_entry)
assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 0
async def _test_service(
hass: HomeAssistantType,
domain: str,
vizio_func_name: str,
ha_service_name: str,
additional_service_data: Optional[Dict[str, Any]],
*args,
**kwargs,
) -> None:
"""Test generic Vizio media player entity service."""
service_data = {ATTR_ENTITY_ID: ENTITY_ID}
if additional_service_data:
service_data.update(additional_service_data)
with patch(
f"homeassistant.components.vizio.media_player.VizioAsync.{vizio_func_name}"
) as service_call:
await hass.services.async_call(
domain,
ha_service_name,
service_data=service_data,
blocking=True,
)
assert service_call.called
if args or kwargs:
assert service_call.call_args == call(*args, **kwargs)
async def test_speaker_on(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio Speaker entity setup when on."""
await _test_setup_speaker(hass, True)
async def test_speaker_off(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio Speaker entity setup when off."""
await _test_setup_speaker(hass, False)
async def test_speaker_unavailable(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio Speaker entity setup when unavailable."""
await _test_setup_speaker(hass, None)
async def test_init_tv_on(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio TV entity setup when on."""
await _test_setup_tv(hass, True)
async def test_init_tv_off(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio TV entity setup when off."""
await _test_setup_tv(hass, False)
async def test_init_tv_unavailable(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio TV entity setup when unavailable."""
await _test_setup_tv(hass, None)
async def test_setup_failure_speaker(
hass: HomeAssistantType, vizio_connect: pytest.fixture
) -> None:
"""Test speaker entity setup failure."""
await _test_setup_failure(hass, MOCK_SPEAKER_CONFIG)
async def test_setup_failure_tv(
hass: HomeAssistantType, vizio_connect: pytest.fixture
) -> None:
"""Test TV entity setup failure."""
await _test_setup_failure(hass, MOCK_USER_VALID_TV_CONFIG)
async def test_services(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test all Vizio media player entity services."""
await _test_setup_tv(hass, True)
await _test_service(hass, MP_DOMAIN, "pow_on", SERVICE_TURN_ON, None)
await _test_service(hass, MP_DOMAIN, "pow_off", SERVICE_TURN_OFF, None)
await _test_service(
hass,
MP_DOMAIN,
"mute_on",
SERVICE_VOLUME_MUTE,
{ATTR_MEDIA_VOLUME_MUTED: True},
)
await _test_service(
hass,
MP_DOMAIN,
"mute_off",
SERVICE_VOLUME_MUTE,
{ATTR_MEDIA_VOLUME_MUTED: False},
)
await _test_service(
hass,
MP_DOMAIN,
"set_input",
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: "USB"},
"USB",
)
await _test_service(hass, MP_DOMAIN, "vol_up", SERVICE_VOLUME_UP, None)
await _test_service(hass, MP_DOMAIN, "vol_down", SERVICE_VOLUME_DOWN, None)
await _test_service(
hass, MP_DOMAIN, "vol_up", SERVICE_VOLUME_SET, {ATTR_MEDIA_VOLUME_LEVEL: 1}
)
await _test_service(
hass, MP_DOMAIN, "vol_down", SERVICE_VOLUME_SET, {ATTR_MEDIA_VOLUME_LEVEL: 0}
)
await _test_service(hass, MP_DOMAIN, "ch_up", SERVICE_MEDIA_NEXT_TRACK, None)
await _test_service(hass, MP_DOMAIN, "ch_down", SERVICE_MEDIA_PREVIOUS_TRACK, None)
await _test_service(
hass,
MP_DOMAIN,
"set_setting",
SERVICE_SELECT_SOUND_MODE,
{ATTR_SOUND_MODE: "Music"},
)
# Test that the update_setting service does config validation/transformation correctly
await _test_service(
hass,
DOMAIN,
"set_setting",
SERVICE_UPDATE_SETTING,
{"setting_type": "Audio", "setting_name": "AV Delay", "new_value": "0"},
"audio",
"av_delay",
0,
)
await _test_service(
hass,
DOMAIN,
"set_setting",
SERVICE_UPDATE_SETTING,
{"setting_type": "Audio", "setting_name": "EQ", "new_value": "Music"},
"audio",
"eq",
"Music",
)
async def test_options_update(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test when config entry update event fires."""
await _test_setup_speaker(hass, True)
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
assert config_entry.options
new_options = config_entry.options.copy()
updated_options = {CONF_VOLUME_STEP: VOLUME_STEP}
new_options.update(updated_options)
hass.config_entries.async_update_entry(
entry=config_entry,
options=new_options,
)
assert config_entry.options == updated_options
await _test_service(
hass, MP_DOMAIN, "vol_up", SERVICE_VOLUME_UP, None, num=VOLUME_STEP
)
async def _test_update_availability_switch(
hass: HomeAssistantType,
initial_power_state: Optional[bool],
final_power_state: Optional[bool],
caplog: pytest.fixture,
) -> None:
now = dt_util.utcnow()
future_interval = timedelta(minutes=1)
# Setup device as if time is right now
with patch("homeassistant.util.dt.utcnow", return_value=now):
await _test_setup_speaker(hass, initial_power_state)
# Clear captured logs so that only availability state changes are captured for
# future assertion
caplog.clear()
# Fast forward time to future twice to trigger update and assert vizio log message
for i in range(1, 3):
future = now + (future_interval * i)
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
return_value=final_power_state,
), patch("homeassistant.util.dt.utcnow", return_value=future), patch(
"homeassistant.util.utcnow", return_value=future
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
if final_power_state is None:
assert hass.states.get(ENTITY_ID).state == STATE_UNAVAILABLE
else:
assert hass.states.get(ENTITY_ID).state != STATE_UNAVAILABLE
# Ensure connection status messages from vizio.media_player appear exactly once
# (on availability state change)
vizio_log_list = [
log
for log in caplog.records
if log.name == "homeassistant.components.vizio.media_player"
]
assert len(vizio_log_list) == 1
async def test_update_unavailable_to_available(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device becomes available after being unavailable."""
await _test_update_availability_switch(hass, None, True, caplog)
async def test_update_available_to_unavailable(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device becomes unavailable after being available."""
await _test_update_availability_switch(hass, True, None, caplog)
async def test_setup_with_apps(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps."""
async with _cm_for_test_setup_tv_with_apps(
hass, MOCK_USER_VALID_TV_CONFIG, CURRENT_APP_CONFIG
):
attr = hass.states.get(ENTITY_ID).attributes
_assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + APP_NAME_LIST), attr)
assert CURRENT_APP in attr["source_list"]
assert attr["source"] == CURRENT_APP
assert attr["app_name"] == CURRENT_APP
assert "app_id" not in attr
await _test_service(
hass,
MP_DOMAIN,
"launch_app",
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: CURRENT_APP},
CURRENT_APP,
APP_LIST,
)
async def test_setup_with_apps_include(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps and apps["include"] in config."""
async with _cm_for_test_setup_tv_with_apps(
hass, MOCK_TV_WITH_INCLUDE_CONFIG, CURRENT_APP_CONFIG
):
attr = hass.states.get(ENTITY_ID).attributes
_assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + [CURRENT_APP]), attr)
assert CURRENT_APP in attr["source_list"]
assert attr["source"] == CURRENT_APP
assert attr["app_name"] == CURRENT_APP
assert "app_id" not in attr
async def test_setup_with_apps_exclude(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps and apps["exclude"] in config."""
async with _cm_for_test_setup_tv_with_apps(
hass, MOCK_TV_WITH_EXCLUDE_CONFIG, CURRENT_APP_CONFIG
):
attr = hass.states.get(ENTITY_ID).attributes
_assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + [CURRENT_APP]), attr)
assert CURRENT_APP in attr["source_list"]
assert attr["source"] == CURRENT_APP
assert attr["app_name"] == CURRENT_APP
assert "app_id" not in attr
async def test_setup_with_apps_additional_apps_config(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps and apps["additional_configs"] in config."""
async with _cm_for_test_setup_tv_with_apps(
hass,
MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG,
ADDITIONAL_APP_CONFIG["config"],
):
attr = hass.states.get(ENTITY_ID).attributes
assert attr["source_list"].count(CURRENT_APP) == 1
_assert_source_list_with_apps(
list(
INPUT_LIST_WITH_APPS
+ APP_NAME_LIST
+ [
app["name"]
for app in MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG[CONF_APPS][
CONF_ADDITIONAL_CONFIGS
]
if app["name"] not in APP_NAME_LIST
]
),
attr,
)
assert ADDITIONAL_APP_CONFIG["name"] in attr["source_list"]
assert attr["source"] == ADDITIONAL_APP_CONFIG["name"]
assert attr["app_name"] == ADDITIONAL_APP_CONFIG["name"]
assert "app_id" not in attr
await _test_service(
hass,
MP_DOMAIN,
"launch_app",
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: "Netflix"},
"Netflix",
APP_LIST,
)
await _test_service(
hass,
MP_DOMAIN,
"launch_app_config",
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: CURRENT_APP},
**CUSTOM_CONFIG,
)
# Test that invalid app does nothing
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.launch_app"
) as service_call1, patch(
"homeassistant.components.vizio.media_player.VizioAsync.launch_app_config"
) as service_call2:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_SELECT_SOURCE,
service_data={ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "_"},
blocking=True,
)
assert not service_call1.called
assert not service_call2.called
def test_invalid_apps_config(hass: HomeAssistantType):
"""Test that schema validation fails on certain conditions."""
with raises(vol.Invalid):
vol.Schema(vol.All(VIZIO_SCHEMA, validate_apps))(MOCK_TV_APPS_FAILURE)
with raises(vol.Invalid):
vol.Schema(vol.All(VIZIO_SCHEMA, validate_apps))(MOCK_SPEAKER_APPS_FAILURE)
async def test_setup_with_unknown_app_config(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps where app config returned is unknown."""
async with _cm_for_test_setup_tv_with_apps(
hass, MOCK_USER_VALID_TV_CONFIG, UNKNOWN_APP_CONFIG
):
attr = hass.states.get(ENTITY_ID).attributes
_assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + APP_NAME_LIST), attr)
assert attr["source"] == UNKNOWN_APP
assert attr["app_name"] == UNKNOWN_APP
assert attr["app_id"] == UNKNOWN_APP_CONFIG
async def test_setup_with_no_running_app(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps where no app is running."""
async with _cm_for_test_setup_tv_with_apps(
hass, MOCK_USER_VALID_TV_CONFIG, vars(AppConfig())
):
attr = hass.states.get(ENTITY_ID).attributes
_assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + APP_NAME_LIST), attr)
assert attr["source"] == "CAST"
assert "app_id" not in attr
assert "app_name" not in attr
async def test_setup_tv_without_mute(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update: pytest.fixture,
) -> None:
"""Test Vizio TV entity setup when mute property isn't returned by Vizio API."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=vol.Schema(VIZIO_SCHEMA)(MOCK_USER_VALID_TV_CONFIG),
unique_id=UNIQUE_ID,
)
async with _cm_for_test_setup_without_apps(
{"volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2)},
STATE_ON,
):
await _add_config_entry_to_hass(hass, config_entry)
attr = _get_attr_and_assert_base_attr(hass, DEVICE_CLASS_TV, STATE_ON)
_assert_sources_and_volume(attr, VIZIO_DEVICE_CLASS_TV)
assert "sound_mode" not in attr
assert "is_volume_muted" not in attr
async def test_apps_update(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_update_with_apps: pytest.fixture,
caplog: pytest.fixture,
) -> None:
"""Test device setup with apps where no app is running."""
with patch(
"homeassistant.components.vizio.gen_apps_list_from_url",
return_value=None,
):
async with _cm_for_test_setup_tv_with_apps(
hass, MOCK_USER_VALID_TV_CONFIG, vars(AppConfig())
):
# Check source list, remove TV inputs, and verify that the integration is
# using the default APPS list
sources = hass.states.get(ENTITY_ID).attributes["source_list"]
apps = list(set(sources) - set(INPUT_LIST))
assert len(apps) == len(APPS)
with patch(
"homeassistant.components.vizio.gen_apps_list_from_url",
return_value=APP_LIST,
):
async_fire_time_changed(hass, dt_util.now() + timedelta(days=2))
await hass.async_block_till_done()
# Check source list, remove TV inputs, and verify that the integration is
# now using the APP_LIST list
sources = hass.states.get(ENTITY_ID).attributes["source_list"]
apps = list(set(sources) - set(INPUT_LIST))
assert len(apps) == len(APP_LIST)
|
from homeassistant.components.vacuum import (
ATTR_FAN_SPEED,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_DOCKED,
STATE_RETURNING,
)
from homeassistant.const import (
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
)
from homeassistant.core import State
from tests.common import async_mock_service
FAN_SPEED_LOW = "low"
FAN_SPEED_HIGH = "high"
async def test_reproducing_states(hass, caplog):
"""Test reproducing Vacuum states."""
hass.states.async_set("vacuum.entity_off", STATE_OFF, {})
hass.states.async_set("vacuum.entity_on", STATE_ON, {})
hass.states.async_set(
"vacuum.entity_on_fan", STATE_ON, {ATTR_FAN_SPEED: FAN_SPEED_LOW}
)
hass.states.async_set("vacuum.entity_cleaning", STATE_CLEANING, {})
hass.states.async_set("vacuum.entity_docked", STATE_DOCKED, {})
hass.states.async_set("vacuum.entity_idle", STATE_IDLE, {})
hass.states.async_set("vacuum.entity_returning", STATE_RETURNING, {})
hass.states.async_set("vacuum.entity_paused", STATE_PAUSED, {})
turn_on_calls = async_mock_service(hass, "vacuum", SERVICE_TURN_ON)
turn_off_calls = async_mock_service(hass, "vacuum", SERVICE_TURN_OFF)
start_calls = async_mock_service(hass, "vacuum", SERVICE_START)
pause_calls = async_mock_service(hass, "vacuum", SERVICE_PAUSE)
stop_calls = async_mock_service(hass, "vacuum", SERVICE_STOP)
return_calls = async_mock_service(hass, "vacuum", SERVICE_RETURN_TO_BASE)
fan_speed_calls = async_mock_service(hass, "vacuum", SERVICE_SET_FAN_SPEED)
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("vacuum.entity_off", STATE_OFF),
State("vacuum.entity_on", STATE_ON),
State("vacuum.entity_on_fan", STATE_ON, {ATTR_FAN_SPEED: FAN_SPEED_LOW}),
State("vacuum.entity_cleaning", STATE_CLEANING),
State("vacuum.entity_docked", STATE_DOCKED),
State("vacuum.entity_idle", STATE_IDLE),
State("vacuum.entity_returning", STATE_RETURNING),
State("vacuum.entity_paused", STATE_PAUSED),
],
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(start_calls) == 0
assert len(pause_calls) == 0
assert len(stop_calls) == 0
assert len(return_calls) == 0
assert len(fan_speed_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("vacuum.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(start_calls) == 0
assert len(pause_calls) == 0
assert len(stop_calls) == 0
assert len(return_calls) == 0
assert len(fan_speed_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("vacuum.entity_off", STATE_ON),
State("vacuum.entity_on", STATE_OFF),
State("vacuum.entity_on_fan", STATE_ON, {ATTR_FAN_SPEED: FAN_SPEED_HIGH}),
State("vacuum.entity_cleaning", STATE_PAUSED),
State("vacuum.entity_docked", STATE_CLEANING),
State("vacuum.entity_idle", STATE_DOCKED),
State("vacuum.entity_returning", STATE_CLEANING),
State("vacuum.entity_paused", STATE_IDLE),
# Should not raise
State("vacuum.non_existing", STATE_ON),
],
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "vacuum"
assert turn_on_calls[0].data == {"entity_id": "vacuum.entity_off"}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "vacuum"
assert turn_off_calls[0].data == {"entity_id": "vacuum.entity_on"}
assert len(start_calls) == 2
entities = [
{"entity_id": "vacuum.entity_docked"},
{"entity_id": "vacuum.entity_returning"},
]
for call in start_calls:
assert call.domain == "vacuum"
assert call.data in entities
entities.remove(call.data)
assert len(pause_calls) == 1
assert pause_calls[0].domain == "vacuum"
assert pause_calls[0].data == {"entity_id": "vacuum.entity_cleaning"}
assert len(stop_calls) == 1
assert stop_calls[0].domain == "vacuum"
assert stop_calls[0].data == {"entity_id": "vacuum.entity_paused"}
assert len(return_calls) == 1
assert return_calls[0].domain == "vacuum"
assert return_calls[0].data == {"entity_id": "vacuum.entity_idle"}
assert len(fan_speed_calls) == 1
assert fan_speed_calls[0].domain == "vacuum"
assert fan_speed_calls[0].data == {
"entity_id": "vacuum.entity_on_fan",
ATTR_FAN_SPEED: FAN_SPEED_HIGH,
}
|
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from shop.modifiers.base import BaseCartModifier
class ShippingModifier(BaseCartModifier):
"""
Base class for all shipping modifiers. The purpose of a shipping modifier is to calculate the shipping costs and/or
prevent its use, in case the products in the cart cannot be shipped to the desired destination. The merchant may
either append a single shipping modifier to the list of ``SHOP_CART_MODIFIERS``, or create a sublist of shipping
modifiers and append this sublist to ``SHOP_CART_MODIFIERS``. The latter is useful to instantiate the same shipping
modifier multiple times for different shipping carriers using the same interface.
The merchant must specify at least one shipping modifier. If there is more than one, the merchant shall offer a
select option during checkout. In django-SHOP, one can use the plugin **Shipping Method Form** to render such a
select option.
Each shipping modifier can add a surcharge to the current cart. If weight affects the shipping price, it shall be
summed up inside the method `add_extra_cart_row` and used to look up the shipping costs. An illustrative example
modifier is sketched at the end of this module.
"""
def get_choice(self):
"""
:returns: A tuple consisting of 'value, label' used by the shipping form dialog to render
the available shipping choices.
"""
raise NotImplementedError("{} must implement method `get_choice()`.".format(self.__class__))
def is_active(self, shipping_modifier):
"""
:returns: ``True`` if this shipping modifier is the actively selected one.
"""
return shipping_modifier == self.identifier
def is_disabled(self, cart):
"""
Hook method to be overridden by the concrete shipping modifier. Shall be used to
temporarily disable a shipping method, in case the cart does not fulfill certain criteria,
for instance an undeliverable destination address.
:returns: ``True`` if this shipping modifier is disabled for the current cart.
"""
return False
def update_render_context(self, context):
"""
Hook to update the rendering context with shipping specific data.
"""
from shop.models.cart import CartModel
if 'shipping_modifiers' not in context:
context['shipping_modifiers'] = {}
try:
cart = CartModel.objects.get_from_request(context['request'])
if self.is_active(cart.extra.get('shipping_modifier')):
cart.update(context['request'])
data = cart.extra_rows[self.identifier].data
data.update(modifier=self.identifier)
context['shipping_modifiers']['initial_row'] = data
except (KeyError, CartModel.DoesNotExist):
pass
def ship_the_goods(self, delivery):
"""
Hook to be overridden by the active shipping modifier. It should be used to perform the
shipping request.
"""
delivery.shipped_at = timezone.now()
class SelfCollectionModifier(ShippingModifier):
"""
This modifier has no influence on the cart's final total. It can be used
to let the customer pick up the products in the shop.
"""
identifier = 'self-collection'
def get_choice(self):
return (self.identifier, _("Self-collection"))
def ship_the_goods(self, delivery):
if not delivery.shipping_id:
delivery.shipping_id = str(delivery.id)
super().ship_the_goods(delivery)
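# --------------------------------------------------------------------------
# Illustrative sketch (not part of django-SHOP): a minimal concrete shipping
# modifier built on the base class above. The class name, identifier and the
# "disable for empty carts" rule are hypothetical; only `get_choice()` and
# `is_disabled()` from the API above are relied upon. The surcharge itself
# would be added in `add_extra_cart_row()`, which is omitted here.
class FlatRateShippingModifier(ShippingModifier):
    identifier = 'flat-rate'
    def get_choice(self):
        return (self.identifier, _("Flat rate shipping"))
    def is_disabled(self, cart):
        # Hypothetical rule: this carrier cannot be chosen for empty carts.
        return cart.num_items == 0
# The merchant would then reference it (or a sublist of such modifiers) in the
# ``SHOP_CART_MODIFIERS`` setting, for example:
# SHOP_CART_MODIFIERS = [
#     'myshop.modifiers.DefaultCartModifier',
#     'myshop.modifiers.FlatRateShippingModifier',
# ]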
|
import configparser
import re
from radicale import pathutils, rights
from radicale.log import logger
class Rights(rights.BaseRights):
def __init__(self, configuration):
super().__init__(configuration)
self._filename = configuration.get("rights", "file")
def authorization(self, user, path):
user = user or ""
sane_path = pathutils.strip_path(path)
# Prevent "regex injection"
escaped_user = re.escape(user)
rights_config = configparser.ConfigParser()
try:
if not rights_config.read(self._filename):
raise RuntimeError("No such file: %r" %
self._filename)
except Exception as e:
raise RuntimeError("Failed to load rights file %r: %s" %
(self._filename, e)) from e
for section in rights_config.sections():
try:
user_pattern = rights_config.get(section, "user")
collection_pattern = rights_config.get(section, "collection")
# Use empty format() for harmonized handling of curly braces
user_match = re.fullmatch(user_pattern.format(), user)
collection_match = user_match and re.fullmatch(
collection_pattern.format(
*map(re.escape, user_match.groups()),
user=escaped_user), sane_path)
except Exception as e:
raise RuntimeError("Error in section %r of rights file %r: "
"%s" % (section, self._filename, e)) from e
if user_match and collection_match:
logger.debug("Rule %r:%r matches %r:%r from section %r",
user, sane_path, user_pattern,
collection_pattern, section)
return rights_config.get(section, "permissions")
logger.debug("Rule %r:%r doesn't match %r:%r from section %r",
user, sane_path, user_pattern, collection_pattern,
section)
logger.info("Rights: %r:%r doesn't match any section", user, sane_path)
return ""
|
from itertools import *
import benchbase
from benchbase import (with_text, children, nochange)
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
repeat100 = range(100)
repeat1000 = range(1000)
repeat3000 = range(3000)
def __init__(self, lib):
from lxml import etree, objectify
self.objectify = objectify
parser = etree.XMLParser(remove_blank_text=True)
lookup = objectify.ObjectifyElementClassLookup()
parser.setElementClassLookup(lookup)
super(BenchMark, self).__init__(etree, parser)
@nochange
def bench_attribute(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz
def bench_attribute_assign_int(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = 5
def bench_attribute_assign_string(self, root):
"1 2 4"
for i in self.repeat3000:
root.XYZ = "5"
@nochange
def bench_attribute_cached(self, root):
"1 2 4"
cache = root.zzzzz
for i in self.repeat3000:
root.zzzzz
@nochange
def bench_attributes_deep(self, root):
"1 2 4"
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_attributes_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
for i in self.repeat3000:
root.zzzzz['{cdefg}a00001']
@nochange
def bench_objectpath(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep(self, root):
"1 2 4"
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@nochange
def bench_objectpath_deep_cached(self, root):
"1 2 4"
cache1 = root.zzzzz
cache2 = cache1['{cdefg}a00001']
path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
for i in self.repeat3000:
path(root)
@with_text(text=True, utext=True, no_text=True)
def bench_annotate(self, root):
self.objectify.annotate(root)
@nochange
def bench_descendantpaths(self, root):
root.descendantpaths()
@nochange
@with_text(text=True)
def bench_type_inference(self, root):
"1 2 4"
el = root.aaaaa
for i in self.repeat1000:
el.getchildren()
@nochange
@with_text(text=True)
def bench_type_inference_annotated(self, root):
"1 2 4"
el = root.aaaaa
self.objectify.annotate(el)
for i in self.repeat1000:
el.getchildren()
@nochange
@children
def bench_elementmaker(self, children):
E = self.objectify.E
for child in children:
root = E.this(
"test",
E.will(
E.do("nothing"),
E.special,
)
)
if __name__ == '__main__':
benchbase.main(BenchMark)
|
from six import text_type as str
from subliminal.utils import hash_opensubtitles, hash_thesubdb, sanitize
def test_hash_opensubtitles(mkv):
assert hash_opensubtitles(mkv['test1']) == '40b44a7096b71ec3'
def test_hash_opensubtitles_too_small(tmpdir):
path = tmpdir.ensure('test_too_small.mkv')
assert hash_opensubtitles(str(path)) is None
def test_hash_thesubdb(mkv):
assert hash_thesubdb(mkv['test1']) == '054e667e93e254f8fa9f9e8e6d4e73ff'
def test_hash_thesubdb_too_small(tmpdir):
path = tmpdir.ensure('test_too_small.mkv')
assert hash_thesubdb(str(path)) is None
def test_sanitize():
assert sanitize('Marvel\'s Agents of S.H.I.E.L.D.') == 'marvels agents of s h i e l d'
|
from __future__ import print_function
import os
import tempfile
import console
import editor
import time
import argparse
_stash = globals()["_stash"]
try:
raw_input
except NameError:
raw_input = input
def open_temp(file='', new_tab=True):
try:
file_to_edit = file
temp = tempfile.NamedTemporaryFile(dir=os.path.expanduser('~/Documents'), suffix='.py')
cur_path = editor.get_path()
if file_to_edit != '':
try:
to_edit = open(file_to_edit, 'r')
except IOError:
to_edit = open(file_to_edit, 'w+')
temp.write(to_edit.read())
temp.flush()
to_edit.close()
print('***When you are finished editing the file, you must come back to the console to confirm changes***')
editor.open_file(temp.name, new_tab)
time.sleep(1.5)
console.hide_output()
input = raw_input('Save Changes? Y,N: ')
if input == 'Y' or input == 'y':
while True:
try:
save_as = raw_input('Save file as [Enter to confirm]: %s' % file_to_edit) or file_to_edit
except:
save_as = file_to_edit
if save_as:
break
if not new_tab:
editor.open_file(cur_path) # restore previous script in editor
with open(save_as, 'w') as f:
with open(temp.name, 'r') as tmp:
f.write(tmp.read())
print('File Saved.')
elif input == 'N' or input == 'n':
if not new_tab:
editor.open_file(cur_path) # restore previous script in editor
except Exception as e:
print(e)
finally:
temp.close()
def open_editor(file='', new_tab=True):
if os.path.isfile(os.getcwd() + '/' + file):
editor.open_file(os.getcwd() + '/' + file, new_tab)
console.hide_output()
else:
editor.make_new_file(file if file else 'untitled.py') # new_tab not supported by make_new_file
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-t', '--temp', action='store_true', default=False, help='open file to a temp file')
ap.add_argument(
'-o',
'--old_tab',
action='store_true',
default=False,
help='open file in an old editor tab (default is new tab)'
)
ap.add_argument('file', action='store', nargs='?', default=False, help='File to open')
ns = ap.parse_args()
# Calculate the relative path because an absolute path crashes Pythonista,
# most likely due to access rights on the iOS root path.
if ns.file:
filename = os.path.relpath(ns.file, '.')
if ns.temp and ns.file:
open_temp(filename, new_tab=not ns.old_tab)
elif ns.file:
open_editor(filename, new_tab=not ns.old_tab)
else:
open_temp(new_tab=not ns.old_tab)
|
from collections import defaultdict
import logging
from requests.exceptions import RequestException
from tahoma_api import Action, TahomaApi
import voluptuous as vol
from homeassistant.const import CONF_EXCLUDE, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "tahoma"
TAHOMA_ID_FORMAT = "{}_{}"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
TAHOMA_COMPONENTS = ["binary_sensor", "cover", "lock", "scene", "sensor", "switch"]
TAHOMA_TYPES = {
"io:AwningValanceIOComponent": "cover",
"io:ExteriorVenetianBlindIOComponent": "cover",
"io:DiscreteGarageOpenerIOComponent": "cover",
"io:DiscreteGarageOpenerWithPartialPositionIOComponent": "cover",
"io:HorizontalAwningIOComponent": "cover",
"io:GarageOpenerIOComponent": "cover",
"io:LightIOSystemSensor": "sensor",
"io:OnOffIOComponent": "switch",
"io:OnOffLightIOComponent": "switch",
"io:RollerShutterGenericIOComponent": "cover",
"io:RollerShutterUnoIOComponent": "cover",
"io:RollerShutterVeluxIOComponent": "cover",
"io:RollerShutterWithLowSpeedManagementIOComponent": "cover",
"io:SomfyBasicContactIOSystemSensor": "sensor",
"io:SomfyContactIOSystemSensor": "sensor",
"io:TemperatureIOSystemSensor": "sensor",
"io:VerticalExteriorAwningIOComponent": "cover",
"io:VerticalInteriorBlindVeluxIOComponent": "cover",
"io:WindowOpenerVeluxIOComponent": "cover",
"opendoors:OpenDoorsSmartLockComponent": "lock",
"rtds:RTDSContactSensor": "sensor",
"rtds:RTDSMotionSensor": "sensor",
"rtds:RTDSSmokeSensor": "smoke",
"rts:BlindRTSComponent": "cover",
"rts:CurtainRTSComponent": "cover",
"rts:DualCurtainRTSComponent": "cover",
"rts:ExteriorVenetianBlindRTSComponent": "cover",
"rts:GarageDoor4TRTSComponent": "switch",
"rts:LightRTSComponent": "switch",
"rts:RollerShutterRTSComponent": "cover",
"rts:OnOffRTSComponent": "switch",
"rts:VenetianBlindRTSComponent": "cover",
"somfythermostat:SomfyThermostatTemperatureSensor": "sensor",
"somfythermostat:SomfyThermostatHumiditySensor": "sensor",
"zwave:OnOffLightZWaveComponent": "switch",
}
def setup(hass, config):
"""Activate Tahoma component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
exclude = conf.get(CONF_EXCLUDE)
try:
api = TahomaApi(username, password)
except RequestException:
_LOGGER.exception("Error when trying to log in to the Tahoma API")
return False
try:
api.get_setup()
devices = api.get_devices()
scenes = api.get_action_groups()
except RequestException:
_LOGGER.exception("Error when getting devices from the Tahoma API")
return False
hass.data[DOMAIN] = {"controller": api, "devices": defaultdict(list), "scenes": []}
for device in devices:
_device = api.get_device(device)
if all(ext not in _device.type for ext in exclude):
device_type = map_tahoma_device(_device)
if device_type is None:
_LOGGER.warning(
"Unsupported type %s for Tahoma device %s",
_device.type,
_device.label,
)
continue
hass.data[DOMAIN]["devices"][device_type].append(_device)
for scene in scenes:
hass.data[DOMAIN]["scenes"].append(scene)
for component in TAHOMA_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
def map_tahoma_device(tahoma_device):
"""Map Tahoma device types to Home Assistant components."""
return TAHOMA_TYPES.get(tahoma_device.type)
class TahomaDevice(Entity):
"""Representation of a Tahoma device entity."""
def __init__(self, tahoma_device, controller):
"""Initialize the device."""
self.tahoma_device = tahoma_device
self.controller = controller
self._name = self.tahoma_device.label
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {"tahoma_device_id": self.tahoma_device.url}
def apply_action(self, cmd_name, *args):
"""Apply Action to Device."""
action = Action(self.tahoma_device.url)
action.add_command(cmd_name, *args)
self.controller.apply_actions("HomeAssistant", [action])
|
import os
import subprocess
import pytest
from homeassistant.util import process
async def test_kill_process():
"""Test killing a process."""
sleeper = subprocess.Popen(
"sleep 1000",
shell=True, # nosec # shell by design
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
pid = sleeper.pid
assert os.kill(pid, 0) is None
process.kill_subprocess(sleeper)
with pytest.raises(OSError):
os.kill(pid, 0)
|
import cherrypy
from cherrypy.test import helper
script_names = ['', '/foo', '/users/fred/blog', '/corp/blog']
def setup_server():
class SubSubRoot:
@cherrypy.expose
def index(self):
return 'SubSubRoot index'
@cherrypy.expose
def default(self, *args):
return 'SubSubRoot default'
@cherrypy.expose
def handler(self):
return 'SubSubRoot handler'
@cherrypy.expose
def dispatch(self):
return 'SubSubRoot dispatch'
subsubnodes = {
'1': SubSubRoot(),
'2': SubSubRoot(),
}
class SubRoot:
@cherrypy.expose
def index(self):
return 'SubRoot index'
@cherrypy.expose
def default(self, *args):
return 'SubRoot %s' % (args,)
@cherrypy.expose
def handler(self):
return 'SubRoot handler'
def _cp_dispatch(self, vpath):
return subsubnodes.get(vpath[0], None)
subnodes = {
'1': SubRoot(),
'2': SubRoot(),
}
class Root:
@cherrypy.expose
def index(self):
return 'index'
@cherrypy.expose
def default(self, *args):
return 'default %s' % (args,)
@cherrypy.expose
def handler(self):
return 'handler'
def _cp_dispatch(self, vpath):
return subnodes.get(vpath[0])
# -------------------------------------------------------------------------
# DynamicNodeAndMethodDispatcher example.
# This example exposes a fairly naive HTTP API.
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
def __unicode__(self):
return str(self.name)
def __str__(self):
return str(self.name)
user_lookup = {
1: User(1, 'foo'),
2: User(2, 'bar'),
}
def make_user(name, id=None):
if not id:
id = max(*list(user_lookup.keys())) + 1
user_lookup[id] = User(id, name)
return id
@cherrypy.expose
class UserContainerNode(object):
def POST(self, name):
"""
Allow the creation of a new Object
"""
return 'POST %d' % make_user(name)
def GET(self):
return str(sorted(user_lookup.keys()))
def dynamic_dispatch(self, vpath):
try:
id = int(vpath[0])
except (ValueError, IndexError):
return None
return UserInstanceNode(id)
@cherrypy.expose
class UserInstanceNode(object):
def __init__(self, id):
self.id = id
self.user = user_lookup.get(id, None)
# For all but PUT methods there MUST be a valid user identified
# by self.id
if not self.user and cherrypy.request.method != 'PUT':
raise cherrypy.HTTPError(404)
def GET(self, *args, **kwargs):
"""
Return the appropriate representation of the instance.
"""
return str(self.user)
def POST(self, name):
"""
Update the fields of the user instance.
"""
self.user.name = name
return 'POST %d' % self.user.id
def PUT(self, name):
"""
Create a new user with the specified id, or edit it if it already
exists
"""
if self.user:
# Edit the current user
self.user.name = name
return 'PUT %d' % self.user.id
else:
# Make a new user with said attributes.
return 'PUT %d' % make_user(name, self.id)
def DELETE(self):
"""
Delete the user specified at the id.
"""
id = self.user.id
del user_lookup[self.user.id]
del self.user
return 'DELETE %d' % id
class ABHandler:
class CustomDispatch:
@cherrypy.expose
def index(self, a, b):
return 'custom'
def _cp_dispatch(self, vpath):
"""Make sure that if we don't pop anything from vpath,
processing still works.
"""
return self.CustomDispatch()
@cherrypy.expose
def index(self, a, b=None):
body = ['a:' + str(a)]
if b is not None:
body.append(',b:' + str(b))
return ''.join(body)
@cherrypy.expose
def delete(self, a, b):
return 'deleting ' + str(a) + ' and ' + str(b)
class IndexOnly:
def _cp_dispatch(self, vpath):
"""Make sure that popping ALL of vpath still shows the index
handler.
"""
while vpath:
vpath.pop()
return self
@cherrypy.expose
def index(self):
return 'IndexOnly index'
class DecoratedPopArgs:
"""Test _cp_dispatch with @cherrypy.popargs."""
@cherrypy.expose
def index(self):
return 'no params'
@cherrypy.expose
def hi(self):
return "hi was not interpreted as 'a' param"
DecoratedPopArgs = cherrypy.popargs(
'a', 'b', handler=ABHandler())(DecoratedPopArgs)
class NonDecoratedPopArgs:
"""Test _cp_dispatch = cherrypy.popargs()"""
_cp_dispatch = cherrypy.popargs('a')
@cherrypy.expose
def index(self, a):
return 'index: ' + str(a)
class ParameterizedHandler:
"""Special handler created for each request"""
def __init__(self, a):
self.a = a
@cherrypy.expose
def index(self):
if 'a' in cherrypy.request.params:
raise Exception(
'Parameterized handler argument ended up in '
'request.params')
return self.a
class ParameterizedPopArgs:
"""Test cherrypy.popargs() with a function call handler"""
ParameterizedPopArgs = cherrypy.popargs(
'a', handler=ParameterizedHandler)(ParameterizedPopArgs)
Root.decorated = DecoratedPopArgs()
Root.undecorated = NonDecoratedPopArgs()
Root.index_only = IndexOnly()
Root.parameter_test = ParameterizedPopArgs()
Root.users = UserContainerNode()
md = cherrypy.dispatch.MethodDispatcher('dynamic_dispatch')
for url in script_names:
conf = {
'/': {
'user': (url or '/').split('/')[-2],
},
'/users': {
'request.dispatch': md
},
}
cherrypy.tree.mount(Root(), url, conf)
class DynamicObjectMappingTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def testObjectMapping(self):
for url in script_names:
self.script_name = url
self.getPage('/')
self.assertBody('index')
self.getPage('/handler')
self.assertBody('handler')
# Dynamic dispatch will succeed here for the subnodes
# so the subroot gets called
self.getPage('/1/')
self.assertBody('SubRoot index')
self.getPage('/2/')
self.assertBody('SubRoot index')
self.getPage('/1/handler')
self.assertBody('SubRoot handler')
self.getPage('/2/handler')
self.assertBody('SubRoot handler')
# Dynamic dispatch will fail here for the subnodes
# so the default gets called
self.getPage('/asdf/')
self.assertBody("default ('asdf',)")
self.getPage('/asdf/asdf')
self.assertBody("default ('asdf', 'asdf')")
self.getPage('/asdf/handler')
self.assertBody("default ('asdf', 'handler')")
# Dynamic dispatch will succeed here for the subsubnodes
# so the subsubroot gets called
self.getPage('/1/1/')
self.assertBody('SubSubRoot index')
self.getPage('/2/2/')
self.assertBody('SubSubRoot index')
self.getPage('/1/1/handler')
self.assertBody('SubSubRoot handler')
self.getPage('/2/2/handler')
self.assertBody('SubSubRoot handler')
self.getPage('/2/2/dispatch')
self.assertBody('SubSubRoot dispatch')
# The exposed dispatch will not be called as a dispatch
# method.
self.getPage('/2/2/foo/foo')
self.assertBody('SubSubRoot default')
# Dynamic dispatch will fail here for the subsubnodes
# so the SubRoot gets called
self.getPage('/1/asdf/')
self.assertBody("SubRoot ('asdf',)")
self.getPage('/1/asdf/asdf')
self.assertBody("SubRoot ('asdf', 'asdf')")
self.getPage('/1/asdf/handler')
self.assertBody("SubRoot ('asdf', 'handler')")
def testMethodDispatch(self):
# GET acts like a container
self.getPage('/users')
self.assertBody('[1, 2]')
self.assertHeader('Allow', 'GET, HEAD, POST')
# POST to the container URI allows creation
self.getPage('/users', method='POST', body='name=baz')
self.assertBody('POST 3')
self.assertHeader('Allow', 'GET, HEAD, POST')
# POST to a specific instance URI results in a 404
# as the resource does not exist.
self.getPage('/users/5', method='POST', body='name=baz')
self.assertStatus(404)
# PUT to a specific instance URI results in creation
self.getPage('/users/5', method='PUT', body='name=boris')
self.assertBody('PUT 5')
self.assertHeader('Allow', 'DELETE, GET, HEAD, POST, PUT')
# GET acts like a container
self.getPage('/users')
self.assertBody('[1, 2, 3, 5]')
self.assertHeader('Allow', 'GET, HEAD, POST')
test_cases = (
(1, 'foo', 'fooupdated', 'DELETE, GET, HEAD, POST, PUT'),
(2, 'bar', 'barupdated', 'DELETE, GET, HEAD, POST, PUT'),
(3, 'baz', 'bazupdated', 'DELETE, GET, HEAD, POST, PUT'),
(5, 'boris', 'borisupdated', 'DELETE, GET, HEAD, POST, PUT'),
)
for id, name, updatedname, headers in test_cases:
self.getPage('/users/%d' % id)
self.assertBody(name)
self.assertHeader('Allow', headers)
# Make sure POSTs update already existing resources
self.getPage('/users/%d' %
id, method='POST', body='name=%s' % updatedname)
self.assertBody('POST %d' % id)
self.assertHeader('Allow', headers)
# Make sure PUTs update already existing resources.
self.getPage('/users/%d' %
id, method='PUT', body='name=%s' % updatedname)
self.assertBody('PUT %d' % id)
self.assertHeader('Allow', headers)
# Make sure DELETEs remove already existing resources.
self.getPage('/users/%d' % id, method='DELETE')
self.assertBody('DELETE %d' % id)
self.assertHeader('Allow', headers)
# GET acts like a container
self.getPage('/users')
self.assertBody('[]')
self.assertHeader('Allow', 'GET, HEAD, POST')
def testVpathDispatch(self):
self.getPage('/decorated/')
self.assertBody('no params')
self.getPage('/decorated/hi')
self.assertBody("hi was not interpreted as 'a' param")
self.getPage('/decorated/yo/')
self.assertBody('a:yo')
self.getPage('/decorated/yo/there/')
self.assertBody('a:yo,b:there')
self.getPage('/decorated/yo/there/delete')
self.assertBody('deleting yo and there')
self.getPage('/decorated/yo/there/handled_by_dispatch/')
self.assertBody('custom')
self.getPage('/undecorated/blah/')
self.assertBody('index: blah')
self.getPage('/index_only/a/b/c/d/e/f/g/')
self.assertBody('IndexOnly index')
self.getPage('/parameter_test/argument2/')
self.assertBody('argument2')
|
"""
The Python 3 http.client API moved some things around, so this is an
abstraction layer that tries to cope with that move.
"""
from io import BytesIO
import http.client
def get_header(message, name):
return message.getallmatchingheaders(name)
def get_header_items(message):
for (key, values) in get_headers(message):
for value in values:
yield key, value
def get_headers(message):
for key in set(message.keys()):
yield key, message.get_all(key)
def get_httpmessage(headers):
return http.client.parse_headers(BytesIO(headers))
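# Illustrative usage (the header values are made up for demonstration):
#   msg = get_httpmessage(b"Content-Type: text/html\r\n"
#                         b"Set-Cookie: a=1\r\nSet-Cookie: b=2\r\n\r\n")
#   sorted(get_header_items(msg))
#   -> [('Content-Type', 'text/html'), ('Set-Cookie', 'a=1'), ('Set-Cookie', 'b=2')]
# Note that get_headers() iterates over a set of keys, so the pair order is
# not guaranteed; sorting is used here only to make the result deterministic.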
|
from homeassistant.components.alarm_control_panel import DOMAIN
from homeassistant.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
)
async def async_alarm_disarm(hass, code=None, entity_id=ENTITY_MATCH_ALL):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
await hass.services.async_call(DOMAIN, SERVICE_ALARM_DISARM, data, blocking=True)
async def async_alarm_arm_home(hass, code=None, entity_id=ENTITY_MATCH_ALL):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
await hass.services.async_call(DOMAIN, SERVICE_ALARM_ARM_HOME, data, blocking=True)
async def async_alarm_arm_away(hass, code=None, entity_id=ENTITY_MATCH_ALL):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
await hass.services.async_call(DOMAIN, SERVICE_ALARM_ARM_AWAY, data, blocking=True)
async def async_alarm_arm_night(hass, code=None, entity_id=ENTITY_MATCH_ALL):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
await hass.services.async_call(DOMAIN, SERVICE_ALARM_ARM_NIGHT, data, blocking=True)
async def async_alarm_trigger(hass, code=None, entity_id=ENTITY_MATCH_ALL):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
await hass.services.async_call(DOMAIN, SERVICE_ALARM_TRIGGER, data, blocking=True)
async def async_alarm_arm_custom_bypass(hass, code=None, entity_id=ENTITY_MATCH_ALL):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
await hass.services.async_call(
DOMAIN, SERVICE_ALARM_ARM_CUSTOM_BYPASS, data, blocking=True
)
|
from datetime import timedelta
import logging
from pybotvac.exceptions import NeatoRobotException
from homeassistant.components.camera import Camera
from .const import (
NEATO_DOMAIN,
NEATO_LOGIN,
NEATO_MAP_DATA,
NEATO_ROBOTS,
SCAN_INTERVAL_MINUTES,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES)
ATTR_GENERATED_AT = "generated_at"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Neato camera with config entry."""
dev = []
neato = hass.data.get(NEATO_LOGIN)
mapdata = hass.data.get(NEATO_MAP_DATA)
for robot in hass.data[NEATO_ROBOTS]:
if "maps" in robot.traits:
dev.append(NeatoCleaningMap(neato, robot, mapdata))
if not dev:
return
_LOGGER.debug("Adding robots for cleaning maps %s", dev)
async_add_entities(dev, True)
class NeatoCleaningMap(Camera):
"""Neato cleaning map for last clean."""
def __init__(self, neato, robot, mapdata):
"""Initialize Neato cleaning map."""
super().__init__()
self.robot = robot
self.neato = neato
self._mapdata = mapdata
self._available = self.neato.logged_in if self.neato is not None else False
self._robot_name = f"{self.robot.name} Cleaning Map"
self._robot_serial = self.robot.serial
self._generated_at = None
self._image_url = None
self._image = None
def camera_image(self):
"""Return image response."""
self.update()
return self._image
def update(self):
"""Check the contents of the map list."""
if self.neato is None:
_LOGGER.error("Error while updating '%s'", self.entity_id)
self._image = None
self._image_url = None
self._available = False
return
_LOGGER.debug("Running camera update for '%s'", self.entity_id)
try:
self.neato.update_robots()
except NeatoRobotException as ex:
if self._available: # Print only once when available
_LOGGER.error(
"Neato camera connection error for '%s': %s", self.entity_id, ex
)
self._image = None
self._image_url = None
self._available = False
return
image_url = None
map_data = self._mapdata[self._robot_serial]["maps"][0]
image_url = map_data["url"]
if image_url == self._image_url:
_LOGGER.debug(
"The map image_url for '%s' is the same as old", self.entity_id
)
return
try:
image = self.neato.download_map(image_url)
except NeatoRobotException as ex:
if self._available: # Print only once when available
_LOGGER.error(
"Neato camera connection error for '%s': %s", self.entity_id, ex
)
self._image = None
self._image_url = None
self._available = False
return
self._image = image.read()
self._image_url = image_url
self._generated_at = (map_data["generated_at"].strip("Z")).replace("T", " ")
self._available = True
@property
def name(self):
"""Return the name of this camera."""
return self._robot_name
@property
def unique_id(self):
"""Return unique ID."""
return self._robot_serial
@property
def available(self):
"""Return if the robot is available."""
return self._available
@property
def device_info(self):
"""Device info for neato robot."""
return {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}}
@property
def device_state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = {}
if self._generated_at is not None:
data[ATTR_GENERATED_AT] = self._generated_at
return data
|
from gi.repository import Gio
def replace_menu_section(menu: Gio.Menu, section: Gio.MenuItem):
"""Replaces an existing section in GMenu `menu` with `section`
The sections are compared by their `id` attributes, with the
matching section in `menu` being replaced by the passed `section`.
If there is no section in `menu` that matches `section`'s `id`
attribute, a ValueError is raised.
"""
section_id = section.get_attribute_value("id").get_string()
for idx in range(menu.get_n_items()):
item_id = menu.get_item_attribute_value(idx, "id").get_string()
if item_id == section_id:
break
else:
# FIXME: Better exception
raise ValueError("Section %s not found" % section_id)
menu.remove(idx)
menu.insert_item(idx, section)
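# Illustrative usage; the section id, label and action names below are
# hypothetical. Both the existing sections in `menu` and the replacement are
# expected to carry a string "id" attribute, since the lookup above reads it.
def _example_build_section(section_id, label, action):
    from gi.repository import GLib
    submenu = Gio.Menu()
    submenu.append(label, action)
    item = Gio.MenuItem.new_section(None, submenu)
    item.set_attribute_value("id", GLib.Variant.new_string(section_id))
    return item
# replace_menu_section(menu, _example_build_section("recent", "Reopen", "app.reopen"))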
|
import re
from enum import Enum
from io import StringIO
import natsort
from nikola.plugin_categories import MetadataExtractor
from nikola.utils import unslugify
__all__ = ('MetaCondition', 'MetaPriority', 'MetaSource', 'check_conditions')
_default_extractors = []
DEFAULT_EXTRACTOR_NAME = 'nikola'
DEFAULT_EXTRACTOR = None
class MetaCondition(Enum):
"""Conditions for extracting metadata."""
config_bool = 1
config_present = 2
extension = 3
compiler = 4
first_line = 5
never = -1
class MetaPriority(Enum):
"""Priority of metadata.
An extractor is used if and only if the higher-priority extractors returned nothing.
"""
override = 1
specialized = 2
normal = 3
fallback = 4
class MetaSource(Enum):
"""Source of metadata."""
text = 1
filename = 2
def check_conditions(post, filename: str, conditions: list, config: dict, source_text: str) -> bool:
"""Check the conditions for a metadata extractor."""
for ct, arg in conditions:
if any((
ct == MetaCondition.config_bool and not config.get(arg, False),
ct == MetaCondition.config_present and arg not in config,
ct == MetaCondition.extension and not filename.endswith(arg),
ct == MetaCondition.compiler and (post is None or post.compiler.name != arg),
ct == MetaCondition.never
)):
return False
elif ct == MetaCondition.first_line:
if not source_text or not source_text.startswith(arg + '\n'):
return False
return True
def classify_extractor(extractor: MetadataExtractor, metadata_extractors_by: dict):
"""Classify an extractor and add it to the metadata_extractors_by dict."""
global DEFAULT_EXTRACTOR
if extractor.name == DEFAULT_EXTRACTOR_NAME:
DEFAULT_EXTRACTOR = extractor
metadata_extractors_by['priority'][extractor.priority].append(extractor)
metadata_extractors_by['source'][extractor.source].append(extractor)
metadata_extractors_by['name'][extractor.name] = extractor
metadata_extractors_by['all'].append(extractor)
def load_defaults(site, metadata_extractors_by: dict):
"""Load default metadata extractors."""
for extractor in _default_extractors:
extractor.site = site
classify_extractor(extractor, metadata_extractors_by)
def is_extractor(extractor) -> bool: # pragma: no cover
"""Check if a given class is an extractor."""
return isinstance(extractor, MetadataExtractor)
def default_metadata_extractors_by() -> dict:
"""Return the default metadata_extractors_by dictionary."""
d = {
'priority': {},
'source': {},
'name': {},
'all': []
}
for i in MetaPriority:
d['priority'][i] = []
for i in MetaSource:
d['source'][i] = []
return d
def _register_default(extractor: type) -> type:
"""Register a default extractor."""
_default_extractors.append(extractor())
return extractor
@_register_default
class NikolaMetadata(MetadataExtractor):
"""Extractor for Nikola-style metadata."""
name = 'nikola'
source = MetaSource.text
priority = MetaPriority.normal
supports_write = True
split_metadata_re = re.compile('\n\n')
nikola_re = re.compile(r'^\s*\.\. (.*?): (.*)')
map_from = 'nikola' # advertised in values mapping only
def _extract_metadata_from_text(self, source_text: str) -> dict:
"""Extract metadata from text."""
outdict = {}
for line in source_text.split('\n'):
match = self.nikola_re.match(line)
if match:
k, v = match.group(1), match.group(2)
if v:
outdict[k] = v
return outdict
def write_metadata(self, metadata: dict, comment_wrap=False) -> str:
"""Write metadata in this extractor’s format."""
metadata = metadata.copy()
order = ('title', 'slug', 'date', 'tags', 'category', 'link', 'description', 'type')
f = '.. {0}: {1}'
meta = []
for k in order:
try:
meta.append(f.format(k, metadata.pop(k)))
except KeyError:
pass
# Leftover metadata (user-specified/non-default).
for k in natsort.natsorted(list(metadata.keys()), alg=natsort.ns.F | natsort.ns.IC):
meta.append(f.format(k, metadata[k]))
data = '\n'.join(meta)
if comment_wrap is True:
comment_wrap = ('<!--', '-->')
if comment_wrap:
return '\n'.join((comment_wrap[0], data, comment_wrap[1], '', ''))
else:
return data + '\n\n'
@_register_default
class YAMLMetadata(MetadataExtractor):
"""Extractor for YAML metadata."""
name = 'yaml'
source = MetaSource.text
conditions = ((MetaCondition.first_line, '---'),)
requirements = [('ruamel.yaml', 'ruamel.yaml', 'YAML')]
supports_write = True
split_metadata_re = re.compile('\n---\n')
map_from = 'yaml'
priority = MetaPriority.specialized
def _extract_metadata_from_text(self, source_text: str) -> dict:
"""Extract metadata from text."""
from ruamel.yaml import YAML
yaml = YAML(typ='safe')
meta = yaml.load(source_text[4:])
# We expect empty metadata to be '', not None
for k in meta:
if meta[k] is None:
meta[k] = ''
return meta
def write_metadata(self, metadata: dict, comment_wrap=False) -> str:
"""Write metadata in this extractor’s format."""
from ruamel.yaml import YAML
yaml = YAML(typ='safe')
yaml.default_flow_style = False
stream = StringIO()
yaml.dump(metadata, stream)
stream.seek(0)
return '\n'.join(('---', stream.read().strip(), '---', ''))
@_register_default
class TOMLMetadata(MetadataExtractor):
"""Extractor for TOML metadata."""
name = 'toml'
source = MetaSource.text
conditions = ((MetaCondition.first_line, '+++'),)
requirements = [('toml', 'toml', 'TOML')]
supports_write = True
split_metadata_re = re.compile('\n\\+\\+\\+\n')
map_from = 'toml'
priority = MetaPriority.specialized
def _extract_metadata_from_text(self, source_text: str) -> dict:
"""Extract metadata from text."""
import toml
return toml.loads(source_text[4:])
def write_metadata(self, metadata: dict, comment_wrap=False) -> str:
"""Write metadata in this extractor’s format."""
import toml
return '\n'.join(('+++', toml.dumps(metadata).strip(), '+++', ''))
@_register_default
class FilenameRegexMetadata(MetadataExtractor):
"""Extractor for filename metadata."""
name = 'filename_regex'
source = MetaSource.filename
priority = MetaPriority.fallback
conditions = [(MetaCondition.config_bool, 'FILE_METADATA_REGEXP')]
def _extract_metadata_from_text(self, source_text: str) -> dict:
"""Extract metadata from text."""
# This extractor does not use the source text, and as such, this method returns an empty dict.
return {}
def extract_filename(self, filename: str, lang: str) -> dict:
"""Try to read the metadata from the filename based on the given re.
This requires to use symbolic group names in the pattern.
The part to read the metadata from the filename based on a regular
expression is taken from Pelican - pelican/readers.py
"""
match = re.match(self.site.config['FILE_METADATA_REGEXP'], filename)
meta = {}
if match:
for key, value in match.groupdict().items():
k = key.lower().strip() # metadata must be lowercase
if k == 'title' and self.site.config['FILE_METADATA_UNSLUGIFY_TITLES']:
meta[k] = unslugify(value, lang, discard_numbers=False)
else:
meta[k] = value
return meta
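# Illustrative example (the regexp and filename are assumptions, not project
# defaults): with a configuration such as
#   FILE_METADATA_REGEXP = r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)\.md'
# a source file named "2020-01-15-hello-world.md" yields, via the named groups
# read from match.groupdict() above:
#   {'date': '2020-01-15', 'slug': 'hello-world'}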
|
from typing import Dict, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN
CONDITION_TYPES = {"is_on", "is_off", "is_idle", "is_paused", "is_playing"}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions for Media player devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add conditions for each entity that belongs to this integration
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_on",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_off",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_idle",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_paused",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_playing",
}
)
return conditions
@callback
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == "is_playing":
state = STATE_PLAYING
elif config[CONF_TYPE] == "is_idle":
state = STATE_IDLE
elif config[CONF_TYPE] == "is_paused":
state = STATE_PAUSED
elif config[CONF_TYPE] == "is_on":
state = STATE_ON
else:
state = STATE_OFF
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
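# Illustrative YAML for a device condition accepted by CONDITION_SCHEMA above
# (device_id and entity_id are placeholders):
#
#     condition: device
#     domain: media_player
#     device_id: abcdef0123456789
#     entity_id: media_player.living_room
#     type: is_playing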
|
import asyncio
from datetime import timedelta
import logging
from accuweather import AccuWeather, ApiError, InvalidApiKeyError, RequestsExceededError
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from homeassistant.const import CONF_API_KEY
from homeassistant.core import Config, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_FORECAST,
CONF_FORECAST,
COORDINATOR,
DOMAIN,
UNDO_UPDATE_LISTENER,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Set up configured AccuWeather."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass, config_entry) -> bool:
"""Set up AccuWeather as config entry."""
api_key = config_entry.data[CONF_API_KEY]
location_key = config_entry.unique_id
forecast = config_entry.options.get(CONF_FORECAST, False)
_LOGGER.debug("Using location_key: %s, get forecast: %s", location_key, forecast)
websession = async_get_clientsession(hass)
coordinator = AccuWeatherDataUpdateCoordinator(
hass, websession, api_key, location_key, forecast
)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
undo_listener = config_entry.add_update_listener(update_listener)
hass.data[DOMAIN][config_entry.entry_id] = {
COORDINATOR: coordinator,
UNDO_UPDATE_LISTENER: undo_listener,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in PLATFORMS
]
)
)
hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Update listener."""
await hass.config_entries.async_reload(config_entry.entry_id)
class AccuWeatherDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching AccuWeather data API."""
def __init__(self, hass, session, api_key, location_key, forecast: bool):
"""Initialize."""
self.location_key = location_key
self.forecast = forecast
self.is_metric = hass.config.units.is_metric
self.accuweather = AccuWeather(api_key, session, location_key=self.location_key)
        # Enabling the forecast download increases the number of requests per data
        # update, so we use a 32-minute update interval for current conditions only
        # and a 64-minute interval for current conditions plus forecast, to stay
        # within the allowed number of requests. The API allows 50 requests per day;
        # we budget about 45 and keep the rest as a reserve for restarting HA.
update_interval = (
timedelta(minutes=64) if self.forecast else timedelta(minutes=32)
)
_LOGGER.debug("Data will be update every %s", update_interval)
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
async def _async_update_data(self):
"""Update data via library."""
try:
async with timeout(10):
current = await self.accuweather.async_get_current_conditions()
forecast = (
await self.accuweather.async_get_forecast(metric=self.is_metric)
if self.forecast
else {}
)
except (
ApiError,
ClientConnectorError,
InvalidApiKeyError,
RequestsExceededError,
) as error:
raise UpdateFailed(error) from error
_LOGGER.debug("Requests remaining: %s", self.accuweather.requests_remaining)
return {**current, **{ATTR_FORECAST: forecast}}
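# Illustrative shape of the dict returned by _async_update_data(): the current
# conditions from the AccuWeather API merged with the forecast under
# ATTR_FORECAST (field names below are examples, not a complete payload):
#
#     {"WeatherText": "Sunny", "Temperature": {...}, ATTR_FORECAST: [...]}
#
# When the forecast download is disabled, the ATTR_FORECAST value is an empty dict.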
|
from typing import Optional
from google_nest_sdm.device import Device
from google_nest_sdm.device_traits import HumidityTrait, TemperatureTrait
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, SIGNAL_NEST_UPDATE
from .device_info import DeviceInfo
DEVICE_TYPE_MAP = {
"sdm.devices.types.CAMERA": "Camera",
"sdm.devices.types.DISPLAY": "Display",
"sdm.devices.types.DOORBELL": "Doorbell",
"sdm.devices.types.THERMOSTAT": "Thermostat",
}
async def async_setup_sdm_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the sensors."""
subscriber = hass.data[DOMAIN][entry.entry_id]
device_manager = await subscriber.async_get_device_manager()
# Fetch initial data so we have data when entities subscribe.
entities = []
for device in device_manager.devices.values():
if TemperatureTrait.NAME in device.traits:
entities.append(TemperatureSensor(device))
if HumidityTrait.NAME in device.traits:
entities.append(HumiditySensor(device))
async_add_entities(entities)
class SensorBase(Entity):
"""Representation of a dynamically updated Sensor."""
def __init__(self, device: Device):
"""Initialize the sensor."""
self._device = device
self._device_info = DeviceInfo(device)
@property
def should_poll(self) -> bool:
"""Disable polling since entities have state pushed via pubsub."""
return False
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
# The API "name" field is a unique device identifier.
return f"{self._device.name}-{self.device_class}"
@property
def device_info(self):
"""Return device specific attributes."""
return self._device_info.device_info
async def async_added_to_hass(self):
"""Run when entity is added to register update signal handler."""
        # Event messages trigger the SIGNAL_NEST_UPDATE, which is intercepted
        # here to refresh the entity state from _device. Unregister this
        # callback when the entity is removed.
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_NEST_UPDATE,
self.async_write_ha_state,
)
)
class TemperatureSensor(SensorBase):
"""Representation of a Temperature Sensor."""
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._device_info.device_name} Temperature"
@property
def state(self):
"""Return the state of the sensor."""
trait = self._device.traits[TemperatureTrait.NAME]
return trait.ambient_temperature_celsius
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def device_class(self):
"""Return the class of this device."""
return DEVICE_CLASS_TEMPERATURE
class HumiditySensor(SensorBase):
"""Representation of a Humidity Sensor."""
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
# The API returns the identifier under the name field.
return f"{self._device.name}-humidity"
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._device_info.device_name} Humidity"
@property
def state(self):
"""Return the state of the sensor."""
trait = self._device.traits[HumidityTrait.NAME]
return trait.ambient_humidity_percent
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return PERCENTAGE
@property
def device_class(self):
"""Return the class of this device."""
return DEVICE_CLASS_HUMIDITY
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
BinarySensorEntity,
)
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo binary sensor platform."""
async_add_entities(
[
DemoBinarySensor(
"binary_1", "Basement Floor Wet", False, DEVICE_CLASS_MOISTURE
),
DemoBinarySensor(
"binary_2", "Movement Backyard", True, DEVICE_CLASS_MOTION
),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoBinarySensor(BinarySensorEntity):
"""representation of a Demo binary sensor."""
def __init__(self, unique_id, name, state, device_class):
"""Initialize the demo sensor."""
self._unique_id = unique_id
self._name = name
self._state = state
self._sensor_type = device_class
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self.unique_id)
},
"name": self.name,
}
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def should_poll(self):
"""No polling needed for a demo binary sensor."""
return False
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
|
import logging
import unittest
import numpy as np
import scipy.linalg
from gensim import matutils
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import lsimodel
from gensim.test import basetmtests
from gensim.test.utils import datapath, get_tmpfile
class TestLsiModel(unittest.TestCase, basetmtests.TestBaseTopicModel):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
self.model = lsimodel.LsiModel(self.corpus, num_topics=2)
def testTransform(self):
"""Test lsi[vector] transformation."""
# create the transformation model
model = self.model
        # make sure the decomposition is accurate enough
u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
self.assertTrue(np.allclose(s[:2], model.projection.s)) # singular values must match
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = np.array([-0.6594664, 0.142115444]) # scaled LSI version
# expected = np.array([-0.1973928, 0.05591352]) # non-scaled LSI version
self.assertTrue(np.allclose(abs(vec), abs(expected))) # transformed entries must be equal up to sign
def testTransformFloat32(self):
"""Test lsi[vector] transformation."""
# create the transformation model
model = lsimodel.LsiModel(self.corpus, num_topics=2, dtype=np.float32)
        # make sure the decomposition is accurate enough
u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
self.assertTrue(np.allclose(s[:2], model.projection.s)) # singular values must match
self.assertEqual(model.projection.u.dtype, np.float32)
self.assertEqual(model.projection.s.dtype, np.float32)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = np.array([-0.6594664, 0.142115444]) # scaled LSI version
# transformed entries must be equal up to sign
self.assertTrue(np.allclose(abs(vec), abs(expected), atol=1.e-5))
def testCorpusTransform(self):
"""Test lsi[corpus] transformation."""
model = self.model
got = np.vstack([matutils.sparse2full(doc, 2) for doc in model[self.corpus]])
expected = np.array([
[0.65946639, 0.14211544],
[2.02454305, -0.42088759],
[1.54655361, 0.32358921],
[1.81114125, 0.5890525],
[0.9336738, -0.27138939],
[0.01274618, -0.49016181],
[0.04888203, -1.11294699],
[0.08063836, -1.56345594],
[0.27381003, -1.34694159]
])
self.assertTrue(np.allclose(abs(got), abs(expected))) # must equal up to sign
def testOnlineTransform(self):
corpus = list(self.corpus)
doc = corpus[0] # use the corpus' first document for testing
# create the transformation model
model2 = lsimodel.LsiModel(corpus=corpus, num_topics=5) # compute everything at once
# start with no documents, we will add them later
model = lsimodel.LsiModel(corpus=None, id2word=model2.id2word, num_topics=5)
# train model on a single document
model.add_documents([corpus[0]])
# transform the testing document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = np.array([-1.73205078, 0.0, 0.0, 0.0, 0.0]) # scaled LSI version
self.assertTrue(np.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on another 4 documents
model.add_documents(corpus[1:5], chunksize=2) # train on 4 extra docs, in chunks of 2 documents, for the lols
# transform a document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = np.array([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269]) # scaled LSI version
self.assertTrue(np.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on the rest of documents
model.add_documents(corpus[5:])
# make sure the final transformation is the same as if we had decomposed the whole corpus at once
vec1 = matutils.sparse2full(model[doc], model.num_topics)
vec2 = matutils.sparse2full(model2[doc], model2.num_topics)
# the two LSI representations must equal up to sign
self.assertTrue(np.allclose(abs(vec1), abs(vec2), atol=1e-5))
def testPersistence(self):
fname = get_tmpfile('gensim_models_lsi.tst')
model = self.model
model.save(fname)
model2 = lsimodel.LsiModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection.u, model2.projection.u))
self.assertTrue(np.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = get_tmpfile('gensim_models_lsi.tst.gz')
model = self.model
model.save(fname)
model2 = lsimodel.LsiModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection.u, model2.projection.u))
self.assertTrue(np.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = get_tmpfile('gensim_models_lsi.tst')
model = self.model
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
model2 = lsimodel.LsiModel.load(fname, mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.projection.u, np.memmap))
self.assertTrue(isinstance(model2.projection.s, np.memmap))
self.assertTrue(np.allclose(model.projection.u, model2.projection.u))
self.assertTrue(np.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = get_tmpfile('gensim_models_lsi.tst.gz')
model = self.model
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
return
# turns out this test doesn't exercise this because there are no arrays
        # to be mmapped!
self.assertRaises(IOError, lsimodel.LsiModel.load, fname, mmap='r')
def testDocsProcessed(self):
self.assertEqual(self.model.docs_processed, 9)
self.assertEqual(self.model.docs_processed, self.corpus.num_docs)
def test_get_topics(self):
topics = self.model.get_topics()
vocab_size = len(self.model.id2word)
for topic in topics:
self.assertTrue(isinstance(topic, np.ndarray))
self.assertEqual(topic.dtype, np.float64)
self.assertEqual(vocab_size, topic.shape[0])
# LSI topics are not probability distributions
# self.assertAlmostEqual(np.sum(topic), 1.0, 5)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
import time
import pytest
import jax
import jax.config
jax.config.update("jax_enable_x64", True)
import jax.numpy as np
import jax.random
import tensornetwork as tn
import simple_mera
def test_descend(random_tensors):
h, s, iso, dis = random_tensors
s = simple_mera.descend(h, s, iso, dis)
assert len(s.shape) == 6
D = s.shape[0]
smat = np.reshape(s, [D**3] * 2)
assert np.isclose(np.trace(smat), 1.0)
assert np.isclose(np.linalg.norm(smat - np.conj(np.transpose(smat))), 0.0)
spec, _ = np.linalg.eigh(smat)
assert np.alltrue(spec >= 0.0)
def test_ascend(random_tensors):
h, s, iso, dis = random_tensors
h = simple_mera.ascend(h, s, iso, dis)
assert len(h.shape) == 6
D = h.shape[0]
hmat = np.reshape(h, [D**3] * 2)
norm = np.linalg.norm(hmat - np.conj(np.transpose(hmat)))
assert np.isclose(norm, 0.0)
def test_energy(wavelet_tensors):
h, iso, dis = wavelet_tensors
s = np.reshape(np.eye(2**3) / 2**3, [2] * 6)
for _ in range(20):
s = simple_mera.descend(h, s, iso, dis)
en = np.trace(np.reshape(s, [2**3, -1]) @ np.reshape(h, [2**3, -1]))
assert np.isclose(en, -1.242, rtol=1e-3, atol=1e-3)
en = simple_mera.binary_mera_energy(h, s, iso, dis)
assert np.isclose(en, -1.242, rtol=1e-3, atol=1e-3)
def test_opt(wavelet_tensors):
h, iso, dis = wavelet_tensors
s = np.reshape(np.eye(2**3) / 2**3, [2] * 6)
for _ in range(20):
s = simple_mera.descend(h, s, iso, dis)
s, iso, dis = simple_mera.optimize_linear(h, s, iso, dis, 100)
en = np.trace(np.reshape(s, [2**3, -1]) @ np.reshape(h, [2**3, -1]))
assert en < -1.25
@pytest.fixture(params=[2, 3])
def random_tensors(request):
D = request.param
key = jax.random.PRNGKey(0)
h = jax.random.normal(key, shape=[D**3] * 2)
h = 0.5 * (h + np.conj(np.transpose(h)))
h = np.reshape(h, [D] * 6)
s = jax.random.normal(key, shape=[D**3] * 2)
s = s @ np.conj(np.transpose(s))
s /= np.trace(s)
s = np.reshape(s, [D] * 6)
a = jax.random.normal(key, shape=[D**2] * 2)
u, _, vh = np.linalg.svd(a)
dis = np.reshape(u, [D] * 4)
iso = np.reshape(vh, [D] * 4)[:, :, :, 0]
return tuple(x.astype(np.complex128) for x in (h, s, iso, dis))
@pytest.fixture
def wavelet_tensors(request):
"""Returns the Hamiltonian and MERA tensors for the D=2 wavelet MERA.
From Evenbly & White, Phys. Rev. Lett. 116, 140403 (2016).
"""
D = 2
h = simple_mera.ham_ising()
E = np.array([[1, 0], [0, 1]])
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
wmat_un = np.real((np.sqrt(3) + np.sqrt(2)) / 4 * np.kron(E, E) +
(np.sqrt(3) - np.sqrt(2)) / 4 * np.kron(Z, Z) + 1.j *
(1 + np.sqrt(2)) / 4 * np.kron(X, Y) + 1.j *
(1 - np.sqrt(2)) / 4 * np.kron(Y, X))
umat = np.real((np.sqrt(3) + 2) / 4 * np.kron(E, E) +
(np.sqrt(3) - 2) / 4 * np.kron(Z, Z) +
1.j / 4 * np.kron(X, Y) + 1.j / 4 * np.kron(Y, X))
w = np.reshape(wmat_un, (D, D, D, D))[:, 0, :, :]
u = np.reshape(umat, (D, D, D, D))
w = np.transpose(w, [1, 2, 0])
u = np.transpose(u, [2, 3, 0, 1])
return tuple(x.astype(np.complex128) for x in (h, w, u))
|
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_CODE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CONF_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED,
DATA_AD,
DEFAULT_ARM_OPTIONS,
DOMAIN,
OPTIONS_ARM,
SIGNAL_PANEL_MESSAGE,
)
SERVICE_ALARM_TOGGLE_CHIME = "alarm_toggle_chime"
SERVICE_ALARM_KEYPRESS = "alarm_keypress"
ATTR_KEYPRESS = "keypress"
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
):
"""Set up for AlarmDecoder alarm panels."""
options = entry.options
arm_options = options.get(OPTIONS_ARM, DEFAULT_ARM_OPTIONS)
client = hass.data[DOMAIN][entry.entry_id][DATA_AD]
entity = AlarmDecoderAlarmPanel(
client=client,
auto_bypass=arm_options[CONF_AUTO_BYPASS],
code_arm_required=arm_options[CONF_CODE_ARM_REQUIRED],
alt_night_mode=arm_options[CONF_ALT_NIGHT_MODE],
)
async_add_entities([entity])
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_ALARM_TOGGLE_CHIME,
{
vol.Required(ATTR_CODE): cv.string,
},
"alarm_toggle_chime",
)
platform.async_register_entity_service(
SERVICE_ALARM_KEYPRESS,
{
vol.Required(ATTR_KEYPRESS): cv.string,
},
"alarm_keypress",
)
class AlarmDecoderAlarmPanel(AlarmControlPanelEntity):
"""Representation of an AlarmDecoder-based alarm panel."""
def __init__(self, client, auto_bypass, code_arm_required, alt_night_mode):
"""Initialize the alarm panel."""
self._client = client
self._display = ""
self._name = "Alarm Panel"
self._state = None
self._ac_power = None
self._alarm_event_occurred = None
self._backlight_on = None
self._battery_low = None
self._check_zone = None
self._chime = None
self._entry_delay_off = None
self._programming_mode = None
self._ready = None
self._zone_bypassed = None
self._auto_bypass = auto_bypass
self._code_arm_required = code_arm_required
self._alt_night_mode = alt_night_mode
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_PANEL_MESSAGE, self._message_callback
)
)
def _message_callback(self, message):
"""Handle received messages."""
if message.alarm_sounding or message.fire_alarm:
self._state = STATE_ALARM_TRIGGERED
elif message.armed_away:
self._state = STATE_ALARM_ARMED_AWAY
elif message.armed_home and (message.entry_delay_off or message.perimeter_only):
self._state = STATE_ALARM_ARMED_NIGHT
elif message.armed_home:
self._state = STATE_ALARM_ARMED_HOME
else:
self._state = STATE_ALARM_DISARMED
self._ac_power = message.ac_power
self._alarm_event_occurred = message.alarm_event_occurred
self._backlight_on = message.backlight_on
self._battery_low = message.battery_low
self._check_zone = message.check_zone
self._chime = message.chime_on
self._entry_delay_off = message.entry_delay_off
self._programming_mode = message.programming_mode
self._ready = message.ready
self._zone_bypassed = message.zone_bypassed
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def code_format(self):
"""Return one or more digits/characters."""
return FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return self._code_arm_required
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
"ac_power": self._ac_power,
"alarm_event_occurred": self._alarm_event_occurred,
"backlight_on": self._backlight_on,
"battery_low": self._battery_low,
"check_zone": self._check_zone,
"chime": self._chime,
"entry_delay_off": self._entry_delay_off,
"programming_mode": self._programming_mode,
"ready": self._ready,
"zone_bypassed": self._zone_bypassed,
"code_arm_required": self._code_arm_required,
}
def alarm_disarm(self, code=None):
"""Send disarm command."""
if code:
self._client.send(f"{code!s}1")
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self._client.arm_away(
code=code,
code_arm_required=self._code_arm_required,
auto_bypass=self._auto_bypass,
)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self._client.arm_home(
code=code,
code_arm_required=self._code_arm_required,
auto_bypass=self._auto_bypass,
)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
self._client.arm_night(
code=code,
code_arm_required=self._code_arm_required,
alt_night_mode=self._alt_night_mode,
auto_bypass=self._auto_bypass,
)
def alarm_toggle_chime(self, code=None):
"""Send toggle chime command."""
if code:
self._client.send(f"{code!s}9")
def alarm_keypress(self, keypress):
"""Send custom keypresses."""
if keypress:
self._client.send(keypress)
|
import glob
import importlib
import os
import os.path as op
from optparse import OptionParser
import sys
import mne
def _add_verbose_flag(parser):
parser.add_option("--verbose", dest='verbose',
help="Enable verbose mode (printing of log messages).",
default=None, action="store_true")
def load_module(name, path):
"""Load module from .py/.pyc file.
Parameters
----------
name : str
Name of the module.
path : str
Path to .py/.pyc file.
Returns
-------
mod : module
Imported module.
"""
from importlib.util import spec_from_file_location, module_from_spec
spec = spec_from_file_location(name, path)
mod = module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def get_optparser(cmdpath, usage=None, prog_prefix='mne', version=None):
"""Create OptionParser with cmd specific settings (e.g., prog value)."""
# Fetch description
mod = load_module('__temp', cmdpath)
if mod.__doc__:
doc, description, epilog = mod.__doc__, None, None
doc_lines = doc.split('\n')
description = doc_lines[0]
if len(doc_lines) > 1:
epilog = '\n'.join(doc_lines[1:])
# Get the name of the command
command = os.path.basename(cmdpath)
command, _ = os.path.splitext(command)
command = command[len(prog_prefix) + 1:] # +1 is for `_` character
# Set prog
prog = prog_prefix + ' {}'.format(command)
# Set version
if version is None:
version = mne.__version__
# monkey patch OptionParser to not wrap epilog
OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = OptionParser(prog=prog,
version=version,
description=description,
epilog=epilog, usage=usage)
return parser
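# Sketch of typical usage from a hypothetical command module (mne_foo.py):
#
#     parser = get_optparser(__file__, usage="usage: %prog [options]")
#     _add_verbose_flag(parser)
#     options, args = parser.parse_args()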
def main():
"""Entrypoint for mne <command> usage."""
mne_bin_dir = op.dirname(op.dirname(__file__))
valid_commands = sorted(glob.glob(op.join(mne_bin_dir,
'commands', 'mne_*.py')))
valid_commands = [c.split(op.sep)[-1][4:-3] for c in valid_commands]
def print_help(): # noqa
print("Usage : mne command options\n")
print("Accepted commands :\n")
for c in valid_commands:
print("\t- %s" % c)
print("\nExample : mne browse_raw --raw sample_audvis_raw.fif")
print("\nGetting help example : mne compute_proj_eog -h")
if len(sys.argv) == 1 or "help" in sys.argv[1] or "-h" in sys.argv[1]:
print_help()
elif sys.argv[1] == "--version":
print("MNE %s" % mne.__version__)
elif sys.argv[1] not in valid_commands:
print('Invalid command: "%s"\n' % sys.argv[1])
print_help()
else:
cmd = sys.argv[1]
cmd = importlib.import_module('.mne_%s' % (cmd,), 'mne.commands')
sys.argv = sys.argv[1:]
cmd.run()
|
import numpy
from affinegap import normalizedAffineGapDistance as comparator
def getCentroid(attribute_variants, comparator):
"""
Takes in a list of attribute values for a field,
evaluates the centroid using the comparator,
& returns the centroid (i.e. the 'best' value for the field)
"""
n = len(attribute_variants)
distance_matrix = numpy.zeros([n, n])
# populate distance matrix by looping through elements of matrix triangle
for i in range(0, n):
for j in range(0, i):
distance = comparator(attribute_variants[i], attribute_variants[j])
distance_matrix[i, j] = distance_matrix[j, i] = distance
average_distance = distance_matrix.mean(0)
    # there can be ties for the minimum average distance string
min_dist_indices = numpy.where(
average_distance == average_distance.min())[0]
if len(min_dist_indices) > 1:
centroid = breakCentroidTie(attribute_variants, min_dist_indices)
else:
centroid_index = min_dist_indices[0]
centroid = attribute_variants[centroid_index]
return centroid
def breakCentroidTie(attribute_variants, min_dist_indices):
"""
    Finds the centroid when there are multiple values with the minimum
    average distance (e.g. any dupe cluster of 2). Right now this selects
    the first among a set of ties, but it could be modified to break ties
    by selecting the longest string.
"""
return attribute_variants[min_dist_indices[0]]
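# A possible variant of the tie-breaker above (illustrative sketch, not part
# of the dedupe API): prefer the longest string among the tied variants, as
# suggested in the docstring of breakCentroidTie.
def breakCentroidTieLongest(attribute_variants, min_dist_indices):
    """Break a centroid tie by picking the longest tied variant."""
    tied = [attribute_variants[i] for i in min_dist_indices]
    return max(tied, key=len)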
def getCanonicalRep(record_cluster):
"""
Given a list of records within a duplicate cluster, constructs a
canonical representation of the cluster by finding canonical
values for each field
"""
canonical_rep = {}
keys = record_cluster[0].keys()
for key in keys:
key_values = []
for record in record_cluster:
# assume non-empty values always better than empty value
# for canonical record
if record.get(key):
key_values.append(record[key])
if key_values:
canonical_rep[key] = getCentroid(key_values, comparator)
else:
canonical_rep[key] = ''
return canonical_rep
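# Example usage (illustrative): building the canonical record for a small
# duplicate cluster. Empty values are skipped, so 'phone' falls back to the
# only non-empty variant, while 'name' is resolved via getCentroid (ties go
# to the first tied variant):
#
#     cluster = [
#         {'name': 'Jane Doe', 'phone': '555-1234'},
#         {'name': 'jane doe', 'phone': ''},
#     ]
#     rep = getCanonicalRep(cluster)
#     # rep == {'name': 'Jane Doe', 'phone': '555-1234'}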
|
import json
import logging
import os
from rtmapi import Rtm, RtmRequestFailedException
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_ID, CONF_NAME, CONF_TOKEN, STATE_OK
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
# httplib2 is a transitive dependency from RtmAPI. If this dependency is not
# set explicitly, the library does not work.
_LOGGER = logging.getLogger(__name__)
DOMAIN = "remember_the_milk"
DEFAULT_NAME = DOMAIN
CONF_SHARED_SECRET = "shared_secret"
CONF_ID_MAP = "id_map"
CONF_LIST_ID = "list_id"
CONF_TIMESERIES_ID = "timeseries_id"
CONF_TASK_ID = "task_id"
RTM_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_SHARED_SECRET): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [RTM_SCHEMA])}, extra=vol.ALLOW_EXTRA
)
CONFIG_FILE_NAME = ".remember_the_milk.conf"
SERVICE_CREATE_TASK = "create_task"
SERVICE_COMPLETE_TASK = "complete_task"
SERVICE_SCHEMA_CREATE_TASK = vol.Schema(
{vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_ID): cv.string}
)
SERVICE_SCHEMA_COMPLETE_TASK = vol.Schema({vol.Required(CONF_ID): cv.string})
def setup(hass, config):
"""Set up the Remember the milk component."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
stored_rtm_config = RememberTheMilkConfiguration(hass)
for rtm_config in config[DOMAIN]:
account_name = rtm_config[CONF_NAME]
_LOGGER.info("Adding Remember the milk account %s", account_name)
api_key = rtm_config[CONF_API_KEY]
shared_secret = rtm_config[CONF_SHARED_SECRET]
token = stored_rtm_config.get_token(account_name)
if token:
_LOGGER.debug("found token for account %s", account_name)
_create_instance(
hass,
account_name,
api_key,
shared_secret,
token,
stored_rtm_config,
component,
)
else:
_register_new_account(
hass, account_name, api_key, shared_secret, stored_rtm_config, component
)
_LOGGER.debug("Finished adding all Remember the milk accounts")
return True
def _create_instance(
hass, account_name, api_key, shared_secret, token, stored_rtm_config, component
):
entity = RememberTheMilk(
account_name, api_key, shared_secret, token, stored_rtm_config
)
component.add_entities([entity])
hass.services.register(
DOMAIN,
f"{account_name}_create_task",
entity.create_task,
schema=SERVICE_SCHEMA_CREATE_TASK,
)
hass.services.register(
DOMAIN,
f"{account_name}_complete_task",
entity.complete_task,
schema=SERVICE_SCHEMA_COMPLETE_TASK,
)
def _register_new_account(
hass, account_name, api_key, shared_secret, stored_rtm_config, component
):
request_id = None
configurator = hass.components.configurator
api = Rtm(api_key, shared_secret, "write", None)
url, frob = api.authenticate_desktop()
_LOGGER.debug("Sent authentication request to server")
def register_account_callback(_):
"""Call for register the configurator."""
api.retrieve_token(frob)
token = api.token
if api.token is None:
_LOGGER.error("Failed to register, please try again")
configurator.notify_errors(
request_id, "Failed to register, please try again."
)
return
stored_rtm_config.set_token(account_name, token)
_LOGGER.debug("Retrieved new token from server")
_create_instance(
hass,
account_name,
api_key,
shared_secret,
token,
stored_rtm_config,
component,
)
configurator.request_done(request_id)
request_id = configurator.async_request_config(
f"{DOMAIN} - {account_name}",
callback=register_account_callback,
description=(
"You need to log in to Remember The Milk to"
"connect your account. \n\n"
"Step 1: Click on the link 'Remember The Milk login'\n\n"
"Step 2: Click on 'login completed'"
),
link_name="Remember The Milk login",
link_url=url,
submit_caption="login completed",
)
class RememberTheMilkConfiguration:
"""Internal configuration data for RememberTheMilk class.
    This class stores the authentication token it gets from the backend.
"""
def __init__(self, hass):
"""Create new instance of configuration."""
self._config_file_path = hass.config.path(CONFIG_FILE_NAME)
if not os.path.isfile(self._config_file_path):
self._config = {}
return
try:
_LOGGER.debug("Loading configuration from file: %s", self._config_file_path)
with open(self._config_file_path) as config_file:
self._config = json.load(config_file)
except ValueError:
_LOGGER.error(
"Failed to load configuration file, creating a new one: %s",
self._config_file_path,
)
self._config = {}
def save_config(self):
"""Write the configuration to a file."""
with open(self._config_file_path, "w") as config_file:
json.dump(self._config, config_file)
def get_token(self, profile_name):
"""Get the server token for a profile."""
if profile_name in self._config:
return self._config[profile_name][CONF_TOKEN]
return None
def set_token(self, profile_name, token):
"""Store a new server token for a profile."""
self._initialize_profile(profile_name)
self._config[profile_name][CONF_TOKEN] = token
self.save_config()
def delete_token(self, profile_name):
"""Delete a token for a profile.
Usually called when the token has expired.
"""
self._config.pop(profile_name, None)
self.save_config()
def _initialize_profile(self, profile_name):
"""Initialize the data structures for a profile."""
if profile_name not in self._config:
self._config[profile_name] = {}
if CONF_ID_MAP not in self._config[profile_name]:
self._config[profile_name][CONF_ID_MAP] = {}
def get_rtm_id(self, profile_name, hass_id):
"""Get the RTM ids for a Home Assistant task ID.
        The ID of an RTM task consists of the tuple:
        list ID, timeseries ID and task ID.
"""
self._initialize_profile(profile_name)
ids = self._config[profile_name][CONF_ID_MAP].get(hass_id)
if ids is None:
return None
return ids[CONF_LIST_ID], ids[CONF_TIMESERIES_ID], ids[CONF_TASK_ID]
def set_rtm_id(self, profile_name, hass_id, list_id, time_series_id, rtm_task_id):
"""Add/Update the RTM task ID for a Home Assistant task IS."""
self._initialize_profile(profile_name)
id_tuple = {
CONF_LIST_ID: list_id,
CONF_TIMESERIES_ID: time_series_id,
CONF_TASK_ID: rtm_task_id,
}
self._config[profile_name][CONF_ID_MAP][hass_id] = id_tuple
self.save_config()
def delete_rtm_id(self, profile_name, hass_id):
"""Delete a key mapping."""
self._initialize_profile(profile_name)
if hass_id in self._config[profile_name][CONF_ID_MAP]:
del self._config[profile_name][CONF_ID_MAP][hass_id]
self.save_config()
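# Illustrative shape of the JSON written by save_config() (profile and task
# names are placeholders, not real data):
#
#     {
#         "my_account": {
#             "token": "<server token>",
#             "id_map": {
#                 "my_hass_task_id": {
#                     "list_id": "...",
#                     "timeseries_id": "...",
#                     "task_id": "..."
#                 }
#             }
#         }
#     }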
class RememberTheMilk(Entity):
"""Representation of an interface to Remember The Milk."""
def __init__(self, name, api_key, shared_secret, token, rtm_config):
"""Create new instance of Remember The Milk component."""
self._name = name
self._api_key = api_key
self._shared_secret = shared_secret
self._token = token
self._rtm_config = rtm_config
self._rtm_api = Rtm(api_key, shared_secret, "delete", token)
self._token_valid = None
self._check_token()
_LOGGER.debug("Instance created for account %s", self._name)
def _check_token(self):
"""Check if the API token is still valid.
If it is not valid any more, delete it from the configuration. This
will trigger a new authentication process.
"""
valid = self._rtm_api.token_valid()
if not valid:
_LOGGER.error(
"Token for account %s is invalid. You need to register again!",
self.name,
)
self._rtm_config.delete_token(self._name)
self._token_valid = False
else:
self._token_valid = True
return self._token_valid
def create_task(self, call):
"""Create a new task on Remember The Milk.
You can use the smart syntax to define the attributes of a new task,
e.g. "my task #some_tag ^today" will add tag "some_tag" and set the
due date to today.
"""
try:
task_name = call.data.get(CONF_NAME)
hass_id = call.data.get(CONF_ID)
rtm_id = None
if hass_id is not None:
rtm_id = self._rtm_config.get_rtm_id(self._name, hass_id)
result = self._rtm_api.rtm.timelines.create()
timeline = result.timeline.value
if hass_id is None or rtm_id is None:
result = self._rtm_api.rtm.tasks.add(
timeline=timeline, name=task_name, parse="1"
)
_LOGGER.debug(
"Created new task '%s' in account %s", task_name, self.name
)
self._rtm_config.set_rtm_id(
self._name,
hass_id,
result.list.id,
result.list.taskseries.id,
result.list.taskseries.task.id,
)
else:
self._rtm_api.rtm.tasks.setName(
name=task_name,
list_id=rtm_id[0],
taskseries_id=rtm_id[1],
task_id=rtm_id[2],
timeline=timeline,
)
_LOGGER.debug(
"Updated task with id '%s' in account %s to name %s",
hass_id,
self.name,
task_name,
)
except RtmRequestFailedException as rtm_exception:
_LOGGER.error(
"Error creating new Remember The Milk task for account %s: %s",
self._name,
rtm_exception,
)
return False
return True
def complete_task(self, call):
"""Complete a task that was previously created by this component."""
hass_id = call.data.get(CONF_ID)
rtm_id = self._rtm_config.get_rtm_id(self._name, hass_id)
if rtm_id is None:
_LOGGER.error(
"Could not find task with ID %s in account %s. "
"So task could not be closed",
hass_id,
self._name,
)
return False
try:
result = self._rtm_api.rtm.timelines.create()
timeline = result.timeline.value
self._rtm_api.rtm.tasks.complete(
list_id=rtm_id[0],
taskseries_id=rtm_id[1],
task_id=rtm_id[2],
timeline=timeline,
)
self._rtm_config.delete_rtm_id(self._name, hass_id)
_LOGGER.debug(
"Completed task with id %s in account %s", hass_id, self._name
)
except RtmRequestFailedException as rtm_exception:
_LOGGER.error(
"Error creating new Remember The Milk task for account %s: %s",
self._name,
rtm_exception,
)
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if not self._token_valid:
return "API token invalid"
return STATE_OK
|
import json
import arrow
from flask import current_app
from lemur.plugins.bases import ExpirationNotificationPlugin
from lemur.plugins import lemur_slack as slack
import requests
def create_certificate_url(name):
return "https://{hostname}/#/certificates/{name}".format(
hostname=current_app.config.get("LEMUR_HOSTNAME"), name=name
)
def create_expiration_attachments(certificates):
attachments = []
for certificate in certificates:
attachments.append(
{
"title": certificate["name"],
"title_link": create_certificate_url(certificate["name"]),
"color": "danger",
"fallback": "",
"fields": [
{"title": "Owner", "value": certificate["owner"], "short": True},
{
"title": "Expires",
"value": arrow.get(certificate["validityEnd"]).format(
"dddd, MMMM D, YYYY"
),
"short": True,
},
{
"title": "Endpoints Detected",
"value": len(certificate["endpoints"]),
"short": True,
},
],
"text": "",
"mrkdwn_in": ["text"],
}
)
return attachments
def create_rotation_attachments(certificate):
return {
"title": certificate["name"],
"title_link": create_certificate_url(certificate["name"]),
"fields": [
{"title": "Owner", "value": certificate["owner"], "short": True},
{
"title": "Expires",
"value": arrow.get(certificate["validityEnd"]).format(
"dddd, MMMM D, YYYY"
),
"short": True,
},
{
"title": "Endpoints Rotated",
"value": len(certificate["endpoints"]),
"short": True,
},
],
}
class SlackNotificationPlugin(ExpirationNotificationPlugin):
title = "Slack"
slug = "slack-notification"
description = "Sends notifications to Slack"
version = slack.VERSION
author = "Harm Weites"
author_url = "https://github.com/netflix/lemur"
additional_options = [
{
"name": "webhook",
"type": "str",
"required": True,
"validation": r"^https:\/\/hooks\.slack\.com\/services\/.+$",
"helpMessage": "The url Slack told you to use for this integration",
},
{
"name": "username",
"type": "str",
"validation": "^.+$",
"helpMessage": "The great storyteller",
"default": "Lemur",
},
{
"name": "recipients",
"type": "str",
"required": True,
"validation": "^(@|#).+$",
"helpMessage": "Where to send to, either @username or #channel",
},
]
def send(self, notification_type, message, targets, options, **kwargs):
"""
A typical check can be performed using the notify command:
`lemur notify`
While we receive a `targets` parameter here, it is unused, as Slack webhooks do not allow
dynamic re-targeting of messages. The webhook itself specifies a channel.
"""
attachments = None
if notification_type == "expiration":
attachments = create_expiration_attachments(message)
elif notification_type == "rotation":
attachments = create_rotation_attachments(message)
if not attachments:
raise Exception("Unable to create message attachments")
body = {
"text": f"Lemur {notification_type.capitalize()} Notification",
"attachments": attachments,
"channel": self.get_option("recipients", options),
"username": self.get_option("username", options),
}
r = requests.post(self.get_option("webhook", options), json.dumps(body))
if r.status_code not in [200]:
raise Exception(f"Failed to send message. Slack response: {r.status_code} {body}")
current_app.logger.info(
f"Slack response: {r.status_code} Message Body: {body}"
)
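# For reference, an illustrative body posted to the Slack webhook by send()
# (channel and username values are placeholders):
#
#     {
#         "text": "Lemur Expiration Notification",
#         "attachments": [...],   # built by create_*_attachments()
#         "channel": "#security",
#         "username": "Lemur",
#     }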
|
import pytest
from lemur.tests.vectors import INTERNAL_PRIVATE_KEY_A_STR, INTERNAL_CERTIFICATE_A_STR
def test_export_certificate_to_csr(app):
from lemur.plugins.base import plugins
p = plugins.get("openssl-csr")
options = []
with pytest.raises(Exception):
p.export(INTERNAL_CERTIFICATE_A_STR, "", "", options)
raw = p.export(INTERNAL_CERTIFICATE_A_STR, "", INTERNAL_PRIVATE_KEY_A_STR, options)
assert raw != b""
|
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN
import homeassistant.components.automation as automation
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 15}
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "disarmed",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "triggered",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "arming",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "armed_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "armed_away",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "armed_night",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_PENDING)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "triggered",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"triggered - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "disarmed",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"disarmed - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "armed_home",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"armed_home - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "armed_away",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"armed_away - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "armed_night",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"armed_night - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
]
},
)
# Fake that the entity is triggered.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_TRIGGERED)
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== "triggered - device - alarm_control_panel.entity - pending - triggered - None"
)
# Fake that the entity is disarmed.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_DISARMED)
await hass.async_block_till_done()
assert len(calls) == 2
assert (
calls[1].data["some"]
== "disarmed - device - alarm_control_panel.entity - triggered - disarmed - None"
)
# Fake that the entity is armed home.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_PENDING)
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_HOME)
await hass.async_block_till_done()
assert len(calls) == 3
assert (
calls[2].data["some"]
== "armed_home - device - alarm_control_panel.entity - pending - armed_home - None"
)
# Fake that the entity is armed away.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_PENDING)
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_AWAY)
await hass.async_block_till_done()
assert len(calls) == 4
assert (
calls[3].data["some"]
== "armed_away - device - alarm_control_panel.entity - pending - armed_away - None"
)
# Fake that the entity is armed night.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_PENDING)
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_NIGHT)
await hass.async_block_till_done()
assert len(calls) == 5
assert (
calls[4].data["some"]
== "armed_night - device - alarm_control_panel.entity - pending - armed_night - None"
)
|
import codecs
import errno
import io
import os
import re
import shutil
import stat
import tempfile
from collections import defaultdict
from meld.conf import _, ngettext
from . import _vc
NULL_SHA = "0000000000000000000000000000000000000000"
class Vc(_vc.Vc):
CMD = "git"
NAME = "Git"
VC_DIR = ".git"
DIFF_FILES_RE = r":(\d+) (\d+) ([a-z0-9]+) ([a-z0-9]+) ([XADMTU])\t(.*)"
DIFF_RE = re.compile(DIFF_FILES_RE)
conflict_map = {
# These are the arguments for git-show
# CONFLICT_MERGED has no git-show argument unfortunately.
_vc.CONFLICT_BASE: 1,
_vc.CONFLICT_LOCAL: 2,
_vc.CONFLICT_REMOTE: 3,
}
state_map = {
"X": _vc.STATE_NONE, # Unknown
"A": _vc.STATE_NEW, # New
"D": _vc.STATE_REMOVED, # Deleted
"M": _vc.STATE_MODIFIED, # Modified
"T": _vc.STATE_MODIFIED, # Type-changed
"U": _vc.STATE_CONFLICT, # Unmerged
}
@classmethod
def is_installed(cls):
try:
proc = _vc.popen([cls.CMD, '--version'])
assert proc.read().startswith('git version')
return True
except Exception:
return False
@classmethod
def check_repo_root(cls, location):
# Check exists instead of isdir, since .git might be a git-file
return os.path.exists(os.path.join(location, cls.VC_DIR))
def get_commits_to_push_summary(self):
branch_refs = self.get_commits_to_push()
unpushed_branches = len([v for v in branch_refs.values() if v])
unpushed_commits = sum(len(v) for v in branch_refs.values())
if unpushed_commits:
if unpushed_branches > 1:
# Translators: First %s is replaced by translated "%d unpushed
# commits", second %s is replaced by translated "%d branches"
label = _("%s in %s") % (
ngettext("%d unpushed commit", "%d unpushed commits",
unpushed_commits) % unpushed_commits,
ngettext("%d branch", "%d branches",
unpushed_branches) % unpushed_branches)
else:
# Translators: These messages cover the case where there is
# only one branch, and are not part of another message.
label = ngettext("%d unpushed commit", "%d unpushed commits",
unpushed_commits) % (unpushed_commits)
else:
label = ""
return label
def get_commits_to_push(self):
proc = self.run(
"for-each-ref", "--format=%(refname:short) %(upstream:short)",
"refs/heads")
branch_remotes = proc.stdout.read().split("\n")[:-1]
branch_revisions = {}
for line in branch_remotes:
try:
branch, remote = line.split()
except ValueError:
continue
proc = self.run("rev-list", branch, "^" + remote, "--")
revisions = proc.stdout.read().split("\n")[:-1]
branch_revisions[branch] = revisions
return branch_revisions
def get_files_to_commit(self, paths):
files = []
for p in paths:
if os.path.isdir(p):
cached_entries, entries = self._get_modified_files(p)
all_entries = set(entries + cached_entries)
names = [
self.DIFF_RE.search(e).groups()[5] for e in all_entries
]
files.extend(names)
else:
files.append(os.path.relpath(p, self.root))
return sorted(list(set(files)))
def get_commit_message_prefill(self):
commit_path = os.path.join(self.root, ".git", "MERGE_MSG")
if os.path.exists(commit_path):
# If I have to deal with non-ascii, non-UTF8 pregenerated commit
# messages, I'm taking up pig farming.
with open(commit_path, encoding='utf-8') as f:
message = f.read()
return "\n".join(
(l for l in message.splitlines() if not l.startswith("#")))
return None
def commit(self, runner, files, message):
command = [self.CMD, 'commit', '-m', message]
runner(command, files, refresh=True, working_dir=self.root)
def update(self, runner):
command = [self.CMD, 'pull']
runner(command, [], refresh=True, working_dir=self.root)
def push(self, runner):
command = [self.CMD, 'push']
runner(command, [], refresh=True, working_dir=self.root)
def add(self, runner, files):
command = [self.CMD, 'add']
runner(command, files, refresh=True, working_dir=self.root)
def remove(self, runner, files):
command = [self.CMD, 'rm', '-r']
runner(command, files, refresh=True, working_dir=self.root)
def revert(self, runner, files):
exists = [f for f in files if os.path.exists(f)]
missing = [f for f in files if not os.path.exists(f)]
if exists:
command = [self.CMD, 'checkout']
runner(command, exists, refresh=True, working_dir=self.root)
if missing:
command = [self.CMD, 'checkout', 'HEAD']
runner(command, missing, refresh=True, working_dir=self.root)
def resolve(self, runner, files):
command = [self.CMD, 'add']
runner(command, files, refresh=True, working_dir=self.root)
def remerge_with_ancestor(self, local, base, remote):
"""Reconstruct a mixed merge-plus-base file
This method re-merges a given file to get diff3-style conflicts
which we can then use to get a file that contains the
pre-merged result everywhere that has no conflict, and the
common ancestor anywhere there *is* a conflict.
"""
proc = self.run(
"merge-file", "-p", "--diff3", local, base, remote,
use_locale_encoding=False)
vc_file = io.BytesIO(
_vc.base_from_diff3(proc.stdout.read()))
prefix = 'meld-tmp-%s-' % _vc.CONFLICT_MERGED
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as f:
shutil.copyfileobj(vc_file, f)
return f.name, True
def get_path_for_conflict(self, path, conflict):
if not path.startswith(self.root + os.path.sep):
raise _vc.InvalidVCPath(self, path, "Path not in repository")
if conflict == _vc.CONFLICT_MERGED:
# Special case: no way to get merged result from git directly
local, _ = self.get_path_for_conflict(path, _vc.CONFLICT_LOCAL)
base, _ = self.get_path_for_conflict(path, _vc.CONFLICT_BASE)
remote, _ = self.get_path_for_conflict(path, _vc.CONFLICT_REMOTE)
if not (local and base and remote):
raise _vc.InvalidVCPath(self, path,
"Couldn't access conflict parents")
filename, is_temp = self.remerge_with_ancestor(local, base, remote)
for temp_file in (local, base, remote):
if os.name == "nt":
os.chmod(temp_file, stat.S_IWRITE)
os.remove(temp_file)
return filename, is_temp
path = path[len(self.root) + 1:]
if os.name == "nt":
path = path.replace("\\", "/")
args = ["git", "show", ":%s:%s" % (self.conflict_map[conflict], path)]
filename = _vc.call_temp_output(
args, cwd=self.location, file_id=_vc.conflicts[conflict])
return filename, True
def get_path_for_repo_file(self, path, commit=None):
if commit is None:
commit = "HEAD"
else:
raise NotImplementedError()
if not path.startswith(self.root + os.path.sep):
raise _vc.InvalidVCPath(self, path, "Path not in repository")
path = path[len(self.root) + 1:]
if os.name == "nt":
path = path.replace("\\", "/")
obj = commit + ":" + path
args = [self.CMD, "cat-file", "blob", obj]
return _vc.call_temp_output(args, cwd=self.root)
@classmethod
def valid_repo(cls, path):
# TODO: On Windows, this exit code is wrong under the normal shell; it
# appears to be correct under the default git bash shell however.
return not _vc.call([cls.CMD, "branch"], cwd=path)
def _get_modified_files(self, path):
# Update the index to avoid reading stale status information
proc = self.run("update-index", "--refresh")
# Get status differences between the index and the repo HEAD
proc = self.run("diff-index", "--cached", "HEAD", "--relative", path)
cached_entries = proc.stdout.read().split("\n")[:-1]
# Get status differences between the index and files-on-disk
proc = self.run("diff-files", "-0", "--relative", path)
entries = proc.stdout.read().split("\n")[:-1]
# Files can show up in both lists, e.g., if a file is modified,
# added to the index and changed again. This is okay, and in
# fact the calling logic requires it for staging feedback.
return cached_entries, entries
def _update_tree_state_cache(self, path):
""" Update the state of the file(s) at self._tree_cache['path'] """
while 1:
try:
cached_entries, entries = self._get_modified_files(path)
# Identify ignored files and folders
proc = self.run(
"ls-files", "--others", "--ignored", "--exclude-standard",
"--directory", path)
ignored_entries = proc.stdout.read().split("\n")[:-1]
# Identify unversioned files
proc = self.run(
"ls-files", "--others", "--exclude-standard", path)
unversioned_entries = proc.stdout.read().split("\n")[:-1]
break
except OSError as e:
if e.errno != errno.EAGAIN:
raise
def get_real_path(name):
name = name.strip()
if os.name == 'nt':
# Git returns unix-style paths on Windows
name = os.path.normpath(name)
# Unicode file names and file names containing quotes are
# returned by git as quoted strings
if name[0] == '"':
name = name.encode('latin1')
name = codecs.escape_decode(name[1:-1])[0].decode('utf-8')
return os.path.abspath(
os.path.join(self.location, name))
if not cached_entries and not entries and os.path.isfile(path):
# If we're just updating a single file there's a chance that it
# was previously modified, and now has been edited so that
# it is un-modified. This will result in an empty 'entries' list,
# and self._tree_cache['path'] will still contain stale data.
# When this corner case occurs we force self._tree_cache['path']
# to STATE_NORMAL.
self._tree_cache[get_real_path(path)] = _vc.STATE_NORMAL
else:
tree_meta_cache = defaultdict(list)
staged = set()
unstaged = set()
# We iterate over both cached entries and entries, accumulating
# metadata from both, but using the state from entries.
for entry in cached_entries + entries:
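# Raw diff entries look roughly like ":old_mode new_mode old_sha new_sha status<TAB>path";
# DIFF_RE (assumed to be defined elsewhere on this class) splits them into those columns.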
columns = self.DIFF_RE.search(entry).groups()
old_mode, new_mode, old_sha, new_sha, statekey, path = columns
state = self.state_map.get(statekey.strip(), _vc.STATE_NONE)
path = get_real_path(path)
self._tree_cache[path] = state
# Git entries can't be MISSING; that's just an unstaged REMOVED
self._add_missing_cache_entry(path, state)
if old_mode != new_mode:
msg = _("Mode changed from %s to %s" %
(old_mode, new_mode))
tree_meta_cache[path].append(msg)
collection = unstaged if new_sha == NULL_SHA else staged
collection.add(path)
for path in staged:
tree_meta_cache[path].append(
_("Partially staged") if path in unstaged else _("Staged"))
for path, msgs in tree_meta_cache.items():
self._tree_meta_cache[path] = "; ".join(msgs)
for path in ignored_entries:
self._tree_cache[get_real_path(path)] = _vc.STATE_IGNORED
for path in unversioned_entries:
self._tree_cache[get_real_path(path)] = _vc.STATE_NONE
|
import numpy as np
from hypertools.tools.procrustes import procrustes
from hypertools.tools.load import load
def test_procrustes_func():
target = load('spiral').get_data()[0]
rot = np.array([[-0.89433495, -0.44719485, -0.01348182],
[-0.43426149, 0.87492975, -0.21427761],
[-0.10761949, 0.18578133, 0.97667976]])
source = np.dot(target, rot)
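# rot is (approximately) orthogonal, so the Procrustes alignment should undo the
# rotation and recover the original target up to numerical precision.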
source_aligned = procrustes(source, target)
assert np.allclose(target, source_aligned)
|
import unittest
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import camvid_label_names
from chainercv.datasets import CamVidDataset
from chainercv.utils import assert_is_semantic_segmentation_dataset
@testing.parameterize(
{'split': 'train'},
{'split': 'val'},
{'split': 'test'}
)
class TestCamVidDataset(unittest.TestCase):
def setUp(self):
self.dataset = CamVidDataset(split=self.split)
@attr.slow
def test_camvid_dataset(self):
assert_is_semantic_segmentation_dataset(
self.dataset, len(camvid_label_names), n_example=10)
testing.run_module(__name__, __file__)
|
import logging
import liffylights
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_change
from homeassistant.util.color import (
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
_LOGGER = logging.getLogger(__name__)
BYTE_MAX = 255
CONF_BROADCAST = "broadcast"
CONF_SERVER = "server"
SHORT_MAX = 65535
TEMP_MAX = 9000
TEMP_MAX_HASS = 500
TEMP_MIN = 2500
TEMP_MIN_HASS = 154
SUPPORT_LIFX = (
SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR | SUPPORT_TRANSITION
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_SERVER): cv.string, vol.Optional(CONF_BROADCAST): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LIFX platform."""
server_addr = config.get(CONF_SERVER)
broadcast_addr = config.get(CONF_BROADCAST)
lifx_library = LIFX(add_entities, server_addr, broadcast_addr)
# Register our poll service
track_time_change(hass, lifx_library.poll, second=[10, 40])
lifx_library.probe()
class LIFX:
"""Representation of a LIFX light."""
def __init__(self, add_entities_callback, server_addr=None, broadcast_addr=None):
"""Initialize the light."""
self._devices = []
self._add_entities_callback = add_entities_callback
self._liffylights = liffylights.LiffyLights(
self.on_device, self.on_power, self.on_color, server_addr, broadcast_addr
)
def find_bulb(self, ipaddr):
"""Search for bulbs."""
bulb = None
for device in self._devices:
if device.ipaddr == ipaddr:
bulb = device
break
return bulb
def on_device(self, ipaddr, name, power, hue, sat, bri, kel):
"""Initialize the light."""
bulb = self.find_bulb(ipaddr)
if bulb is None:
_LOGGER.debug(
"new bulb %s %s %d %d %d %d %d", ipaddr, name, power, hue, sat, bri, kel
)
bulb = LIFXLight(self._liffylights, ipaddr, name, power, hue, sat, bri, kel)
self._devices.append(bulb)
self._add_entities_callback([bulb])
else:
_LOGGER.debug(
"update bulb %s %s %d %d %d %d %d",
ipaddr,
name,
power,
hue,
sat,
bri,
kel,
)
bulb.set_power(power)
bulb.set_color(hue, sat, bri, kel)
bulb.schedule_update_ha_state()
def on_color(self, ipaddr, hue, sat, bri, kel):
"""Initialize the light."""
bulb = self.find_bulb(ipaddr)
if bulb is not None:
bulb.set_color(hue, sat, bri, kel)
bulb.schedule_update_ha_state()
def on_power(self, ipaddr, power):
"""Initialize the light."""
bulb = self.find_bulb(ipaddr)
if bulb is not None:
bulb.set_power(power)
bulb.schedule_update_ha_state()
def poll(self, now):
"""Set up polling for the light."""
self.probe()
def probe(self, address=None):
"""Probe the light."""
self._liffylights.probe(address)
class LIFXLight(LightEntity):
"""Representation of a LIFX light."""
def __init__(self, liffy, ipaddr, name, power, hue, saturation, brightness, kelvin):
"""Initialize the light."""
_LOGGER.debug("LIFXLight: %s %s", ipaddr, name)
self._liffylights = liffy
self._ip = ipaddr
self.set_name(name)
self.set_power(power)
self.set_color(hue, saturation, brightness, kelvin)
@property
def should_poll(self):
"""No polling needed for LIFX light."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def ipaddr(self):
"""Return the IP address of the device."""
return self._ip
@property
def hs_color(self):
"""Return the hs value."""
return (self._hue / 65535 * 360, self._sat / 65535 * 100)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
brightness = int(self._bri / (BYTE_MAX + 1))
_LOGGER.debug("brightness: %d", brightness)
return brightness
@property
def color_temp(self):
"""Return the color temperature."""
temperature = color_temperature_kelvin_to_mired(self._kel)
_LOGGER.debug("color_temp: %d", temperature)
return temperature
@property
def is_on(self):
"""Return true if device is on."""
_LOGGER.debug("is_on: %d", self._power)
return self._power != 0
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_LIFX
def turn_on(self, **kwargs):
"""Turn the device on."""
if ATTR_TRANSITION in kwargs:
fade = int(kwargs[ATTR_TRANSITION] * 1000)
else:
fade = 0
if ATTR_HS_COLOR in kwargs:
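# Home Assistant supplies hue in degrees (0..360) and saturation in percent
# (0..100); LIFX expects both as 16-bit values (0..65535).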
hue, saturation = kwargs[ATTR_HS_COLOR]
hue = hue / 360 * 65535
saturation = saturation / 100 * 65535
else:
hue = self._hue
saturation = self._sat
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS] * (BYTE_MAX + 1)
else:
brightness = self._bri
if ATTR_COLOR_TEMP in kwargs:
kelvin = int(color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP]))
else:
kelvin = self._kel
_LOGGER.debug(
"turn_on: %s (%d) %d %d %d %d %d",
self._ip,
self._power,
hue,
saturation,
brightness,
kelvin,
fade,
)
if self._power == 0:
self._liffylights.set_color(
self._ip, hue, saturation, brightness, kelvin, 0
)
self._liffylights.set_power(self._ip, 65535, fade)
else:
self._liffylights.set_color(
self._ip, hue, saturation, brightness, kelvin, fade
)
def turn_off(self, **kwargs):
"""Turn the device off."""
if ATTR_TRANSITION in kwargs:
fade = int(kwargs[ATTR_TRANSITION] * 1000)
else:
fade = 0
_LOGGER.debug("turn_off: %s %d", self._ip, fade)
self._liffylights.set_power(self._ip, 0, fade)
def set_name(self, name):
"""Set name of the light."""
self._name = name
def set_power(self, power):
"""Set power state value."""
_LOGGER.debug("set_power: %d", power)
self._power = power != 0
def set_color(self, hue, sat, bri, kel):
"""Set color state values."""
self._hue = hue
self._sat = sat
self._bri = bri
self._kel = kel
|
import asyncio
from copy import deepcopy
from homeassistant.components.deconz import (
DeconzGateway,
async_setup_entry,
async_unload_entry,
)
from homeassistant.components.deconz.const import DOMAIN as DECONZ_DOMAIN
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
ENTRY1_HOST = "1.2.3.4"
ENTRY1_PORT = 80
ENTRY1_API_KEY = "1234567890ABCDEF"
ENTRY1_BRIDGEID = "12345ABC"
ENTRY1_UUID = "456DEF"
ENTRY2_HOST = "2.3.4.5"
ENTRY2_PORT = 80
ENTRY2_API_KEY = "1234567890ABCDEF"
ENTRY2_BRIDGEID = "23456DEF"
ENTRY2_UUID = "789ACE"
async def setup_entry(hass, entry):
"""Test that setup entry works."""
with patch.object(DeconzGateway, "async_setup", return_value=True), patch.object(
DeconzGateway, "async_update_device_registry", return_value=True
):
assert await async_setup_entry(hass, entry) is True
async def test_setup_entry_fails(hass):
"""Test setup entry fails if deCONZ is not available."""
with patch("pydeconz.DeconzSession.initialize", side_effect=Exception):
await setup_deconz_integration(hass)
assert not hass.data[DECONZ_DOMAIN]
async def test_setup_entry_no_available_bridge(hass):
"""Test setup entry fails if deCONZ is not available."""
with patch("pydeconz.DeconzSession.initialize", side_effect=asyncio.TimeoutError):
await setup_deconz_integration(hass)
assert not hass.data[DECONZ_DOMAIN]
async def test_setup_entry_successful(hass):
"""Test setup entry is successful."""
config_entry = await setup_deconz_integration(hass)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert hass.data[DECONZ_DOMAIN]
assert gateway.bridgeid in hass.data[DECONZ_DOMAIN]
assert hass.data[DECONZ_DOMAIN][gateway.bridgeid].master
async def test_setup_entry_multiple_gateways(hass):
"""Test setup entry is successful with multiple gateways."""
config_entry = await setup_deconz_integration(hass)
gateway = get_gateway_from_config_entry(hass, config_entry)
data = deepcopy(DECONZ_WEB_REQUEST)
data["config"]["bridgeid"] = "01234E56789B"
config_entry2 = await setup_deconz_integration(
hass, get_state_response=data, entry_id="2"
)
gateway2 = get_gateway_from_config_entry(hass, config_entry2)
assert len(hass.data[DECONZ_DOMAIN]) == 2
assert hass.data[DECONZ_DOMAIN][gateway.bridgeid].master
assert not hass.data[DECONZ_DOMAIN][gateway2.bridgeid].master
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
config_entry = await setup_deconz_integration(hass)
assert hass.data[DECONZ_DOMAIN]
assert await async_unload_entry(hass, config_entry)
assert not hass.data[DECONZ_DOMAIN]
async def test_unload_entry_multiple_gateways(hass):
"""Test being able to unload an entry and master gateway gets moved."""
config_entry = await setup_deconz_integration(hass)
data = deepcopy(DECONZ_WEB_REQUEST)
data["config"]["bridgeid"] = "01234E56789B"
config_entry2 = await setup_deconz_integration(
hass, get_state_response=data, entry_id="2"
)
gateway2 = get_gateway_from_config_entry(hass, config_entry2)
assert len(hass.data[DECONZ_DOMAIN]) == 2
assert await async_unload_entry(hass, config_entry)
assert len(hass.data[DECONZ_DOMAIN]) == 1
assert hass.data[DECONZ_DOMAIN][gateway2.bridgeid].master
|
import collections
import re
import numpy as np
import pandas as pd
from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory
from scattertext.FeatureOuput import FeatureLister
from scattertext.Common import SPACY_ENTITY_TAGS, MY_ENGLISH_STOP_WORDS, DEFAULT_BACKGROUND_SCALER_ALGO, \
DEFAULT_BACKGROUND_BETA
from scattertext.frequencyreaders.DefaultBackgroundFrequencies import DefaultBackgroundFrequencies
from scattertext.termranking import AbsoluteFrequencyRanker
from scattertext.termscoring import ScaledFScore
from scattertext.indexstore.IndexStore import IndexStore
class TermDocMatrixWithoutCategories(object):
def __init__(self, X, mX, term_idx_store, metadata_idx_store, unigram_frequency_path=None):
'''
Parameters
----------
X : csr_matrix
term document matrix
mX : csr_matrix
metadata-document matrix
term_idx_store : IndexStore
Term indices
metadata_idx_store : IndexStore
Document metadata indices
unigram_frequency_path : str or None
Path to term frequency file.
'''
self._X = X
self._mX = mX
self._term_idx_store = term_idx_store
self._metadata_idx_store = metadata_idx_store
self._unigram_frequency_path = unigram_frequency_path
self._background_corpus = None
self._strict_unigram_definition = True
def get_default_stoplist(self):
return MY_ENGLISH_STOP_WORDS
def allow_single_quotes_in_unigrams(self):
'''
Don't filter out single quotes in unigrams
:return: self
'''
self._strict_unigram_definition = False
return self
def compact(self, compactor, non_text=False):
'''
Compact term document matrix.
Parameters
----------
compactor : object
Object that takes a Term Doc Matrix as its first argument, and has a compact function which returns a
Term Doc Matrix-like object
non_text : bool
Use non text features. False by default.
Returns
-------
TermDocMatrix
'''
return compactor.compact(self, non_text)
def select(self, compactor, non_text=False):
'''
Same as compact
'''
return compactor.compact(self, non_text)
def get_num_terms(self):
'''
Returns
-------
The number of terms registered in the term doc matrix
'''
return len(self._term_idx_store)
def get_num_docs(self):
'''
Returns
-------
int, number of documents
'''
return self._X.shape[0]
def get_num_metadata(self):
'''
Returns
-------
int, number of unique metadata items
'''
return len(self.get_metadata())
def set_background_corpus(self, background):
'''
Parameters
----------
background : subclass of TermDocMatrixWithoutCategories, or pd.DataFrame with columns 'word' and 'background'
'''
if issubclass(type(background), TermDocMatrixWithoutCategories):
self._background_corpus = pd.DataFrame(background
.get_term_freq_df()
.sum(axis=1),
columns=['background']).reset_index()
self._background_corpus.columns = ['word', 'background']
elif (type(background) == pd.DataFrame
and set(background.columns) == set(['word', 'background'])):
self._background_corpus = background
else:
raise Exception('The argument named background must be a subclass of TermDocMatrix or a ' \
+ 'DataFrame with columns "word" and "background", where "word" ' \
+ 'is the term text, and "background" is its frequency.')
def get_background_corpus(self):
if self._background_corpus is not None:
return self._background_corpus
return DefaultBackgroundFrequencies.get_background_frequency_df(self._unigram_frequency_path)
def get_term_and_background_counts(self):
'''
Returns
-------
A pd.DataFrame consisting of unigram term counts of words occurring
in the TermDocumentMatrix and their corresponding background corpus
counts. The dataframe has two columns, corpus and background.
>>> corpus.get_unigram_corpus().get_term_and_background_counts()
corpus background
obama 702.0 565739.0
romney 570.0 695398.0
barack 248.0 227861.0
...
'''
background_df = self._get_background_unigram_frequencies()
corpus_freq_df = self.get_term_count_df()
corpus_unigram_freq = self._get_corpus_unigram_freq(corpus_freq_df)
df = corpus_unigram_freq.join(background_df, how='outer').fillna(0)
return df
def get_term_count_df(self):
return pd.DataFrame({'corpus': self._X.sum(axis=0).A1, 'term': self.get_terms()}).set_index('term')
def _get_corpus_unigram_freq(self, corpus_freq_df):
unigram_validator = re.compile('^[A-Za-z]+$')
corpus_unigram_freq = corpus_freq_df.loc[[term for term
in corpus_freq_df.index
if unigram_validator.match(term) is not None]]
return corpus_unigram_freq
def _get_background_unigram_frequencies(self):
if self.get_background_corpus() is not None:
return self.get_background_corpus()
return DefaultBackgroundFrequencies.get_background_frequency_df(self._unigram_frequency_path)
def list_extra_features(self):
'''
Returns
-------
List of dicts. One dict for each document, keys are metadata, values are counts
'''
return FeatureLister(self._mX,
self._metadata_idx_store,
self.get_num_docs()).output()
def get_terms(self):
'''
Returns
-------
np.array of unique terms
'''
return self._term_idx_store._i2val
def get_metadata(self):
'''
Returns
-------
np.array of unique metadata
'''
return self._metadata_idx_store._i2val
def get_total_unigram_count(self):
return self._get_unigram_term_freq_df().sum()
def _get_unigram_term_freq_df(self):
return self._get_corpus_unigram_freq(
# self.get_term_freq_df().sum(axis=1)
self.get_term_count_df()['corpus']
)
def _get_X_after_delete_terms(self, idx_to_delete_list, non_text=False):
new_term_idx_store = self._get_relevant_idx_store(non_text).batch_delete_idx(idx_to_delete_list)
new_X = delete_columns(self._get_relevant_X(non_text), idx_to_delete_list)
return new_X, new_term_idx_store
def _get_relevant_X(self, non_text):
return self._mX if non_text else self._X
def _get_relevant_idx_store(self, non_text):
return self._metadata_idx_store if non_text else self._term_idx_store
def remove_infrequent_words(self, minimum_term_count, term_ranker=AbsoluteFrequencyRanker):
'''
Returns
-------
A new TermDocumentMatrix consisting of only terms which occur more than minimum_term_count times.
'''
tdf = term_ranker(self).get_ranks().sum(axis=1)
return self.remove_terms(list(tdf[tdf <= minimum_term_count].index))
def remove_entity_tags(self):
'''
Returns
-------
A new TermDocumentMatrix consisting of only terms in the current TermDocumentMatrix
that aren't spaCy entity tags.
Note: Used if entity types are censored using FeatsFromSpacyDoc(tag_types_to_censor=...).
'''
terms_to_remove = [term for term in self._term_idx_store._i2val
if any([word in SPACY_ENTITY_TAGS for word in term.split()])]
return self.remove_terms(terms_to_remove)
def remove_terms(self, terms, ignore_absences=False, non_text=False):
'''Non destructive term removal.
Parameters
----------
terms : list
list of terms to remove
ignore_absences : bool, False by default
If term does not appear, don't raise an error, just move on.
non_text : bool, False by default
Remove metadata terms instead of regular terms
Returns
-------
TermDocMatrix, new object with terms removed.
'''
idx_to_delete_list = self._build_term_index_list(ignore_absences, terms, non_text)
return self.remove_terms_by_indices(idx_to_delete_list, non_text)
def whitelist_terms(self, whitelist_terms):
'''
:param whitelist_terms: list[str], terms to whitelist
:return: TermDocMatrix, new object with only terms in parameter
'''
return self.remove_terms(list(set(self.get_terms()) - set(whitelist_terms)))
def _build_term_index_list(self, ignore_absences, terms, non_text=False):
idx_to_delete_list = []
my_term_idx_store = self._get_relevant_idx_store(non_text)
for term in terms:
if term not in my_term_idx_store:
if not ignore_absences:
raise KeyError('Term %s not found' % (term))
continue
idx_to_delete_list.append(my_term_idx_store.getidx(term))
return idx_to_delete_list
def _make_new_term_doc_matrix(self,
new_X=None,
new_mX=None,
new_y=None,
new_term_idx_store=None,
new_category_idx_store=None,
new_metadata_idx_store=None,
new_y_mask=None):
return TermDocMatrixWithoutCategories(
X=new_X if new_X is not None else self._X,
mX=new_mX if new_mX is not None else self._mX,
term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
unigram_frequency_path=self._unigram_frequency_path
)
def remove_terms_used_in_less_than_num_docs(self, threshold, non_text=False):
'''
Parameters
----------
threshold: int
Minimum number of documents term should appear in to be kept
non_text: bool
Use non-text features instead of terms
Returns
-------
TermDocMatrix, new object with terms removed.
'''
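# Binarize the matrix so every non-zero cell counts once, then sum each column
# to get the number of documents containing each term.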
term_counts = self._get_relevant_X(non_text).astype(bool).astype(int).sum(axis=0).A[0]
terms_to_remove = np.where(term_counts < threshold)[0]
return self.remove_terms_by_indices(terms_to_remove, non_text)
def get_unigram_corpus(self):
'''
Returns
-------
A new TermDocumentMatrix consisting of only unigrams in the current TermDocumentMatrix.
'''
terms_to_ignore = self._get_non_unigrams()
return self.remove_terms(terms_to_ignore)
def _get_non_unigrams(self):
return [term for term
in self._term_idx_store._i2val
if ' ' in term or (self._strict_unigram_definition and "'" in term)
]
def get_stoplisted_unigram_corpus(self, stoplist=None):
'''
Parameters
-------
stoplist : list, optional
Returns
-------
A new TermDocumentMatrix consisting of only unigrams in the current TermDocumentMatrix.
'''
if stoplist is None:
stoplist = self.get_default_stoplist()
else:
stoplist = [w.lower() for w in stoplist]
return self._remove_terms_from_list(stoplist)
def get_stoplisted_unigram_corpus_and_custom(self,
custom_stoplist):
'''
Parameters
-------
custom_stoplist : str or list of str, words to add to the default stoplist
Returns
-------
A new TermDocumentMatrix consisting of only unigrams in the current TermDocumentMatrix.
'''
if type(custom_stoplist) == str:
custom_stoplist = [custom_stoplist]
return self._remove_terms_from_list(set(self.get_default_stoplist())
| set(w.lower() for w in custom_stoplist))
def _remove_terms_from_list(self, stoplist):
terms_to_ignore = [term for term
in self._term_idx_store._i2val
if ' ' in term or (self._strict_unigram_definition
and ("'" in term or '’' in term))
or term in stoplist]
return self.remove_terms(terms_to_ignore)
def metadata_in_use(self):
'''
Returns True if metadata values are in term doc matrix.
Returns
-------
bool
'''
return len(self._metadata_idx_store) > 0
def _make_all_positive_data_ones(self, newX):
# type: (sparse_matrix) -> sparse_matrix
return (newX > 0).astype(np.int32)
def get_doc_lengths(self):
'''
Returns a list of document lengths in words
Returns
-------
np.array
'''
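# Only unigram columns are summed, so multi-word phrases don't double-count words.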
idx_to_delete_list = self._build_term_index_list(True, self._get_non_unigrams())
unigram_X, _ = self._get_X_after_delete_terms(idx_to_delete_list)
return unigram_X.sum(axis=1).A1
def remove_terms_by_indices(self, idx_to_delete_list, non_text=False):
'''
Parameters
----------
idx_to_delete_list, list
non_text, bool
Should we remove non text features or just terms?
Returns
-------
TermDocMatrix
'''
new_X, new_idx_store = self._get_X_after_delete_terms(idx_to_delete_list, non_text)
return self._make_new_term_doc_matrix(new_X=self._X if non_text else new_X,
new_mX=new_X if non_text else self._mX,
new_y=None,
new_category_idx_store=None,
new_term_idx_store=self._term_idx_store if non_text else new_idx_store,
new_metadata_idx_store=(new_idx_store if non_text
else self._metadata_idx_store),
new_y_mask=np.ones(new_X.shape[0]).astype(bool))
def get_scaled_f_scores_vs_background(self,
scaler_algo=DEFAULT_BACKGROUND_SCALER_ALGO,
beta=DEFAULT_BACKGROUND_BETA):
'''
Parameters
----------
scaler_algo : str
see get_scaled_f_scores, default 'none'
beta : float
default 1.
Returns
-------
pd.DataFrame of scaled_f_score scores compared to background corpus
'''
df = self.get_term_and_background_counts()
df['Scaled f-score'] = ScaledFScore.get_scores_for_category(
df['corpus'], df['background'], scaler_algo, beta
)
return df.sort_values(by='Scaled f-score', ascending=False)
def get_term_doc_mat(self):
'''
Returns sparse matrix representation of term-doc-matrix
Returns
-------
scipy.sparse.csr_matrix
'''
return self._X
def get_term_doc_mat_coo(self):
'''
Returns sparse matrix representation of term-doc-matrix
Returns
-------
scipy.sparse.coo_matrix
'''
return self._X.astype(np.double).tocoo()
def get_metadata_doc_mat(self):
'''
Returns sparse matrix representation of the metadata-doc-matrix
Returns
-------
scipy.sparse.csr_matrix
'''
return self._mX
def term_doc_lists(self):
'''
Returns
-------
dict
'''
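# Transposing puts terms on the rows; the LIL rows then give, per term, the list
# of document indices in which it occurs.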
doc_ids = self._X.transpose().tolil().rows
terms = self._term_idx_store.values()
return dict(zip(terms, doc_ids))
def apply_ranker(self, term_ranker, use_non_text_features):
'''
Parameters
----------
term_ranker : TermRanker
Returns
-------
pd.Dataframe
'''
if use_non_text_features:
return term_ranker(self).use_non_text_features().get_ranks()
return term_ranker(self).get_ranks()
def add_doc_names_as_metadata(self, doc_names):
'''
:param doc_names: array-like[str], document names of reach document
:return: Corpus-like object with doc names as metadata. If two documents share the same name,
a running document number in parentheses will be appended to their names.
'''
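# Illustrative sketch: doc_names = ['a', 'b', 'a'] would yield the metadata
# values 'a (1)', 'b' and 'a (2)'.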
if len(doc_names) != self.get_num_docs():
raise Exception("The parameter doc_names contains %s elements. "
"It should have %s elements, one per document." % (len(doc_names), self.get_num_docs()))
doc_names_counter = collections.Counter(np.array(doc_names))
metafact = CSRMatrixFactory()
metaidxstore = IndexStore()
doc_id_uses = collections.Counter()
for i in range(self.get_num_docs()):
doc_id = doc_names[i]
if doc_names_counter[doc_id] > 1:
doc_id_uses[doc_id] += 1
doc_name_idx = metaidxstore.getidx('%s (%s)' % (doc_id, doc_id_uses[doc_id]))
else:
doc_name_idx = metaidxstore.getidx(doc_id)
metafact[i, i] = doc_name_idx
return self.add_metadata(metafact.get_csr_matrix(), metaidxstore)
def add_metadata(self, metadata_matrix, meta_index_store):
'''
Returns a new corpus with the metadata matrix and index store integrated.
:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories
'''
assert isinstance(meta_index_store, IndexStore)
assert len(metadata_matrix.shape) == 2
assert metadata_matrix.shape[0] == self.get_num_docs()
return self._make_new_term_doc_matrix(new_X=self._X,
new_y=None,
new_category_idx_store=None,
new_y_mask=np.ones(self.get_num_docs()).astype(bool),
new_mX=metadata_matrix,
new_term_idx_store=self._term_idx_store,
new_metadata_idx_store=meta_index_store)
|
import json
from django.core.serializers.json import DjangoJSONEncoder, Serializer as DjangoSerializer
from django.core.serializers.json import Deserializer
from shop.money.money_maker import AbstractMoney
__all__ = ['JSONEncoder', 'Serializer', 'Deserializer']
class JSONEncoder(DjangoJSONEncoder):
"""
Money type aware JSON encoder for round-trip usage, such as import/export/dumpdata/loaddata.
"""
def default(self, obj):
if isinstance(obj, AbstractMoney):
return float(obj)
return super().default(obj)
class Serializer(DjangoSerializer):
"""
Money type aware JSON serializer.
"""
def end_object(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
kwargs = dict(self.json_kwargs, cls=JSONEncoder)
json.dump(self.get_dump_object(obj), self.stream, **kwargs)
self._current = None
|
from collections import OrderedDict
from pyvesync import VeSync
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from .const import DOMAIN
@callback
def configured_instances(hass):
"""Return already configured instances."""
return hass.config_entries.async_entries(DOMAIN)
@config_entries.HANDLERS.register(DOMAIN)
class VeSyncFlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Instantiate config flow."""
self._username = None
self._password = None
self.data_schema = OrderedDict()
self.data_schema[vol.Required(CONF_USERNAME)] = str
self.data_schema[vol.Required(CONF_PASSWORD)] = str
@callback
def _show_form(self, errors=None):
"""Show form to the user."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(self.data_schema),
errors=errors if errors else {},
)
async def async_step_import(self, import_config):
"""Handle external yaml configuration."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
if configured_instances(self.hass):
return self.async_abort(reason="single_instance_allowed")
if not user_input:
return self._show_form()
self._username = user_input[CONF_USERNAME]
self._password = user_input[CONF_PASSWORD]
manager = VeSync(self._username, self._password)
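# login() performs blocking network I/O, so run it in the executor to avoid
# stalling the event loop.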
login = await self.hass.async_add_executor_job(manager.login)
if not login:
return self._show_form(errors={"base": "invalid_auth"})
return self.async_create_entry(
title=self._username,
data={CONF_USERNAME: self._username, CONF_PASSWORD: self._password},
)
|
from collections import OrderedDict
from homeassistant.helpers.entity_values import EntityValues as EV
ent = "test.test"
def test_override_single_value():
"""Test values with exact match."""
store = EV({ent: {"key": "value"}})
assert store.get(ent) == {"key": "value"}
assert len(store._cache) == 1
assert store.get(ent) == {"key": "value"}
assert len(store._cache) == 1
def test_override_by_domain():
"""Test values with domain match."""
store = EV(domain={"test": {"key": "value"}})
assert store.get(ent) == {"key": "value"}
def test_override_by_glob():
"""Test values with glob match."""
store = EV(glob={"test.?e*": {"key": "value"}})
assert store.get(ent) == {"key": "value"}
def test_glob_overrules_domain():
"""Test domain overrules glob match."""
store = EV(domain={"test": {"key": "domain"}}, glob={"test.?e*": {"key": "glob"}})
assert store.get(ent) == {"key": "glob"}
def test_exact_overrules_domain():
"""Test exact overrules domain match."""
store = EV(
exact={"test.test": {"key": "exact"}},
domain={"test": {"key": "domain"}},
glob={"test.?e*": {"key": "glob"}},
)
assert store.get(ent) == {"key": "exact"}
def test_merging_values():
"""Test merging glob, domain and exact configs."""
store = EV(
exact={"test.test": {"exact_key": "exact"}},
domain={"test": {"domain_key": "domain"}},
glob={"test.?e*": {"glob_key": "glob"}},
)
assert store.get(ent) == {
"exact_key": "exact",
"domain_key": "domain",
"glob_key": "glob",
}
def test_glob_order():
"""Test merging glob, domain and exact configs."""
glob = OrderedDict()
glob["test.*est"] = {"value": "first"}
glob["test.*"] = {"value": "second"}
store = EV(glob=glob)
assert store.get(ent) == {"value": "second"}
|
from __future__ import absolute_import, unicode_literals
import copy
import logging
import os
from . import exc
from ._compat import string_types
logger = logging.getLogger(__name__)
def validate_schema(sconf):
"""
Return True if config schema is correct.
Parameters
----------
sconf : dict
session configuration
Returns
-------
bool
"""
# verify session_name
if 'session_name' not in sconf:
raise exc.ConfigError('config requires "session_name"')
if 'windows' not in sconf:
raise exc.ConfigError('config requires list of "windows"')
for window in sconf['windows']:
if 'window_name' not in window:
raise exc.ConfigError('config window is missing "window_name"')
return True
def is_config_file(filename, extensions=['.yml', '.yaml', '.json']):
"""
Return True if file has a valid config file type.
Parameters
----------
filename : str
filename to check (e.g. ``mysession.json``).
extensions : str or list
filetypes to check (e.g. ``['.yaml', '.json']``).
Returns
-------
bool
"""
extensions = [extensions] if isinstance(extensions, string_types) else extensions
return any(filename.endswith(e) for e in extensions)
def in_dir(
config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json']
):
"""
Return a list of configs in ``config_dir``.
Parameters
----------
config_dir : str
directory to search
extensions : list
filetypes to check (e.g. ``['.yaml', '.json']``).
Returns
-------
list
"""
configs = []
for filename in os.listdir(config_dir):
if is_config_file(filename, extensions) and not filename.startswith('.'):
configs.append(filename)
return configs
def in_cwd():
"""
Return list of configs in current working directory.
Matches filenames that start with ``.tmuxp`` and end with ``.yml``, ``.yaml`` or ``.json``.
Returns
-------
list
configs in current working directory
"""
configs = []
for filename in os.listdir(os.getcwd()):
if filename.startswith('.tmuxp') and is_config_file(filename):
configs.append(filename)
return configs
def expandshell(_path):
"""
Return expanded path based on user's ``$HOME`` and ``env``.
:py:func:`os.path.expanduser` and :py:func:`os.path.expandvars`
Parameters
----------
path : str
path to expand
Returns
-------
str
path with shell variables expanded
"""
return os.path.expandvars(os.path.expanduser(_path))
def inline(sconf):
"""
Return config in inline form, opposite of :meth:`config.expand`.
Parameters
----------
sconf : dict
Returns
-------
dict
configuration with optional inlined configs.
"""
if (
'shell_command' in sconf
and isinstance(sconf['shell_command'], list)
and len(sconf['shell_command']) == 1
):
sconf['shell_command'] = sconf['shell_command'][0]
if len(sconf.keys()) == 1:
sconf = sconf['shell_command']
if (
'shell_command_before' in sconf
and isinstance(sconf['shell_command_before'], list)
and len(sconf['shell_command_before']) == 1
):
sconf['shell_command_before'] = sconf['shell_command_before'][0]
# recurse into window and pane config items
if 'windows' in sconf:
sconf['windows'] = [inline(window) for window in sconf['windows']]
if 'panes' in sconf:
sconf['panes'] = [inline(pane) for pane in sconf['panes']]
return sconf
def expand(sconf, cwd=None, parent=None):
"""Return config with shorthand and inline properties expanded.
This is necessary to keep the code in the :class:`WorkspaceBuilder` clean
and also allow for neat, short-hand configurations.
As a simple example, internally, tmuxp expects that config options
like ``shell_command`` are a list (array)::
'shell_command': ['htop']
tmuxp configs allow for it to be simply a string::
'shell_command': 'htop'
Kaptan will load JSON/YAML files into python dicts for you.
Parameters
----------
sconf : dict
the configuration for the session
cwd : str
directory to expand relative paths against. should be the dir of the
config directory.
parent : str
(used on recursive entries) start_directory of parent window or session
object.
Returns
-------
dict
"""
# Note: cli.py will expand configs relative to project's config directory
# for the first cwd argument.
if not cwd:
cwd = os.getcwd()
if 'session_name' in sconf:
sconf['session_name'] = expandshell(sconf['session_name'])
if 'window_name' in sconf:
sconf['window_name'] = expandshell(sconf['window_name'])
if 'environment' in sconf:
for key in sconf['environment']:
val = sconf['environment'][key]
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['environment'][key] = val
if 'global_options' in sconf:
for key in sconf['global_options']:
val = sconf['global_options'][key]
if isinstance(val, string_types):
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['global_options'][key] = val
if 'options' in sconf:
for key in sconf['options']:
val = sconf['options'][key]
if isinstance(val, string_types):
val = expandshell(val)
if any(val.startswith(a) for a in ['.', './']):
val = os.path.normpath(os.path.join(cwd, val))
sconf['options'][key] = val
# Any config section, session, window, pane that can contain the
# 'shell_command' value
if 'start_directory' in sconf:
sconf['start_directory'] = expandshell(sconf['start_directory'])
start_path = sconf['start_directory']
if any(start_path.startswith(a) for a in ['.', './']):
# if window has a session, or pane has a window with a
# start_directory of . or ./, make sure the start_directory can be
# relative to the parent.
#
# This is for the case where you may be loading a config from
# outside your shell current directory.
if parent:
cwd = parent['start_directory']
start_path = os.path.normpath(os.path.join(cwd, start_path))
sconf['start_directory'] = start_path
if 'before_script' in sconf:
sconf['before_script'] = expandshell(sconf['before_script'])
if any(sconf['before_script'].startswith(a) for a in ['.', './']):
sconf['before_script'] = os.path.normpath(
os.path.join(cwd, sconf['before_script'])
)
if 'shell_command' in sconf and isinstance(sconf['shell_command'], string_types):
sconf['shell_command'] = [sconf['shell_command']]
if 'shell_command_before' in sconf and isinstance(
sconf['shell_command_before'], string_types
):
sconf['shell_command_before'] = [sconf['shell_command_before']]
if 'shell_command_before' in sconf and isinstance(
sconf['shell_command_before'], list
):
sconf['shell_command_before'] = [
expandshell(scmd) for scmd in sconf['shell_command_before']
]
# recurse into window and pane config items
if 'windows' in sconf:
sconf['windows'] = [expand(window, parent=sconf) for window in sconf['windows']]
elif 'panes' in sconf:
for pconf in sconf['panes']:
p_index = sconf['panes'].index(pconf)
p = copy.deepcopy(pconf)
pconf = sconf['panes'][p_index] = {}
if isinstance(p, string_types):
p = {'shell_command': [p]}
elif not p:
p = {'shell_command': []}
assert isinstance(p, dict)
if 'shell_command' in p:
cmd = p['shell_command']
if isinstance(p['shell_command'], string_types):
cmd = [cmd]
if not cmd or any(a == cmd for a in [None, 'blank', 'pane']):
cmd = []
if isinstance(cmd, list) and len(cmd) == 1:
if any(a in cmd for a in [None, 'blank', 'pane']):
cmd = []
p['shell_command'] = cmd
else:
p['shell_command'] = []
pconf.update(p)
sconf['panes'] = [expand(pane, parent=sconf) for pane in sconf['panes']]
return sconf
def trickle(sconf):
"""Return a dict with "trickled down" / inherited config values.
This will only work if config has been expanded to full form with
:meth:`config.expand`.
tmuxp allows certain commands to be default at the session, window
level. shell_command_before trickles down and prepends the
``shell_command`` for the pane.
Parameters
----------
sconf : dict
the session configuration.
Returns
-------
dict
"""
# prepends a pane's ``shell_command`` list with the window and sessions'
# ``shell_command_before``.
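# Rough sketch with assumed values: a session-level shell_command_before of
# ['cd /srv'], a window-level one of ['source .env'] and a pane shell_command of
# ['htop'] leave the pane running ['cd /srv', 'source .env', 'htop'].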
if 'start_directory' in sconf:
session_start_directory = sconf['start_directory']
else:
session_start_directory = None
if 'suppress_history' in sconf:
suppress_history = sconf['suppress_history']
else:
suppress_history = None
for windowconfig in sconf['windows']:
# Prepend start_directory to relative window commands
if session_start_directory:
if 'start_directory' not in windowconfig:
windowconfig['start_directory'] = session_start_directory
else:
if not any(
windowconfig['start_directory'].startswith(a) for a in ['~', '/']
):
window_start_path = os.path.join(
session_start_directory, windowconfig['start_directory']
)
windowconfig['start_directory'] = window_start_path
# We only need to trickle to the window, workspace builder checks wconf
if suppress_history is not None:
if 'suppress_history' not in windowconfig:
windowconfig['suppress_history'] = suppress_history
# If panes were NOT specified for a window, assume that a single pane
# with no shell commands is desired
if 'panes' not in windowconfig:
windowconfig['panes'] = [{'shell_command': []}]
for paneconfig in windowconfig['panes']:
commands_before = []
# Prepend shell_command_before to commands
if 'shell_command_before' in sconf:
commands_before.extend(sconf['shell_command_before'])
if 'shell_command_before' in windowconfig:
commands_before.extend(windowconfig['shell_command_before'])
if 'shell_command_before' in paneconfig:
commands_before.extend(paneconfig['shell_command_before'])
if 'shell_command' in paneconfig:
commands_before.extend(paneconfig['shell_command'])
p_index = windowconfig['panes'].index(paneconfig)
windowconfig['panes'][p_index]['shell_command'] = commands_before
# paneconfig['shell_command'] = commands_before
return sconf
def import_tmuxinator(sconf):
"""Return tmuxp config from a `tmuxinator`_ yaml config.
.. _tmuxinator: https://github.com/aziz/tmuxinator
Parameters
----------
sconf : dict
python dict for session configuration.
Returns
-------
dict
"""
tmuxp_config = {}
if 'project_name' in sconf:
tmuxp_config['session_name'] = sconf.pop('project_name')
elif 'name' in sconf:
tmuxp_config['session_name'] = sconf.pop('name')
else:
tmuxp_config['session_name'] = None
if 'project_root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('project_root')
elif 'root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('root')
if 'cli_args' in sconf:
tmuxp_config['config'] = sconf['cli_args']
if '-f' in tmuxp_config['config']:
tmuxp_config['config'] = tmuxp_config['config'].replace('-f', '').strip()
elif 'tmux_options' in sconf:
tmuxp_config['config'] = sconf['tmux_options']
if '-f' in tmuxp_config['config']:
tmuxp_config['config'] = tmuxp_config['config'].replace('-f', '').strip()
if 'socket_name' in sconf:
tmuxp_config['socket_name'] = sconf['socket_name']
tmuxp_config['windows'] = []
if 'tabs' in sconf:
sconf['windows'] = sconf.pop('tabs')
if 'pre' in sconf and 'pre_window' in sconf:
tmuxp_config['shell_command'] = sconf['pre']
if isinstance(sconf['pre'], string_types):
tmuxp_config['shell_command_before'] = [sconf['pre_window']]
else:
tmuxp_config['shell_command_before'] = sconf['pre_window']
elif 'pre' in sconf:
if isinstance(sconf['pre'], string_types):
tmuxp_config['shell_command_before'] = [sconf['pre']]
else:
tmuxp_config['shell_command_before'] = sconf['pre']
if 'rbenv' in sconf:
if 'shell_command_before' not in tmuxp_config:
tmuxp_config['shell_command_before'] = []
tmuxp_config['shell_command_before'].append('rbenv shell %s' % sconf['rbenv'])
for w in sconf['windows']:
for k, v in w.items():
windowdict = {'window_name': k}
if isinstance(v, string_types) or v is None:
windowdict['panes'] = [v]
tmuxp_config['windows'].append(windowdict)
continue
elif isinstance(v, list):
windowdict['panes'] = v
tmuxp_config['windows'].append(windowdict)
continue
if 'pre' in v:
windowdict['shell_command_before'] = v['pre']
if 'panes' in v:
windowdict['panes'] = v['panes']
if 'root' in v:
windowdict['start_directory'] = v['root']
if 'layout' in v:
windowdict['layout'] = v['layout']
tmuxp_config['windows'].append(windowdict)
return tmuxp_config
def import_teamocil(sconf):
"""Return tmuxp config from a `teamocil`_ yaml config.
.. _teamocil: https://github.com/remiprev/teamocil
Parameters
----------
sconf : dict
python dict for session configuration
Notes
-----
Todos:
- change 'root' to a cd or start_directory
- width in pane -> main-pane-width
- with_env_var
- clear
- cmd_separator
"""
tmuxp_config = {}
if 'session' in sconf:
sconf = sconf['session']
if 'name' in sconf:
tmuxp_config['session_name'] = sconf['name']
else:
tmuxp_config['session_name'] = None
if 'root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('root')
tmuxp_config['windows'] = []
for w in sconf['windows']:
windowdict = {'window_name': w['name']}
if 'clear' in w:
windowdict['clear'] = w['clear']
if 'filters' in w:
if 'before' in w['filters']:
for b in w['filters']['before']:
windowdict['shell_command_before'] = w['filters']['before']
if 'after' in w['filters']:
for b in w['filters']['after']:
windowdict['shell_command_after'] = w['filters']['after']
if 'root' in w:
windowdict['start_directory'] = w.pop('root')
if 'splits' in w:
w['panes'] = w.pop('splits')
if 'panes' in w:
for p in w['panes']:
if 'cmd' in p:
p['shell_command'] = p.pop('cmd')
if 'width' in p:
# todo support for height/width
p.pop('width')
windowdict['panes'] = w['panes']
if 'layout' in w:
windowdict['layout'] = w['layout']
tmuxp_config['windows'].append(windowdict)
return tmuxp_config
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import (
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.remote import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a remote."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"domain": DOMAIN,
"type": "toggle",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert actions == expected_actions
async def test_action(hass, calls):
"""Test for turn_on and turn_off actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_off",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_on",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event3"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "toggle",
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
hass.bus.async_fire("test_event3")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event3")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
|
import urwid
def menu_button(caption, callback):
button = urwid.Button(caption)
urwid.connect_signal(button, 'click', callback)
return urwid.AttrMap(button, None, focus_map='reversed')
def sub_menu(caption, choices):
contents = menu(caption, choices)
def open_menu(button):
return top.open_box(contents)
return menu_button([caption, u'...'], open_menu)
def menu(title, choices):
body = [urwid.Text(title), urwid.Divider()]
body.extend(choices)
return urwid.ListBox(urwid.SimpleFocusListWalker(body))
def item_chosen(button):
response = urwid.Text([u'You chose ', button.label, u'\n'])
done = menu_button(u'Ok', exit_program)
top.open_box(urwid.Filler(urwid.Pile([response, done])))
def exit_program(button):
raise urwid.ExitMainLoop()
menu_top = menu(u'Main Menu', [
sub_menu(u'Applications', [
sub_menu(u'Accessories', [
menu_button(u'Text Editor', item_chosen),
menu_button(u'Terminal', item_chosen),
]),
]),
sub_menu(u'System', [
sub_menu(u'Preferences', [
menu_button(u'Appearance', item_chosen),
]),
menu_button(u'Lock Screen', item_chosen),
]),
])
class CascadingBoxes(urwid.WidgetPlaceholder):
max_box_levels = 4
def __init__(self, box):
super(CascadingBoxes, self).__init__(urwid.SolidFill(u'/'))
self.box_level = 0
self.open_box(box)
def open_box(self, box):
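# Each nested box is offset a little further right and down (and its free space
# shrinks), which produces the cascading-windows effect.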
self.original_widget = urwid.Overlay(urwid.LineBox(box),
self.original_widget,
align='center', width=('relative', 80),
valign='middle', height=('relative', 80),
min_width=24, min_height=8,
left=self.box_level * 3,
right=(self.max_box_levels - self.box_level - 1) * 3,
top=self.box_level * 2,
bottom=(self.max_box_levels - self.box_level - 1) * 2)
self.box_level += 1
def keypress(self, size, key):
if key == 'esc' and self.box_level > 1:
self.original_widget = self.original_widget[0]
self.box_level -= 1
else:
return super(CascadingBoxes, self).keypress(size, key)
top = CascadingBoxes(menu_top)
urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
|
from ipaddress import ip_address
import logging
import os
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound, HTTPUnauthorized
import voluptuous as vol
from homeassistant.auth.models import User
from homeassistant.auth.providers import homeassistant as auth_ha
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_HASS_USER
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import HTTP_OK
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_USERNAME
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup_auth_view(hass: HomeAssistantType, user: User):
"""Auth setup."""
hassio_auth = HassIOAuth(hass, user)
hassio_password_reset = HassIOPasswordReset(hass, user)
hass.http.register_view(hassio_auth)
hass.http.register_view(hassio_password_reset)
class HassIOBaseAuth(HomeAssistantView):
"""Hass.io view to handle auth requests."""
def __init__(self, hass: HomeAssistantType, user: User):
"""Initialize WebView."""
self.hass = hass
self.user = user
def _check_access(self, request: web.Request):
"""Check if this call is from Supervisor."""
# Check caller IP
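# The HASSIO environment variable is assumed to hold the Supervisor endpoint as
# "host:port"; only that host may issue auth requests.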
hassio_ip = os.environ["HASSIO"].split(":")[0]
if ip_address(request.transport.get_extra_info("peername")[0]) != ip_address(
hassio_ip
):
_LOGGER.error("Invalid auth request from %s", request.remote)
raise HTTPUnauthorized()
# Check caller token
if request[KEY_HASS_USER].id != self.user.id:
_LOGGER.error("Invalid auth request from %s", request[KEY_HASS_USER].name)
raise HTTPUnauthorized()
class HassIOAuth(HassIOBaseAuth):
"""Hass.io view to handle auth requests."""
name = "api:hassio:auth"
url = "/api/hassio_auth"
@RequestDataValidator(
vol.Schema(
{
vol.Required(ATTR_USERNAME): cv.string,
vol.Required(ATTR_PASSWORD): cv.string,
vol.Required(ATTR_ADDON): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
)
async def post(self, request, data):
"""Handle auth requests."""
self._check_access(request)
provider = auth_ha.async_get_provider(request.app["hass"])
try:
await provider.async_validate_login(
data[ATTR_USERNAME], data[ATTR_PASSWORD]
)
except auth_ha.InvalidAuth:
raise HTTPUnauthorized() from None
return web.Response(status=HTTP_OK)
class HassIOPasswordReset(HassIOBaseAuth):
"""Hass.io view to handle password reset requests."""
name = "api:hassio:auth:password:reset"
url = "/api/hassio_auth/password_reset"
@RequestDataValidator(
vol.Schema(
{
vol.Required(ATTR_USERNAME): cv.string,
vol.Required(ATTR_PASSWORD): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
)
async def post(self, request, data):
"""Handle password reset requests."""
self._check_access(request)
provider = auth_ha.async_get_provider(request.app["hass"])
try:
await provider.async_change_password(
data[ATTR_USERNAME], data[ATTR_PASSWORD]
)
except auth_ha.InvalidUser as err:
raise HTTPNotFound() from err
return web.Response(status=HTTP_OK)
|
import tensorflow as tf
from tensorflow.keras.layers import Layer # type: ignore
from tensorflow.keras import activations
from tensorflow.keras import initializers
from typing import List, Optional, Text, Tuple
import tensornetwork as tn
import numpy as np
# pytype: disable=module-attr
@tf.keras.utils.register_keras_serializable(package='tensornetwork')
# pytype: enable=module-attr
class DenseDecomp(Layer):
"""TN layer comparable to Dense that carries out matrix multiplication
with 2 significantly smaller weight matrices instead of 1 large one.
This layer is similar to performing an SVD on the weight matrix and dropping
the lowest singular values.
This layer can take an input shape of arbitrary dimension, with the first
dimension expected to be a batch dimension. The weight matrix will be
constructed from and applied to the last input dimension.
Example:
::
# as first layer in a sequential model:
model = Sequential()
model.add(
DenseDecomp(512,
decomp_size=128,
activation='relu',
input_shape=(1024,)))
# now the model will take as input arrays of shape (*, 1024)
# and output arrays of shape (*, 512).
# After the first layer, you don't need to specify
# the size of the input anymore:
model.add(DenseDecomp(512, decomp_size=128, activation='relu'))
Args:
output_dim: Positive integer, dimensionality of the output space.
decomp_size: Positive integer, size of the intermediate dimension. For
TPU inference, it is recommended to use 128 or a small multiple of 128.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the two weight matrices.
bias_initializer: Initializer for the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., output_dim)`.
"""
def __init__(self,
output_dim: int,
decomp_size: int,
use_bias: Optional[bool] = True,
activation: Optional[Text] = None,
kernel_initializer: Optional[Text] = 'glorot_uniform',
bias_initializer: Optional[Text] = 'zeros',
**kwargs) -> None:
# Allow specification of input_dim instead of input_shape,
# for compatibility with Keras layers that support this
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super().__init__(**kwargs)
self.output_dim = output_dim
self.decomp_size = decomp_size
self.use_bias = use_bias
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape: List[int]) -> None:
# Disable the attribute-defined-outside-init violations in this function
# pylint: disable=attribute-defined-outside-init
if input_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `DenseDecomp` '
'should be defined. Found `None`.')
super().build(input_shape)
self.a_var = self.add_weight(name='a',
shape=(input_shape[-1], self.decomp_size),
trainable=True,
initializer=self.kernel_initializer)
self.b_var = self.add_weight(name='b',
shape=(self.decomp_size, self.output_dim),
trainable=True,
initializer=self.kernel_initializer)
self.bias_var = self.add_weight(
name='bias',
shape=(self.output_dim,),
trainable=True,
initializer=self.bias_initializer) if self.use_bias else None
def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: # pylint: disable=unused-argument
def f(x: tf.Tensor, a_var: tf.Tensor, b_var: tf.Tensor, use_bias: bool,
bias_var: tf.Tensor) -> tf.Tensor:
a = tn.Node(a_var, backend="tensorflow")
b = tn.Node(b_var, backend="tensorflow")
x_node = tn.Node(x, backend="tensorflow")
tn.connect(x_node[0], a[0])
tn.connect(a[1], b[0])
# The TN should now look like this
# |
# b
# |
# a
# |
# x
c = a @ x_node
result = (c @ b).tensor
if use_bias:
result += bias_var
return result
input_shape = list(inputs.shape)
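# Collapse all leading dimensions into one batch axis so vectorized_map can apply
# the two-matrix contraction row by row; the original shape is restored below.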
inputs = tf.reshape(inputs, (-1, input_shape[-1]))
result = tf.vectorized_map(
lambda vec: f(vec, self.a_var, self.b_var, self.use_bias, self.bias_var
), inputs)
if self.activation is not None:
result = self.activation(result)
result = tf.reshape(result, [-1] + input_shape[1:-1] + [self.output_dim,])
return result
def compute_output_shape(self, input_shape: List[int]) -> Tuple[int, int]:
return tuple(input_shape[0:-1]) + (self.output_dim,)
def get_config(self) -> dict:
"""Returns the config of the layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
Python dictionary containing the configuration of the layer.
"""
config = {}
# Include the DenseDecomp-specific arguments
decomp_args = ['output_dim', 'decomp_size', 'use_bias']
for arg in decomp_args:
config[arg] = getattr(self, arg)
# Serialize the activation
config['activation'] = activations.serialize(getattr(self, 'activation'))
# Serialize the initializers
decomp_initializers = ['kernel_initializer', 'bias_initializer']
for initializer_arg in decomp_initializers:
config[initializer_arg] = initializers.serialize(
getattr(self, initializer_arg))
# Get base config
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
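# --- Illustrative sketch, not part of the original layer ---
# The factorization above replaces a single (input_dim x output_dim) kernel with
# two smaller matrices a (input_dim x decomp_size) and b (decomp_size x output_dim),
# so the weight count drops from input_dim * output_dim to
# input_dim * decomp_size + decomp_size * output_dim.
def _decomp_param_counts(input_dim: int, output_dim: int, decomp_size: int):
  """Return (dense_weights, decomposed_weights); bias terms excluded."""
  dense = input_dim * output_dim
  decomposed = input_dim * decomp_size + decomp_size * output_dim
  return dense, decomposed
# Example: input_dim=1024, output_dim=512, decomp_size=128 gives
# 524288 dense weights versus 196608 decomposed weights.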
|
from enocean.protocol.packet import Packet
from enocean.utils import combine_hex
from homeassistant.helpers.entity import Entity
from .const import SIGNAL_RECEIVE_MESSAGE, SIGNAL_SEND_MESSAGE
class EnOceanEntity(Entity):
"""Parent class for all entities associated with the EnOcean component."""
def __init__(self, dev_id, dev_name="EnOcean device"):
"""Initialize the device."""
self.dev_id = dev_id
self.dev_name = dev_name
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_RECEIVE_MESSAGE, self._message_received_callback
)
)
def _message_received_callback(self, packet):
"""Handle incoming packets."""
if packet.sender_int == combine_hex(self.dev_id):
self.value_changed(packet)
def value_changed(self, packet):
"""Update the internal state of the device when a packet arrives."""
def send_command(self, data, optional, packet_type):
"""Send a command via the EnOcean dongle."""
packet = Packet(packet_type, data=data, optional=optional)
self.hass.helpers.dispatcher.dispatcher_send(SIGNAL_SEND_MESSAGE, packet)
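# Hypothetical subclass sketch (the class name and the packet.data index are
# assumptions, not from the original integration) showing how EnOceanEntity is
# meant to be used: override value_changed() to react to packets addressed to
# this device and push the new state to Home Assistant.
class ExampleEnOceanSensor(EnOceanEntity):
    """Illustrative sensor built on top of EnOceanEntity."""

    def __init__(self, dev_id, dev_name="Example sensor"):
        """Initialize the example sensor."""
        super().__init__(dev_id, dev_name)
        self._state = None

    def value_changed(self, packet):
        """Store a value from the incoming packet and push the new state."""
        self._state = packet.data[1]
        self.schedule_update_ha_state()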
|
import argparse
import json
from weblate.auth.models import User
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "imports users from JSON dump of database"
def add_arguments(self, parser):
parser.add_argument(
"--check",
action="store_true",
help="Only check import, do not actually create users",
)
parser.add_argument(
"json-file",
type=argparse.FileType("r"),
help="JSON file containing user dump to import",
)
def handle(self, *args, **options):
data = json.load(options["json-file"])
options["json-file"].close()
for line in data:
if "fields" in line:
line = line["fields"]
if "is_active" in line and not line["is_active"]:
continue
username = line["username"]
email = line["email"]
if not email or not username:
self.stderr.write(f"Skipping {line}, has blank username or email")
continue
if User.objects.filter(username=username).exists():
self.stderr.write(f"Skipping {username}, username exists")
continue
if User.objects.filter(email=email).exists():
self.stderr.write(f"Skipping {email}, email exists")
continue
last_name = line.get("last_name", "")
first_name = line.get("first_name", "")
if last_name and last_name not in first_name:
full_name = f"{first_name} {last_name}"
elif first_name:
full_name = first_name
elif last_name:
full_name = last_name
else:
full_name = username
if not options["check"]:
User.objects.create(
username=username,
full_name=full_name,
password=line.get("password", ""),
email=email,
)
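# Expected input shape (an assumption inferred from handle() above): a Django
# dumpdata-style JSON list where each entry is either a flat dict or wrapped in
# a "fields" key, for example:
#
#     [
#       {"fields": {"username": "alice", "email": "alice@example.com",
#                   "first_name": "Alice", "last_name": "Smith",
#                   "is_active": true, "password": "pbkdf2_sha256$..."}},
#       {"username": "bob", "email": "bob@example.com", "is_active": true}
#     ]
#
# Hypothetical invocation (command name assumed): ./manage.py importusers --check users.json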
|
from homeassistant import config_entries, setup
from homeassistant.components.elkm1.const import DOMAIN
from tests.async_mock import MagicMock, patch
def mock_elk(invalid_auth=None, sync_complete=None):
"""Mock m1lib Elk."""
def handler_callbacks(type_, callback):
nonlocal invalid_auth, sync_complete
if type_ == "login":
if invalid_auth is not None:
callback(not invalid_auth)
elif type_ == "sync_complete":
if sync_complete:
callback()
mocked_elk = MagicMock()
mocked_elk.add_handler.side_effect = handler_callbacks
return mocked_elk
async def test_form_user_with_secure_elk(hass):
"""Test we can setup a secure elk."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mocked_elk = mock_elk(invalid_auth=False, sync_complete=True)
with patch(
"homeassistant.components.elkm1.config_flow.elkm1.Elk",
return_value=mocked_elk,
), patch(
"homeassistant.components.elkm1.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.elkm1.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"protocol": "secure",
"address": "1.2.3.4",
"username": "test-username",
"password": "test-password",
"temperature_unit": "°F",
"prefix": "",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "ElkM1"
assert result2["data"] == {
"auto_configure": True,
"host": "elks://1.2.3.4",
"password": "test-password",
"prefix": "",
"temperature_unit": "°F",
"username": "test-username",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_with_non_secure_elk(hass):
"""Test we can setup a non-secure elk."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mocked_elk = mock_elk(invalid_auth=None, sync_complete=True)
with patch(
"homeassistant.components.elkm1.config_flow.elkm1.Elk",
return_value=mocked_elk,
), patch(
"homeassistant.components.elkm1.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.elkm1.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"protocol": "non-secure",
"address": "1.2.3.4",
"temperature_unit": "°F",
"prefix": "guest_house",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "guest_house"
assert result2["data"] == {
"auto_configure": True,
"host": "elk://1.2.3.4",
"prefix": "guest_house",
"username": "",
"password": "",
"temperature_unit": "°F",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_with_serial_elk(hass):
"""Test we can setup a serial elk."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mocked_elk = mock_elk(invalid_auth=None, sync_complete=True)
with patch(
"homeassistant.components.elkm1.config_flow.elkm1.Elk",
return_value=mocked_elk,
), patch(
"homeassistant.components.elkm1.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.elkm1.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"protocol": "serial",
"address": "/dev/ttyS0:115200",
"temperature_unit": "°C",
"prefix": "",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "ElkM1"
assert result2["data"] == {
"auto_configure": True,
"host": "serial:///dev/ttyS0:115200",
"prefix": "",
"username": "",
"password": "",
"temperature_unit": "°C",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mocked_elk = mock_elk(invalid_auth=None, sync_complete=None)
with patch(
"homeassistant.components.elkm1.config_flow.elkm1.Elk",
return_value=mocked_elk,
), patch(
"homeassistant.components.elkm1.config_flow.VALIDATE_TIMEOUT",
0,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"protocol": "secure",
"address": "1.2.3.4",
"username": "test-username",
"password": "test-password",
"temperature_unit": "°F",
"prefix": "",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mocked_elk = mock_elk(invalid_auth=True, sync_complete=True)
with patch(
"homeassistant.components.elkm1.config_flow.elkm1.Elk",
return_value=mocked_elk,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"protocol": "secure",
"address": "1.2.3.4",
"username": "test-username",
"password": "test-password",
"temperature_unit": "°F",
"prefix": "",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_import(hass):
"""Test we get the form with import source."""
await setup.async_setup_component(hass, "persistent_notification", {})
mocked_elk = mock_elk(invalid_auth=False, sync_complete=True)
with patch(
"homeassistant.components.elkm1.config_flow.elkm1.Elk",
return_value=mocked_elk,
), patch(
"homeassistant.components.elkm1.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.elkm1.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
"host": "elks://1.2.3.4",
"username": "friend",
"password": "love",
"temperature_unit": "C",
"auto_configure": False,
"keypad": {
"enabled": True,
"exclude": [],
"include": [[1, 1], [2, 2], [3, 3]],
},
"output": {"enabled": False, "exclude": [], "include": []},
"counter": {"enabled": False, "exclude": [], "include": []},
"plc": {"enabled": False, "exclude": [], "include": []},
"prefix": "ohana",
"setting": {"enabled": False, "exclude": [], "include": []},
"area": {"enabled": False, "exclude": [], "include": []},
"task": {"enabled": False, "exclude": [], "include": []},
"thermostat": {"enabled": False, "exclude": [], "include": []},
"zone": {
"enabled": True,
"exclude": [[15, 15], [28, 208]],
"include": [],
},
},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "ohana"
assert result["data"] == {
"auto_configure": False,
"host": "elks://1.2.3.4",
"keypad": {"enabled": True, "exclude": [], "include": [[1, 1], [2, 2], [3, 3]]},
"output": {"enabled": False, "exclude": [], "include": []},
"password": "love",
"plc": {"enabled": False, "exclude": [], "include": []},
"prefix": "ohana",
"setting": {"enabled": False, "exclude": [], "include": []},
"area": {"enabled": False, "exclude": [], "include": []},
"counter": {"enabled": False, "exclude": [], "include": []},
"task": {"enabled": False, "exclude": [], "include": []},
"temperature_unit": "C",
"thermostat": {"enabled": False, "exclude": [], "include": []},
"username": "friend",
"zone": {"enabled": True, "exclude": [[15, 15], [28, 208]], "include": []},
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
|
import posixpath
from perfkitbenchmarker import linux_packages
MYSQL_RPM = 'mysql56-community-release-el6-5.noarch.rpm'
MYSQL_PSWD = 'perfkitbenchmarker'
MYSQL_URL = 'https://dev.mysql.com/get/' + MYSQL_RPM
PACKAGE_NAME = 'mysql'
PREPROVISIONED_DATA = {
MYSQL_RPM:
'81b2256f778bb3972054257edda2c2a82fcec455cae3d45ba9c8778a46aa8eb3'
}
PACKAGE_DATA_URL = {
MYSQL_RPM: MYSQL_URL
}
def YumInstall(vm):
"""Installs the mysql package on the VM."""
vm.RemoteCommand('sudo setenforce 0')
vm.InstallPreprovisionedPackageData(PACKAGE_NAME, PREPROVISIONED_DATA.keys(),
linux_packages.INSTALL_DIR)
vm.RemoteCommand('sudo rpm -ivh --force %s' %
posixpath.join(linux_packages.INSTALL_DIR, MYSQL_RPM))
vm.InstallPackages('mysql-server')
vm.RemoteCommand('sudo service mysqld start')
vm.RemoteCommand('/usr/bin/mysqladmin -u root password "%s"' % MYSQL_PSWD)
def AptInstall(vm):
"""Installs the mysql package on the VM."""
vm.RemoteCommand('echo "mysql-server-5.6 mysql-server/root_password password '
'%s" | sudo debconf-set-selections' % MYSQL_PSWD)
vm.RemoteCommand('echo "mysql-server-5.6 mysql-server/root_password_again '
'password %s" | sudo debconf-set-selections' % MYSQL_PSWD)
vm.InstallPackages('mysql-server')
def YumGetPathToConfig(vm):
"""Returns the path to the mysql config file."""
del vm
return '/etc/my.cnf'
def AptGetPathToConfig(vm):
"""Returns the path to the mysql config file."""
del vm
return '/etc/mysql/mysql.conf.d/mysqld.cnf'
def YumGetServiceName(vm):
"""Returns the name of the mysql service."""
del vm
return 'mysqld'
def AptGetServiceName(vm):
"""Returns the name of the mysql service."""
del vm
return 'mysql'
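# Sketch of typical consumption in a benchmark (an assumption for illustration;
# PerfKitBenchmarker dispatches to YumInstall/AptInstall based on the VM's
# package manager when a benchmark installs this package):
#
#     vm.Install('mysql')
#     config_path = YumGetPathToConfig(vm)   # or AptGetPathToConfig(vm)
#     service = YumGetServiceName(vm)        # or AptGetServiceName(vm)
#     vm.RemoteCommand('sudo service %s restart' % service)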
|
from homeassistant.components import tradfri
from homeassistant.helpers.device_registry import (
async_entries_for_config_entry,
async_get_registry as async_get_device_registry,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_config_yaml_host_not_imported(hass):
"""Test that we don't import a configured host."""
MockConfigEntry(domain="tradfri", data={"host": "mock-host"}).add_to_hass(hass)
with patch(
"homeassistant.components.tradfri.load_json", return_value={}
), patch.object(hass.config_entries.flow, "async_init") as mock_init:
assert await async_setup_component(
hass, "tradfri", {"tradfri": {"host": "mock-host"}}
)
await hass.async_block_till_done()
assert len(mock_init.mock_calls) == 0
async def test_config_yaml_host_imported(hass):
"""Test that we import a configured host."""
with patch("homeassistant.components.tradfri.load_json", return_value={}):
assert await async_setup_component(
hass, "tradfri", {"tradfri": {"host": "mock-host"}}
)
await hass.async_block_till_done()
progress = hass.config_entries.flow.async_progress()
assert len(progress) == 1
assert progress[0]["handler"] == "tradfri"
assert progress[0]["context"] == {"source": "import"}
async def test_config_json_host_not_imported(hass):
"""Test that we don't import a configured host."""
MockConfigEntry(domain="tradfri", data={"host": "mock-host"}).add_to_hass(hass)
with patch(
"homeassistant.components.tradfri.load_json",
return_value={"mock-host": {"key": "some-info"}},
), patch.object(hass.config_entries.flow, "async_init") as mock_init:
assert await async_setup_component(hass, "tradfri", {"tradfri": {}})
await hass.async_block_till_done()
assert len(mock_init.mock_calls) == 0
async def test_config_json_host_imported(
hass, mock_gateway_info, mock_entry_setup, gateway_id
):
"""Test that we import a configured host."""
mock_gateway_info.side_effect = lambda hass, host, identity, key: {
"host": host,
"identity": identity,
"key": key,
"gateway_id": gateway_id,
}
with patch(
"homeassistant.components.tradfri.load_json",
return_value={"mock-host": {"key": "some-info"}},
):
assert await async_setup_component(hass, "tradfri", {"tradfri": {}})
await hass.async_block_till_done()
config_entry = mock_entry_setup.mock_calls[0][1][1]
assert config_entry.domain == "tradfri"
assert config_entry.source == "import"
assert config_entry.title == "mock-host"
async def test_entry_setup_unload(hass, api_factory, gateway_id):
"""Test config entry setup and unload."""
entry = MockConfigEntry(
domain=tradfri.DOMAIN,
data={
tradfri.CONF_HOST: "mock-host",
tradfri.CONF_IDENTITY: "mock-identity",
tradfri.CONF_KEY: "mock-key",
tradfri.CONF_IMPORT_GROUPS: True,
tradfri.CONF_GATEWAY_ID: gateway_id,
},
)
entry.add_to_hass(hass)
with patch.object(
hass.config_entries, "async_forward_entry_setup", return_value=True
) as setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert setup.call_count == len(tradfri.PLATFORMS)
dev_reg = await async_get_device_registry(hass)
dev_entries = async_entries_for_config_entry(dev_reg, entry.entry_id)
assert dev_entries
dev_entry = dev_entries[0]
assert dev_entry.identifiers == {
(tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
}
assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as unload:
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert unload.call_count == len(tradfri.PLATFORMS)
assert api_factory.shutdown.call_count == 1
|
from datetime import datetime as dt
from pyflick import FlickAPI
from pyflick.authentication import AbstractFlickAuth
from pyflick.const import DEFAULT_CLIENT_ID, DEFAULT_CLIENT_SECRET
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_PASSWORD,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from .const import CONF_TOKEN_EXPIRES_IN, CONF_TOKEN_EXPIRY, DOMAIN
CONF_ID_TOKEN = "id_token"
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Flick Electric component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Flick Electric from a config entry."""
auth = HassFlickAuth(hass, entry)
hass.data[DOMAIN][entry.entry_id] = FlickAPI(auth)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
if await hass.config_entries.async_forward_entry_unload(entry, "sensor"):
hass.data[DOMAIN].pop(entry.entry_id)
return True
return False
class HassFlickAuth(AbstractFlickAuth):
"""Implementation of AbstractFlickAuth based on a Home Assistant entity config."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Flick authention based on a Home Assistant entity config."""
super().__init__(aiohttp_client.async_get_clientsession(hass))
self._entry = entry
self._hass = hass
async def _get_entry_token(self):
# No token saved, generate one
if (
CONF_TOKEN_EXPIRY not in self._entry.data
or CONF_ACCESS_TOKEN not in self._entry.data
):
await self._update_token()
# Token is expired, generate a new one
if self._entry.data[CONF_TOKEN_EXPIRY] <= dt.now().timestamp():
await self._update_token()
return self._entry.data[CONF_ACCESS_TOKEN]
async def _update_token(self):
token = await self.get_new_token(
username=self._entry.data[CONF_USERNAME],
password=self._entry.data[CONF_PASSWORD],
client_id=self._entry.data.get(CONF_CLIENT_ID, DEFAULT_CLIENT_ID),
client_secret=self._entry.data.get(
CONF_CLIENT_SECRET, DEFAULT_CLIENT_SECRET
),
)
        # Reduce the stored expiry by an hour so we refresh before the token actually expires
expiry = dt.now().timestamp() + int(token[CONF_TOKEN_EXPIRES_IN] - 3600)
self._hass.config_entries.async_update_entry(
self._entry,
data={
**self._entry.data,
CONF_ACCESS_TOKEN: token,
CONF_TOKEN_EXPIRY: expiry,
},
)
async def async_get_access_token(self):
"""Get Access Token from HASS Storage."""
token = await self._get_entry_token()
return token[CONF_ID_TOKEN]
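# Token payload shape assumed by the code above (a sketch, not the documented
# API response): get_new_token() must return a dict containing at least the
# "id_token" used by async_get_access_token() and the expires-in field named by
# CONF_TOKEN_EXPIRES_IN, e.g.
#
#     {"id_token": "<jwt>", "expires_in": 86400}
#
# With that example the stored expiry becomes now + (86400 - 3600) seconds.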
|
import asyncio
from asyncio import CancelledError
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DATA_TASK, DOMAIN, PLATFORMS
async def async_setup(hass, config: dict):
"""Set up the DSMR platform."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up DSMR from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
task = hass.data[DOMAIN][entry.entry_id][DATA_TASK]
# Cancel the reconnect task
task.cancel()
try:
await task
except CancelledError:
pass
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
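# Note (an assumption about code outside this file): the DATA_TASK entry
# cancelled in async_unload_entry is expected to be stored by one of the
# forwarded platforms, roughly like this sketch:
#
#     task = hass.loop.create_task(connect_and_keep_reconnecting())
#     hass.data[DOMAIN][entry.entry_id][DATA_TASK] = task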
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc_accept)
has_brainstorm_data = partial(has_dataset, name='brainstorm.bst_resting')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetResting
- One subject
- Two runs of 10 min of resting state recordings
- Eyes open
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
*, accept=False, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name='bst_resting.tar.gz',
accept=accept)
_data_path_doc = _data_path_doc_accept.format(
name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_resting) dataset')
data_path.__doc__ = _data_path_doc
def get_version(): # noqa: D103
return _get_version('brainstorm.bst_resting')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_resting) dataset."""
for desc in _description.splitlines():
print(desc)
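# Minimal usage sketch (assumes network access and acceptance of the Brainstorm
# license prompt):
#
#     from mne.datasets.brainstorm import bst_resting
#     path = bst_resting.data_path(accept=True)
#     print(bst_resting.get_version())
#     bst_resting.description()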
|
from __future__ import print_function
import sys
import argparse
import requests
from six.moves.urllib.parse import urlparse
try:
import clipboard
except ImportError:
clipboard = None
def main(args):
ap = argparse.ArgumentParser()
    ap.add_argument('url', nargs='?', help='the url to read (defaults to clipboard)')
ap.add_argument('-o', '--output-file', help='write output to file instead of stdout')
ap.add_argument(
'-O',
'--remote-name',
action='store_true',
help='write output to a local file named like the remote file we get'
)
ap.add_argument(
'-L',
'--location',
action='store_true',
help='follow redirects to other web pages (if the URL has a 3XX response code)'
)
ap.add_argument(
'-X',
'--request-method',
default='GET',
choices=['GET',
'POST',
'HEAD'],
        help='specify the request method to use (defaults to GET)'
)
ap.add_argument('-H', '--header', help='Custom header to pass to server (H)')
ap.add_argument('-d', '--data', help='HTTP POST data (H)')
ns = ap.parse_args(args)
    if ns.url is None and clipboard is None:
        print('no url given and the clipboard module is not available')
        return
    url = ns.url or clipboard.get()
headers = {}
if ns.header:
for h in ns.header.split(';'):
            name, value = h.split(':', 1)
headers[name.strip()] = value.strip()
if ns.request_method == 'GET':
r = requests.get(
url,
headers=headers,
allow_redirects=ns.location
)
elif ns.request_method == 'POST':
r = requests.post(
url,
data=ns.data,
headers=headers,
allow_redirects=ns.location
)
elif ns.request_method == 'HEAD':
r = requests.head(
url,
headers=headers,
allow_redirects=ns.location
)
else:
print('unknown request method: {}'.format(ns.request_method))
return
if ns.output_file:
with open(ns.output_file, 'wb') as outs:
outs.write(r.content)
elif ns.remote_name:
# get basename of url
url_path = urlparse(url).path
filename = url_path.split('/')[-1]
with open(filename, 'wb') as outs:
outs.write(r.content)
else:
print(r.text)
if __name__ == '__main__':
main(sys.argv[1:])
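# Example invocations (the script name is illustrative; on Pythonista the
# `clipboard` module supplies the URL when none is given):
#
#     python wget.py -L -O https://example.com/archive.tar.gz
#     python wget.py -X POST -d "a=1&b=2" -H "Accept: application/json" https://example.com/api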
|