import logging
from homeassistant.const import (
DEGREE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_MILLIMETERS,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
SPEED_KILOMETERS_PER_HOUR,
TEMP_CELSIUS,
VOLT,
VOLUME_CUBIC_METERS,
)
from .const import ATTR_DISCOVER_DEVICES
from .entity import HMDevice
_LOGGER = logging.getLogger(__name__)
HM_STATE_HA_CAST = {
"IPGarage": {0: "closed", 1: "open", 2: "ventilation", 3: None},
"RotaryHandleSensor": {0: "closed", 1: "tilted", 2: "open"},
"RotaryHandleSensorIP": {0: "closed", 1: "tilted", 2: "open"},
"WaterSensor": {0: "dry", 1: "wet", 2: "water"},
"CO2Sensor": {0: "normal", 1: "added", 2: "strong"},
"IPSmoke": {0: "off", 1: "primary", 2: "intrusion", 3: "secondary"},
"RFSiren": {
0: "disarmed",
1: "extsens_armed",
2: "allsens_armed",
3: "alarm_blocked",
},
}
HM_UNIT_HA_CAST = {
"HUMIDITY": PERCENTAGE,
"TEMPERATURE": TEMP_CELSIUS,
"ACTUAL_TEMPERATURE": TEMP_CELSIUS,
"BRIGHTNESS": "#",
"POWER": POWER_WATT,
"CURRENT": "mA",
"VOLTAGE": VOLT,
"ENERGY_COUNTER": ENERGY_WATT_HOUR,
"GAS_POWER": VOLUME_CUBIC_METERS,
"GAS_ENERGY_COUNTER": VOLUME_CUBIC_METERS,
"LUX": LIGHT_LUX,
"ILLUMINATION": LIGHT_LUX,
"CURRENT_ILLUMINATION": LIGHT_LUX,
"AVERAGE_ILLUMINATION": LIGHT_LUX,
"LOWEST_ILLUMINATION": LIGHT_LUX,
"HIGHEST_ILLUMINATION": LIGHT_LUX,
"RAIN_COUNTER": LENGTH_MILLIMETERS,
"WIND_SPEED": SPEED_KILOMETERS_PER_HOUR,
"WIND_DIRECTION": DEGREE,
"WIND_DIRECTION_RANGE": DEGREE,
"SUNSHINEDURATION": "#",
"AIR_PRESSURE": PRESSURE_HPA,
"FREQUENCY": FREQUENCY_HERTZ,
"VALUE": "#",
"VALVE_STATE": PERCENTAGE,
}
HM_DEVICE_CLASS_HA_CAST = {
"HUMIDITY": DEVICE_CLASS_HUMIDITY,
"TEMPERATURE": DEVICE_CLASS_TEMPERATURE,
"ACTUAL_TEMPERATURE": DEVICE_CLASS_TEMPERATURE,
"LUX": DEVICE_CLASS_ILLUMINANCE,
"CURRENT_ILLUMINATION": DEVICE_CLASS_ILLUMINANCE,
"AVERAGE_ILLUMINATION": DEVICE_CLASS_ILLUMINANCE,
"LOWEST_ILLUMINATION": DEVICE_CLASS_ILLUMINANCE,
"HIGHEST_ILLUMINATION": DEVICE_CLASS_ILLUMINANCE,
"POWER": DEVICE_CLASS_POWER,
"CURRENT": DEVICE_CLASS_POWER,
}
HM_ICON_HA_CAST = {"WIND_SPEED": "mdi:weather-windy", "BRIGHTNESS": "mdi:invert-colors"}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the HomeMatic sensor platform."""
if discovery_info is None:
return
devices = []
for conf in discovery_info[ATTR_DISCOVER_DEVICES]:
new_device = HMSensor(conf)
devices.append(new_device)
add_entities(devices, True)
class HMSensor(HMDevice):
"""Representation of a HomeMatic sensor."""
@property
def state(self):
"""Return the state of the sensor."""
# Does a cast exist for this class?
name = self._hmdevice.__class__.__name__
if name in HM_STATE_HA_CAST:
return HM_STATE_HA_CAST[name].get(self._hm_get_state())
# No cast, return original value
return self._hm_get_state()
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return HM_UNIT_HA_CAST.get(self._state)
@property
def device_class(self):
"""Return the device class to use in the frontend, if any."""
return HM_DEVICE_CLASS_HA_CAST.get(self._state)
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return HM_ICON_HA_CAST.get(self._state)
def _init_data_struct(self):
"""Generate a data dictionary (self._data) from metadata."""
if self._state:
self._data.update({self._state: None})
else:
_LOGGER.critical("Unable to initialize sensor: %s", self._name)
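# Illustrative sketch (added, not part of the upstream platform): how the
# cast in HMSensor.state resolves values. Mapped classes use the tables
# above; unknown raw values fall back to None via dict.get(), and classes
# without a cast table return the raw value unchanged.
def _demo_state_cast():
    """Minimal check of the cast tables defined above."""
    assert HM_STATE_HA_CAST["WaterSensor"].get(1) == "wet"
    assert HM_STATE_HA_CAST["WaterSensor"].get(99) is None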
|
import os
import shutil
import stat
from django.conf import settings
from translation_finder.finder import EXCLUDES
DEFAULT_DATA_DIR = os.path.join(settings.BASE_DIR, "data")
DEFAULT_TEST_DIR = os.path.join(settings.BASE_DIR, "data-test")
BUILD_DIR = os.path.join(settings.BASE_DIR, "build")
VENV_DIR = os.path.join(settings.BASE_DIR, ".venv")
DOCS_DIR = os.path.join(settings.BASE_DIR, "docs")
SCRIPTS_DIR = os.path.join(settings.BASE_DIR, "scripts")
EXAMPLES_DIR = os.path.join(settings.BASE_DIR, "weblate", "examples")
PATH_EXCLUDES = [f"/{exclude}/" for exclude in EXCLUDES]
def remove_readonly(func, path, excinfo):
"""Clear the readonly bit and reattempt the removal."""
if isinstance(excinfo[1], FileNotFoundError):
return
if os.path.isdir(path):
os.chmod(path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
else:
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
if func in (os.open, os.lstat, os.rmdir):
# Failed to remove a directory
remove_tree(path)
else:
func(path)
def remove_tree(path: str, ignore_errors: bool = False):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=remove_readonly)
def should_skip(location):
"""Check for skipping location in manage commands."""
location = os.path.abspath(location)
return (
location.startswith(VENV_DIR)
or location.startswith(settings.DATA_DIR)
or location.startswith(DEFAULT_DATA_DIR)
or location.startswith(BUILD_DIR)
or location.startswith(DEFAULT_TEST_DIR)
or location.startswith(DOCS_DIR)
or location.startswith(SCRIPTS_DIR)
or location.startswith(EXAMPLES_DIR)
)
def is_excluded(path):
"""Whether path should be excluded from zip extraction."""
for exclude in PATH_EXCLUDES:
if exclude in path:
return True
return False
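# Illustrative sketch (added, not part of the original module), assuming
# ".git" is among translation_finder's EXCLUDES: is_excluded() matches an
# excluded entry wrapped in slashes anywhere inside the path.
def _demo_is_excluded():
    assert is_excluded("/repo/.git/config")
    assert not is_excluded("/repo/po/cs.po")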
|
from adafruit_mcp230xx.mcp23017 import MCP23017 # pylint: disable=import-error
import board # pylint: disable=import-error
import busio # pylint: disable=import-error
import digitalio # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SWITCHES_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MCP23017 devices."""
invert_logic = config.get(CONF_INVERT_LOGIC)
i2c_address = config.get(CONF_I2C_ADDRESS)
i2c = busio.I2C(board.SCL, board.SDA)
mcp = MCP23017(i2c, address=i2c_address)
switches = []
pins = config.get(CONF_PINS)
for pin_num, pin_name in pins.items():
pin = mcp.get_pin(pin_num)
switches.append(MCP23017Switch(pin_name, pin, invert_logic))
add_entities(switches)
class MCP23017Switch(ToggleEntity):
"""Representation of a MCP23017 output pin."""
def __init__(self, name, pin, invert_logic):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._pin = pin
self._invert_logic = invert_logic
self._state = False
self._pin.direction = digitalio.Direction.OUTPUT
self._pin.value = self._invert_logic
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if optimistic updates are used."""
return True
def turn_on(self, **kwargs):
"""Turn the device on."""
self._pin.value = not self._invert_logic
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._pin.value = self._invert_logic
self._state = False
self.schedule_update_ha_state()
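# Illustrative YAML sketch (added; the pin names are hypothetical) matching
# the PLATFORM_SCHEMA above:
#
# switch:
#   - platform: mcp23017
#     i2c_address: 0x20
#     invert_logic: false
#     pins:
#       8: Fan
#       9: Lamp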
|
import pytest
from homeassistant.components.input_select import (
ATTR_OPTION,
ATTR_OPTIONS,
CONF_INITIAL,
DOMAIN,
SERVICE_SELECT_NEXT,
SERVICE_SELECT_OPTION,
SERVICE_SELECT_PREVIOUS,
SERVICE_SET_OPTIONS,
)
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import Context, State
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import entity_registry
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component
# pylint: disable=protected-access
from tests.async_mock import patch
from tests.common import mock_restore_cache
@pytest.fixture
def storage_setup(hass, hass_storage):
"""Storage setup."""
async def _storage(items=None, config=None):
if items is None:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {
"items": [
{
"id": "from_storage",
"name": "from storage",
"options": ["storage option 1", "storage option 2"],
}
]
},
}
else:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {"items": items},
}
if config is None:
config = {DOMAIN: {}}
return await async_setup_component(hass, DOMAIN, config)
return _storage
@bind_hass
def select_option(hass, entity_id, option):
"""Set value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_SELECT_OPTION,
{ATTR_ENTITY_ID: entity_id, ATTR_OPTION: option},
)
)
@bind_hass
def select_next(hass, entity_id):
"""Set next value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_NEXT, {ATTR_ENTITY_ID: entity_id}
)
)
@bind_hass
def select_previous(hass, entity_id):
"""Set previous value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_PREVIOUS, {ATTR_ENTITY_ID: entity_id}
)
)
async def test_config(hass):
"""Test config."""
invalid_configs = [
None,
{},
{"name with space": None},
# {'bad_options': {'options': None}},
{"bad_initial": {"options": [1, 2], "initial": 3}},
]
for cfg in invalid_configs:
assert not await async_setup_component(hass, DOMAIN, {DOMAIN: cfg})
async def test_select_option(hass):
"""Test select_option methods."""
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {"test_1": {"options": ["some option", "another option"]}}},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "some option" == state.state
select_option(hass, entity_id, "another option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "another option" == state.state
select_option(hass, entity_id, "non existing option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "another option" == state.state
async def test_select_next(hass):
"""Test select_next methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "middle option" == state.state
select_next(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "last option" == state.state
select_next(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "first option" == state.state
async def test_select_previous(hass):
"""Test select_previous methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "middle option" == state.state
select_previous(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "first option" == state.state
select_previous(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "last option" == state.state
async def test_config_options(hass):
"""Test configuration options."""
count_start = len(hass.states.async_entity_ids())
test_2_options = ["Good Option", "Better Option", "Best Option"]
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {"options": [1, 2]},
"test_2": {
"name": "Hello World",
"icon": "mdi:work",
"options": test_2_options,
"initial": "Better Option",
},
}
},
)
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
assert state_1 is not None
assert state_2 is not None
assert "1" == state_1.state
assert ["1", "2"] == state_1.attributes.get(ATTR_OPTIONS)
assert ATTR_ICON not in state_1.attributes
assert "Better Option" == state_2.state
assert test_2_options == state_2.attributes.get(ATTR_OPTIONS)
assert "Hello World" == state_2.attributes.get(ATTR_FRIENDLY_NAME)
assert "mdi:work" == state_2.attributes.get(ATTR_ICON)
async def test_set_options_service(hass):
"""Test set_options service."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "middle option" == state.state
data = {ATTR_OPTIONS: ["test1", "test2"], "entity_id": entity_id}
await hass.services.async_call(DOMAIN, SERVICE_SET_OPTIONS, data)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "test1" == state.state
select_option(hass, entity_id, "first option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "test1" == state.state
select_option(hass, entity_id, "test2")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "test2" == state.state
async def test_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_select.s1", "last option"),
State("input_select.s2", "bad option"),
),
)
options = {"options": ["first option", "middle option", "last option"]}
await async_setup_component(hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}})
state = hass.states.get("input_select.s1")
assert state
assert state.state == "last option"
state = hass.states.get("input_select.s2")
assert state
assert state.state == "first option"
async def test_initial_state_overrules_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_select.s1", "last option"),
State("input_select.s2", "bad option"),
),
)
options = {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
await async_setup_component(hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}})
state = hass.states.get("input_select.s1")
assert state
assert state.state == "middle option"
state = hass.states.get("input_select.s2")
assert state
assert state.state == "middle option"
async def test_input_select_context(hass, hass_admin_user):
"""Test that input_select context works."""
assert await async_setup_component(
hass,
"input_select",
{
"input_select": {
"s1": {"options": ["first option", "middle option", "last option"]}
}
},
)
state = hass.states.get("input_select.s1")
assert state is not None
await hass.services.async_call(
"input_select",
"select_next",
{"entity_id": state.entity_id},
True,
Context(user_id=hass_admin_user.id),
)
state2 = hass.states.get("input_select.s1")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
async def test_reload(hass, hass_admin_user, hass_read_only_user):
"""Test reload service."""
count_start = len(hass.states.async_entity_ids())
ent_reg = await entity_registry.async_get_registry(hass)
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
},
"test_2": {
"options": ["an option", "not an option"],
"initial": "an option",
},
}
},
)
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
state_3 = hass.states.get("input_select.test_3")
assert state_1 is not None
assert state_2 is not None
assert state_3 is None
assert "middle option" == state_1.state
assert "an option" == state_2.state
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_1") is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_2") is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_3") is None
with patch(
"homeassistant.config.load_yaml_config_file",
autospec=True,
return_value={
DOMAIN: {
"test_2": {
"options": ["an option", "reloaded option"],
"initial": "reloaded option",
},
"test_3": {
"options": ["new option", "newer option"],
"initial": "newer option",
},
}
},
):
with pytest.raises(Unauthorized):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
await hass.async_block_till_done()
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
state_3 = hass.states.get("input_select.test_3")
assert state_1 is None
assert state_2 is not None
assert state_3 is not None
assert "an option" == state_2.state
assert "newer option" == state_3.state
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_1") is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_2") is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_3") is not None
async def test_load_from_storage(hass, storage_setup):
"""Test set up from storage."""
assert await storage_setup()
state = hass.states.get(f"{DOMAIN}.from_storage")
assert state.state == "storage option 1"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "from storage"
assert state.attributes.get(ATTR_EDITABLE)
async def test_editable_state_attribute(hass, storage_setup):
"""Test editable attribute."""
assert await storage_setup(
config={DOMAIN: {"from_yaml": {"options": ["yaml option", "other option"]}}}
)
state = hass.states.get(f"{DOMAIN}.from_storage")
assert state.state == "storage option 1"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "from storage"
assert state.attributes.get(ATTR_EDITABLE)
state = hass.states.get(f"{DOMAIN}.from_yaml")
assert state.state == "yaml option"
assert not state.attributes.get(ATTR_EDITABLE)
async def test_ws_list(hass, hass_ws_client, storage_setup):
"""Test listing via WS."""
assert await storage_setup(
config={DOMAIN: {"from_yaml": {"options": ["yaml option"]}}}
)
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": f"{DOMAIN}/list"})
resp = await client.receive_json()
assert resp["success"]
storage_ent = "from_storage"
yaml_ent = "from_yaml"
result = {item["id"]: item for item in resp["result"]}
assert len(result) == 1
assert storage_ent in result
assert yaml_ent not in result
assert result[storage_ent][ATTR_NAME] == "from storage"
async def test_ws_delete(hass, hass_ws_client, storage_setup):
"""Test WS delete cleans up entity registry."""
assert await storage_setup()
input_id = "from_storage"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = await entity_registry.async_get_registry(hass)
state = hass.states.get(input_entity_id)
assert state is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 6, "type": f"{DOMAIN}/delete", f"{DOMAIN}_id": f"{input_id}"}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
async def test_update(hass, hass_ws_client, storage_setup):
"""Test updating min/max updates the state."""
items = [
{
"id": "from_storage",
"name": "from storage",
"options": ["yaml update 1", "yaml update 2"],
}
]
assert await storage_setup(items)
input_id = "from_storage"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = await entity_registry.async_get_registry(hass)
state = hass.states.get(input_entity_id)
assert state.attributes[ATTR_OPTIONS] == ["yaml update 1", "yaml update 2"]
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is not None
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/update",
f"{DOMAIN}_id": f"{input_id}",
"options": ["new option", "newer option"],
CONF_INITIAL: "newer option",
}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state.attributes[ATTR_OPTIONS] == ["new option", "newer option"]
await client.send_json(
{
"id": 7,
"type": f"{DOMAIN}/update",
f"{DOMAIN}_id": f"{input_id}",
"options": ["new option", "no newer option"],
}
)
resp = await client.receive_json()
assert not resp["success"]
async def test_ws_create(hass, hass_ws_client, storage_setup):
"""Test create WS."""
assert await storage_setup(items=[])
input_id = "new_input"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = await entity_registry.async_get_registry(hass)
state = hass.states.get(input_entity_id)
assert state is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/create",
"name": "New Input",
"options": ["new option", "even newer option"],
"initial": "even newer option",
}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state.state == "even newer option"
async def test_setup_no_config(hass, hass_admin_user):
"""Test component setup with no config."""
count_start = len(hass.states.async_entity_ids())
assert await async_setup_component(hass, DOMAIN, {})
with patch(
"homeassistant.config.load_yaml_config_file", autospec=True, return_value={}
):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
await hass.async_block_till_done()
assert count_start == len(hass.states.async_entity_ids())
|
from functools import partial
import ipaddress
import getmac
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from . import MinecraftServer, helpers
from .const import ( # pylint: disable=unused-import
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
)
class MinecraftServerConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Minecraft Server."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
host = None
port = DEFAULT_PORT
# Split address at last occurrence of ':'.
address_left, separator, address_right = user_input[CONF_HOST].rpartition(
":"
)
            # If no separator is found, 'rpartition' returns ('', '', original_string).
if separator == "":
host = address_right
else:
host = address_left
try:
port = int(address_right)
except ValueError:
pass # 'port' is already set to default value.
# Remove '[' and ']' in case of an IPv6 address.
host = host.strip("[]")
# Check if 'host' is a valid IP address and if so, get the MAC address.
ip_address = None
mac_address = None
try:
ip_address = ipaddress.ip_address(host)
except ValueError:
# Host is not a valid IP address.
# Continue with host and port.
pass
else:
# Host is a valid IP address.
if ip_address.version == 4:
# Address type is IPv4.
params = {"ip": host}
else:
# Address type is IPv6.
params = {"ip6": host}
mac_address = await self.hass.async_add_executor_job(
partial(getmac.get_mac_address, **params)
)
# Validate IP address (MAC address must be available).
if ip_address is not None and mac_address is None:
errors["base"] = "invalid_ip"
# Validate port configuration (limit to user and dynamic port range).
elif (port < 1024) or (port > 65535):
errors["base"] = "invalid_port"
# Validate host and port by checking the server connection.
else:
# Create server instance with configuration data and ping the server.
config_data = {
CONF_NAME: user_input[CONF_NAME],
CONF_HOST: host,
CONF_PORT: port,
}
server = MinecraftServer(self.hass, "dummy_unique_id", config_data)
await server.async_check_connection()
if not server.online:
# Host or port invalid or server not reachable.
errors["base"] = "cannot_connect"
else:
# Build unique_id and config entry title.
unique_id = ""
title = f"{host}:{port}"
if ip_address is not None:
# Since IP addresses can change and therefore are not allowed in a
# unique_id, fall back to the MAC address and port (to support
# servers with same MAC address but different ports).
unique_id = f"{mac_address}-{port}"
if ip_address.version == 6:
title = f"[{host}]:{port}"
else:
# Check if 'host' is a valid SRV record.
srv_record = await helpers.async_check_srv_record(
self.hass, host
)
if srv_record is not None:
# Use only SRV host name in unique_id (does not change).
unique_id = f"{host}-srv"
title = host
else:
# Use host name and port in unique_id (to support servers with
# same host name but different ports).
unique_id = f"{host}-{port}"
# Abort in case the host was already configured before.
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
                    # Configuration data are available and no error was detected; create the configuration entry.
return self.async_create_entry(title=title, data=config_data)
        # Show configuration form (default form in case of no user_input,
        # form filled with user_input and possibly with errors otherwise).
return self._show_config_form(user_input, errors)
def _show_config_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(
CONF_HOST, default=user_input.get(CONF_HOST, DEFAULT_HOST)
): vol.All(str, vol.Lower),
}
),
errors=errors,
)
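# Illustrative restatement (added, not used by the flow) of the host/port
# split performed in async_step_user above:
def _demo_split_address(address, default_port=DEFAULT_PORT):
    """Mirror the rpartition-based parsing, e.g.:

    "mc.example.com"       -> ("mc.example.com", default_port)
    "mc.example.com:25566" -> ("mc.example.com", 25566)
    "[2001:db8::1]:25566"  -> ("2001:db8::1", 25566)
    """
    address_left, separator, address_right = address.rpartition(":")
    if separator == "":
        return address_right.strip("[]"), default_port
    try:
        return address_left.strip("[]"), int(address_right)
    except ValueError:
        return address_left.strip("[]"), default_port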
|
import pytest
from redbot.core.rpc import RPC, RPCMixin
from unittest.mock import MagicMock
__all__ = ["rpc", "rpcmixin", "cog", "existing_func", "existing_multi_func"]
@pytest.fixture()
def rpc():
return RPC()
@pytest.fixture()
def rpcmixin():
r = RPCMixin()
r.rpc = MagicMock(spec=RPC)
return r
@pytest.fixture()
def cog():
class Cog:
async def cofunc(*args, **kwargs):
pass
async def cofunc2(*args, **kwargs):
pass
async def cofunc3(*args, **kwargs):
pass
def func(*args, **kwargs):
pass
return Cog()
@pytest.fixture()
def existing_func(rpc, cog):
rpc.add_method(cog.cofunc)
return cog.cofunc
@pytest.fixture()
def existing_multi_func(rpc, cog):
funcs = [cog.cofunc, cog.cofunc2, cog.cofunc3]
rpc.add_multi_method(*funcs)
return funcs
|
import keras
import keras.backend as K
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
class MatchLSTM(BaseModel):
"""
Match LSTM model.
Examples:
>>> model = MatchLSTM()
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.params['embedding_input_dim'] = 10000
>>> model.params['embedding_output_dim'] = 100
>>> model.params['embedding_trainable'] = True
>>> model.params['fc_num_units'] = 200
>>> model.params['lstm_num_units'] = 256
>>> model.params['dropout_rate'] = 0.5
>>> model.build()
"""
@classmethod
def get_default_params(cls):
"""Get default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(
'lstm_num_units', 256,
hyper_space=hyper_spaces.quniform(low=128, high=384, q=32),
desc="The hidden size in the LSTM layer."
))
params.add(Param(
'fc_num_units', 200,
hyper_space=hyper_spaces.quniform(
low=100, high=300, q=20),
desc="The hidden size in the full connection layer."
))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.9, q=0.01),
desc="The dropout rate."
))
return params
def build(self):
"""Build model."""
input_left, input_right = self._make_inputs()
len_left = input_left.shape[1]
len_right = input_right.shape[1]
embedding = self._make_embedding_layer()
embed_left = embedding(input_left)
embed_right = embedding(input_right)
lstm_left = keras.layers.LSTM(self._params['lstm_num_units'],
return_sequences=True,
name='lstm_left')
lstm_right = keras.layers.LSTM(self._params['lstm_num_units'],
return_sequences=True,
name='lstm_right')
encoded_left = lstm_left(embed_left)
encoded_right = lstm_right(embed_right)
def attention(tensors):
"""Attention layer."""
left, right = tensors
tensor_left = tf.expand_dims(left, axis=2)
tensor_right = tf.expand_dims(right, axis=1)
tensor_left = K.repeat_elements(tensor_left, len_right, 2)
tensor_right = K.repeat_elements(tensor_right, len_left, 1)
tensor_merged = tf.concat([tensor_left, tensor_right], axis=-1)
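            # Added shape note: 'left'/'right' are (batch, len, units) LSTM
            # outputs; after expand_dims and repeat_elements both tensors are
            # (batch, len_left, len_right, units), so the concatenation is
            # (batch, len_left, len_right, 2 * units).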
middle_output = keras.layers.Dense(self._params['fc_num_units'],
activation='tanh')(
tensor_merged)
attn_scores = keras.layers.Dense(1)(middle_output)
attn_scores = tf.squeeze(attn_scores, axis=3)
exp_attn_scores = tf.math.exp(
attn_scores - tf.reduce_max(attn_scores, axis=-1, keepdims=True))
exp_sum = tf.reduce_sum(exp_attn_scores, axis=-1, keepdims=True)
attention_weights = exp_attn_scores / exp_sum
return K.batch_dot(attention_weights, right)
attn_layer = keras.layers.Lambda(attention)
left_attn_vec = attn_layer([encoded_left, encoded_right])
concat = keras.layers.Concatenate(axis=1)(
[left_attn_vec, encoded_right])
lstm_merge = keras.layers.LSTM(self._params['lstm_num_units'] * 2,
return_sequences=False,
name='lstm_merge')
merged = lstm_merge(concat)
dropout = keras.layers.Dropout(
rate=self._params['dropout_rate'])(merged)
phi = keras.layers.Dense(self._params['fc_num_units'],
activation='tanh')(dropout)
inputs = [input_left, input_right]
out = self._make_output_layer()(phi)
self._backend = keras.Model(inputs=inputs, outputs=[out])
|
import logging
from RFXtrx import ControlEvent, SensorEvent
from homeassistant.components.sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.const import (
CONF_DEVICES,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_VOLTAGE,
)
from homeassistant.core import callback
from . import (
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
DATA_TYPES,
SIGNAL_EVENT,
RfxtrxEntity,
get_device_id,
get_rfx_object,
)
from .const import ATTR_EVENT
_LOGGER = logging.getLogger(__name__)
def _battery_convert(value):
"""Battery is given as a value between 0 and 9."""
if value is None:
return None
return (value + 1) * 10
def _rssi_convert(value):
"""Rssi is given as dBm value."""
if value is None:
return None
return f"{value*8-120}"
DEVICE_CLASSES = {
"Barometer": DEVICE_CLASS_PRESSURE,
"Battery numeric": DEVICE_CLASS_BATTERY,
"Current Ch. 1": DEVICE_CLASS_CURRENT,
"Current Ch. 2": DEVICE_CLASS_CURRENT,
"Current Ch. 3": DEVICE_CLASS_CURRENT,
"Energy usage": DEVICE_CLASS_POWER,
"Humidity": DEVICE_CLASS_HUMIDITY,
"Rssi numeric": DEVICE_CLASS_SIGNAL_STRENGTH,
"Temperature": DEVICE_CLASS_TEMPERATURE,
"Total usage": DEVICE_CLASS_ENERGY,
"Voltage": DEVICE_CLASS_VOLTAGE,
}
CONVERT_FUNCTIONS = {
"Battery numeric": _battery_convert,
"Rssi numeric": _rssi_convert,
}
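# Illustrative values for the converters above (added note):
# _battery_convert(9) -> 100 (percent; the raw scale is 0-9)
# _rssi_convert(9) -> "-48" (dBm, returned as a string)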
async def async_setup_entry(
hass,
config_entry,
async_add_entities,
):
"""Set up platform."""
discovery_info = config_entry.data
data_ids = set()
def supported(event):
return isinstance(event, (ControlEvent, SensorEvent))
entities = []
for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("Invalid device: %s", packet_id)
continue
if not supported(event):
continue
device_id = get_device_id(
event.device, data_bits=entity_info.get(CONF_DATA_BITS)
)
for data_type in set(event.values) & set(DATA_TYPES):
data_id = (*device_id, data_type)
if data_id in data_ids:
continue
data_ids.add(data_id)
entity = RfxtrxSensor(event.device, device_id, data_type)
entities.append(entity)
async_add_entities(entities)
@callback
def sensor_update(event, device_id):
"""Handle sensor updates from the RFXtrx gateway."""
if not supported(event):
return
for data_type in set(event.values) & set(DATA_TYPES):
data_id = (*device_id, data_type)
if data_id in data_ids:
continue
data_ids.add(data_id)
_LOGGER.info(
"Added sensor (Device ID: %s Class: %s Sub: %s, Event: %s)",
event.device.id_string.lower(),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
entity = RfxtrxSensor(event.device, device_id, data_type, event=event)
async_add_entities([entity])
# Subscribe to main RFXtrx events
if discovery_info[CONF_AUTOMATIC_ADD]:
hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_EVENT, sensor_update)
class RfxtrxSensor(RfxtrxEntity):
"""Representation of a RFXtrx sensor."""
def __init__(self, device, device_id, data_type, event=None):
"""Initialize the sensor."""
super().__init__(device, device_id, event=event)
self.data_type = data_type
self._unit_of_measurement = DATA_TYPES.get(data_type)
self._name = f"{device.type_string} {device.id_string} {data_type}"
self._unique_id = "_".join(x for x in (*self._device_id, data_type))
self._device_class = DEVICE_CLASSES.get(data_type)
self._convert_fun = CONVERT_FUNCTIONS.get(data_type, lambda x: x)
async def async_added_to_hass(self):
"""Restore device state."""
await super().async_added_to_hass()
if self._event is None:
old_state = await self.async_get_last_state()
if old_state is not None:
event = old_state.attributes.get(ATTR_EVENT)
if event:
self._apply_event(get_rfx_object(event))
@property
def state(self):
"""Return the state of the sensor."""
if not self._event:
return None
value = self._event.values.get(self.data_type)
return self._convert_fun(value)
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def force_update(self) -> bool:
"""We should force updates. Repeated states have meaning."""
return True
@property
def device_class(self):
"""Return a device class for sensor."""
return self._device_class
@callback
def _handle_event(self, event, device_id):
"""Check if event applies to me and update."""
if device_id != self._device_id:
return
if self.data_type not in event.values:
return
_LOGGER.debug(
"Sensor update (Device ID: %s Class: %s Sub: %s)",
event.device.id_string,
event.device.__class__.__name__,
event.device.subtype,
)
self._apply_event(event)
self.async_write_ha_state()
|
import unittest
from absl import flags
from absl.testing import parameterized
import mock
from perfkitbenchmarker.linux_packages import gce_hpc_tools
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
_GITHASH = 'abcdef'
def _YumInstall():
vm = mock.Mock(RemoteCommand=mock.Mock(return_value=(_GITHASH, '')))
vm.metadata = {}
gce_hpc_tools.YumInstall(vm)
return vm
class GcpHpcToolsTest(pkb_common_test_case.PkbCommonTestCase):
@parameterized.named_parameters(
('has_reboot', 'reboot', True),
('no_reboot', 'tcpmem', False),
)
  def testRebootFlag(self, hpc_tools_tuning_flag, wait_for_reboot_called):
    FLAGS.gce_hpc_tools_tuning = [hpc_tools_tuning_flag]
vm = _YumInstall()
if wait_for_reboot_called:
vm.WaitForBootCompletion.assert_called_once()
else:
vm.WaitForBootCompletion.assert_not_called()
def testMetadataRecorded(self):
vm = _YumInstall()
hpc_tools_tuning_str = ('limits,networklatency,nofirewalld,nomitigation,'
'noselinux,nosmt,reboot,tcpmem')
expected_metadata = {
'hpc_tools': True,
'hpc_tools_tag': 'head',
'hpc_tools_tuning': hpc_tools_tuning_str,
'hpc_tools_version': _GITHASH,
}
self.assertEqual(expected_metadata, vm.metadata)
@parameterized.named_parameters(
('has_tag', 'foo', 'foo'),
('tag_not_set', None, 'head'),
)
def testSetGitCommitTag(self, git_commit_tag, metadata_tag):
FLAGS.gce_hpc_tools_tag = git_commit_tag
vm = _YumInstall()
self.assertEqual(metadata_tag, vm.metadata['hpc_tools_tag'])
def testBashCommandCalled(self):
vm = _YumInstall()
base_command = 'cd /tmp/pkb/hpc-tools; sudo bash mpi-tuning.sh'
command_flags = ('--limits --networklatency --nofirewalld --nomitigation '
'--noselinux --nosmt --reboot --tcpmem')
vm.RemoteCommand.assert_called_with(
f'{base_command} {command_flags}', ignore_failure=True)
if __name__ == '__main__':
unittest.main()
|
import json
from lark import Lark
from lark.reconstruct import Reconstructor
from _json_parser import json_grammar
test_json = '''
{
"empty_object" : {},
"empty_array" : [],
"booleans" : { "YES" : true, "NO" : false },
"numbers" : [ 0, 1, -2, 3.3, 4.4e5, 6.6e-7 ],
"strings" : [ "This", [ "And" , "That", "And a \\"b" ] ],
"nothing" : null
}
'''
def test_earley():
json_parser = Lark(json_grammar, maybe_placeholders=False)
tree = json_parser.parse(test_json)
new_json = Reconstructor(json_parser).reconstruct(tree)
    print(new_json)
    print(json.loads(new_json) == json.loads(test_json))
def test_lalr():
json_parser = Lark(json_grammar, parser='lalr', maybe_placeholders=False)
tree = json_parser.parse(test_json)
new_json = Reconstructor(json_parser).reconstruct(tree)
    print(new_json)
    print(json.loads(new_json) == json.loads(test_json))
test_earley()
test_lalr()
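# Added note: each call above prints the reconstructed JSON followed by
# "True", since reconstruction should round-trip the parsed data.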
|
from django.db import models
from weblate.trans.fields import RegexField
class Variant(models.Model):
component = models.ForeignKey("Component", on_delete=models.deletion.CASCADE)
variant_regex = RegexField(max_length=190)
key = models.CharField(max_length=190, db_index=True)
class Meta:
unique_together = (("key", "component", "variant_regex"),)
verbose_name = "variant definition"
verbose_name_plural = "variant definitions"
def __str__(self):
return f"{self.component}: {self.key}"
|
from pytest import mark
from cerberus import errors
from cerberus.tests import assert_fail, assert_success
@mark.parametrize(
("test_function", "document"),
[
(assert_success, {'this_field': {}}),
(assert_success, {'that_field': {}}),
(assert_success, {}),
(assert_fail, {'that_field': {}, 'this_field': {}}),
],
)
def test_excludes(test_function, document):
test_function(
schema={
'this_field': {'type': 'dict', 'excludes': 'that_field'},
'that_field': {'type': 'dict'},
},
document=document,
)
def test_excludes_basic_error_handler_message(validator):
assert_fail(
document={'that_field': {}, 'this_field': {}},
schema={
'this_field': {
'type': 'dict',
'excludes': ['that_field', 'bazo_field'],
'required': True,
},
'that_field': {'type': 'dict', 'excludes': 'this_field', 'required': True},
},
validator=validator,
)
message = errors.BasicErrorHandler.messages[errors.EXCLUDES_FIELD.code]
assert validator.errors == {
'that_field': [message.format("'this_field'", field="that_field")],
'this_field': [
message.format("'that_field', 'bazo_field'", field="this_field")
],
}
@mark.parametrize(
("test_function", "document"),
[
(assert_success, {'this_field': {}}),
(assert_success, {'that_field': {}}),
(assert_success, {'that_field': {}, 'bazo_field': {}}),
(assert_fail, {'this_field': {}, 'that_field': {}}),
(assert_fail, {'this_field': {}, 'bazo_field': {}}),
(assert_fail, {'that_field': {}, 'this_field': {}, 'bazo_field': {}}),
],
)
def test_excludes_of_multiple_fields(test_function, document):
test_function(
schema={
'this_field': {'type': 'dict', 'excludes': ['that_field', 'bazo_field']},
'that_field': {'type': 'dict', 'excludes': 'this_field'},
'bazo_field': {'type': 'dict'},
},
document=document,
)
@mark.parametrize(
("test_function", "document"),
[
(assert_success, {'this_field': {}}),
(assert_success, {'that_field': {}}),
(assert_fail, {}),
(assert_fail, {'that_field': {}, 'this_field': {}}),
],
)
def test_excludes_of_required_fields(test_function, document):
test_function(
schema={
'this_field': {'type': 'dict', 'excludes': 'that_field', 'required': True},
'that_field': {'type': 'dict', 'excludes': 'this_field', 'required': True},
},
document=document,
update=False,
)
@mark.parametrize(
("test_function", "document"),
[
(assert_success, {'this_field': {}}),
(assert_success, {'that_field': {}}),
(assert_success, {}),
(assert_fail, {'that_field': {}, 'this_field': {}}),
],
)
def test_mutual_excludes(test_function, document):
test_function(
schema={
'this_field': {'type': 'dict', 'excludes': 'that_field'},
'that_field': {'type': 'dict', 'excludes': 'this_field'},
},
document=document,
)
|
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import EmailMessage
from django.core.mail import send_mail
from django.template import loader
from django.utils.translation import activate
from django.utils.translation import get_language
from django.utils.translation import gettext_lazy as _
from django_comments.moderation import CommentModerator
from zinnia.settings import AUTO_CLOSE_COMMENTS_AFTER
from zinnia.settings import AUTO_MODERATE_COMMENTS
from zinnia.settings import MAIL_COMMENT_AUTHORS
from zinnia.settings import MAIL_COMMENT_NOTIFICATION_RECIPIENTS
from zinnia.settings import MAIL_COMMENT_REPLY
from zinnia.settings import PROTOCOL
from zinnia.settings import SPAM_CHECKER_BACKENDS
from zinnia.spam_checker import check_is_spam
class EntryCommentModerator(CommentModerator):
"""
Moderate the comments on entries.
"""
email_reply = MAIL_COMMENT_REPLY
email_authors = MAIL_COMMENT_AUTHORS
enable_field = 'comment_enabled'
auto_close_field = 'start_publication'
close_after = AUTO_CLOSE_COMMENTS_AFTER
spam_checker_backends = SPAM_CHECKER_BACKENDS
auto_moderate_comments = AUTO_MODERATE_COMMENTS
mail_comment_notification_recipients = MAIL_COMMENT_NOTIFICATION_RECIPIENTS
def moderate(self, comment, entry, request):
"""
Determine if a new comment should be marked as non-public
and await approval.
Return ``True`` to put the comment into the moderator queue,
        or ``False`` to allow it to show up immediately.
"""
if self.auto_moderate_comments:
return True
if check_is_spam(comment, entry, request,
self.spam_checker_backends):
return True
return False
def email(self, comment, entry, request):
"""
Send email notifications needed.
"""
current_language = get_language()
try:
activate(settings.LANGUAGE_CODE)
site = Site.objects.get_current()
if self.auto_moderate_comments or comment.is_public:
self.do_email_notification(comment, entry, site)
if comment.is_public:
self.do_email_authors(comment, entry, site)
self.do_email_reply(comment, entry, site)
finally:
activate(current_language)
def do_email_notification(self, comment, entry, site):
"""
Send email notification of a new comment to site staff.
"""
if not self.mail_comment_notification_recipients:
return
template = loader.get_template(
'comments/zinnia/entry/email/notification.txt')
context = {
'comment': comment,
'entry': entry,
'site': site,
'protocol': PROTOCOL
}
subject = _('[%(site)s] New comment posted on "%(title)s"') % \
{'site': site.name, 'title': entry.title}
message = template.render(context)
send_mail(
subject, message,
settings.DEFAULT_FROM_EMAIL,
self.mail_comment_notification_recipients,
fail_silently=not settings.DEBUG
)
def do_email_authors(self, comment, entry, site):
"""
Send email notification of a new comment to
the authors of the entry.
"""
if not self.email_authors:
return
exclude_list = self.mail_comment_notification_recipients + ['']
recipient_list = (
            {author.email for author in entry.authors.all()}
- set(exclude_list)
)
if not recipient_list:
return
template = loader.get_template(
'comments/zinnia/entry/email/authors.txt')
context = {
'comment': comment,
'entry': entry,
'site': site,
'protocol': PROTOCOL
}
subject = _('[%(site)s] New comment posted on "%(title)s"') % \
{'site': site.name, 'title': entry.title}
message = template.render(context)
send_mail(
subject, message,
settings.DEFAULT_FROM_EMAIL,
recipient_list,
fail_silently=not settings.DEBUG
)
def do_email_reply(self, comment, entry, site):
"""
Send email notification of a new comment to
the authors of the previous comments.
"""
if not self.email_reply:
return
exclude_list = (
self.mail_comment_notification_recipients
+ [author.email for author in entry.authors.all()]
+ [comment.email]
)
recipient_list = (
            {other_comment.email
             for other_comment in entry.comments
             if other_comment.email}
- set(exclude_list)
)
if not recipient_list:
return
template = loader.get_template(
'comments/zinnia/entry/email/reply.txt')
context = {
'comment': comment,
'entry': entry,
'site': site,
'protocol': PROTOCOL
}
subject = _('[%(site)s] New comment posted on "%(title)s"') % \
{'site': site.name, 'title': entry.title}
message = template.render(context)
mail = EmailMessage(
subject, message,
settings.DEFAULT_FROM_EMAIL,
bcc=recipient_list)
mail.send(fail_silently=not settings.DEBUG)
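# Illustrative recipient arithmetic for do_email_reply (added note; the
# addresses are hypothetical): previous commenters minus staff, entry
# authors and the new commenter:
#
# {"ann@example.com", "bob@example.com"} - {"bob@example.com"}
# -> {"ann@example.com"}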
|
import aiohttp
from homeassistant import config_entries, setup
from homeassistant.components.nws.const import DOMAIN
from tests.async_mock import patch
async def test_form(hass, mock_simple_nws_config):
"""Test we get the form."""
hass.config.latitude = 35
hass.config.longitude = -90
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.nws.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"api_key": "test"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "ABC"
assert result2["data"] == {
"api_key": "test",
"latitude": 35,
"longitude": -90,
"station": "ABC",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_simple_nws_config):
"""Test we handle cannot connect error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = aiohttp.ClientError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass, mock_simple_nws_config):
"""Test we handle unknown error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = ValueError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass, mock_simple_nws_config):
"""Test we handle duplicate entries."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
|
import graphviz
from typing import Optional, Text, Iterable
from tensornetwork.network_components import AbstractNode
#pylint: disable=no-member
def to_graphviz(nodes: Iterable[AbstractNode],
graph: Optional[graphviz.Graph] = None,
include_all_names: bool = False,
engine: Text = "neato") -> graphviz.Graph:
"""Create a graphviz Graph that is isomorphic to the given TensorNetwork.
Args:
nodes: a collection of nodes
graph: An optional `graphviz.Graph` object to write to. Use this only
if you wish to set custom attributes for the graph.
include_all_names: Whether to include all of the names in the graph.
If False, all names starting with '__' (which are almost always just
the default generated names) will be dropped to reduce clutter.
engine: The graphviz engine to use. Only applicable if `graph` is None.
Returns:
The `graphviz.Graph` object.
"""
if graph is None:
#pylint: disable=no-member
graph = graphviz.Graph('G', engine=engine)
for node in nodes:
if not node.name.startswith("__") or include_all_names:
label = node.name
else:
label = ""
graph.node(str(id(node)), label=label)
seen_edges = set()
for node in nodes:
for i, edge in enumerate(node.edges):
if edge in seen_edges:
continue
seen_edges.add(edge)
if not edge.name.startswith("__") or include_all_names:
edge_label = edge.name
else:
edge_label = ""
if edge.is_dangling():
# We need to create an invisible node for the dangling edge
# to connect to.
graph.node(
"{}_{}".format(id(node), i),
label="",
_attributes={"style": "invis"})
graph.edge("{}_{}".format(id(node), i), str(id(node)), label=edge_label)
else:
graph.edge(str(id(edge.node1)), str(id(edge.node2)), label=edge_label)
return graph
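# Illustrative usage sketch (added; a hypothetical two-node network built
# with the standard tensornetwork API):
#
# import numpy as np
# import tensornetwork as tn
# a = tn.Node(np.ones((2, 2)), name="a")
# b = tn.Node(np.ones((2, 2)), name="b")
# _ = a[1] ^ b[0]              # connect one edge; a[0] and b[1] dangle
# print(to_graphviz([a, b]).source)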
|
import numpy as np
from ...utils import verbose
from ._utils import _fetch_one, _data_path, _on_missing, AGE_SLEEP_RECORDS
from ._utils import _check_subjects
data_path = _data_path # expose _data_path(..) as data_path(..)
BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/' # noqa: E501
@verbose
def fetch_data(subjects, recording=[1, 2], path=None, force_update=False,
update_path=None, base_url=BASE_URL, on_missing='raise',
verbose=None): # noqa: D301
"""Get paths to local copies of PhysioNet Polysomnography dataset files.
This will fetch data from the publicly available subjects from PhysioNet's
    study of age effects on sleep in healthy subjects [1]_ [2]_. This
corresponds to a subset of 153 recordings from 37 males and 41 females that
were 25-101 years old at the time of the recordings. There are two night
recordings per subject except for subjects 13, 36 and 52 which have one
record missing each due to missing recording hardware.
See more details in
`physionet website <https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/>`_.
Parameters
----------
subjects : list of int
The subjects to use. Can be in the range of 0-82 (inclusive), however
the following subjects are not available: 39, 68, 69, 78 and 79.
recording : list of int
        The night recording indices. Valid values are: [1], [2], or [1, 2].
The following recordings are not available: recording 1 for subject 36
and 52, and recording 2 for subject 13.
path : None | str
Location of where to look for the PhysioNet data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the Polysomnography dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
config to the given path. If None, the user is prompted.
on_missing : 'raise' | 'warn' | 'ignore'
What to do if one or several recordings are not available. Valid keys
        are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing
is 'warn' it will proceed but warn, if 'ignore' it will proceed
silently.
%(verbose)s
Returns
-------
paths : list
List of local data paths of the given type.
Notes
-----
For example, one could do:
>>> from mne.datasets import sleep_physionet
>>> sleep_physionet.age.fetch_data(subjects=[0]) # doctest: +SKIP
This would download data for subject 0 if it isn't there already.
References
----------
.. [1] MS Mourtazaev, B Kemp, AH Zwinderman, HAC Kamphuisen. Age and gender
affect different characteristics of slow waves in the sleep EEG.
Sleep 18(7):557–564 (1995).
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
Research Resource for Complex Physiologic Signals.
Circulation 101(23):e215-e220
See Also
--------
:func:`mne.datasets.sleep_physionet.temazepam.fetch_data`
""" # noqa: E501
records = np.loadtxt(AGE_SLEEP_RECORDS,
skiprows=1,
delimiter=',',
usecols=(0, 1, 2, 6, 7),
dtype={'names': ('subject', 'record', 'type', 'sha',
'fname'),
'formats': ('<i2', 'i1', '<S9', 'S40', '<S22')}
)
psg_records = records[np.where(records['type'] == b'PSG')]
hyp_records = records[np.where(records['type'] == b'Hypnogram')]
path = data_path(path=path, update_path=update_path)
params = [path, force_update, base_url]
_check_subjects(
subjects, 83, missing=[39, 68, 69, 78, 79], on_missing=on_missing)
# Check for missing recordings
if set(subjects) & {36, 52} and 1 in recording:
msg = ('Requested recording 1 for subject 36 and/or 52, but it is not '
               'available in the corpus.')
_on_missing(on_missing, msg)
if 13 in subjects and 2 in recording:
msg = ('Requested recording 2 for subject 13, but it is not available '
               'in the corpus.')
_on_missing(on_missing, msg)
fnames = []
for subject in subjects:
for idx in np.where(psg_records['subject'] == subject)[0]:
if psg_records['record'][idx] in recording:
psg_fname = _fetch_one(psg_records['fname'][idx].decode(),
psg_records['sha'][idx].decode(),
*params)
hyp_fname = _fetch_one(hyp_records['fname'][idx].decode(),
hyp_records['sha'][idx].decode(),
*params)
fnames.append([psg_fname, hyp_fname])
return fnames
|
import os
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image, Figure
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
"""Plugin for thumbnail directive."""
name = "rest_thumbnail"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
directives.register_directive('thumbnail', Thumbnail)
return super().set_site(site)
class Thumbnail(Figure):
"""Thumbnail directive for reST."""
def align(argument):
"""Return thumbnail alignment."""
return directives.choice(argument, Image.align_values)
def figwidth_value(argument):
"""Return figure width."""
if argument.lower() == 'image':
return 'image'
else:
return directives.length_or_percentage_or_unitless(argument, 'px')
option_spec = Image.option_spec.copy()
option_spec['figwidth'] = figwidth_value
option_spec['figclass'] = directives.class_option
has_content = True
def run(self):
"""Run the thumbnail directive."""
uri = directives.uri(self.arguments[0])
if uri.endswith('.svg'):
            # The '?' at the end makes docutils output an <img> instead of an <object> for the SVG, which lightboxes may require.
self.arguments[0] = '.thumbnail'.join(os.path.splitext(uri)) + '?'
else:
self.arguments[0] = '.thumbnail'.join(os.path.splitext(uri))
self.options['target'] = uri
if self.content:
(node,) = Figure.run(self)
else:
(node,) = Image.run(self)
return [node]
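# Illustrative reST usage of the directive (added; the image path is
# hypothetical). With body content the Figure branch runs, otherwise Image:
#
# .. thumbnail:: /images/sunset.jpg
#    :figwidth: 300px
#    :alt: Sunset
#
#    Caption rendered through the Figure branch.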
|
from nikola.plugin_categories import Taxonomy
from nikola import utils
class ClassifyTags(Taxonomy):
"""Classify the posts by tags."""
name = "classify_tags"
classification_name = "tag"
overview_page_variable_name = "tags"
overview_page_items_variable_name = "items"
more_than_one_classifications_per_post = True
has_hierarchy = False
show_list_as_subcategories_list = False
template_for_classification_overview = "tags.tmpl"
always_disable_rss = False
always_disable_atom = False
apply_to_posts = True
apply_to_pages = False
omit_empty_classifications = True
add_other_languages_variable = True
path_handler_docstrings = {
'tag_index': """A link to the tag index.
Example:
link://tag_index => /tags/index.html""",
'tag': """A link to a tag's page. Takes page number as optional keyword argument.
Example:
link://tag/cats => /tags/cats.html""",
'tag_atom': """A link to a tag's Atom feed.
Example:
link://tag_atom/cats => /tags/cats.atom""",
'tag_rss': """A link to a tag's RSS feed.
Example:
link://tag_rss/cats => /tags/cats.xml""",
}
def set_site(self, site):
"""Set site, which is a Nikola instance."""
super().set_site(site)
self.show_list_as_index = self.site.config['TAG_PAGES_ARE_INDEXES']
self.template_for_single_list = "tagindex.tmpl" if self.show_list_as_index else "tag.tmpl"
self.minimum_post_count_per_classification_in_overview = self.site.config['TAGLIST_MINIMUM_POSTS']
self.translation_manager = utils.ClassificationTranslationManager()
def is_enabled(self, lang=None):
"""Return True if this taxonomy is enabled, or False otherwise."""
return True
def classify(self, post, lang):
"""Classify the given post for the given language."""
return post.tags_for_language(lang)
def get_classification_friendly_name(self, classification, lang, only_last_component=False):
"""Extract a friendly name from the classification."""
return classification
def slugify_tag_name(self, name, lang):
"""Slugify a tag name."""
if self.site.config['SLUG_TAG_PATH']:
name = utils.slugify(name, lang)
return name
def get_overview_path(self, lang, dest_type='page'):
"""Return a path for the list of all classifications."""
if self.site.config['TAGS_INDEX_PATH'](lang):
path = self.site.config['TAGS_INDEX_PATH'](lang)
append_index = 'never'
else:
path = self.site.config['TAG_PATH'](lang)
append_index = 'always'
return [component for component in path.split('/') if component], append_index
def get_path(self, classification, lang, dest_type='page'):
"""Return a path for the given classification."""
return [_f for _f in [
self.site.config['TAG_PATH'](lang),
self.slugify_tag_name(classification, lang)] if _f], 'auto'
def provide_overview_context_and_uptodate(self, lang):
"""Provide data for the context and the uptodate list for the list of all classifiations."""
kw = {
"tag_path": self.site.config['TAG_PATH'],
"tag_pages_are_indexes": self.site.config['TAG_PAGES_ARE_INDEXES'],
"taglist_minimum_post_count": self.site.config['TAGLIST_MINIMUM_POSTS'],
"tzinfo": self.site.tzinfo,
"tag_descriptions": self.site.config['TAG_DESCRIPTIONS'],
"tag_titles": self.site.config['TAG_TITLES'],
}
context = {
"title": self.site.MESSAGES[lang]["Tags"],
"description": self.site.MESSAGES[lang]["Tags"],
"pagekind": ["list", "tags_page"],
}
kw.update(context)
return context, kw
def provide_context_and_uptodate(self, classification, lang, node=None):
"""Provide data for the context and the uptodate list for the list of the given classifiation."""
kw = {
"tag_path": self.site.config['TAG_PATH'],
"tag_pages_are_indexes": self.site.config['TAG_PAGES_ARE_INDEXES'],
"taglist_minimum_post_count": self.site.config['TAGLIST_MINIMUM_POSTS'],
"tzinfo": self.site.tzinfo,
"tag_descriptions": self.site.config['TAG_DESCRIPTIONS'],
"tag_titles": self.site.config['TAG_TITLES'],
}
context = {
"title": self.site.config['TAG_TITLES'].get(lang, {}).get(classification, self.site.MESSAGES[lang]["Posts about %s"] % classification),
"description": self.site.config['TAG_DESCRIPTIONS'].get(lang, {}).get(classification),
"pagekind": ["tag_page", "index" if self.show_list_as_index else "list"],
"tag": classification,
}
kw.update(context)
return context, kw
def get_other_language_variants(self, classification, lang, classifications_per_language):
"""Return a list of variants of the same tag in other languages."""
return self.translation_manager.get_translations_as_list(classification, lang, classifications_per_language)
def postprocess_posts_per_classification(self, posts_per_classification_per_language, flat_hierarchy_per_lang=None, hierarchy_lookup_per_lang=None):
"""Rearrange, modify or otherwise use the list of posts per classification and per language."""
self.translation_manager.read_from_config(self.site, 'TAG', posts_per_classification_per_language, False)
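# Illustrative sketch (assuming the default TAG_PATH of "tags" and
# SLUG_TAG_PATH enabled): get_path() returns path components that Nikola
# joins into the final URL, so a tag named "My Cats" maps roughly to
#
#   (['tags', 'my-cats'], 'auto')   ->   /tags/my-cats/index.html
#
# with the exact output depending on the site's pretty-URL and index settings.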
|
from functools import partial
from twtxt.mentions import format_mentions
def mock_mention_format(name, url, expected_name, expected_url):
assert name == expected_name
assert url == expected_url
if name:
return '@' + name
else:
return name
def test_format_mentions():
texts = {'No Mention': 'No Mention',
'@<SomeName http://some.url/twtxt.txt>': ('SomeName', 'http://some.url/twtxt.txt'),
'@<Some>Shitty<Name http://some.url/twtxt.txt>': ('Some>Shitty<Name', 'http://some.url/twtxt.txt'),
'@<http://some.url/twtxt.txt>': (None, 'http://some.url/twtxt.txt'),
'@<SomeName>': '@<SomeName>',
'@SomeName': '@SomeName'}
    for text, expected in texts.items():
        if isinstance(expected, tuple):
            format_mentions(text, partial(mock_mention_format, expected_name=expected[0], expected_url=expected[1]))
        else:
            assert expected == format_mentions(text,
                                               partial(mock_mention_format, expected_name=None, expected_url=None))
def test_format_multi_mentions():
    text = '@<SomeName http://url> and another @<AnotherName http://another/url> end'
    mentions = iter((('SomeName', 'http://url'),
                     ('AnotherName', 'http://another/url')))
    def mock_multi_mention_format(name, url):
        # Consume the expected mentions in order so the test actually verifies them.
        expected_name, expected_url = next(mentions)
        assert (name, url) == (expected_name, expected_url)
        return '@' + name
    format_mentions(text, mock_multi_mention_format)
def test_format_multi_mentions_incomplete():
    text = '@<http://url> and another @<AnotherName http://another/url> end'
    mentions = iter(((None, 'http://url'),
                     ('AnotherName', 'http://another/url')))
    def mock_multi_mention_format(name, url):
        # The closure reads the current `mentions` iterator, so it keeps
        # working after the iterator is rebound below.
        expected_name, expected_url = next(mentions)
        assert (name, url) == (expected_name, expected_url)
        if name:
            return '@' + name
        else:
            return '@' + url
    format_mentions(text, mock_multi_mention_format)
    text = '@<SomeName http://url> and another @<http://another/url> end'
    mentions = iter((('SomeName', 'http://url'),
                     (None, 'http://another/url')))
    format_mentions(text, mock_multi_mention_format)
|
def combine_context_switchers(context_switchers):
"""Create a single context switcher from multiple switchers.
`context_switchers` is a list of functions that take a frame as an
argument and return a string to use as the new context label.
    Returns a function that composes the `context_switchers` functions, or None
    if `context_switchers` is an empty list.
When invoked, the combined switcher calls `context_switchers` one-by-one
until a string is returned. The combined switcher returns None if all
`context_switchers` return None.
"""
if not context_switchers:
return None
if len(context_switchers) == 1:
return context_switchers[0]
def should_start_context(frame):
"""The combiner for multiple context switchers."""
for switcher in context_switchers:
new_context = switcher(frame)
if new_context is not None:
return new_context
return None
return should_start_context
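# Usage sketch (the lambda below is a hypothetical switcher): the combined
# switcher returns the first non-None label, so ordering expresses priority.
#
#   switcher = combine_context_switchers([
#       should_start_context_test_function,  # labels test_* frames
#       lambda frame: None,                  # fallback that never matches
#   ])
#   # switcher(frame) -> qualified test name, or None for non-test frames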
def should_start_context_test_function(frame):
"""Is this frame calling a test_* function?"""
co_name = frame.f_code.co_name
if co_name.startswith("test") or co_name == "runTest":
return qualname_from_frame(frame)
return None
def qualname_from_frame(frame):
"""Get a qualified name for the code running in `frame`."""
co = frame.f_code
fname = co.co_name
method = None
if co.co_argcount and co.co_varnames[0] == "self":
self = frame.f_locals["self"]
method = getattr(self, fname, None)
if method is None:
func = frame.f_globals.get(fname)
if func is None:
return None
return func.__module__ + '.' + fname
func = getattr(method, '__func__', None)
if func is None:
cls = self.__class__
return cls.__module__ + '.' + cls.__name__ + "." + fname
if hasattr(func, '__qualname__'):
qname = func.__module__ + '.' + func.__qualname__
else:
for cls in getattr(self.__class__, '__mro__', ()):
f = cls.__dict__.get(fname, None)
if f is None:
continue
if f is func:
qname = cls.__module__ + '.' + cls.__name__ + "." + fname
break
else:
# Support for old-style classes.
def mro(bases):
for base in bases:
f = base.__dict__.get(fname, None)
if f is func:
return base.__module__ + '.' + base.__name__ + "." + fname
for base in bases:
qname = mro(base.__bases__)
if qname is not None:
return qname
return None
qname = mro([self.__class__])
if qname is None:
qname = func.__module__ + '.' + fname
return qname
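# Illustrative check (hypothetical class, not part of this module): the
# qualified name can be recovered from a live frame, e.g. via sys._getframe().
#
#   import sys
#
#   class Thing:
#       def method(self):
#           return qualname_from_frame(sys._getframe())
#
#   # Thing().method() returns something like '<module>.Thing.method', with
#   # the module part depending on where Thing is defined.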
|
import json
import os
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
import yaml
from behave import given
from behave import when
from itest_utils import get_service_connection_string
from paasta_tools import marathon_tools
from paasta_tools import utils
from paasta_tools.api.client import get_paasta_oapi_client_by_url
from paasta_tools.frameworks import native_scheduler
from paasta_tools.utils import decompose_job_id
def _get_marathon_connection_string(service="marathon"):
return "http://%s" % get_service_connection_string(service)
def _get_zookeeper_connection_string(chroot):
return "zk://{}/{}".format(get_service_connection_string("zookeeper"), chroot)
def setup_system_paasta_config():
zk_connection_string = _get_zookeeper_connection_string("mesos-testcluster")
system_paasta_config = utils.SystemPaastaConfig(
{
"cluster": "testcluster",
"deployd_log_level": "DEBUG",
"docker_volumes": [],
"docker_registry": "docker-dev.yelpcorp.com",
"zookeeper": zk_connection_string,
"synapse_port": 3212,
"marathon_servers": [
# if you're updating this list, you should update
# paasta_tools/yelp_package/dockerfiles/itest/api/marathon.json as well
{
"url": _get_marathon_connection_string("marathon"),
"user": None,
"password": None,
},
{
"url": _get_marathon_connection_string("marathon1"),
"user": None,
"password": None,
},
{
"url": _get_marathon_connection_string("marathon2"),
"user": None,
"password": None,
},
],
"dashboard_links": {
"testcluster": {
"Marathon RO": [
"http://accessible-marathon",
"http://accessible-marathon1",
"http://accessible-marathon2",
]
}
},
},
"/some_fake_path_to_config_dir/",
)
return system_paasta_config
def setup_marathon_clients():
system_paasta_config = setup_system_paasta_config()
marathon_servers = marathon_tools.get_marathon_servers(system_paasta_config)
clients = marathon_tools.get_marathon_clients(marathon_servers)
return (clients, marathon_servers, system_paasta_config)
def get_paasta_api_url():
return "http://{}/{}".format(get_service_connection_string("api"), "swagger.json")
def setup_paasta_api_client():
return get_paasta_oapi_client_by_url(urlparse(get_paasta_api_url()))
def _generate_mesos_cli_config(zk_host_and_port):
config = {
"profile": "default",
"default": {
"master": zk_host_and_port,
"log_level": "warning",
"log_file": "None",
"response_timeout": 5,
},
}
return config
def write_mesos_cli_config(config):
with NamedTemporaryFile(mode="w", delete=False) as mesos_cli_config_file:
mesos_cli_config_file.write(json.dumps(config))
return mesos_cli_config_file.name
def write_etc_paasta(context, config, filename):
context.etc_paasta = "/etc/paasta"
if not os.path.exists(context.etc_paasta):
os.makedirs(context.etc_paasta)
with open(os.path.join(context.etc_paasta, filename), "w") as f:
f.write(json.dumps(config))
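# For example (hypothetical call), the following writes the file
# /etc/paasta/cluster.json containing {"cluster": "testcluster"}:
#
#   write_etc_paasta(context, {"cluster": "testcluster"}, "cluster.json")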
@given("we add a new docker volume to the public config")
def add_volume_public_config(context):
write_etc_paasta(
context,
{
"volumes": [
{
"hostPath": "/nail/etc/beep",
"containerPath": "/nail/etc/beep",
"mode": "RO",
},
{
"hostPath": "/nail/etc/bop",
"containerPath": "/nail/etc/bop",
"mode": "RO",
},
{
"hostPath": "/nail/etc/boop",
"containerPath": "/nail/etc/boop",
"mode": "RO",
},
{
"hostPath": "/nail/tmp/noob",
"containerPath": "/nail/tmp/noob",
"mode": "RO",
},
]
},
"volumes.json",
)
@given("a working paasta cluster")
def working_paasta_cluster(context):
return working_paasta_cluster_with_registry(context, "docker.io")
@given("a working paasta cluster, with docker registry {docker_registry}")
def working_paasta_cluster_with_registry(context, docker_registry):
"""Adds a working marathon_clients for the purposes of
interacting with them in the test."""
if not hasattr(context, "marathon_clients"):
(
context.marathon_clients,
context.marathon_servers,
context.system_paasta_config,
) = setup_marathon_clients()
else:
print("Marathon connections already established")
if not hasattr(context, "paasta_api_client"):
context.paasta_api_client = setup_paasta_api_client()
mesos_cli_config = _generate_mesos_cli_config(
_get_zookeeper_connection_string("mesos-testcluster")
)
mesos_cli_config_filename = write_mesos_cli_config(mesos_cli_config)
context.tag_version = 0
write_etc_paasta(
context,
{"marathon_servers": context.system_paasta_config.get_marathon_servers()},
"marathon.json",
)
write_etc_paasta(
context,
{
"cluster": "testcluster",
"zookeeper": "zk://zookeeper/mesos-testcluster",
"vault_environment": "devc",
"docker_registry": docker_registry,
},
"cluster.json",
)
write_etc_paasta(context, {"log_writer": {"driver": "null"}}, "logs.json")
write_etc_paasta(context, {"sensu_host": None}, "sensu.json")
write_etc_paasta(
context,
{
"volumes": [
{
"hostPath": "/nail/etc/beep",
"containerPath": "/nail/etc/beep",
"mode": "RO",
},
{
"hostPath": "/nail/etc/bop",
"containerPath": "/nail/etc/bop",
"mode": "RO",
},
{
"hostPath": "/nail/etc/boop",
"containerPath": "/nail/etc/boop",
"mode": "RO",
},
]
},
"volumes.json",
)
write_etc_paasta(
context,
{"paasta_native": {"principal": "paasta_native", "secret": "secret4"}},
"paasta_native.json",
)
write_etc_paasta(
context, {"mesos_config": {"path": mesos_cli_config_filename}}, "mesos.json"
)
write_etc_paasta(
context,
{"api_endpoints": {"testcluster": get_paasta_api_url()}},
"api_endpoints.json",
)
write_etc_paasta(
context,
{
"dashboard_links": {
"testcluster": {
"Marathon RO": [
"http://accessible-marathon",
"http://accessible-marathon1",
"http://accessible-marathon2",
]
}
}
},
"dashboard_links.json",
)
write_etc_paasta(context, {"deployd_use_zk_queue": True}, "deployd.json")
@given(
'I have yelpsoa-configs for the marathon job "{job_id}" on shard {shard:d}, previous shard {previous_shard:d}'
)
@given('I have yelpsoa-configs for the marathon job "{job_id}"')
def write_soa_dir_marathon_job(context, job_id, shard=None, previous_shard=None):
(service, instance, _, __) = decompose_job_id(job_id)
try:
soa_dir = context.soa_dir
except AttributeError:
soa_dir = "/nail/etc/services/"
if not os.path.exists(os.path.join(soa_dir, service)):
os.makedirs(os.path.join(soa_dir, service))
soa = {
str(instance): {
"cpus": 0.1,
"mem": 100,
"marathon_shard": shard,
"previous_marathon_shards": [previous_shard] if previous_shard else None,
}
}
if hasattr(context, "cmd"):
soa[instance]["cmd"] = context.cmd
with open(
os.path.join(soa_dir, service, "marathon-%s.yaml" % context.cluster), "w"
) as f:
f.write(yaml.safe_dump(soa))
context.soa_dir = soa_dir
@given('we have yelpsoa-configs for native service "{job_id}"')
def write_soa_dir_native_service(context, job_id):
(service, instance, _, __) = decompose_job_id(job_id)
try:
soa_dir = context.soa_dir
except AttributeError:
soa_dir = "/nail/etc/services/"
if not os.path.exists(os.path.join(soa_dir, service)):
os.makedirs(os.path.join(soa_dir, service))
with open(
os.path.join(soa_dir, service, "paasta_native-%s.yaml" % context.cluster), "w"
) as f:
f.write(
yaml.safe_dump(
{"%s" % instance: {"cpus": 0.1, "mem": 100, "cmd": "/bin/sleep 300"}}
)
)
context.soa_dir = soa_dir
context.service = service
context.instance = instance
@given("we load_paasta_native_job_config")
def call_load_paasta_native_job_config(context):
context.new_config = native_scheduler.load_paasta_native_job_config(
service=context.service,
instance=context.instance,
cluster=context.cluster,
soa_dir=context.soa_dir,
)
@given(
'we have a deployments.json for the service "{service}" with {disabled} instance '
'"{csv_instances}" image "{image}"'
)
def write_soa_dir_deployments(context, service, disabled, csv_instances, image):
if disabled == "disabled":
desired_state = "stop"
else:
desired_state = "start"
if not os.path.exists(os.path.join(context.soa_dir, service)):
os.makedirs(os.path.join(context.soa_dir, service))
with open(os.path.join(context.soa_dir, service, "deployments.json"), "w") as dp:
dp.write(
json.dumps(
{
"v1": {
"{}:paasta-{}".format(
service, utils.get_paasta_branch(context.cluster, instance)
): {"docker_image": image, "desired_state": desired_state}
for instance in csv_instances.split(",")
},
"v2": {
"deployments": {
f"{context.cluster}.{instance}": {
"docker_image": image,
"git_sha": "deadbeef",
}
for instance in csv_instances.split(",")
},
"controls": {
f"{service}:{context.cluster}.{instance}": {
"desired_state": desired_state,
"force_bounce": None,
}
for instance in csv_instances.split(",")
},
},
}
)
)
@given(
'we have a deployments.json for the service "{service}" with {disabled} instance "{csv_instance}"'
)
def write_soa_dir_deployments_default_image(context, service, disabled, csv_instance):
write_soa_dir_deployments(
context,
service,
disabled,
csv_instance,
"test-image-foobar%d" % context.tag_version,
)
@when(
(
'we set the "{field}" field of the {framework} config for service "{service}"'
' and instance "{instance}" to "{value}"'
)
)
def modify_configs(context, field, framework, service, instance, value):
soa_dir = context.soa_dir
with open(
os.path.join(soa_dir, service, f"{framework}-{context.cluster}.yaml"), "r+"
) as f:
data = yaml.safe_load(f.read())
data[instance][field] = value
f.seek(0)
f.write(yaml.safe_dump(data))
f.truncate()
@when(
(
'we set the "{field}" field of the {framework} config for service "{service}"'
' and instance "{instance}" to the integer {value:d}'
)
)
def modify_configs_for_int(context, field, framework, service, instance, value):
modify_configs(context, field, framework, service, instance, value)
|
import unittest
import numpy as np
import numpy.testing as np_test
from pgmpy.models import NoisyOrModel
class TestNoisyOrModelInit(unittest.TestCase):
def test_init(self):
model = NoisyOrModel(
["x1", "x2", "x3"], [2, 3, 2], [[0.6, 0.4], [0.2, 0.4, 0.7], [0.1, 0.4]]
)
np_test.assert_array_equal(model.variables, np.array(["x1", "x2", "x3"]))
np_test.assert_array_equal(model.cardinality, np.array([2, 3, 2]))
self.assertListEqual(
model.inhibitor_probability, [[0.6, 0.4], [0.2, 0.4, 0.7], [0.1, 0.4]]
)
def test_exceptions(self):
self.assertRaises(
ValueError,
NoisyOrModel,
np.array(["x1", "x2", "x3"]),
[2, 2, 2],
[[0.1, 0.2], [1.0, 0.3], [1.2, 0.1]],
)
self.assertRaises(
ValueError,
NoisyOrModel,
np.array(["x1", "x2", "x3"]),
[2, 4],
[[0.1, 0.2], [0.1, 0.4, 0.2, 0.6]],
)
self.assertRaises(
ValueError,
NoisyOrModel,
np.array(["x1", "x2", "x3"]),
[2, 3, 2, 3],
[[0.1, 0.2], [0.6, 0.3, 0.5], [0.3, 0.2], [0.1, 0.4, 0.3]],
)
self.assertRaises(
ValueError,
NoisyOrModel,
np.array(["x1", "x2", "x3"]),
[2, 3, 2],
[[0.1, 0.2, 0.4], [0.4, 0.1, 0.5], [0.6, 0.1, 0.7]],
)
self.assertRaises(
ValueError,
NoisyOrModel,
np.array(["x1", "x2", "x3"]),
[2, 2, 2],
[[0.1, 0.1], [0.1, 0.1]],
)
class TestNoisyOrModelMethods(unittest.TestCase):
def setUp(self):
self.model = NoisyOrModel(
["x1", "x2", "x3"], [2, 3, 2], [[0.6, 0.4], [0.2, 0.4, 0.7], [0.1, 0.4]]
)
def test_add_variables(self):
self.model.add_variables(["x4"], [3], [0.1, 0.2, 0.4])
np_test.assert_array_equal(
self.model.variables, np.array(["x1", "x2", "x3", "x4"])
)
np_test.assert_array_equal(self.model.cardinality, np.array([2, 3, 2, 3]))
self.assertListEqual(
self.model.inhibitor_probability,
[[0.6, 0.4], [0.2, 0.4, 0.7], [0.1, 0.4], [0.1, 0.2, 0.4]],
)
self.model.add_variables(["x5", "x6"], [3, 2], [[0.1, 0.2, 0.4], [0.5, 0.5]])
np_test.assert_array_equal(
self.model.variables, np.array(["x1", "x2", "x3", "x4", "x5", "x6"])
)
np_test.assert_array_equal(self.model.cardinality, np.array([2, 3, 2, 3, 3, 2]))
self.assertListEqual(
self.model.inhibitor_probability,
[
[0.6, 0.4],
[0.2, 0.4, 0.7],
[0.1, 0.4],
[0.1, 0.2, 0.4],
[0.1, 0.2, 0.4],
[0.5, 0.5],
],
)
def test_del_variables(self):
self.model.del_variables(["x3"])
np_test.assert_array_equal(self.model.variables, np.array(["x1", "x2"]))
np_test.assert_array_equal(self.model.cardinality, np.array([2, 3]))
self.assertListEqual(
self.model.inhibitor_probability, [[0.6, 0.4], [0.2, 0.4, 0.7]]
)
def test_del_multiple_variables(self):
self.model.del_variables(["x1", "x2"])
np_test.assert_array_equal(self.model.variables, np.array(["x3"]))
np_test.assert_array_equal(self.model.cardinality, np.array([2]))
self.assertListEqual(self.model.inhibitor_probability, [[0.1, 0.4]])
|
import asyncio
import logging
from arcam.fmj import ConnectionFailed
from arcam.fmj.client import Client
import async_timeout
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
DOMAIN_DATA_ENTRIES,
DOMAIN_DATA_TASKS,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.115")
async def _await_cancel(task):
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the component."""
hass.data[DOMAIN_DATA_ENTRIES] = {}
hass.data[DOMAIN_DATA_TASKS] = {}
async def _stop(_):
        await asyncio.gather(
            *[_await_cancel(task) for task in hass.data[DOMAIN_DATA_TASKS].values()]
        )
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: config_entries.ConfigEntry):
"""Set up config entry."""
entries = hass.data[DOMAIN_DATA_ENTRIES]
tasks = hass.data[DOMAIN_DATA_TASKS]
client = Client(entry.data[CONF_HOST], entry.data[CONF_PORT])
entries[entry.entry_id] = client
task = asyncio.create_task(_run_client(hass, client, DEFAULT_SCAN_INTERVAL))
tasks[entry.entry_id] = task
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "media_player")
)
return True
async def async_unload_entry(hass, entry):
"""Cleanup before removing config entry."""
await hass.config_entries.async_forward_entry_unload(entry, "media_player")
task = hass.data[DOMAIN_DATA_TASKS].pop(entry.entry_id)
await _await_cancel(task)
hass.data[DOMAIN_DATA_ENTRIES].pop(entry.entry_id)
return True
async def _run_client(hass, client, interval):
def _listen(_):
hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_CLIENT_DATA, client.host)
while True:
try:
with async_timeout.timeout(interval):
await client.start()
_LOGGER.debug("Client connected %s", client.host)
hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_CLIENT_STARTED, client.host
)
try:
with client.listen(_listen):
await client.process()
finally:
await client.stop()
_LOGGER.debug("Client disconnected %s", client.host)
hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_CLIENT_STOPPED, client.host
)
except ConnectionFailed:
await asyncio.sleep(interval)
except asyncio.TimeoutError:
continue
except asyncio.CancelledError:
raise
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception, aborting arcam client")
return
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import rotate_bbox
from chainercv.utils import generate_random_bbox
@testing.parameterize(*testing.product({
'angle': [180, 90, 0, -90, -180]
}))
class TestRotateBbox(unittest.TestCase):
def test_rotate_bbox(self):
size = (32, 24)
bbox = generate_random_bbox(10, size, 0, 24)
out = rotate_bbox(bbox, self.angle, size)
if self.angle % 180 != 0:
rotate_size = size[::-1]
else:
rotate_size = size
out = rotate_bbox(out, -1 * self.angle, rotate_size)
np.testing.assert_almost_equal(out, bbox, decimal=6)
testing.run_module(__name__, __file__)
|
import posixpath
from absl import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'gluster_replicas', 3,
'The number of Gluster replicas.')
flags.DEFINE_integer(
'gluster_stripes', 1,
'The number of Gluster stripes.')
def YumInstall(vm):
"""Installs the gluster package on the VM."""
# TODO(user): Install gluster for RHEL.
if FLAGS.os_type != os_types.CENTOS7:
raise NotImplementedError(
'PKB currently only supports installation of gluster on centos7 or '
'Debian-based VMs.')
vm.InstallEpelRepo()
vm.InstallPackages('centos-release-gluster')
vm.InstallPackages('glusterfs-server')
vm.RemoteCommand('sudo glusterd')
def AptInstall(vm):
"""Installs the gluster package on the VM."""
vm.RemoteCommand('sudo add-apt-repository ppa:gluster/glusterfs-6')
vm.AptUpdate()
vm.InstallPackages('glusterfs-server')
def MountGluster(vm, gluster_server, volume_name, mount_point):
"""Mounts a Gluster volume on the Virtual Machine.
Args:
vm: The VM to mount the Gluster volume on.
gluster_server: A Gluster server that knows about the volume.
volume_name: The name of the volume to mount.
mount_point: The location to mount the volume on 'vm'.
"""
vm.Install('gluster')
volume = '{ip}:/{volume_name}'.format(
ip=gluster_server.internal_ip, volume_name=volume_name)
vm.RemoteCommand('sudo mkdir -p %s' % mount_point)
vm.RemoteCommand('sudo mount -t glusterfs {volume} {mount_point}'.format(
volume=volume, mount_point=mount_point))
def _CreateVolume(vm, bricks, volume_name):
"""Creates a GlusterFS volume.
Args:
vm: The Virtual Machine to create the volume from.
bricks: A list of strings of the form "ip_address:/path/to/brick" which
will be combined to form the Gluster volume.
volume_name: The name of the volume which is being created.
"""
replicas = ('replica %s' % FLAGS.gluster_replicas
if FLAGS.gluster_replicas > 1 else '')
stripes = ('stripe %s' % FLAGS.gluster_stripes
if FLAGS.gluster_stripes > 1 else '')
vm.RemoteCommand(('sudo gluster volume create {volume_name} '
'{stripes} {replicas} {bricks}').format(
volume_name=volume_name, replicas=replicas,
stripes=stripes, bricks=' '.join(bricks)))
def _ProbePeer(vm1, vm2):
vm1.RemoteCommand('sudo gluster peer probe {internal_ip}'.format(
internal_ip=vm2.internal_ip))
def ConfigureServers(gluster_servers, volume_name):
"""Configures the Gluster cluster and creates a volume.
This function installs Gluster on all VMs passed into it, creates Gluster
bricks, adds all VMs to the trusted storage pool, creates a volume, and
starts it. After the volume is started, it can be mounted via MountGluster.
Args:
gluster_servers: The VMs that will be used to create the GlusterFS volume.
volume_name: The name of the volume to be created.
"""
vm_util.RunThreaded(lambda vm: vm.Install('gluster'), gluster_servers)
vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('sudo systemctl enable glusterd'),
gluster_servers)
vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('sudo systemctl start glusterd'),
gluster_servers)
bricks = []
for vm in gluster_servers:
for disk in vm.scratch_disks:
brick_path = posixpath.join(disk.mount_point, 'gluster_brick')
bricks.append('{internal_ip}:{brick_path}'.format(
internal_ip=vm.internal_ip, brick_path=brick_path))
vm.RemoteCommand('mkdir -p %s' % brick_path)
# Gluster servers need to be added to the trusted storage pool
# before they can be used to create a volume. Peers can only be
# added to the pool by probing them from a member of the pool.
if len(gluster_servers) > 1:
_ProbePeer(gluster_servers[0], gluster_servers[1])
_ProbePeer(gluster_servers[1], gluster_servers[0])
for vm in gluster_servers[2:]:
_ProbePeer(gluster_servers[0], vm)
_CreateVolume(gluster_servers[0], bricks, volume_name)
gluster_servers[0].RemoteCommand(
'sudo gluster volume start {volume_name}'.format(
volume_name=volume_name))
def DeleteVolume(gluster_server, volume_name):
"""Stops and deletes a Gluster volume."""
gluster_server.RemoteCommand(
'yes | sudo gluster volume stop %s' % volume_name)
gluster_server.RemoteCommand(
'yes | sudo gluster volume delete %s' % volume_name)
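# End-to-end sketch (gluster_servers and client_vm are hypothetical VM
# objects): configure a volume on the server VMs, mount it on a client, and
# tear it down afterwards.
#
#   ConfigureServers(gluster_servers, 'vol0')
#   MountGluster(client_vm, gluster_servers[0], 'vol0', '/mnt/gluster')
#   ...  # run the benchmark against /mnt/gluster
#   DeleteVolume(gluster_servers[0], 'vol0')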
|
from datetime import timedelta
from homeassistant.components.vera import SubscriptionRegistry
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import utcnow
from tests.async_mock import MagicMock
from tests.common import async_fire_time_changed
async def test_subscription_registry(hass: HomeAssistant) -> None:
"""Test subscription registry polling."""
subscription_registry = SubscriptionRegistry(hass)
# pylint: disable=protected-access
subscription_registry.poll_server_once = poll_server_once_mock = MagicMock()
poll_server_once_mock.return_value = True
await hass.async_add_executor_job(subscription_registry.start)
async_fire_time_changed(hass, utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
poll_server_once_mock.assert_called_once()
    # The last poll was successful and has already scheduled the next poll for
    # 1s in the future. This will make that upcoming poll fail.
poll_server_once_mock.return_value = False
# Asserting future poll runs.
poll_server_once_mock.reset_mock()
async_fire_time_changed(hass, utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
poll_server_once_mock.assert_called_once()
    # Asserting a future poll is delayed due to the failure set above.
    poll_server_once_mock.reset_mock()
    async_fire_time_changed(hass, utcnow() + timedelta(seconds=2))
    await hass.async_block_till_done()
    poll_server_once_mock.assert_not_called()
async_fire_time_changed(hass, utcnow() + timedelta(seconds=60))
await hass.async_block_till_done()
poll_server_once_mock.assert_called_once()
poll_server_once_mock.reset_mock()
await hass.async_add_executor_job(subscription_registry.stop)
# Assert no further polling is performed.
async_fire_time_changed(hass, utcnow() + timedelta(seconds=65))
await hass.async_block_till_done()
poll_server_once_mock.assert_not_called()
|
import json
import os
import time
from august.activity import (
ACTIVITY_ACTIONS_DOOR_OPERATION,
ACTIVITY_ACTIONS_DOORBELL_DING,
ACTIVITY_ACTIONS_DOORBELL_MOTION,
ACTIVITY_ACTIONS_DOORBELL_VIEW,
ACTIVITY_ACTIONS_LOCK_OPERATION,
DoorbellDingActivity,
DoorbellMotionActivity,
DoorbellViewActivity,
DoorOperationActivity,
LockOperationActivity,
)
from august.authenticator import AuthenticationState
from august.doorbell import Doorbell, DoorbellDetail
from august.lock import Lock, LockDetail
from homeassistant.components.august import (
CONF_LOGIN_METHOD,
CONF_PASSWORD,
CONF_USERNAME,
DOMAIN,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, MagicMock, PropertyMock, patch
from tests.common import load_fixture
def _mock_get_config():
"""Return a default august config."""
return {
DOMAIN: {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "mocked_username",
CONF_PASSWORD: "mocked_password",
}
}
def _mock_authenticator(auth_state):
"""Mock an august authenticator."""
authenticator = MagicMock()
type(authenticator).state = PropertyMock(return_value=auth_state)
return authenticator
@patch("homeassistant.components.august.gateway.ApiAsync")
@patch("homeassistant.components.august.gateway.AuthenticatorAsync.async_authenticate")
async def _mock_setup_august(hass, api_instance, authenticate_mock, api_mock):
"""Set up august integration."""
authenticate_mock.side_effect = MagicMock(
return_value=_mock_august_authentication(
"original_token", 1234, AuthenticationState.AUTHENTICATED
)
)
api_mock.return_value = api_instance
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
return True
async def _create_august_with_devices(
hass, devices, api_call_side_effects=None, activities=None
):
if api_call_side_effects is None:
api_call_side_effects = {}
device_data = {"doorbells": [], "locks": []}
for device in devices:
if isinstance(device, LockDetail):
device_data["locks"].append(
{"base": _mock_august_lock(device.device_id), "detail": device}
)
elif isinstance(device, DoorbellDetail):
device_data["doorbells"].append(
{"base": _mock_august_doorbell(device.device_id), "detail": device}
)
else:
            raise ValueError(f"Unsupported device type: {type(device).__name__}")
def _get_device_detail(device_type, device_id):
for device in device_data[device_type]:
if device["detail"].device_id == device_id:
return device["detail"]
        raise ValueError(f"Device not found: {device_id}")
def _get_base_devices(device_type):
base_devices = []
for device in device_data[device_type]:
base_devices.append(device["base"])
return base_devices
def get_lock_detail_side_effect(access_token, device_id):
return _get_device_detail("locks", device_id)
def get_doorbell_detail_side_effect(access_token, device_id):
return _get_device_detail("doorbells", device_id)
def get_operable_locks_side_effect(access_token):
return _get_base_devices("locks")
def get_doorbells_side_effect(access_token):
return _get_base_devices("doorbells")
def get_house_activities_side_effect(access_token, house_id, limit=10):
if activities is not None:
return activities
return []
def lock_return_activities_side_effect(access_token, device_id):
lock = _get_device_detail("locks", device_id)
return [
# There is a check to prevent out of order events
# so we set the doorclosed & lock event in the future
# to prevent a race condition where we reject the event
# because it happened before the dooropen & unlock event.
_mock_lock_operation_activity(lock, "lock", 2000),
_mock_door_operation_activity(lock, "doorclosed", 2000),
]
def unlock_return_activities_side_effect(access_token, device_id):
lock = _get_device_detail("locks", device_id)
return [
_mock_lock_operation_activity(lock, "unlock", 0),
_mock_door_operation_activity(lock, "dooropen", 0),
]
if "get_lock_detail" not in api_call_side_effects:
api_call_side_effects["get_lock_detail"] = get_lock_detail_side_effect
if "get_doorbell_detail" not in api_call_side_effects:
api_call_side_effects["get_doorbell_detail"] = get_doorbell_detail_side_effect
if "get_operable_locks" not in api_call_side_effects:
api_call_side_effects["get_operable_locks"] = get_operable_locks_side_effect
if "get_doorbells" not in api_call_side_effects:
api_call_side_effects["get_doorbells"] = get_doorbells_side_effect
if "get_house_activities" not in api_call_side_effects:
api_call_side_effects["get_house_activities"] = get_house_activities_side_effect
if "lock_return_activities" not in api_call_side_effects:
api_call_side_effects[
"lock_return_activities"
] = lock_return_activities_side_effect
if "unlock_return_activities" not in api_call_side_effects:
api_call_side_effects[
"unlock_return_activities"
] = unlock_return_activities_side_effect
return await _mock_setup_august_with_api_side_effects(hass, api_call_side_effects)
async def _mock_setup_august_with_api_side_effects(hass, api_call_side_effects):
api_instance = MagicMock(name="Api")
if api_call_side_effects["get_lock_detail"]:
type(api_instance).async_get_lock_detail = AsyncMock(
side_effect=api_call_side_effects["get_lock_detail"]
)
if api_call_side_effects["get_operable_locks"]:
type(api_instance).async_get_operable_locks = AsyncMock(
side_effect=api_call_side_effects["get_operable_locks"]
)
if api_call_side_effects["get_doorbells"]:
type(api_instance).async_get_doorbells = AsyncMock(
side_effect=api_call_side_effects["get_doorbells"]
)
if api_call_side_effects["get_doorbell_detail"]:
type(api_instance).async_get_doorbell_detail = AsyncMock(
side_effect=api_call_side_effects["get_doorbell_detail"]
)
if api_call_side_effects["get_house_activities"]:
type(api_instance).async_get_house_activities = AsyncMock(
side_effect=api_call_side_effects["get_house_activities"]
)
if api_call_side_effects["lock_return_activities"]:
type(api_instance).async_lock_return_activities = AsyncMock(
side_effect=api_call_side_effects["lock_return_activities"]
)
if api_call_side_effects["unlock_return_activities"]:
type(api_instance).async_unlock_return_activities = AsyncMock(
side_effect=api_call_side_effects["unlock_return_activities"]
)
return await _mock_setup_august(hass, api_instance)
def _mock_august_authentication(token_text, token_timestamp, state):
authentication = MagicMock(name="august.authentication")
type(authentication).state = PropertyMock(return_value=state)
type(authentication).access_token = PropertyMock(return_value=token_text)
type(authentication).access_token_expires = PropertyMock(
return_value=token_timestamp
)
return authentication
def _mock_august_lock(lockid="mocklockid1", houseid="mockhouseid1"):
return Lock(lockid, _mock_august_lock_data(lockid=lockid, houseid=houseid))
def _mock_august_doorbell(deviceid="mockdeviceid1", houseid="mockhouseid1"):
return Doorbell(
deviceid, _mock_august_doorbell_data(deviceid=deviceid, houseid=houseid)
)
def _mock_august_doorbell_data(deviceid="mockdeviceid1", houseid="mockhouseid1"):
return {
"_id": deviceid,
"DeviceID": deviceid,
"name": f"{deviceid} Name",
"HouseID": houseid,
"UserType": "owner",
"serialNumber": "mockserial",
"battery": 90,
"status": "standby",
"currentFirmwareVersion": "mockfirmware",
"Bridge": {
"_id": "bridgeid1",
"firmwareVersion": "mockfirm",
"operative": True,
},
"LockStatus": {"doorState": "open"},
}
def _mock_august_lock_data(lockid="mocklockid1", houseid="mockhouseid1"):
return {
"_id": lockid,
"LockID": lockid,
"LockName": f"{lockid} Name",
"HouseID": houseid,
"UserType": "owner",
"SerialNumber": "mockserial",
"battery": 90,
"currentFirmwareVersion": "mockfirmware",
"Bridge": {
"_id": "bridgeid1",
"firmwareVersion": "mockfirm",
"operative": True,
},
"LockStatus": {"doorState": "open"},
}
async def _mock_operative_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.online.json")
async def _mock_inoperative_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.offline.json")
async def _mock_activities_from_fixture(hass, path):
json_dict = await _load_json_fixture(hass, path)
activities = []
for activity_json in json_dict:
activity = _activity_from_dict(activity_json)
if activity:
activities.append(activity)
return activities
async def _mock_lock_from_fixture(hass, path):
json_dict = await _load_json_fixture(hass, path)
return LockDetail(json_dict)
async def _mock_doorbell_from_fixture(hass, path):
json_dict = await _load_json_fixture(hass, path)
return DoorbellDetail(json_dict)
async def _load_json_fixture(hass, path):
fixture = await hass.async_add_executor_job(
load_fixture, os.path.join("august", path)
)
return json.loads(fixture)
async def _mock_doorsense_enabled_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.online_with_doorsense.json")
async def _mock_doorsense_missing_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.online_missing_doorsense.json")
def _mock_lock_operation_activity(lock, action, offset):
return LockOperationActivity(
{
"dateTime": (time.time() + offset) * 1000,
"deviceID": lock.device_id,
"deviceType": "lock",
"action": action,
}
)
def _mock_door_operation_activity(lock, action, offset):
return DoorOperationActivity(
{
"dateTime": (time.time() + offset) * 1000,
"deviceID": lock.device_id,
"deviceType": "lock",
"action": action,
}
)
def _activity_from_dict(activity_dict):
action = activity_dict.get("action")
activity_dict["dateTime"] = time.time() * 1000
if action in ACTIVITY_ACTIONS_DOORBELL_DING:
return DoorbellDingActivity(activity_dict)
if action in ACTIVITY_ACTIONS_DOORBELL_MOTION:
return DoorbellMotionActivity(activity_dict)
if action in ACTIVITY_ACTIONS_DOORBELL_VIEW:
return DoorbellViewActivity(activity_dict)
if action in ACTIVITY_ACTIONS_LOCK_OPERATION:
return LockOperationActivity(activity_dict)
if action in ACTIVITY_ACTIONS_DOOR_OPERATION:
return DoorOperationActivity(activity_dict)
return None
|
from collections import OrderedDict
from contextlib import contextmanager
import time
from absl import flags
from perfkitbenchmarker import sample
MEASUREMENTS_FLAG_NAME = 'timing_measurements'
# Valid options that can be included in the flag's list value.
MEASUREMENTS_NONE = 'none'
MEASUREMENTS_END_TO_END_RUNTIME = 'end_to_end_runtime'
MEASUREMENTS_RUNTIMES = 'runtimes'
MEASUREMENTS_TIMESTAMPS = 'timestamps'
MEASUREMENTS_ALL = OrderedDict([
(MEASUREMENTS_NONE, (
'No measurements included (same as providing an empty list, and cannot '
'be combined with other options).')),
(MEASUREMENTS_END_TO_END_RUNTIME, (
'Includes an end-to-end runtime measurement.')),
(MEASUREMENTS_RUNTIMES, (
'Includes runtimes of all measured intervals, including the end-to-end '
'runtime, the time taken by the benchmark module Prepare, Run, and '
'Cleanup functions, and other important intervals.')),
(MEASUREMENTS_TIMESTAMPS, (
'Includes start and stop timestamps of all measured intervals.'))])
def EndToEndRuntimeMeasurementEnabled():
"""Returns whether end-to-end runtime measurement is globally enabled."""
return (MEASUREMENTS_END_TO_END_RUNTIME in flags.FLAGS.timing_measurements or
RuntimeMeasurementsEnabled())
def RuntimeMeasurementsEnabled():
"""Returns whether runtime measurements are globally enabled."""
return MEASUREMENTS_RUNTIMES in flags.FLAGS.timing_measurements
def TimestampMeasurementsEnabled():
"""Returns whether timestamps measurements are globally enabled."""
return MEASUREMENTS_TIMESTAMPS in flags.FLAGS.timing_measurements
def ValidateMeasurementsFlag(options_list):
"""Verifies correct usage of the measurements configuration flag.
The user of the flag must provide at least one option. All provided options
must be valid. The NONE option cannot be combined with other options.
Args:
options_list: A list of strings parsed from the provided value for the
flag.
Returns:
True if the list of options provided as the value for the flag meets all
the documented requirements.
Raises:
flags.ValidationError: If the list of options provided as the value for
the flag does not meet the documented requirements.
"""
for option in options_list:
if option not in MEASUREMENTS_ALL:
raise flags.ValidationError(
'%s: Invalid value for --%s' % (option, MEASUREMENTS_FLAG_NAME))
if option == MEASUREMENTS_NONE and len(options_list) != 1:
raise flags.ValidationError(
'%s: Cannot combine with other --%s options' % (
option, MEASUREMENTS_FLAG_NAME))
return True
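# Validation sketch (hypothetical flag values):
#
#   --timing_measurements=runtimes,timestamps   # passes validation
#   --timing_measurements=none,runtimes         # raises flags.ValidationError,
#                                               # 'none' cannot be combined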
flags.DEFINE_list(
MEASUREMENTS_FLAG_NAME, MEASUREMENTS_END_TO_END_RUNTIME,
'Comma-separated list of values from <%s> that selects which timing '
'measurements to enable. Measurements will be included as samples in the '
'benchmark results. %s' % ('|'.join(MEASUREMENTS_ALL), ' '.join([
'%s: %s' % (option, description)
for option, description in MEASUREMENTS_ALL.items()
])))
flags.register_validator(
MEASUREMENTS_FLAG_NAME, ValidateMeasurementsFlag)
def _GenerateIntervalSamples(interval, include_timestamps):
"""Generates Samples for a single interval timed by IntervalTimer.Measure.
Args:
interval: A (name, start_time, stop_time) tuple from a call to
IntervalTimer.Measure.
include_timestamps: A Boolean that controls whether Samples containing the
start and stop timestamps are added to the generated list.
Returns:
    A list of one to three Samples: a runtime Sample is always included, and
    Samples for the start and stop timestamps follow when include_timestamps
    is True.
"""
samples = []
name = interval[0]
start_time = interval[1]
stop_time = interval[2]
elapsed_time = stop_time - start_time
samples.append(sample.Sample(name + ' Runtime', elapsed_time, 'seconds'))
if include_timestamps:
samples.append(sample.Sample(
name + ' Start Timestamp', start_time, 'seconds'))
samples.append(sample.Sample(
name + ' Stop Timestamp', stop_time, 'seconds'))
return samples
class IntervalTimer(object):
"""Class that can measure time and generate samples for each measurement.
Attributes:
intervals: A list of one 3-tuple per measured interval. Each tuple is of the
form (name string, start_time float, stop_time float).
"""
def __init__(self):
self.intervals = []
@contextmanager
def Measure(self, name):
"""Records the start and stop times of the enclosed interval.
Args:
name: A string that names the interval.
"""
start_time = time.time()
yield
stop_time = time.time()
self.intervals.append((name, start_time, stop_time))
def GenerateSamples(self):
"""Generates Samples based on the times recorded in all calls to Measure.
Returns:
      A list of Samples. The list contains Samples for each interval that was
      wrapped by a call to Measure, generated per interval in the order of
      runtime, then start and stop timestamps when timestamp measurements are
      enabled. All Samples for one interval appear before any Samples from
      the next interval.
"""
include_timestamps = TimestampMeasurementsEnabled()
return [
sample for interval in self.intervals for sample in
_GenerateIntervalSamples(interval, include_timestamps)]
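# Usage sketch (assuming the timing_measurements flag has been parsed):
#
#   timer = IntervalTimer()
#   with timer.Measure('Prepare'):
#       ...  # the work being timed
#   samples = timer.GenerateSamples()  # one runtime Sample per interval,
#                                      # plus timestamps when enabled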
|
import os
import unittest
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import mxnet_benchmark
class MxnetBenchmarkTestCase(unittest.TestCase,
test_util.SamplesTestMixin):
def setUp(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'mxnet_output.txt')
with open(path, 'r') as fp:
self.contents = fp.read()
  def testExtractThroughput(self):
result = mxnet_benchmark._ExtractThroughput(self.contents)
self.assertEqual(result, 540.0266666666666)
if __name__ == '__main__':
unittest.main()
|
import re
from collections import defaultdict
from appconf import AppConf
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import Group as DjangoGroup
from django.db import models
from django.db.models.signals import m2m_changed, post_save, pre_delete
from django.dispatch import receiver
from django.http import Http404
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext
from weblate.auth.data import (
ACL_GROUPS,
GLOBAL_PERM_NAMES,
SELECTION_ALL,
SELECTION_ALL_PROTECTED,
SELECTION_ALL_PUBLIC,
SELECTION_COMPONENT_LIST,
SELECTION_MANUAL,
)
from weblate.auth.permissions import SPECIALS, check_global_permission, check_permission
from weblate.auth.utils import (
create_anonymous,
migrate_groups,
migrate_permissions,
migrate_roles,
)
from weblate.lang.models import Language
from weblate.trans.defines import EMAIL_LENGTH, FULLNAME_LENGTH, USERNAME_LENGTH
from weblate.trans.fields import RegexField
from weblate.trans.models import ComponentList, Project
from weblate.utils.decorators import disable_for_loaddata
from weblate.utils.fields import EmailField, UsernameField
from weblate.utils.validators import (
validate_email,
validate_fullname,
validate_username,
)
class Permission(models.Model):
codename = models.CharField(max_length=100, unique=True)
name = models.CharField(max_length=200)
class Meta:
verbose_name = _("Permission")
verbose_name_plural = _("Permissions")
def __str__(self):
name = gettext(self.name)
if self.codename in GLOBAL_PERM_NAMES:
return gettext("%s (site wide permission)") % name
return name
class Role(models.Model):
name = models.CharField(verbose_name=_("Name"), max_length=200)
permissions = models.ManyToManyField(
Permission,
verbose_name=_("Permissions"),
blank=True,
help_text=_("Choose permissions granted to this role."),
)
def __str__(self):
return pgettext("Access control role", self.name)
class GroupManager(BaseUserManager):
def for_project(self, project):
"""All groups for a project."""
return self.filter(
projects=project, internal=True, name__contains="@"
).order_by("name")
class Group(models.Model):
SELECTION_MANUAL = 0
SELECTION_ALL = 1
SELECTION_COMPONENT_LIST = 2
name = models.CharField(_("Name"), max_length=150, unique=True)
roles = models.ManyToManyField(
Role,
verbose_name=_("Roles"),
blank=True,
help_text=_("Choose roles granted to this group."),
)
project_selection = models.IntegerField(
verbose_name=_("Project selection"),
choices=(
(SELECTION_MANUAL, _("As defined")),
(SELECTION_ALL, _("All projects")),
(SELECTION_ALL_PUBLIC, _("All public projects")),
(SELECTION_ALL_PROTECTED, _("All protected projects")),
(SELECTION_COMPONENT_LIST, _("From component list")),
),
default=SELECTION_MANUAL,
)
projects = models.ManyToManyField(
"trans.Project", verbose_name=_("Projects"), blank=True
)
components = models.ManyToManyField(
"trans.Component", verbose_name=_("Components"), blank=True
)
componentlists = models.ManyToManyField(
"trans.ComponentList",
verbose_name=_("Component lists"),
blank=True,
)
language_selection = models.IntegerField(
verbose_name=_("Language selection"),
choices=(
(SELECTION_MANUAL, _("As defined")),
(SELECTION_ALL, _("All languages")),
),
default=SELECTION_MANUAL,
)
languages = models.ManyToManyField(
"lang.Language", verbose_name=_("Languages"), blank=True
)
internal = models.BooleanField(
verbose_name=_("Weblate internal group"), default=False
)
objects = GroupManager()
def __str__(self):
return pgettext("Access control group", self.name)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.language_selection == SELECTION_ALL:
self.languages.set(Language.objects.all())
if self.project_selection == SELECTION_ALL:
self.projects.set(Project.objects.all())
elif self.project_selection == SELECTION_ALL_PUBLIC:
self.projects.set(
Project.objects.filter(access_control=Project.ACCESS_PUBLIC), clear=True
)
elif self.project_selection == SELECTION_ALL_PROTECTED:
self.projects.set(
Project.objects.filter(
access_control__in=(Project.ACCESS_PUBLIC, Project.ACCESS_PROTECTED)
),
clear=True,
)
elif self.project_selection == SELECTION_COMPONENT_LIST:
self.projects.set(
                Project.objects.filter(
                    component__componentlist__in=self.componentlists.all()
                ),
clear=True,
)
@cached_property
def short_name(self):
if "@" in self.name:
return pgettext("Per project access control group", self.name.split("@")[1])
return self.__str__()
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
"""Create and save a User with the given username, e-mail and password."""
if not username:
raise ValueError("The given username must be set")
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault("is_superuser", False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault("is_superuser", True)
if extra_fields.get("is_superuser") is not True:
raise ValueError("Superuser must have is_superuser=True.")
return self._create_user(username, email, password, **extra_fields)
def for_project(self, project):
"""Return all users having ACL for this project."""
groups = project.group_set.filter(internal=True, name__contains="@")
return self.filter(groups__in=groups).distinct()
def having_perm(self, perm, project):
"""All users having explicit permission on a project.
Note: This intentionally does not list superusers.
"""
groups = Group.objects.filter(
roles__permissions__codename=perm, projects=project
)
return self.filter(groups__in=groups).distinct()
def all_admins(self, project):
"""All admins in a project."""
return self.having_perm("project.edit", project)
def order(self):
return self.order_by("username")
def get_anonymous():
"""Return an anonymous user."""
return User.objects.select_related("profile").get(
username=settings.ANONYMOUS_USER_NAME
)
def convert_groups(objs):
"""Convert Django Group objects to Weblate ones."""
objs = list(objs)
for idx, obj in enumerate(objs):
if isinstance(obj, DjangoGroup):
objs[idx] = Group.objects.get_or_create(name=obj.name)[0]
return objs
def wrap_group(func):
"""Wrapper to replace Django Group instances by Weblate Group instances."""
def group_wrapper(self, *objs, **kwargs):
objs = convert_groups(objs)
return func(self, *objs, **kwargs)
return group_wrapper
def wrap_group_list(func):
"""Wrapper to replace Django Group instances by Weblate Group instances."""
def group_list_wrapper(self, objs, **kwargs):
objs = convert_groups(objs)
return func(self, objs, **kwargs)
return group_list_wrapper
class GroupManyToManyField(models.ManyToManyField):
"""Customized field to accept Django Groups objects as well."""
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Get related descriptor
descriptor = getattr(cls, self.name)
        # We only care about the forward relation
if not descriptor.reverse:
# Running in migrations
if isinstance(descriptor.rel.model, str):
return
# Get related manager class
related_manager_cls = descriptor.related_manager_cls
# Monkey patch it to accept Django Group instances as well
related_manager_cls.add = wrap_group(related_manager_cls.add)
related_manager_cls.remove = wrap_group(related_manager_cls.remove)
related_manager_cls.set = wrap_group_list(related_manager_cls.set)
class User(AbstractBaseUser):
username = UsernameField(
_("Username"),
max_length=USERNAME_LENGTH,
unique=True,
help_text=_(
"Username may only contain letters, "
"numbers or the following characters: @ . + - _"
),
validators=[validate_username],
error_messages={"unique": _("A user with that username already exists.")},
)
full_name = models.CharField(
_("Full name"),
max_length=FULLNAME_LENGTH,
blank=False,
validators=[validate_fullname],
)
email = EmailField( # noqa: DJ01
_("E-mail"),
blank=False,
null=True,
max_length=EMAIL_LENGTH,
unique=True,
validators=[validate_email],
)
is_superuser = models.BooleanField(
_("Superuser status"),
default=False,
help_text=_("User has all possible permissions."),
)
is_active = models.BooleanField(
_("Active"),
default=True,
help_text=_("Mark user as inactive instead of removing."),
)
date_joined = models.DateTimeField(_("Date joined"), default=timezone.now)
groups = GroupManyToManyField(
Group,
verbose_name=_("Groups"),
blank=True,
help_text=_(
"The user is granted all permissions included in "
"membership of these groups."
),
)
objects = UserManager()
EMAIL_FIELD = "email"
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email", "full_name"]
DUMMY_FIELDS = ("first_name", "last_name", "is_staff")
def __str__(self):
return self.full_name
def get_absolute_url(self):
return reverse("user_page", kwargs={"user": self.username})
def save(self, *args, **kwargs):
if self.is_anonymous:
self.is_active = False
# Generate full name from parts
        # This is needed with LDAP authentication when the
        # server does not provide the full name
if "first_name" in self.extra_data and "last_name" in self.extra_data:
self.full_name = "{first_name} {last_name}".format(**self.extra_data)
elif "first_name" in self.extra_data:
self.full_name = self.extra_data["first_name"]
elif "last_name" in self.extra_data:
self.full_name = self.extra_data["last_name"]
if not self.email:
self.email = None
super().save(*args, **kwargs)
self.clear_cache()
def __init__(self, *args, **kwargs):
self.extra_data = {}
self.cla_cache = {}
self._permissions = None
self.current_subscription = None
for name in self.DUMMY_FIELDS:
if name in kwargs:
self.extra_data[name] = kwargs.pop(name)
super().__init__(*args, **kwargs)
def clear_cache(self):
self.cla_cache = {}
self._permissions = None
perm_caches = (
"project_permissions",
"component_permissions",
"allowed_projects",
"allowed_project_ids",
"watched_projects",
"owned_projects",
)
for name in perm_caches:
if name in self.__dict__:
del self.__dict__[name]
def has_usable_password(self):
# For some reason Django says that empty string is a valid password
return self.password and super().has_usable_password()
@cached_property
def is_anonymous(self):
return self.username == settings.ANONYMOUS_USER_NAME
@cached_property
def is_authenticated(self):
return not self.is_anonymous
def get_full_name(self):
return self.full_name
def get_short_name(self):
return self.full_name
def __setattr__(self, name, value):
"""Mimic first/last name for third party auth and ignore is_staff flag."""
if name in self.DUMMY_FIELDS:
self.extra_data[name] = value
else:
super().__setattr__(name, value)
def has_module_perms(self, module):
"""Compatibility API for admin interface."""
return self.is_superuser
@property
def is_staff(self):
"""Compatibility API for admin interface."""
return self.is_superuser
@property
def first_name(self):
"""Compatibility API for third party modules."""
return ""
@property
def last_name(self):
"""Compatibility API for third party modules."""
return self.full_name
def has_perms(self, perm_list, obj=None):
return all(self.has_perm(perm, obj) for perm in perm_list)
# pylint: disable=keyword-arg-before-vararg
def has_perm(self, perm, obj=None):
"""Permission check."""
# Weblate global scope permissions
if perm in GLOBAL_PERM_NAMES:
return check_global_permission(self, perm, obj)
# Compatibility API for admin interface
if obj is None:
if not self.is_superuser:
return False
# Check permissions restrictions
allowed = settings.AUTH_RESTRICT_ADMINS.get(self.username)
return allowed is None or perm in allowed
        # Validate perms; this is expensive to perform, so it is only done in
        # tests by default
if settings.AUTH_VALIDATE_PERMS and ":" not in perm:
try:
Permission.objects.get(codename=perm)
except Permission.DoesNotExist:
raise ValueError(f"Invalid permission: {perm}")
# Special permission functions
if perm in SPECIALS:
return SPECIALS[perm](self, perm, obj)
# Generic permission
return check_permission(self, perm, obj)
def can_access_project(self, project):
"""Check access to given project."""
if self.is_superuser:
return True
return project.pk in self.project_permissions
def check_access(self, project):
"""Raise an error if user is not allowed to access this project."""
if not self.can_access_project(project):
raise Http404("Access denied")
def can_access_component(self, component):
"""Check access to given component."""
if self.is_superuser:
return True
if not self.can_access_project(component.project):
return False
return not component.restricted or component.pk in self.component_permissions
def check_access_component(self, component):
"""Raise an error if user is not allowed to access this component."""
if not self.can_access_component(component):
raise Http404("Access denied")
@cached_property
def allowed_projects(self):
"""List of allowed projects."""
if self.is_superuser:
return Project.objects.order()
return Project.objects.filter(pk__in=self.allowed_project_ids)
@cached_property
def allowed_project_ids(self):
"""
Set with ids of allowed projects.
This is more effective to use in queries than doing complex joins.
"""
if self.is_superuser:
return set(Project.objects.values_list("id", flat=True))
return set(self.project_permissions.keys())
@cached_property
def watched_projects(self):
"""
List of watched projects.
        Ensure ACL filtering applies (the user could have been removed
        from the project in the meantime).
"""
return self.profile.watched.filter(id__in=self.allowed_project_ids)
@cached_property
def owned_projects(self):
return self.projects_with_perm("project.edit")
def _fetch_permissions(self):
"""Fetch all user permissions into a dictionary."""
projects = defaultdict(list)
components = defaultdict(list)
for group in self.groups.iterator():
languages = set(
Group.languages.through.objects.filter(group=group).values_list(
"language_id", flat=True
)
)
permissions = set(
group.roles.values_list("permissions__codename", flat=True)
)
# Component list specific permissions
componentlist_values = group.componentlists.values_list(
"components__id", "components__project_id"
)
if componentlist_values:
for component, project in componentlist_values:
components[component].append((permissions, languages))
# Grant access to the project
projects[project].append(((), languages))
continue
# Component specific permissions
component_values = group.components.values_list("id", "project_id")
if component_values:
for component, project in component_values:
components[component].append((permissions, languages))
# Grant access to the project
projects[project].append(((), languages))
continue
# Project specific permissions
for project in Group.projects.through.objects.filter(
group=group
).values_list("project_id", flat=True):
projects[project].append((permissions, languages))
self._permissions = {"projects": projects, "components": components}
@cached_property
def project_permissions(self):
"""Dictionary with all project permissions."""
if self._permissions is None:
self._fetch_permissions()
return self._permissions["projects"]
@cached_property
def component_permissions(self):
"""Dictionary with all project permissions."""
if self._permissions is None:
self._fetch_permissions()
return self._permissions["components"]
def projects_with_perm(self, perm):
if self.is_superuser:
return Project.objects.all().order()
groups = Group.objects.filter(user=self, roles__permissions__codename=perm)
return Project.objects.filter(group__in=groups).distinct().order()
def get_visible_name(self):
# Get full name from database or username
result = self.full_name or self.username
return result.replace("<", "").replace(">", "").replace('"', "")
def get_author_name(self, email=True):
"""Return formatted author name with e-mail."""
        # The < > are replaced to avoid tricking Git into using
        # the name as an e-mail
full_name = self.get_visible_name()
# Add e-mail if we are asked for it
if not email:
return full_name
return f"{full_name} <{self.email}>"
class AutoGroup(models.Model):
match = RegexField(
verbose_name=_("E-mail regular expression"),
max_length=200,
default="^.*$",
help_text=_("Regular expression used to match user e-mail."),
)
group = models.ForeignKey(
Group, verbose_name=_("Group to assign"), on_delete=models.deletion.CASCADE
)
class Meta:
verbose_name = _("Automatic group assignment")
verbose_name_plural = _("Automatic group assignments")
def __str__(self):
return f"Automatic rule for {self.group}"
def create_groups(update):
"""Creates standard groups and gives them permissions."""
# Create permissions and roles
migrate_permissions(Permission)
new_roles = migrate_roles(Role, Permission)
migrate_groups(Group, Role, update)
# Create anonymous user
create_anonymous(User, Group, update)
# Automatic assignment to the users group
group = Group.objects.get(name="Users")
if not AutoGroup.objects.filter(group=group).exists():
AutoGroup.objects.create(group=group, match="^.*$")
group = Group.objects.get(name="Viewers")
if not AutoGroup.objects.filter(group=group).exists():
AutoGroup.objects.create(group=group, match="^.*$")
# Create new per project groups
if new_roles:
for project in Project.objects.iterator():
project.save()
def sync_create_groups(sender, **kwargs):
"""Create default groups."""
create_groups(False)
def auto_assign_group(user):
"""Automatic group assignment based on user e-mail."""
if user.username == settings.ANONYMOUS_USER_NAME:
return
# Add user to automatic groups
for auto in AutoGroup.objects.prefetch_related("group"):
if re.match(auto.match, user.email or ""):
user.groups.add(auto.group)
@receiver(m2m_changed, sender=ComponentList.components.through)
@disable_for_loaddata
def change_componentlist(sender, instance, action, **kwargs):
if not action.startswith("post_"):
return
groups = Group.objects.filter(
componentlists=instance, project_selection=Group.SELECTION_COMPONENT_LIST
)
for group in groups:
group.projects.set(
Project.objects.filter(component__componentlist=instance), clear=True
)
@receiver(post_save, sender=User)
@disable_for_loaddata
def auto_group_upon_save(sender, instance, created=False, **kwargs):
"""Automatically add user to Users group."""
if created:
auto_assign_group(instance)
@receiver(post_save, sender=Language)
@disable_for_loaddata
def setup_language_groups(sender, instance, **kwargs):
"""Set up group objects upon saving language."""
auto_languages = Group.objects.filter(language_selection=SELECTION_ALL)
for group in auto_languages:
group.languages.add(instance)
@receiver(post_save, sender=Project)
@disable_for_loaddata
def setup_project_groups(sender, instance, **kwargs):
"""Set up group objects upon saving project."""
# Handle group automation to set project visibility
auto_projects = Group.objects.filter(
project_selection__in=(
SELECTION_ALL,
SELECTION_ALL_PUBLIC,
SELECTION_ALL_PROTECTED,
)
)
for group in auto_projects:
group.save()
old_access_control = instance.old_access_control
instance.old_access_control = instance.access_control
if instance.access_control == Project.ACCESS_CUSTOM:
if old_access_control == Project.ACCESS_CUSTOM:
return
# Do cleanup of previous setup
Group.objects.filter(
name__contains="@", internal=True, projects=instance
).delete()
return
# Choose groups to configure
if instance.access_control == Project.ACCESS_PUBLIC:
groups = {"Administration", "Review"}
else:
groups = set(ACL_GROUPS.keys())
# Remove review group if review is not enabled
if not instance.source_review and not instance.translation_review:
groups.remove("Review")
# Remove billing if billing is not installed
if "weblate.billing" not in settings.INSTALLED_APPS:
groups.discard("Billing")
# Create role specific groups
handled = set()
for group_name in groups:
name = f"{instance.name}@{group_name}"
try:
group = instance.group_set.get(
internal=True, name__endswith=f"@{group_name}"
)
            # Update existing group (to handle rename)
if group.name != name:
group.name = name
group.save()
except Group.DoesNotExist:
# Create new group
group, created = Group.objects.get_or_create(
internal=True,
name=name,
defaults={
"project_selection": SELECTION_MANUAL,
"language_selection": SELECTION_ALL,
},
)
if created:
group.projects.add(instance)
group.roles.set(
Role.objects.filter(name=ACL_GROUPS[group_name]), clear=True
)
handled.add(group.pk)
# Remove stale groups
instance.group_set.filter(name__contains="@", internal=True).exclude(
pk__in=handled
).delete()
@receiver(pre_delete, sender=Project)
def cleanup_group_acl(sender, instance, **kwargs):
instance.group_set.filter(name__contains="@", internal=True).delete()
class WeblateAuthConf(AppConf):
"""Authentication settings."""
AUTH_VALIDATE_PERMS = False
AUTH_RESTRICT_ADMINS = {}
# Anonymous user name
ANONYMOUS_USER_NAME = "anonymous"
SESSION_COOKIE_AGE_AUTHENTICATED = 1209600
class Meta:
prefix = ""
|
from __future__ import print_function
import os
import sys
import argparse
import paramiko
SSH_DIRS = [os.path.expanduser('~/.ssh'), os.path.join(os.environ['STASH_ROOT'], '.ssh')]
key_mode = {'rsa': 'rsa', 'dsa': 'dss'}
def main(args):
    ap = argparse.ArgumentParser()
ap.add_argument('-t', choices=('rsa', 'dsa'), default='rsa', action='store', dest='type', help='Key Type: (rsa,dsa)')
ap.add_argument('-b', action='store', dest='bits', default=1024, type=int, help='bits for key gen. default: 1024')
ap.add_argument('-N', dest='password', default=None, action='store', help='password default: None')
    ap.add_argument('-f', dest='filename', default=None, action='store', help='Filename default: id_rsa/dsa')
    ns = ap.parse_args(args)
# Keygen for keypair
for SSH_DIR in SSH_DIRS:
if not os.path.isdir(SSH_DIR):
os.mkdir(SSH_DIR)
try:
k = None
if ns.type == 'rsa':
k = paramiko.RSAKey.generate(ns.bits)
filename = ns.filename or 'id_rsa'
elif ns.type == 'dsa':
k = paramiko.DSSKey.generate(ns.bits)
filename = ns.filename or 'id_dsa'
if k:
for SSH_DIR in SSH_DIRS:
filepath = os.path.join(SSH_DIR, filename)
k.write_private_key_file(filepath, password=ns.password)
with open(filepath + '.pub', 'w') as outs:
outs.write('ssh-' + key_mode[ns.type] + ' ' + k.get_base64())
print('ssh keys generated with %s encryption' % ns.type)
else:
print('Keys not generated')
except Exception as e:
print(e)
if __name__ == '__main__':
main(sys.argv[1:])
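# Usage sketch (hypothetical invocation; assumes paramiko is installed and
# STASH_ROOT is set in the environment):
#
#   python ssh_keygen.py -t rsa -b 2048 -N mypassword -f my_key
#
# The generated private key can be loaded back with paramiko, e.g.:
#
#   key = paramiko.RSAKey.from_private_key_file(
#       os.path.expanduser('~/.ssh/my_key'), password='mypassword')
#   print(key.get_base64())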
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import disk
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import aerospike_client
from perfkitbenchmarker.linux_packages import aerospike_server
from six.moves import map
from six.moves import range
FLAGS = flags.FLAGS
flags.DEFINE_integer('aerospike_min_client_threads', 8,
'The minimum number of Aerospike client threads.',
lower_bound=1)
flags.DEFINE_integer('aerospike_max_client_threads', 128,
'The maximum number of Aerospike client threads.',
lower_bound=1)
flags.DEFINE_integer('aerospike_client_threads_step_size', 8,
'The number to increase the Aerospike client threads by '
'for each iteration of the test.',
lower_bound=1)
flags.DEFINE_integer('aerospike_read_percent', 90,
'The percent of operations which are reads.',
lower_bound=0, upper_bound=100)
flags.DEFINE_integer('aerospike_num_keys', 1000000,
'The number of keys to load Aerospike with. The index '
'must fit in memory regardless of where the actual '
'data is being stored and each entry in the '
'index requires 64 bytes.')
BENCHMARK_NAME = 'aerospike'
BENCHMARK_CONFIG = """
aerospike:
description: Runs Aerospike.
vm_groups:
workers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: null
disk_count: 0
client:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS.aerospike_storage_type == aerospike_server.DISK:
if FLAGS.data_disk_type == disk.LOCAL:
      # Max number of local disks is not known yet; decide later.
config['vm_groups']['workers']['disk_count'] = (
config['vm_groups']['workers']['disk_count'] or None)
else:
config['vm_groups']['workers']['disk_count'] = (
config['vm_groups']['workers']['disk_count'] or 1)
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
aerospike_client.CheckPrerequisites()
def Prepare(benchmark_spec):
"""Install Aerospike server on one VM and Aerospike C client on the other.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
client = benchmark_spec.vm_groups['client'][0]
workers = benchmark_spec.vm_groups['workers']
def _Prepare(vm):
if vm == client:
vm.Install('aerospike_client')
else:
aerospike_server.ConfigureAndStart(vm, [workers[0].internal_ip])
vm_util.RunThreaded(_Prepare, benchmark_spec.vms)
def Run(benchmark_spec):
"""Runs a read/update load test on Aerospike.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
client = benchmark_spec.vm_groups['client'][0]
servers = benchmark_spec.vm_groups['workers']
samples = []
def ParseOutput(output):
"""Parses Aerospike output.
Args:
output: The stdout from running the benchmark.
Returns:
A tuple of average TPS and average latency.
"""
read_latency = re.findall(
r'read.*Overall Average Latency \(ms\) ([0-9]+\.[0-9]+)\n', output)[-1]
write_latency = re.findall(
r'write.*Overall Average Latency \(ms\) ([0-9]+\.[0-9]+)\n', output)[-1]
average_latency = (
(FLAGS.aerospike_read_percent / 100.0) * float(read_latency) +
((100 - FLAGS.aerospike_read_percent) / 100.0) * float(write_latency))
tps = list(map(int, re.findall(r'total\(tps=([0-9]+) ', output)))
return float(sum(tps)) / len(tps), average_latency
load_command = ('./%s/benchmarks/target/benchmarks -z 32 -n test -w I '
'-o B:1000 -k %s -h %s' %
(aerospike_client.CLIENT_DIR, FLAGS.aerospike_num_keys,
','.join(s.internal_ip for s in servers)))
client.RemoteCommand(load_command, should_log=True)
max_throughput_for_completion_latency_under_1ms = 0.0
for threads in range(FLAGS.aerospike_min_client_threads,
FLAGS.aerospike_max_client_threads + 1,
FLAGS.aerospike_client_threads_step_size):
load_command = ('timeout 60 ./%s/benchmarks/target/benchmarks '
'-z %s -n test -w RU,%s -o B:1000 -k %s '
'--latency 5,1 -h %s;:' %
(aerospike_client.CLIENT_DIR, threads,
FLAGS.aerospike_read_percent, FLAGS.aerospike_num_keys,
','.join(s.internal_ip for s in servers)))
stdout, _ = client.RemoteCommand(load_command, should_log=True)
tps, latency = ParseOutput(stdout)
metadata = {
'Average Transactions Per Second': tps,
'Client Threads': threads,
'Storage Type': FLAGS.aerospike_storage_type,
'Read Percent': FLAGS.aerospike_read_percent,
}
samples.append(sample.Sample('Average Latency', latency, 'ms', metadata))
if latency < 1.0:
max_throughput_for_completion_latency_under_1ms = max(
max_throughput_for_completion_latency_under_1ms,
tps)
samples.append(sample.Sample(
'max_throughput_for_completion_latency_under_1ms',
max_throughput_for_completion_latency_under_1ms,
'req/s'))
return samples
def Cleanup(benchmark_spec):
"""Cleanup Aerospike.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
servers = benchmark_spec.vm_groups['workers']
client = benchmark_spec.vm_groups['client'][0]
client.RemoteCommand('sudo rm -rf aerospike*')
def StopServer(server):
server.RemoteCommand('cd %s && nohup sudo make stop' %
aerospike_server.AEROSPIKE_DIR)
server.RemoteCommand('sudo rm -rf aerospike*')
vm_util.RunThreaded(StopServer, servers)
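

def _ParseOutputDemo():
  """Illustrative sketch (not part of the benchmark): exercises the same
  regular expressions that ParseOutput() in Run() uses, on a made-up output
  fragment; all numbers below are fabricated for demonstration only.
  """
  fake_output = ('total(tps=1000 timeouts=0 errors=0)\n'
                 'read Overall Average Latency (ms) 0.85\n'
                 'write Overall Average Latency (ms) 1.20\n'
                 'total(tps=1200 timeouts=0 errors=0)\n')
  read_percent = 90  # stands in for FLAGS.aerospike_read_percent
  read_latency = float(re.findall(
      r'read.*Overall Average Latency \(ms\) ([0-9]+\.[0-9]+)\n',
      fake_output)[-1])
  write_latency = float(re.findall(
      r'write.*Overall Average Latency \(ms\) ([0-9]+\.[0-9]+)\n',
      fake_output)[-1])
  average_latency = ((read_percent / 100.0) * read_latency +
                     ((100 - read_percent) / 100.0) * write_latency)
  tps = list(map(int, re.findall(r'total\(tps=([0-9]+) ', fake_output)))
  # With the numbers above this returns (1100.0, 0.885).
  return float(sum(tps)) / len(tps), average_latency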
|
from .common import *
class TrashMixin(object):
def list_trash(self) -> list:
"""Retrieves top-level trash list"""
return self.BOReq.paginated_get(self.metadata_url + 'trash')
def move_to_trash(self, node_id: str) -> dict:
r = self.BOReq.put(self.metadata_url + 'trash/' + node_id)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
def restore(self, node_id: str) -> dict:
r = self.BOReq.post(self.metadata_url + 'trash/' + node_id + '/restore')
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
# {"message":"Insufficient permissions granted for operation: purgeNode"}
def purge(self, node_id: str) -> dict:
r = self.BOReq.delete(self.metadata_url + 'nodes/' + node_id)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
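# Usage sketch (hypothetical; assumes a client class that provides BOReq,
# metadata_url and the other names imported from .common):
#
#   class Client(TrashMixin):
#       ...
#
#   client.move_to_trash('node-id')  # returns the updated node dict
#   client.restore('node-id')        # bring it back
#   client.list_trash()              # paginated list of trashed nodes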
|
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import toggle_entity
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
TRIGGER_SCHEMA = toggle_entity.TRIGGER_SCHEMA.extend(
{vol.Required(CONF_DOMAIN): DOMAIN}
)
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
return await toggle_entity.async_attach_trigger(
hass, config, action, automation_info
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers."""
return await toggle_entity.async_get_triggers(hass, device_id, DOMAIN)
async def async_get_trigger_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List trigger capabilities."""
return await toggle_entity.async_get_trigger_capabilities(hass, config)
|
import os
from ._config import ROOT_DIR
from invoke import task
@task
def copyright(ctx):
""" list usage of copyright notices
The use of copyright notices should be limited to files that are likely
to be used in other projects, or to make appropriate attributions for code
    taken from other projects. Other than that, git keeps track of who
    wrote what.
"""
# Processing the whole root directory
for dirpath, dirnames, filenames in os.walk(ROOT_DIR):
# Check if we should skip this directory
reldirpath = os.path.relpath(dirpath, ROOT_DIR)
if reldirpath[0] in '._' or reldirpath.endswith('__pycache__'):
continue
if os.path.split(reldirpath)[0] in ('build', 'dist'):
continue
# Process files
for fname in filenames:
if not fname.endswith('.py'):
continue
# Open and check
filename = os.path.join(dirpath, fname)
            with open(filename, 'rt', encoding='utf-8') as f:
                text = f.read()
if 'copyright' in text[:200].lower():
print(
'Copyright in %s%s%s' % (reldirpath, os.path.sep, fname))
for i, line in enumerate(text[:200].splitlines()):
if 'copyright' in line.lower():
print(' line %i: %s' % (i+1, line))
|
from unittest.mock import Mock
import pandas as pd
import pytest
import pytz
from qstrader.alpha_model.fixed_signals import FixedSignalsAlphaModel
@pytest.mark.parametrize(
'signals',
[
({'EQ:SPY': 0.75, 'EQ:AGG': 0.75, 'EQ:GLD': 0.75}),
({'EQ:SPY': -0.25, 'EQ:AGG': -0.25, 'EQ:GLD': -0.25})
]
)
def test_fixed_signals_alpha_model(signals):
"""
Checks that the fixed signals alpha model correctly produces
the same signals for each asset in the universe.
"""
universe = Mock()
universe.get_assets.return_value = ['EQ:SPY', 'EQ:AGG', 'EQ:GLD']
alpha = FixedSignalsAlphaModel(universe=universe, signal_weights=signals)
dt = pd.Timestamp('2019-01-01 15:00:00', tz=pytz.utc)
assert alpha(dt) == signals
|
import numpy as np
import unittest
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.experimental.links.model.fcis import FCISTrainChain
from chainercv.utils import mask_to_bbox
from tests.experimental_tests.links_tests.model_tests.fcis_tests.test_fcis \
import _random_array
from tests.experimental_tests.links_tests.model_tests.fcis_tests.test_fcis \
import DummyFCIS
class TestFCISTrainChain(unittest.TestCase):
def setUp(self):
self.n_anchor_base = 6
self.feat_stride = 4
self.n_fg_class = 3
self.n_roi = 24
self.n_bbox = 3
self.model = FCISTrainChain(
DummyFCIS(
n_anchor_base=self.n_anchor_base,
feat_stride=self.feat_stride,
n_fg_class=self.n_fg_class,
n_roi=self.n_roi,
roi_size=21,
min_size=600,
max_size=1000))
        self.masks = np.random.randint(
            0, 2, size=(1, self.n_bbox, 600, 800)).astype(bool)
self.labels = np.random.randint(
0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
self.imgs = _random_array(np, (1, 3, 600, 800))
self.scale = np.array(1.)
def check_call(self, model, imgs, masks, labels, scale):
bboxes = mask_to_bbox(masks[0])[None]
loss = model(imgs, masks, labels, bboxes, scale)
self.assertEqual(loss.shape, ())
def test_call_cpu(self):
self.check_call(
self.model, self.imgs, self.masks, self.labels, self.scale)
@attr.gpu
def test_call_gpu(self):
self.model.to_gpu()
self.check_call(
self.model, cuda.to_gpu(self.imgs),
self.masks, self.labels, self.scale)
testing.run_module(__name__, __file__)
|
class Signal(object):
"""
This Class is representing a Signal which is corresponding to an input action that should start executing neuron
list when triggered
"""
def __init__(self, name=None, parameters=None):
self.name = name
self.parameters = parameters
def serialize(self):
"""
This method allows to serialize in a proper way this object
:return: A dict of name and parameters
:rtype: Dict
"""
return {
'name': self.name,
'parameters': self.parameters
}
def __str__(self):
"""
Return a string that describe the signal. If a parameter contains the word "password",
the output of this parameter will be masked in order to not appears in clean in the console
:return: string description of the neuron
"""
returned_dict = {
'name': self.name,
'parameters': self.parameters
}
cleaned_parameters = dict()
if isinstance(self.parameters, dict):
for key, value in self.parameters.items():
if "password" in key:
cleaned_parameters[key] = "*****"
else:
cleaned_parameters[key] = value
returned_dict["parameters"] = cleaned_parameters
return str(returned_dict)
def __eq__(self, other):
"""
This is used to compare 2 objects
:param other:
:return:
"""
return self.__dict__ == other.__dict__
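

if __name__ == "__main__":
    # Minimal usage sketch (not part of the class): shows serialization and the
    # password masking applied by __str__. Parameter names are examples only.
    signal = Signal(name="order", parameters={"api_password": "secret", "lang": "en"})
    print(signal.serialize())  # parameters are kept as-is when serializing
    print(signal)  # the api_password value is printed as "*****"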
|
import os
import os.path
import textwrap
import attr
import pytest
import bs4
from qutebrowser.utils import utils
def collect_tests():
basedir = os.path.dirname(__file__)
datadir = os.path.join(basedir, 'data', 'hints', 'html')
files = [f for f in os.listdir(datadir) if f != 'README.md']
return files
@attr.s
class ParsedFile:
target = attr.ib()
qtwebengine_todo = attr.ib()
class InvalidFile(Exception):
def __init__(self, test_name, msg):
super().__init__("Invalid comment found in {}, please read "
"tests/end2end/data/hints/html/README.md - {}".format(
test_name, msg))
def _parse_file(test_name):
"""Parse the given HTML file."""
file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'hints', 'html', test_name)
with open(file_path, 'r', encoding='utf-8') as html:
soup = bs4.BeautifulSoup(html, 'html.parser')
    comment = soup.find(text=lambda text: isinstance(text, bs4.Comment))
    if comment is None:
        raise InvalidFile(test_name, "no comment found")
    data = utils.yaml_load(str(comment))
if not isinstance(data, dict):
raise InvalidFile(test_name, "expected yaml dict but got {}".format(
type(data).__name__))
allowed_keys = {'target', 'qtwebengine_todo'}
if not set(data.keys()).issubset(allowed_keys):
raise InvalidFile(test_name, "expected keys {} but found {}".format(
', '.join(allowed_keys),
', '.join(set(data.keys()))))
if 'target' not in data:
raise InvalidFile(test_name, "'target' key not found")
qtwebengine_todo = data.get('qtwebengine_todo', None)
return ParsedFile(target=data['target'], qtwebengine_todo=qtwebengine_todo)
@pytest.mark.parametrize('test_name', collect_tests())
@pytest.mark.parametrize('zoom_text_only', [True, False])
@pytest.mark.parametrize('zoom_level', [100, 66, 33])
@pytest.mark.parametrize('find_implementation', ['javascript', 'python'])
def test_hints(test_name, zoom_text_only, zoom_level, find_implementation,
quteproc, request):
if zoom_text_only and request.config.webengine:
pytest.skip("QtWebEngine doesn't have zoom.text_only")
if find_implementation == 'python' and request.config.webengine:
pytest.skip("QtWebEngine doesn't have a python find implementation")
parsed = _parse_file(test_name)
if parsed.qtwebengine_todo is not None and request.config.webengine:
pytest.xfail("QtWebEngine TODO: {}".format(parsed.qtwebengine_todo))
url_path = 'data/hints/html/{}'.format(test_name)
quteproc.open_path(url_path)
# setup
if not request.config.webengine:
quteproc.set_setting('zoom.text_only', str(zoom_text_only))
quteproc.set_setting('hints.find_implementation', find_implementation)
quteproc.send_cmd(':zoom {}'.format(zoom_level))
# follow hint
quteproc.send_cmd(':hint all normal')
quteproc.wait_for(message='hints: a', category='hints')
quteproc.send_cmd(':follow-hint a')
quteproc.wait_for_load_finished('data/' + parsed.target)
# reset
quteproc.send_cmd(':zoom 100')
if not request.config.webengine:
quteproc.set_setting('zoom.text_only', 'false')
quteproc.set_setting('hints.find_implementation', 'javascript')
@pytest.mark.skip # Too flaky
def test_word_hints_issue1393(quteproc, tmpdir):
dict_file = tmpdir / 'dict'
dict_file.write(textwrap.dedent("""
alph
beta
gamm
delt
epsi
"""))
targets = [
('words', 'words.txt'),
('smart', 'smart.txt'),
('hinting', 'hinting.txt'),
('alph', 'l33t.txt'),
('beta', 'l33t.txt'),
('gamm', 'l33t.txt'),
('delt', 'l33t.txt'),
('epsi', 'l33t.txt'),
]
quteproc.set_setting('hints.mode', 'word')
quteproc.set_setting('hints.dictionary', str(dict_file))
for hint, target in targets:
quteproc.open_path('data/hints/issue1393.html')
quteproc.send_cmd(':hint')
quteproc.wait_for(message='hints: *', category='hints')
quteproc.send_cmd(':follow-hint {}'.format(hint))
quteproc.wait_for_load_finished('data/{}'.format(target))
|
from datetime import timedelta
from geizhals import Device, Geizhals
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
CONF_DESCRIPTION = "description"
CONF_PRODUCT_ID = "product_id"
CONF_LOCALE = "locale"
ICON = "mdi:currency-usd-circle"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_PRODUCT_ID): cv.positive_int,
vol.Optional(CONF_DESCRIPTION, default="Price"): cv.string,
vol.Optional(CONF_LOCALE, default="DE"): vol.In(["AT", "EU", "DE", "UK", "PL"]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Geizwatch sensor."""
name = config.get(CONF_NAME)
description = config.get(CONF_DESCRIPTION)
product_id = config.get(CONF_PRODUCT_ID)
domain = config.get(CONF_LOCALE)
add_entities([Geizwatch(name, description, product_id, domain)], True)
class Geizwatch(Entity):
"""Implementation of Geizwatch."""
def __init__(self, name, description, product_id, domain):
"""Initialize the sensor."""
# internal
self._name = name
self._geizhals = Geizhals(product_id, domain)
self._device = Device()
# external
self.description = description
self.product_id = product_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def state(self):
"""Return the best price of the selected product."""
if not self._device.prices:
return None
return self._device.prices[0]
@property
def device_state_attributes(self):
"""Return the state attributes."""
while len(self._device.prices) < 4:
self._device.prices.append("None")
attrs = {
"device_name": self._device.name,
"description": self.description,
"unit_of_measurement": self._device.price_currency,
"product_id": self.product_id,
"price1": self._device.prices[0],
"price2": self._device.prices[1],
"price3": self._device.prices[2],
"price4": self._device.prices[3],
}
return attrs
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest price from geizhals and updates the state."""
self._device = self._geizhals.parse()
|
import logging
import redis
from docker_registry.core import lru
from . import config
logger = logging.getLogger(__name__)
redis_conn = None
cache_prefix = None
cfg = config.load()
def init():
enable_redis_cache(cfg.cache, cfg.storage_path)
enable_redis_lru(cfg.cache_lru, cfg.storage_path)
def enable_redis_cache(cache, path):
global redis_conn, cache_prefix
if not cache or not cache.host:
        logger.warning('Cache storage disabled!')
return
logger.info('Enabling storage cache on Redis')
logger.info(
'Redis host: {0}:{1} (db{2})'.format(cache.host, cache.port, cache.db)
)
redis_conn = redis.StrictRedis(
host=cache.host,
port=int(cache.port),
db=int(cache.db),
password=cache.password
)
cache_prefix = 'cache_path:{0}'.format(path or '/')
def enable_redis_lru(cache, path):
if not cache or not cache.host:
        logger.warning('LRU cache disabled!')
return
logger.info('Enabling lru cache on Redis')
logger.info(
'Redis lru host: {0}:{1} (db{2})'.format(cache.host, cache.port,
cache.db)
)
lru.init(
host=cache.host,
port=cache.port,
db=cache.db,
password=cache.password,
path=path or '/'
)
init()
|
async def test_sending_location(hass, create_registrations, webhook_client):
"""Test sending a location via a webhook."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={
"type": "update_location",
"data": {
"gps": [10, 20],
"gps_accuracy": 30,
"battery": 40,
"altitude": 50,
"course": 60,
"speed": 70,
"vertical_accuracy": 80,
"location_name": "bar",
},
},
)
assert resp.status == 200
await hass.async_block_till_done()
state = hass.states.get("device_tracker.test_1_2")
assert state is not None
assert state.name == "Test 1"
assert state.state == "bar"
assert state.attributes["source_type"] == "gps"
assert state.attributes["latitude"] == 10
assert state.attributes["longitude"] == 20
assert state.attributes["gps_accuracy"] == 30
assert state.attributes["battery_level"] == 40
assert state.attributes["altitude"] == 50
assert state.attributes["course"] == 60
assert state.attributes["speed"] == 70
assert state.attributes["vertical_accuracy"] == 80
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={
"type": "update_location",
"data": {
"gps": [1, 2],
"gps_accuracy": 3,
"battery": 4,
"altitude": 5,
"course": 6,
"speed": 7,
"vertical_accuracy": 8,
},
},
)
assert resp.status == 200
await hass.async_block_till_done()
state = hass.states.get("device_tracker.test_1_2")
assert state is not None
assert state.state == "not_home"
assert state.attributes["source_type"] == "gps"
assert state.attributes["latitude"] == 1
assert state.attributes["longitude"] == 2
assert state.attributes["gps_accuracy"] == 3
assert state.attributes["battery_level"] == 4
assert state.attributes["altitude"] == 5
assert state.attributes["course"] == 6
assert state.attributes["speed"] == 7
assert state.attributes["vertical_accuracy"] == 8
async def test_restoring_location(hass, create_registrations, webhook_client):
"""Test sending a location via a webhook."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={
"type": "update_location",
"data": {
"gps": [10, 20],
"gps_accuracy": 30,
"battery": 40,
"altitude": 50,
"course": 60,
"speed": 70,
"vertical_accuracy": 80,
"location_name": "bar",
},
},
)
assert resp.status == 200
await hass.async_block_till_done()
state_1 = hass.states.get("device_tracker.test_1_2")
assert state_1 is not None
config_entry = hass.config_entries.async_entries("mobile_app")[1]
# mobile app doesn't support unloading, so we just reload device tracker
await hass.config_entries.async_forward_entry_unload(config_entry, "device_tracker")
await hass.config_entries.async_forward_entry_setup(config_entry, "device_tracker")
await hass.async_block_till_done()
state_2 = hass.states.get("device_tracker.test_1_2")
assert state_2 is not None
assert state_1 is not state_2
assert state_2.name == "Test 1"
assert state_2.attributes["source_type"] == "gps"
assert state_2.attributes["latitude"] == 10
assert state_2.attributes["longitude"] == 20
assert state_2.attributes["gps_accuracy"] == 30
assert state_2.attributes["battery_level"] == 40
assert state_2.attributes["altitude"] == 50
assert state_2.attributes["course"] == 60
assert state_2.attributes["speed"] == 70
assert state_2.attributes["vertical_accuracy"] == 80
|
import copy
import pytest
from molecule import scenarios
@pytest.fixture
def _instance(config_instance):
config_instance_1 = copy.deepcopy(config_instance)
config_instance_1.config['scenario']['name'] = 'two'
config_instance_1.molecule_file = \
config_instance_1.molecule_file.replace('default', '02_foo')
config_instance_2 = copy.deepcopy(config_instance)
config_instance_2.config['scenario']['name'] = 'one'
config_instance_2.molecule_file = \
config_instance_2.molecule_file.replace('default', '01_foo')
config_instance_3 = copy.deepcopy(config_instance)
config_instance_3.config['scenario']['name'] = 'three'
config_instance_3.molecule_file = \
config_instance_3.molecule_file.replace('default', '03_foo')
return scenarios.Scenarios(
[config_instance_1, config_instance_2, config_instance_3])
def test_all_ordered(_instance):
result = _instance.all
assert 3 == len(result)
assert 'one' == result[0].name
assert 'two' == result[1].name
assert 'three' == result[2].name
|
from os import environ
import ssl
import certifi
def client_context() -> ssl.SSLContext:
"""Return an SSL context for making requests."""
# Reuse environment variable definition from requests, since it's already a requirement
# If the environment variable has no value, fall back to using certs from certifi package
cafile = environ.get("REQUESTS_CA_BUNDLE", certifi.where())
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=cafile)
return context
def server_context_modern() -> ssl.SSLContext:
"""Return an SSL context following the Mozilla recommendations.
TLS configuration follows the best-practice guidelines specified here:
https://wiki.mozilla.org/Security/Server_Side_TLS
Modern guidelines are followed.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS) # pylint: disable=no-member
context.options |= (
ssl.OP_NO_SSLv2
| ssl.OP_NO_SSLv3
| ssl.OP_NO_TLSv1
| ssl.OP_NO_TLSv1_1
| ssl.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(ssl, "OP_NO_COMPRESSION"):
context.options |= ssl.OP_NO_COMPRESSION
context.set_ciphers(
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:"
"ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:"
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:"
"ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:"
"ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256"
)
return context
def server_context_intermediate() -> ssl.SSLContext:
"""Return an SSL context following the Mozilla recommendations.
TLS configuration follows the best-practice guidelines specified here:
https://wiki.mozilla.org/Security/Server_Side_TLS
Intermediate guidelines are followed.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS) # pylint: disable=no-member
context.options |= (
ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(ssl, "OP_NO_COMPRESSION"):
context.options |= ssl.OP_NO_COMPRESSION
context.set_ciphers(
"ECDHE-ECDSA-CHACHA20-POLY1305:"
"ECDHE-RSA-CHACHA20-POLY1305:"
"ECDHE-ECDSA-AES128-GCM-SHA256:"
"ECDHE-RSA-AES128-GCM-SHA256:"
"ECDHE-ECDSA-AES256-GCM-SHA384:"
"ECDHE-RSA-AES256-GCM-SHA384:"
"DHE-RSA-AES128-GCM-SHA256:"
"DHE-RSA-AES256-GCM-SHA384:"
"ECDHE-ECDSA-AES128-SHA256:"
"ECDHE-RSA-AES128-SHA256:"
"ECDHE-ECDSA-AES128-SHA:"
"ECDHE-RSA-AES256-SHA384:"
"ECDHE-RSA-AES128-SHA:"
"ECDHE-ECDSA-AES256-SHA384:"
"ECDHE-ECDSA-AES256-SHA:"
"ECDHE-RSA-AES256-SHA:"
"DHE-RSA-AES128-SHA256:"
"DHE-RSA-AES128-SHA:"
"DHE-RSA-AES256-SHA256:"
"DHE-RSA-AES256-SHA:"
"ECDHE-ECDSA-DES-CBC3-SHA:"
"ECDHE-RSA-DES-CBC3-SHA:"
"EDH-RSA-DES-CBC3-SHA:"
"AES128-GCM-SHA256:"
"AES256-GCM-SHA384:"
"AES128-SHA256:"
"AES256-SHA256:"
"AES128-SHA:"
"AES256-SHA:"
"DES-CBC3-SHA:"
"!DSS"
)
return context
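

if __name__ == "__main__":
    # Usage sketch: wrap a plain TCP socket with the client context and issue
    # a minimal HTTP request. The host name is an example only.
    import socket

    ctx = client_context()
    with socket.create_connection(("example.org", 443)) as sock:
        with ctx.wrap_socket(sock, server_hostname="example.org") as tls:
            tls.sendall(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
            print(tls.recv(64))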
|
import pytest
from homeassistant.components.geonetnz_quakes import (
CONF_MINIMUM_MAGNITUDE,
CONF_MMI,
DOMAIN,
)
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_UNIT_SYSTEM,
)
from tests.common import MockConfigEntry
@pytest.fixture
def config_entry():
"""Create a mock GeoNet NZ Quakes config entry."""
return MockConfigEntry(
domain=DOMAIN,
data={
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: 300.0,
CONF_MMI: 4,
CONF_MINIMUM_MAGNITUDE: 0.0,
},
title="-41.2, 174.7",
unique_id="-41.2, 174.7",
)
|
import sys, os, tempfile, shutil
from os.path import join, dirname, abspath
import datetime, time
from six.moves import range
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.shellutils import (globfind, find, ProgressBar,
RawInput)
from logilab.common.compat import StringIO
DATA_DIR = join(dirname(abspath(__file__)), 'data', 'find_test')
class FindTC(TestCase):
def test_include(self):
files = set(find(DATA_DIR, '.py'))
self.assertSetEqual(files,
set([join(DATA_DIR, f) for f in ['__init__.py', 'module.py',
'module2.py', 'noendingnewline.py',
'nonregr.py', join('sub', 'momo.py')]]))
files = set(find(DATA_DIR, ('.py',), blacklist=('sub',)))
self.assertSetEqual(files,
set([join(DATA_DIR, f) for f in ['__init__.py', 'module.py',
'module2.py', 'noendingnewline.py',
'nonregr.py']]))
def test_exclude(self):
files = set(find(DATA_DIR, ('.py', '.pyc'), exclude=True))
self.assertSetEqual(files,
set([join(DATA_DIR, f) for f in ['foo.txt',
'newlines.txt',
'normal_file.txt',
'test.ini',
'test1.msg',
'test2.msg',
'spam.txt',
join('sub', 'doc.txt'),
'write_protected_file.txt',
]]))
def test_globfind(self):
files = set(globfind(DATA_DIR, '*.py'))
self.assertSetEqual(files,
set([join(DATA_DIR, f) for f in ['__init__.py', 'module.py',
'module2.py', 'noendingnewline.py',
'nonregr.py', join('sub', 'momo.py')]]))
files = set(globfind(DATA_DIR, 'mo*.py'))
self.assertSetEqual(files,
set([join(DATA_DIR, f) for f in ['module.py', 'module2.py',
join('sub', 'momo.py')]]))
files = set(globfind(DATA_DIR, 'mo*.py', blacklist=('sub',)))
self.assertSetEqual(files,
set([join(DATA_DIR, f) for f in ['module.py', 'module2.py']]))
class ProgressBarTC(TestCase):
def test_refresh(self):
pgb_stream = StringIO()
expected_stream = StringIO()
pgb = ProgressBar(20, stream=pgb_stream)
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue()) # nothing print before refresh
pgb.refresh()
expected_stream.write("\r["+' '*20+"]")
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
def test_refresh_g_size(self):
pgb_stream = StringIO()
expected_stream = StringIO()
pgb = ProgressBar(20, 35, stream=pgb_stream)
pgb.refresh()
expected_stream.write("\r["+' '*35+"]")
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
def test_refresh_l_size(self):
pgb_stream = StringIO()
expected_stream = StringIO()
pgb = ProgressBar(20, 3, stream=pgb_stream)
pgb.refresh()
expected_stream.write("\r["+' '*3+"]")
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
def _update_test(self, nbops, expected, size = None):
pgb_stream = StringIO()
expected_stream = StringIO()
if size is None:
pgb = ProgressBar(nbops, stream=pgb_stream)
size=20
else:
pgb = ProgressBar(nbops, size, stream=pgb_stream)
last = 0
for round in expected:
if not hasattr(round, '__int__'):
dots, update = round
else:
dots, update = round, None
pgb.update()
if update or (update is None and dots != last):
last = dots
expected_stream.write("\r["+('='*dots)+(' '*(size-dots))+"]")
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
def test_default(self):
self._update_test(20, range(1, 21))
def test_nbops_gt_size(self):
"""Test the progress bar for nbops > size"""
def half(total):
for counter in range(1, total+1):
yield counter // 2
self._update_test(40, half(40))
def test_nbops_lt_size(self):
"""Test the progress bar for nbops < size"""
def double(total):
for counter in range(1, total+1):
yield counter * 2
self._update_test(10, double(10))
def test_nbops_nomul_size(self):
"""Test the progress bar for size % nbops !=0 (non int number of dots per update)"""
self._update_test(3, (6, 13, 20))
def test_overflow(self):
self._update_test(5, (8, 16, 25, 33, 42, (42, True)), size=42)
def test_update_exact(self):
pgb_stream = StringIO()
expected_stream = StringIO()
size=20
pgb = ProgressBar(100, size, stream=pgb_stream)
last = 0
for dots in range(10, 105, 15):
pgb.update(dots, exact=True)
dots //= 5
expected_stream.write("\r["+('='*dots)+(' '*(size-dots))+"]")
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
def test_update_relative(self):
pgb_stream = StringIO()
expected_stream = StringIO()
size=20
pgb = ProgressBar(100, size, stream=pgb_stream)
last = 0
for dots in range(5, 105, 5):
pgb.update(5, exact=False)
dots //= 5
expected_stream.write("\r["+('='*dots)+(' '*(size-dots))+"]")
self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
class RawInputTC(TestCase):
def auto_input(self, *args):
self.input_args = args
return self.input_answer
def setUp(self):
null_printer = lambda x: None
self.qa = RawInput(self.auto_input, null_printer)
def test_ask_default(self):
self.input_answer = ''
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'yes')
self.input_answer = ' '
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'yes')
def test_ask_case(self):
self.input_answer = 'no'
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'no')
self.input_answer = 'No'
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'no')
self.input_answer = 'NO'
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'no')
self.input_answer = 'nO'
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'no')
self.input_answer = 'YES'
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(answer, 'yes')
def test_ask_prompt(self):
self.input_answer = ''
answer = self.qa.ask('text', ('yes', 'no'), 'yes')
self.assertEqual(self.input_args[0], 'text [Y(es)/n(o)]: ')
answer = self.qa.ask('text', ('y', 'n'), 'y')
self.assertEqual(self.input_args[0], 'text [Y/n]: ')
answer = self.qa.ask('text', ('n', 'y'), 'y')
self.assertEqual(self.input_args[0], 'text [n/Y]: ')
answer = self.qa.ask('text', ('yes', 'no', 'maybe', '1'), 'yes')
self.assertEqual(self.input_args[0], 'text [Y(es)/n(o)/m(aybe)/1]: ')
def test_ask_ambiguous(self):
self.input_answer = 'y'
self.assertRaises(Exception, self.qa.ask, 'text', ('yes', 'yep'), 'yes')
def test_confirm(self):
self.input_answer = 'y'
self.assertEqual(self.qa.confirm('Say yes'), True)
self.assertEqual(self.qa.confirm('Say yes', default_is_yes=False), True)
self.input_answer = 'n'
self.assertEqual(self.qa.confirm('Say yes'), False)
self.assertEqual(self.qa.confirm('Say yes', default_is_yes=False), False)
self.input_answer = ''
self.assertEqual(self.qa.confirm('Say default'), True)
self.assertEqual(self.qa.confirm('Say default', default_is_yes=False), False)
if __name__ == '__main__':
unittest_main()
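# ProgressBar usage sketch (hypothetical values), matching the semantics the
# tests above exercise:
#
#   pgb = ProgressBar(100, 20, stream=sys.stdout)
#   for _ in range(100):
#       pgb.update()            # advance by one op; redraws when a dot is added
#   pgb.update(50, exact=True)  # or jump straight to 50 ops done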
|
from unittest import TestCase
from weblate.checks.chars import (
BeginNewlineCheck,
BeginSpaceCheck,
DoubleSpaceCheck,
EndColonCheck,
EndEllipsisCheck,
EndExclamationCheck,
EndNewlineCheck,
EndQuestionCheck,
EndSemicolonCheck,
EndSpaceCheck,
EndStopCheck,
EscapedNewlineCountingCheck,
KashidaCheck,
MaxLengthCheck,
NewLineCountCheck,
PunctuationSpacingCheck,
ZeroWidthSpaceCheck,
)
from weblate.checks.tests.test_checks import CheckTestCase, MockUnit
class BeginNewlineCheckTest(CheckTestCase):
check = BeginNewlineCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("\nstring", "\nstring", "")
self.test_failure_1 = ("\nstring", " \nstring", "")
self.test_failure_2 = ("string", "\nstring", "")
class EndNewlineCheckTest(CheckTestCase):
check = EndNewlineCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string\n", "string\n", "")
self.test_failure_1 = ("string\n", "string", "")
self.test_failure_2 = ("string", "string\n", "")
class BeginSpaceCheckTest(CheckTestCase):
check = BeginSpaceCheck()
def setUp(self):
super().setUp()
self.test_good_matching = (" string", " string", "")
self.test_good_ignore = (".", " ", "")
self.test_good_none = (" The ", " ", "")
self.test_failure_1 = (" string", " string", "")
self.test_failure_2 = (" string", " string", "")
class EndSpaceCheckTest(CheckTestCase):
check = EndSpaceCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string ", "string ", "")
self.test_good_ignore = (".", " ", "")
self.test_good_none = (" The ", " ", "")
self.test_failure_1 = ("string ", "string", "")
self.test_failure_2 = ("string", "string ", "")
class DoubleSpaceCheckTest(CheckTestCase):
check = DoubleSpaceCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string string", "string string", "")
self.test_good_ignore = (" ", " ", "")
self.test_failure_1 = ("string string", "string string", "")
class EndStopCheckTest(CheckTestCase):
check = EndStopCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string.", "string.", "")
self.test_good_ignore = (".", " ", "")
self.test_failure_1 = ("string.", "string", "")
self.test_failure_2 = ("string", "string.", "")
def test_japanese(self):
self.do_test(False, ("Text:", "Text。", ""), "ja")
self.do_test(True, ("Text:", "Text", ""), "ja")
def test_hindi(self):
self.do_test(False, ("Text.", "Text।", ""), "hi")
self.do_test(True, ("Text.", "Text", ""), "hi")
def test_armenian(self):
self.do_test(False, ("Text:", "Text`", ""), "hy")
self.do_test(False, ("Text:", "Text՝", ""), "hy")
self.do_test(True, ("Text.", "Text", ""), "hy")
def test_santali(self):
self.do_test(False, ("Text.", "Text.", ""), "sat")
self.do_test(False, ("Text.", "Text᱾", ""), "sat")
self.do_test(True, ("Text.", "Text", ""), "sat")
class EndColonCheckTest(CheckTestCase):
check = EndColonCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string:", "string:", "")
self.test_failure_1 = ("string:", "string", "")
self.test_failure_2 = ("string", "string:", "")
def test_hy(self):
self.do_test(False, ("Text:", "Texte՝", ""), "hy")
self.do_test(True, ("Text:", "Texte", ""), "hy")
self.do_test(False, ("Text", "Texte:", ""), "hy")
def test_japanese(self):
self.do_test(False, ("Text:", "Texte。", ""), "ja")
def test_japanese_ignore(self):
self.do_test(False, ("Text", "Texte", ""), "ja")
class EndQuestionCheckTest(CheckTestCase):
check = EndQuestionCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string?", "string?", "")
self.test_failure_1 = ("string?", "string", "")
self.test_failure_2 = ("string", "string?", "")
def test_hy(self):
self.do_test(False, ("Text?", "Texte՞", ""), "hy")
self.do_test(True, ("Text?", "Texte", ""), "hy")
self.do_test(False, ("Text", "Texte?", ""), "hy")
def test_greek(self):
self.do_test(False, ("Text?", "Texte;", ""), "el")
self.do_test(False, ("Text?", "Texte;", ""), "el")
def test_greek_ignore(self):
self.do_test(False, ("Text", "Texte", ""), "el")
def test_greek_wrong(self):
self.do_test(True, ("Text?", "Texte", ""), "el")
class EndExclamationCheckTest(CheckTestCase):
check = EndExclamationCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string!", "string!", "")
self.test_failure_1 = ("string!", "string", "")
self.test_failure_2 = ("string", "string!", "")
def test_hy(self):
self.do_test(False, ("Text!", "Texte՜", ""), "hy")
self.do_test(False, ("Text!", "Texte", ""), "hy")
self.do_test(False, ("Text", "Texte!", ""), "hy")
def test_eu(self):
self.do_test(False, ("Text!", "¡Texte!", ""), "eu")
class EndEllipsisCheckTest(CheckTestCase):
check = EndEllipsisCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string…", "string…", "")
self.test_failure_1 = ("string…", "string...", "")
self.test_failure_2 = ("string.", "string…", "")
self.test_failure_3 = ("string..", "string…", "")
def test_translate(self):
self.do_test(False, ("string...", "string…", ""))
class EscapedNewlineCountingCheckTest(CheckTestCase):
check = EscapedNewlineCountingCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string\\nstring", "string\\nstring", "")
self.test_failure_1 = ("string\\nstring", "string\\n\\nstring", "")
self.test_failure_2 = ("string\\n\\nstring", "string\\nstring", "")
class NewLineCountCheckTest(CheckTestCase):
check = NewLineCountCheck()
def setUp(self):
super().setUp()
self.test_single_good_matching = ("string\n\nstring", "string\n\nstring", "")
self.test_failure_1 = ("string\nstring", "string\n\n\nstring", "")
self.test_failure_2 = ("string\nstring\n\nstring", "string\nstring\nstring", "")
class ZeroWidthSpaceCheckTest(CheckTestCase):
check = ZeroWidthSpaceCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("str\u200bing", "str\u200bing", "")
self.test_good_none = ("str\u200bing", "string", "")
self.test_failure_1 = ("string", "str\u200bing", "")
class MaxLengthCheckTest(TestCase):
def setUp(self):
self.check = MaxLengthCheck()
self.test_good_matching = ("strings", "less than 21", "max-length:12")
self.test_good_matching_unicode = ("strings", "less than 21", "max-length:12")
def test_check(self):
self.assertFalse(
self.check.check_target(
[self.test_good_matching[0]],
[self.test_good_matching[1]],
MockUnit(flags=self.test_good_matching[2]),
)
)
def test_unicode_check(self):
self.assertFalse(
self.check.check_target(
[self.test_good_matching_unicode[0]],
[self.test_good_matching_unicode[1]],
MockUnit(flags=self.test_good_matching_unicode[2]),
)
)
def test_failure_check(self):
self.assertTrue(
self.check.check_target(
[self.test_good_matching[0]],
[self.test_good_matching[1]],
MockUnit(flags="max-length:10"),
)
)
def test_failure_unicode_check(self):
self.assertTrue(
self.check.check_target(
[self.test_good_matching_unicode[0]],
[self.test_good_matching_unicode[1]],
MockUnit(flags="max-length:10"),
)
)
def test_replace_check(self):
self.assertFalse(
self.check.check_target(
["hi %s"],
["ahoj %s"],
MockUnit(flags="max-length:10"),
)
)
self.assertTrue(
self.check.check_target(
["hi %s"],
["ahoj %s"],
MockUnit(flags='max-length:10, replacements:%s:"very long text"'),
)
)
class EndSemicolonCheckTest(CheckTestCase):
check = EndSemicolonCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string;", "string;", "")
self.test_failure_1 = ("string;", "string", "")
self.test_failure_2 = ("string:", "string;", "")
self.test_failure_3 = ("string", "string;", "")
def test_greek(self):
self.do_test(False, ("Text?", "Texte;", ""), "el")
def test_xml(self):
self.do_test(False, ("Text", "Texte&", ""))
class KashidaCheckTest(CheckTestCase):
check = KashidaCheck()
def setUp(self):
super().setUp()
self.test_good_matching = ("string", "string", "")
self.test_failure_1 = ("string", "string\u0640", "")
self.test_failure_2 = ("string", "string\uFE79", "")
self.test_failure_3 = ("string", "string\uFE7F", "")
class PunctuationSpacingCheckTest(CheckTestCase):
check = PunctuationSpacingCheck()
default_lang = "fr"
def setUp(self):
super().setUp()
self.test_good_matching = (
"string? string! string: string;",
"string ? string\u202F! string ; string\u00A0:",
"",
)
self.test_good_none = (
"string &end; http://example.com",
"string &end; & http://example.com",
"",
)
self.test_failure_1 = ("string", "string!", "")
self.test_failure_2 = ("string", "string\u00A0? string;", "")
self.test_failure_3 = ("string", "string\u00A0; string?", "")
def test_fr_ca(self):
self.do_test(True, ("string", "string!", ""), "fr")
self.do_test(False, ("string", "string!", ""), "fr_CA")
|
from __future__ import print_function
import os
import sys
import argparse
import zipfile
def main(args):
ap = argparse.ArgumentParser()
    ap.add_argument('zipfile', help='zip archive to create')
    ap.add_argument('list', nargs='+', help='files or directories to add')
ap.add_argument('-v', '--verbose', action='store_true', help='be more chatty')
ns = ap.parse_args(args)
relroot = os.path.abspath(os.path.dirname(ns.zipfile))
with zipfile.ZipFile(ns.zipfile, "w", zipfile.ZIP_DEFLATED) as outs:
for path in ns.list:
if os.path.isfile(path):
if ns.verbose:
print(path)
arcname = os.path.relpath(path, relroot)
outs.write(path, arcname=arcname)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
this_relroot = os.path.relpath(root, relroot)
# add directory (needed for empty dirs)
outs.write(root, arcname=this_relroot)
if ns.verbose:
print(this_relroot)
for f in files:
filename = os.path.join(root, f)
if os.path.isfile(filename): # regular files only
if ns.verbose:
print(filename)
arcname = os.path.join(this_relroot, f)
outs.write(filename, arcname=arcname)
if __name__ == '__main__':
main(sys.argv[1:])
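# Usage sketch (hypothetical paths):
#
#   python zip.py archive.zip src_dir notes.txt -v
#
# creates archive.zip with entries stored relative to the directory containing
# archive.zip; directories are walked recursively and empty directories are
# preserved as explicit entries.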
|
import copy
import json
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import dialogflow, intent_script
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
SESSION_ID = "a9b84cec-46b6-484e-8f31-f65dba03ae6d"
INTENT_ID = "c6a74079-a8f0-46cd-b372-5a934d23591c"
INTENT_NAME = "tests"
REQUEST_ID = "19ef7e78-fe15-4e94-99dd-0c0b1e8753c3"
REQUEST_TIMESTAMP = "2017-01-21T17:54:18.952Z"
CONTEXT_NAME = "78a5db95-b7d6-4d50-9c9b-2fc73a5e34c3_id_dialog_context"
@pytest.fixture
async def calls(hass, fixture):
"""Return a list of Dialogflow calls triggered."""
calls = []
@callback
def mock_service(call):
"""Mock action call."""
calls.append(call)
hass.services.async_register("test", "dialogflow", mock_service)
return calls
@pytest.fixture
async def fixture(hass, aiohttp_client):
"""Initialize a Home Assistant server for testing this module."""
await async_setup_component(hass, dialogflow.DOMAIN, {"dialogflow": {}})
await async_setup_component(
hass,
intent_script.DOMAIN,
{
"intent_script": {
"WhereAreWeIntent": {
"speech": {
"type": "plain",
"text": """
{%- if is_state("device_tracker.paulus", "home")
and is_state("device_tracker.anne_therese",
"home") -%}
You are both home, you silly
{%- else -%}
Anne Therese is at {{
states("device_tracker.anne_therese")
}} and Paulus is at {{
states("device_tracker.paulus")
}}
{% endif %}
""",
}
},
"GetZodiacHoroscopeIntent": {
"speech": {
"type": "plain",
"text": "You told us your sign is {{ ZodiacSign }}.",
}
},
"CallServiceIntent": {
"speech": {"type": "plain", "text": "Service called"},
"action": {
"service": "test.dialogflow",
"data_template": {"hello": "{{ ZodiacSign }}"},
"entity_id": "switch.test",
},
},
}
},
)
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"dialogflow", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
webhook_id = result["result"].data["webhook_id"]
return await aiohttp_client(hass.http.app), webhook_id
class _Data:
_v1 = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"action": "GetZodiacHoroscopeIntent",
"actionIncomplete": False,
"parameters": {"ZodiacSign": "virgo"},
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME,
},
"fulfillment": {"speech": "", "messages": [{"type": 0, "speech": ""}]},
"score": 1,
},
"status": {"code": 200, "errorType": "success"},
"sessionId": SESSION_ID,
"originalRequest": None,
}
_v2 = {
"responseId": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"queryResult": {
"queryText": "my zodiac sign is virgo",
"action": "GetZodiacHoroscopeIntent",
"allRequiredParamsPresent": True,
"parameters": {"ZodiacSign": "virgo"},
"intent": {
"name": INTENT_ID,
"webhookState": "true",
"displayName": INTENT_NAME,
},
"fulfillment": {"text": "", "messages": [{"type": 0, "speech": ""}]},
"intentDetectionConfidence": 1,
},
"status": {"code": 200, "errorType": "success"},
"session": SESSION_ID,
"originalDetectIntentRequest": None,
}
@property
def v1(self):
return copy.deepcopy(self._v1)
@property
def v2(self):
return copy.deepcopy(self._v2)
Data = _Data()
async def test_v1_data():
"""Test for version 1 api based on message."""
assert dialogflow.get_api_version(Data.v1) == 1
async def test_v2_data():
"""Test for version 2 api based on message."""
assert dialogflow.get_api_version(Data.v2) == 2
async def test_intent_action_incomplete_v1(fixture):
"""Test when action is not completed."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"]["actionIncomplete"] = True
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert await response.text() == ""
async def test_intent_action_incomplete_v2(fixture):
"""Test when action is not completed."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"]["allRequiredParamsPresent"] = False
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert await response.text() == ""
async def test_intent_slot_filling_v1(fixture):
"""Test when Dialogflow asks for slot-filling return none."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"].update(
resolvedQuery="my zodiac sign is",
speech="",
actionIncomplete=True,
parameters={"ZodiacSign": ""},
contexts=[
{
"name": CONTEXT_NAME,
"parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
"lifespan": 2,
},
{
"name": "tests_ha_dialog_context",
"parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
"lifespan": 2,
},
{
"name": "tests_ha_dialog_params_zodiacsign",
"parameters": {"ZodiacSign.original": "", "ZodiacSign": ""},
"lifespan": 1,
},
],
fulfillment={
"speech": "What is the ZodiacSign?",
"messages": [{"type": 0, "speech": "What is the ZodiacSign?"}],
},
score=0.77,
)
data["result"]["metadata"].update(webhookForSlotFillingUsed="true")
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert await response.text() == ""
async def test_intent_request_with_parameters_v1(fixture):
"""Test a request with parameters."""
mock_client, webhook_id = fixture
data = Data.v1
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You told us your sign is virgo."
async def test_intent_request_with_parameters_v2(fixture):
"""Test a request with parameters."""
mock_client, webhook_id = fixture
data = Data.v2
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You told us your sign is virgo."
async def test_intent_request_with_parameters_but_empty_v1(fixture):
"""Test a request with parameters but empty value."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"].update(parameters={"ZodiacSign": ""})
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You told us your sign is ."
async def test_intent_request_with_parameters_but_empty_v2(fixture):
"""Test a request with parameters but empty value."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"].update(parameters={"ZodiacSign": ""})
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You told us your sign is ."
async def test_intent_request_without_slots_v1(hass, fixture):
"""Test a request without slots."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"].update(
resolvedQuery="where are we",
action="WhereAreWeIntent",
parameters={},
contexts=[],
)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "Anne Therese is at unknown and Paulus is at unknown"
hass.states.async_set("device_tracker.paulus", "home")
hass.states.async_set("device_tracker.anne_therese", "home")
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You are both home, you silly"
async def test_intent_request_without_slots_v2(hass, fixture):
"""Test a request without slots."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"].update(
queryText="where are we",
action="WhereAreWeIntent",
parameters={},
outputContexts=[],
)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "Anne Therese is at unknown and Paulus is at unknown"
hass.states.async_set("device_tracker.paulus", "home")
hass.states.async_set("device_tracker.anne_therese", "home")
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You are both home, you silly"
async def test_intent_request_calling_service_v1(fixture, calls):
"""Test a request for calling a service.
    If this request were handled asynchronously, the test could finish before
    the action has executed; that race condition would be hard to test for.
"""
mock_client, webhook_id = fixture
data = Data.v1
data["result"]["action"] = "CallServiceIntent"
call_count = len(calls)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert len(calls) == call_count + 1
call = calls[-1]
assert call.domain == "test"
assert call.service == "dialogflow"
assert call.data.get("entity_id") == ["switch.test"]
assert call.data.get("hello") == "virgo"
async def test_intent_request_calling_service_v2(fixture, calls):
"""Test a request for calling a service.
    If this request were handled asynchronously, the test could finish before
    the action has executed; that race condition would be hard to test for.
"""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"]["action"] = "CallServiceIntent"
call_count = len(calls)
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
assert len(calls) == call_count + 1
call = calls[-1]
assert call.domain == "test"
assert call.service == "dialogflow"
assert call.data.get("entity_id") == ["switch.test"]
assert call.data.get("hello") == "virgo"
async def test_intent_with_no_action_v1(fixture):
"""Test an intent with no defined action."""
mock_client, webhook_id = fixture
data = Data.v1
del data["result"]["action"]
assert "action" not in data["result"]
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "You have not defined an action in your Dialogflow intent."
async def test_intent_with_no_action_v2(fixture):
"""Test an intent with no defined action."""
mock_client, webhook_id = fixture
data = Data.v2
del data["queryResult"]["action"]
assert "action" not in data["queryResult"]
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "You have not defined an action in your Dialogflow intent."
async def test_intent_with_unknown_action_v1(fixture):
"""Test an intent with an action not defined in the conf."""
mock_client, webhook_id = fixture
data = Data.v1
data["result"]["action"] = "unknown"
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("speech")
assert text == "This intent is not yet configured within Home Assistant."
async def test_intent_with_unknown_action_v2(fixture):
"""Test an intent with an action not defined in the conf."""
mock_client, webhook_id = fixture
data = Data.v2
data["queryResult"]["action"] = "unknown"
response = await mock_client.post(
f"/api/webhook/{webhook_id}", data=json.dumps(data)
)
assert response.status == 200
text = (await response.json()).get("fulfillmentText")
assert text == "This intent is not yet configured within Home Assistant."
|
from io import StringIO
from django.core.management import call_command
from django.test import SimpleTestCase
from weblate.trans.tests.test_commands import WeblateComponentCommandTestCase
from weblate.trans.tests.test_models import RepoTestCase
class ListSameCommandTest(RepoTestCase):
def setUp(self):
super().setUp()
self.component = self.create_component()
def test_list_same_checks(self):
output = StringIO()
call_command("list_same_checks", stdout=output)
self.assertEqual(1, len(output.getvalue().splitlines()))
class UpdateChecksTest(WeblateComponentCommandTestCase):
command_name = "updatechecks"
expected_string = "Processing"
class ListTestCase(SimpleTestCase):
def test_list_checks(self):
output = StringIO()
call_command("list_checks", stdout=output)
self.assertIn(".. _check-same:", output.getvalue())
|
import gc
import sys
import weakref
from flexx.util.testing import run_tests_if_main, skipif, skip, raises
from flexx.event.both_tester import run_in_both, this_is_js
from flexx.util.logging import capture_log
from flexx import event
loop = event.loop
logger = event.logger
## Greedy reactions
class MyObject1(event.Component):
foo = event.IntProp(settable=True)
bar = event.IntProp(settable=True)
@event.reaction('foo')
def report1(self, *events):
print('foo', self.foo)
@event.reaction('bar', mode='greedy')
def report2(self, *events):
print('bar', self.bar)
@run_in_both(MyObject1)
def test_reaction_greedy():
"""
normal greedy
bar 0
foo 0
-
foo 4
-
bar 4
-
foo 6
bar 6
foo 6
"""
m = MyObject1()
print(m.report1.get_mode(), m.report2.get_mode())
loop.iter()
print('-')
# Invoke the reaction by modifying foo
m.set_foo(3)
m.set_foo(4)
loop.iter()
print('-')
# Or bar
m.set_bar(3)
m.set_bar(4)
loop.iter()
print('-')
# But now interleave
m.set_foo(4)
m.set_bar(4)
m.set_foo(5)
m.set_bar(5)
m.set_foo(6)
m.set_bar(6)
loop.iter()
## Automatic reactions
class MyObject2(event.Component):
foo = event.IntProp(settable=True)
bar = event.IntProp(7, settable=True)
@event.reaction
def report(self, *events):
assert len(events) == 0 # of course, you'd leave them out in practice
print(self.foo, self.bar)
@run_in_both(MyObject2)
def test_reaction_auto1():
"""
init
auto
0 7
4 7
4 4
end
"""
print('init')
m = MyObject2()
print(m.report.get_mode())
loop.iter()
# Invoke the reaction by modifying foo
m.set_foo(3)
m.set_foo(4)
loop.iter()
# Or bar
m.set_bar(3)
m.set_bar(24)
m.set_bar(4)
m.set_bar(4)
loop.iter()
# Modifying foo, but value does not change: no reaction
m.set_foo(4)
loop.iter()
print('end')
class MyObject3(event.Component):
foo = event.IntProp(settable=True)
bar = event.IntProp(7, settable=True)
@event.reaction('!spam', mode='auto')
def report(self, *events):
assert len(events) > 0
print(self.foo, self.bar)
@run_in_both(MyObject3)
def test_reaction_auto2():
"""
init
auto
0 7
4 7
4 4
4 4
end
"""
print('init')
m = MyObject3()
print(m.report.get_mode())
loop.iter()
# Invoke the reaction by modifying foo
m.set_foo(3)
m.set_foo(4)
loop.iter()
# Or bar
m.set_bar(3)
m.set_bar(24)
m.set_bar(4)
m.set_bar(4)
loop.iter()
m.emit('spam')
loop.iter()
# Modifying foo, but value does not change: no reaction
m.set_foo(4)
loop.iter()
print('end')
## One liner
class MyObject4(event.Component):
bar = event.IntProp(7, settable=True)
@run_in_both(MyObject4)
def test_reaction_oneliner():
"""
7
2
xx
2
3
"""
m1 = MyObject4(bar=2)
m2 = MyObject4(bar=lambda: m1.bar)
loop.iter()
print(m2.bar)
loop.iter()
print(m2.bar)
print('xx')
m1.set_bar(3)
loop.iter()
print(m2.bar)
loop.iter()
print(m2.bar)
run_tests_if_main()
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
try:
import readline
except ImportError:
readline = None
import os
import os.path as osp
import sys
from pdb import Pdb
import inspect
from logilab.common.compat import StringIO
try:
from IPython import PyColorize
except ImportError:
def colorize(source, *args):
"""fallback colorize function"""
return source
def colorize_source(source, *args):
return source
else:
def colorize(source, start_lineno, curlineno):
"""colorize and annotate source with linenos
(as in pdb's list command)
"""
parser = PyColorize.Parser()
output = StringIO()
parser.format(source, output)
annotated = []
for index, line in enumerate(output.getvalue().splitlines()):
lineno = index + start_lineno
if lineno == curlineno:
annotated.append('%4s\t->\t%s' % (lineno, line))
else:
annotated.append('%4s\t\t%s' % (lineno, line))
return '\n'.join(annotated)
def colorize_source(source):
"""colorize given source"""
parser = PyColorize.Parser()
output = StringIO()
parser.format(source, output)
return output.getvalue()
def getsource(obj):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = inspect.getsourcelines(obj)
return ''.join(lines), lnum
################################################################
class Debugger(Pdb):
"""custom debugger
- sets up a history file
- uses ipython if available to colorize lines of code
- overrides list command to search for current block instead
of using 5 lines of context
"""
def __init__(self, tcbk=None):
Pdb.__init__(self)
self.reset()
if tcbk:
while tcbk.tb_next is not None:
tcbk = tcbk.tb_next
self._tcbk = tcbk
self._histfile = os.path.expanduser("~/.pdbhist")
def setup_history_file(self):
"""if readline is available, read pdb history file
"""
if readline is not None:
try:
                # the history file may not exist yet, in which case
                # read_history_file() raises IOError
readline.read_history_file(self._histfile)
except IOError:
pass
def start(self):
"""starts the interactive mode"""
self.interaction(self._tcbk.tb_frame, self._tcbk)
def setup(self, frame, tcbk):
"""setup hook: set up history file"""
self.setup_history_file()
Pdb.setup(self, frame, tcbk)
def set_quit(self):
"""quit hook: save commands in the history file"""
if readline is not None:
readline.write_history_file(self._histfile)
Pdb.set_quit(self)
def complete_p(self, text, line, begin_idx, end_idx):
"""provide variable names completion for the ``p`` command"""
namespace = dict(self.curframe.f_globals)
namespace.update(self.curframe.f_locals)
if '.' in text:
return self.attr_matches(text, namespace)
return [varname for varname in namespace if varname.startswith(text)]
def attr_matches(self, text, namespace):
"""implementation coming from rlcompleter.Completer.attr_matches
Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, namespace)
words = dir(object)
if hasattr(object, '__class__'):
words.append('__class__')
words = words + self.get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
return matches
def get_class_members(self, klass):
"""implementation coming from rlcompleter.get_class_members"""
ret = dir(klass)
if hasattr(klass, '__bases__'):
for base in klass.__bases__:
ret = ret + self.get_class_members(base)
return ret
## specific / overridden commands
def do_list(self, arg):
"""overrides default list command to display the surrounding block
instead of 5 lines of context
"""
self.lastcmd = 'list'
if not arg:
try:
source, start_lineno = getsource(self.curframe)
print(colorize(''.join(source), start_lineno,
self.curframe.f_lineno))
except KeyboardInterrupt:
pass
except IOError:
Pdb.do_list(self, arg)
else:
Pdb.do_list(self, arg)
do_l = do_list
def do_open(self, arg):
"""opens source file corresponding to the current stack level"""
filename = self.curframe.f_code.co_filename
lineno = self.curframe.f_lineno
cmd = 'emacsclient --no-wait +%s %s' % (lineno, filename)
os.system(cmd)
do_o = do_open
def pm():
"""use our custom debugger"""
dbg = Debugger(sys.last_traceback)
dbg.start()
def set_trace():
Debugger().set_trace(sys._getframe().f_back)
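# Usage sketch (illustrative; assumes this module is importable as
# logilab.common.debugger):
#
#     import sys
#     from logilab.common.debugger import pm, set_trace
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         sys.last_traceback = sys.exc_info()[2]  # pm() reads this attribute
#         pm()        # post-mortem debugging of the last traceback
#     set_trace()     # or break programmatically in the calling frame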
|
import argparse
import glob
import os
import struct
import sys
def clamp_to_min_max(value, minimum, maximum):
    if value > maximum:
        value = maximum
    elif value < minimum:
        value = minimum
    return value
def clamp_to_u8(value):
return clamp_to_min_max(value, 0, 255)
def parse_args():
parser = argparse.ArgumentParser(description="Set the wireless brightness")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
parser.add_argument('--brightness', required=True, type=int, help="Brightness (0-100)")
args = parser.parse_args()
return args
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
brightness = clamp_to_min_max(args.brightness, 0, 100)
brightness_scaled = clamp_to_u8(int(round((255 / 100) * brightness, 0)))
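    # Worked example: --brightness 50 scales to round(255 / 100 * 50, 0) = 128,
    # and --brightness 100 scales to the u8 maximum of 255.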
byte_string = bytes(str(brightness_scaled), 'utf-8') # Convert string to bytestring
wireless_brightness_filepath = os.path.join(mouse_dir, "set_wireless_brightness")
with open(wireless_brightness_filepath, 'wb') as wireless_brightness_file:
wireless_brightness_file.write(byte_string)
print("Done")
if __name__ == '__main__':
run()
|
import unittest
import pandas as pd
import numpy as np
from pgmpy.estimators import MmhcEstimator, K2Score
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
class TestMmhcEstimator(unittest.TestCase):
def setUp(self):
self.data1 = pd.DataFrame(
np.random.randint(0, 2, size=(15000, 3)), columns=list("XYZ")
)
self.data1["sum"] = self.data1.sum(axis=1)
self.est1 = MmhcEstimator(self.data1)
@unittest.skip("currently disabled due to non-determenism")
def test_estimate(self):
self.assertTrue(
set(self.est1.estimate().edges()).issubset(
set(
[
("X", "sum"),
("Y", "sum"),
("Z", "sum"),
("sum", "X"),
("sum", "Y"),
("sum", "Z"),
]
)
)
)
self.assertTrue(
set(self.est1.estimate(significance_level=0.001).edges()).issubset(
set(
[
("X", "sum"),
("Y", "sum"),
("Z", "sum"),
("sum", "X"),
("sum", "Y"),
("sum", "Z"),
]
)
)
)
def tearDown(self):
del self.data1
del self.est1
|
from django.conf.urls import url
from django.contrib import admin
from django.db.models import Sum
from django.forms import models, ValidationError
from django.http import HttpResponse
from django.template.loader import select_template
from django.urls import reverse
from django.utils import timezone
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from shop.conf import app_settings
from shop.admin.order import OrderItemInline
from shop.models.order import OrderItemModel
from shop.models.delivery import DeliveryModel
from shop.modifiers.pool import cart_modifiers_pool
from shop.serializers.delivery import DeliverySerializer
from shop.serializers.order import OrderDetailSerializer
class OrderItemForm(models.ModelForm):
"""
    This form handles an ordered item and adds a number field for specifying
    how many of the ordered items to deliver.
"""
class Meta:
model = OrderItemModel
exclude = ()
def __init__(self, *args, **kwargs):
if 'instance' in kwargs:
kwargs.setdefault('initial', {})
deliver_quantity = kwargs['instance'].quantity - self.get_delivered(kwargs['instance'])
kwargs['initial'].update(deliver_quantity=deliver_quantity)
else:
deliver_quantity = None
super().__init__(*args, **kwargs)
if deliver_quantity == 0:
self['deliver_quantity'].field.widget.attrs.update(readonly='readonly')
@classmethod
def get_delivered(cls, instance):
"""
Returns the quantity already delivered for this order item.
"""
aggr = instance.deliver_item.aggregate(delivered=Sum('quantity'))
return aggr['delivered'] or 0
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get('deliver_quantity') is not None:
if cleaned_data['deliver_quantity'] < 0:
raise ValidationError(_("Only a positive number of items can be delivered"), code='invalid')
if cleaned_data['deliver_quantity'] > self.instance.quantity - self.get_delivered(self.instance):
raise ValidationError(_("The number of items to deliver exceeds the ordered quantity"), code='invalid')
return cleaned_data
def has_changed(self):
"""Force form to changed"""
return True
class OrderItemInlineDelivery(OrderItemInline):
def get_fields(self, request, obj=None):
fields = list(super().get_fields(request, obj))
if obj:
if obj.status == 'pick_goods' and obj.unfulfilled_items > 0:
fields[1] += ('deliver_quantity', 'canceled',)
else:
fields[1] += ('get_delivered', 'show_ready',)
return fields
def get_readonly_fields(self, request, obj=None):
readonly_fields = list(super().get_readonly_fields(request, obj))
if obj:
if not (obj.status == 'pick_goods' and obj.unfulfilled_items > 0):
readonly_fields.extend(['get_delivered', 'show_ready'])
return readonly_fields
def get_formset(self, request, obj=None, **kwargs):
"""
Add field `quantity` to the form on the fly, using the same numeric type as `OrderItem.quantity`
"""
labels = {'quantity': _("Deliver quantity")}
attrs = models.fields_for_model(obj.items.model, fields=['quantity'], labels=labels)
# rename to deliver_quantity, since quantity is already used
attrs['deliver_quantity'] = attrs.pop('quantity')
if obj.status == 'pick_goods' and obj.unfulfilled_items > 0:
attrs['deliver_quantity'].widget.attrs.update(style='width: 50px;')
else:
attrs['deliver_quantity'].required = False
form = type(str('OrderItemForm'), (OrderItemForm,), attrs)
labels = {'canceled': _("Cancel this item")}
kwargs.update(form=form, labels=labels)
formset = super().get_formset(request, obj, **kwargs)
return formset
def get_delivered(self, obj=None):
return OrderItemForm.get_delivered(obj)
get_delivered.short_description = _("Delivered quantity")
def show_ready(self, obj=None):
return not obj.canceled
show_ready.boolean = True
show_ready.short_description = _("Ready for delivery")
def get_shipping_choices():
choices = [sm.get_choice() for sm in cart_modifiers_pool.get_shipping_modifiers()]
return choices
class DeliveryForm(models.ModelForm):
shipping_method = models.ChoiceField(
label=_("Shipping by"),
choices=get_shipping_choices,
)
class Meta:
model = DeliveryModel
exclude = []
def has_changed(self):
return True
def clean_shipping_method(self):
if not self.cleaned_data['shipping_method']:
return self.instance.shipping_method
return self.cleaned_data['shipping_method']
class DeliveryInline(admin.TabularInline):
model = DeliveryModel
form = DeliveryForm
extra = 0
fields = ['shipping_id', 'shipping_method', 'delivered_items', 'print_out', 'fulfilled_at', 'shipped_at']
readonly_fields = ['delivered_items', 'print_out', 'fulfilled_at', 'shipped_at']
def has_delete_permission(self, request, obj=None):
return False
def get_max_num(self, request, obj=None, **kwargs):
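        # Cap the inline formset at the number of existing deliveries so that
        # no additional Delivery rows can be added through the admin.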
qs = self.model.objects.filter(order=obj)
return qs.count()
def get_fields(self, request, obj=None):
assert obj is not None, "An Order object can not be added through the Django-Admin"
fields = list(super().get_fields(request, obj))
if not obj.allow_partial_delivery:
fields.remove('delivered_items')
return fields
def get_readonly_fields(self, request, obj=None):
readonly_fields = list(super().get_readonly_fields(request, obj))
if not app_settings.SHOP_OVERRIDE_SHIPPING_METHOD or obj.status == 'ready_for_delivery':
readonly_fields.append('shipping_method')
return readonly_fields
def get_formset(self, request, obj=None, **kwargs):
formset = super().get_formset(request, obj, **kwargs)
if not app_settings.SHOP_OVERRIDE_SHIPPING_METHOD or obj.status == 'ready_for_delivery':
# make readonly field optional
formset.form.base_fields['shipping_method'].required = False
return formset
def delivered_items(self, obj):
aggr = obj.items.aggregate(quantity=Sum('quantity'))
aggr['quantity'] = aggr['quantity'] or 0
aggr.update(items=obj.items.count())
return '{quantity}/{items}'.format(**aggr)
delivered_items.short_description = _("Quantity/Items")
def print_out(self, obj):
if obj.fulfilled_at is None:
return ''
link = reverse('admin:print_delivery_note', args=(obj.id,)), _("Delivery Note")
return format_html(
'<span class="object-tools"><a href="{0}" class="viewsitelink" target="_new">{1}</a></span>',
*link)
print_out.short_description = _("Print out")
def fulfilled(self, obj):
if obj.fulfilled_at:
return timezone.localtime(obj.fulfilled_at).ctime() # TODO: find the correct time format
return _("Pending")
fulfilled.short_description = _("Fulfilled at")
class DeliveryOrderAdminMixin:
"""
Add this mixin to the class defining the OrderAdmin
"""
def get_urls(self):
my_urls = [
url(r'^(?P<delivery_pk>\d+)/print_delivery_note/$',
self.admin_site.admin_view(self.render_delivery_note),
name='print_delivery_note'),
]
my_urls.extend(super().get_urls())
return my_urls
def render_delivery_note(self, request, delivery_pk=None):
template = select_template([
'{}/print/delivery-note.html'.format(app_settings.APP_LABEL.lower()),
'shop/print/delivery-note.html'
])
delivery = DeliveryModel.objects.get(pk=delivery_pk)
context = {'request': request, 'render_label': 'print'}
customer_serializer = app_settings.CUSTOMER_SERIALIZER(delivery.order.customer)
order_serializer = OrderDetailSerializer(delivery.order, context=context)
delivery_serializer = DeliverySerializer(delivery, context=context)
content = template.render({
'customer': customer_serializer.data,
'order': order_serializer.data,
'delivery': delivery_serializer.data,
'object': delivery,
})
return HttpResponse(content)
def get_inline_instances(self, request, obj=None):
        assert obj is not None, "An Order object cannot be added through the Django-Admin"
        assert hasattr(obj, 'associate_with_delivery'), "Add 'shop.shipping.workflows.SimpleShippingWorkflowMixin' " \
            "(or a class inheriting from it) to SHOP_ORDER_WORKFLOWS."
inline_instances = list(super().get_inline_instances(request, obj))
if obj.associate_with_delivery:
if obj.allow_partial_delivery:
# replace `OrderItemInline` by `OrderItemInlineDelivery` for that instance.
inline_instances = [
OrderItemInlineDelivery(self.model, self.admin_site) if isinstance(instance, OrderItemInline) else instance
for instance in inline_instances
]
inline_instances.append(DeliveryInline(self.model, self.admin_site))
return inline_instances
def save_related(self, request, form, formsets, change):
super().save_related(request, form, formsets, change)
if form.instance.status == 'pack_goods' and 'status' in form.changed_data:
orderitem_formset = [fs for fs in formsets if issubclass(fs.model, OrderItemModel)][0]
form.instance.update_or_create_delivery(orderitem_formset.cleaned_data)
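# Usage sketch (illustrative; `BaseOrderAdmin` and its import path are
# assumptions, not part of this module):
#
#     from django.contrib import admin
#     from shop.models.order import OrderModel
#
#     @admin.register(OrderModel)
#     class MyOrderAdmin(DeliveryOrderAdminMixin, BaseOrderAdmin):
#         """Order admin with delivery handling and printable delivery notes."""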
|
import urwid
choices = u'Chapman Cleese Gilliam Idle Jones Palin'.split()
def menu(title, choices):
body = [urwid.Text(title), urwid.Divider()]
for c in choices:
button = urwid.Button(c)
urwid.connect_signal(button, 'click', item_chosen, c)
body.append(urwid.AttrMap(button, None, focus_map='reversed'))
return urwid.ListBox(urwid.SimpleFocusListWalker(body))
def item_chosen(button, choice):
response = urwid.Text([u'You chose ', choice, u'\n'])
done = urwid.Button(u'Ok')
urwid.connect_signal(done, 'click', exit_program)
main.original_widget = urwid.Filler(urwid.Pile([response,
urwid.AttrMap(done, None, focus_map='reversed')]))
def exit_program(button):
raise urwid.ExitMainLoop()
main = urwid.Padding(menu(u'Pythons', choices), left=2, right=2)
top = urwid.Overlay(main, urwid.SolidFill(u'\N{MEDIUM SHADE}'),
align='center', width=('relative', 60),
valign='middle', height=('relative', 60),
min_width=20, min_height=9)
urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
|
import sys
from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext import LogOddsRatioUninformativeDirichletPrior, scale
from scattertext import ScatterChart
from scattertext.ScatterChart import CoordinatesNotRightException, TermDocMatrixHasNoMetadataException, \
NeedToInjectCoordinatesException
from scattertext.test.test_corpusFromPandasWithoutCategories import get_term_doc_matrix_without_categories
from scattertext.test.test_semioticSquare import get_test_corpus
from scattertext.test.test_termDocMatrixFactory \
import build_hamlet_jz_term_doc_mat, build_hamlet_jz_corpus_with_meta
class TestScatterChart(TestCase):
def test_to_json(self):
tdm = build_hamlet_jz_term_doc_mat()
# with self.assertRaises(NoWordMeetsTermFrequencyRequirementsError):
# ScatterChart(term_doc_matrix=tdm).to_dict('hamlet')
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
.to_dict('hamlet'))
self.assertEqual(set(j.keys()), set(['info', 'data']))
self.assertEqual(set(j['info'].keys()),
set(['not_category_name',
'category_name',
'category_terms',
'not_category_terms',
'category_internal_name',
'not_category_internal_names',
'neutral_category_internal_names',
'extra_category_internal_names',
'categories']))
expected = {"x": 0.0,
"y": 0.42,
'ox': 0,
'oy': 0.42,
"term": "art",
"cat25k": 758,
"ncat25k": 0,
"neut25k": 0,
'neut': 0,
"extra25k": 0,
'extra': 0,
's': 0.5,
'os': 3,
'bg': 3}
datum = self._get_data_example(j)
for var in ['cat25k', 'ncat25k']:
np.testing.assert_almost_equal(expected[var], datum[var], decimal=1)
self.assertEqual(set(expected.keys()), set(datum.keys()))
self.assertEqual(expected['term'], datum['term'])
def test_to_dict_without_categories(self):
tdm = get_term_doc_matrix_without_categories()
scatter_chart = ScatterChart(term_doc_matrix=tdm, minimum_term_frequency=0)
with self.assertRaises(NeedToInjectCoordinatesException):
scatter_chart.to_dict_without_categories()
x_coords = tdm.get_term_doc_mat().sum(axis=0).A1
y_coords = tdm.get_term_doc_mat().astype(bool).astype(int).sum(axis=0).A1
scatter_chart.inject_coordinates(original_x=x_coords,
original_y=y_coords,
x_coords=scale(x_coords),
y_coords=scale(y_coords))
j = scatter_chart.to_dict_without_categories()
self.assertIsInstance(j, dict)
self.assertEqual(set(j.keys()), set(['data']))
self.assertEqual(len(j['data']), tdm.get_num_terms())
self.assertEqual(j['data'][-1],
{'cat': 4, 'cat25k': 735, 'ox': 4, 'oy': 3,
'term': 'speak', 'x': 1.0, 'y': 1.0})
    def test_reuse_is_disabled(self):
corpus = get_test_corpus()
sc = ScatterChart(term_doc_matrix=corpus, minimum_term_frequency=0)
sc.to_dict('hamlet')
with self.assertRaises(Exception):
sc.to_dict('hamlet')
def test_score_transform(self):
corpus = get_test_corpus()
sc = ScatterChart(term_doc_matrix=corpus, minimum_term_frequency=0)
d1 = sc.to_dict('hamlet')
sc = ScatterChart(term_doc_matrix=corpus, minimum_term_frequency=0, score_transform=lambda x:x)
d2 = sc.to_dict('hamlet')
assert sum([datum['s'] for datum in d1['data']]) != sum([datum['s'] for datum in d2['data']])
def test_multi_categories(self):
corpus = get_test_corpus()
j_vs_all = ScatterChart(term_doc_matrix=corpus, minimum_term_frequency=0) \
.to_dict('hamlet')
j_vs_swift = ScatterChart(term_doc_matrix=corpus, minimum_term_frequency=0) \
.to_dict('hamlet', not_categories=['swift'])
self.assertNotEqual(set(j_vs_all['info']['not_category_internal_names']),
set(j_vs_swift['info']['not_category_internal_names']))
self.assertEqual(j_vs_all['info']['categories'], corpus.get_categories())
self.assertEqual(j_vs_swift['info']['categories'], corpus.get_categories())
def test_title_case_names(self):
tdm = build_hamlet_jz_term_doc_mat()
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
.to_dict('hamlet', 'HAMLET', 'NOT HAMLET'))
self.assertEqual(j['info']['category_name'], 'HAMLET')
self.assertEqual(j['info']['not_category_name'], 'NOT HAMLET')
tdm = build_hamlet_jz_term_doc_mat()
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
.to_dict('hamlet', 'HAMLET', 'NOT HAMLET', title_case_names=True))
self.assertEqual(j['info']['category_name'], 'Hamlet')
self.assertEqual(j['info']['not_category_name'], 'Not Hamlet')
def _get_data_example(self, j):
return [t for t in j['data'] if t['term'] == 'art'][0]
def test_terms_to_include(self):
tdm = build_hamlet_jz_term_doc_mat()
terms_to_include = list(sorted(['both worlds', 'thou', 'the', 'of', 'st', 'returned', 'best', ]))
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
terms_to_include=terms_to_include)
.to_dict('hamlet', 'HAMLET', 'NOT HAMLET'))
self.assertEqual(list(sorted(t['term'] for t in j['data'])), terms_to_include)
def test_p_vals(self):
tdm = build_hamlet_jz_term_doc_mat()
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
term_significance=LogOddsRatioUninformativeDirichletPrior())
.to_dict('hamlet'))
datum = self._get_data_example(j)
self.assertIn('p', datum.keys())
def test_inject_coordinates(self):
tdm = build_hamlet_jz_term_doc_mat()
freq_df = tdm.get_term_freq_df()
scatter_chart = ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates([], [])
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(freq_df[freq_df.columns[0]], [])
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates([], freq_df[freq_df.columns[0]])
        x = freq_df[freq_df.columns[1]].astype(float)
        y = freq_df[freq_df.columns[0]].astype(float)
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(x, y)
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(x, y / y.max())
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(x / x.max(), y)
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(-x / x.max(), -y / y.max())
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(-x / x.max(), y / y.max())
with self.assertRaises(CoordinatesNotRightException):
scatter_chart.inject_coordinates(x / x.max(), -y / y.max())
scatter_chart.inject_coordinates(x / x.max(), y / y.max())
def test_inject_metadata_term_lists(self):
tdm = build_hamlet_jz_term_doc_mat()
scatter_chart = ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
with self.assertRaises(TermDocMatrixHasNoMetadataException):
scatter_chart.inject_metadata_term_lists({'blah': ['a', 'adsf', 'asfd']})
scatter_chart = ScatterChart(term_doc_matrix=build_hamlet_jz_corpus_with_meta(),
minimum_term_frequency=0,
use_non_text_features=True)
with self.assertRaises(TypeError):
scatter_chart.inject_metadata_term_lists({'blash': [3, 1]})
with self.assertRaises(TypeError):
scatter_chart.inject_metadata_term_lists({3: ['a', 'b']})
with self.assertRaises(TypeError):
scatter_chart.inject_metadata_term_lists({'a': {'a', 'b'}})
with self.assertRaises(TypeError):
scatter_chart.inject_metadata_term_lists(3)
self.assertEqual(type(scatter_chart.inject_metadata_term_lists({'a': ['a', 'b']})), ScatterChart)
j = scatter_chart.to_dict('hamlet')
self.assertEqual(set(j.keys()), set(['info', 'data', 'metalists']))
self.assertEqual(set(j['info'].keys()),
set(['not_category_name',
'category_name',
'category_terms',
'not_category_terms',
'category_internal_name',
'not_category_internal_names',
'extra_category_internal_names',
'neutral_category_internal_names',
'categories']))
def test_inject_metadata_descriptions(self):
tdm = build_hamlet_jz_corpus_with_meta()
scatter_chart = ScatterChart(term_doc_matrix=tdm, minimum_term_frequency=0)
with self.assertRaises(AssertionError):
scatter_chart.inject_metadata_descriptions(3323)
if (sys.version_info > (3, 0)):
'''
with self.assertRaisesRegex(Exception, 'The following meta data terms are not present: blah'):
scatter_chart.inject_metadata_descriptions({'blah': 'asjdkflasdjklfsadjk jsdkafsd'})
with self.assertRaisesRegex(Exception, 'The following meta data terms are not present: cat2'):
scatter_chart.inject_metadata_descriptions({'cat1': 'asjdkflasdjklfsadjk jsdkafsd', 'cat2': 'asdf'})
'''
assert scatter_chart == scatter_chart.inject_metadata_descriptions({'cat1': 'asjdkflasdjklfsadjk jsdkafsd'})
j = scatter_chart.to_dict('hamlet')
self.assertEqual(set(j.keys()), set(['info', 'data', 'metadescriptions']))
def test_inject_term_colors(self):
tdm = build_hamlet_jz_corpus_with_meta()
freq_df = tdm.get_term_freq_df()
scatter_chart = ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
scatter_chart.inject_term_colors({'t1': '00ffee'})
j = scatter_chart.to_dict('hamlet')
self.assertIn('term_colors', j['info'])
def test_inject_coordinates_original(self):
tdm = build_hamlet_jz_term_doc_mat()
freq_df = tdm.get_term_freq_df()
scatter_chart = ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0)
        x = freq_df[freq_df.columns[1]].astype(float)
        y = freq_df[freq_df.columns[0]].astype(float)
scatter_chart.inject_coordinates(x / x.max(), y / y.max(), original_x=x, original_y=y)
j = scatter_chart.to_dict('hamlet')
self.assertEqual(j['data'][0].keys(),
{'x', 'os', 'y', 'ncat25k', 'neut', 'cat25k', 'ox', 'neut25k', 'extra25k', 'extra', 'oy',
'term',
's', 'bg'})
and_term = [t for t in j['data'] if t['term'] == 'and'][0]
self.assertEqual(and_term['ox'], 0)
self.assertEqual(and_term['oy'], 1)
def test_to_json_use_non_text_features(self):
tdm = build_hamlet_jz_corpus_with_meta()
# with self.assertRaises(NoWordMeetsTermFrequencyRequirementsError):
# ScatterChart(term_doc_matrix=tdm).to_dict('hamlet')
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
use_non_text_features=True)
.to_dict('hamlet'))
self.assertEqual(set(j.keys()), set(['info', 'data']))
self.assertEqual(set(j['info'].keys()),
set(['not_category_name',
'category_name',
'category_terms',
'not_category_terms',
'category_internal_name',
'not_category_internal_names',
'extra_category_internal_names',
'neutral_category_internal_names',
'categories']))
        self.assertEqual({t['term'] for t in j['data']}, {'cat1'})
        # {'cat4', 'cat9', 'cat5', 'cat0', 'cat3', 'cat2', 'cat1'}
def test_max_terms(self):
tdm = build_hamlet_jz_term_doc_mat()
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
max_terms=2)
.to_dict('hamlet'))
self.assertEqual(2, len(j['data']))
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
max_terms=10)
.to_dict('hamlet'))
self.assertEqual(10, len(j['data']))
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
pmi_threshold_coefficient=0,
max_terms=10000)
.to_dict('hamlet'))
self.assertEqual(len(tdm.get_term_freq_df()), len(j['data']))
j = (ScatterChart(term_doc_matrix=tdm,
minimum_term_frequency=0,
pmi_threshold_coefficient=0,
max_terms=None)
.to_dict('hamlet'))
self.assertEqual(len(tdm.get_term_freq_df()), len(j['data']))
|
import aiohttp
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.adguard import config_flow
from homeassistant.components.adguard.const import DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
CONTENT_TYPE_JSON,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
FIXTURE_USER_INPUT = {
CONF_HOST: "127.0.0.1",
CONF_PORT: 3000,
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
CONF_SSL: True,
CONF_VERIFY_SSL: True,
}
async def test_show_authenticate_form(hass):
"""Test that the setup form is served."""
flow = config_flow.AdGuardHomeFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_connection_error(hass, aioclient_mock):
"""Test we show user form on AdGuard Home connection error."""
aioclient_mock.get(
f"{'https' if FIXTURE_USER_INPUT[CONF_SSL] else 'http'}"
f"://{FIXTURE_USER_INPUT[CONF_HOST]}"
f":{FIXTURE_USER_INPUT[CONF_PORT]}/control/status",
exc=aiohttp.ClientError,
)
flow = config_flow.AdGuardHomeFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=FIXTURE_USER_INPUT)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_full_flow_implementation(hass, aioclient_mock):
"""Test registering an integration and finishing flow works."""
aioclient_mock.get(
f"{'https' if FIXTURE_USER_INPUT[CONF_SSL] else 'http'}"
f"://{FIXTURE_USER_INPUT[CONF_HOST]}"
f":{FIXTURE_USER_INPUT[CONF_PORT]}/control/status",
json={"version": "v0.99.0"},
headers={"Content-Type": CONTENT_TYPE_JSON},
)
flow = config_flow.AdGuardHomeFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await flow.async_step_user(user_input=FIXTURE_USER_INPUT)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == FIXTURE_USER_INPUT[CONF_HOST]
assert result["data"][CONF_HOST] == FIXTURE_USER_INPUT[CONF_HOST]
assert result["data"][CONF_PASSWORD] == FIXTURE_USER_INPUT[CONF_PASSWORD]
assert result["data"][CONF_PORT] == FIXTURE_USER_INPUT[CONF_PORT]
assert result["data"][CONF_SSL] == FIXTURE_USER_INPUT[CONF_SSL]
assert result["data"][CONF_USERNAME] == FIXTURE_USER_INPUT[CONF_USERNAME]
assert result["data"][CONF_VERIFY_SSL] == FIXTURE_USER_INPUT[CONF_VERIFY_SSL]
async def test_integration_already_exists(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_hassio_single_instance(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(
domain="adguard", data={"host": "mock-adguard", "port": "3000"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"adguard",
data={"addon": "AdGuard Home Addon", "host": "mock-adguard", "port": "3000"},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_hassio_update_instance_not_running(hass):
"""Test we only allow a single config flow."""
entry = MockConfigEntry(
domain="adguard", data={"host": "mock-adguard", "port": "3000"}
)
entry.add_to_hass(hass)
assert entry.state == config_entries.ENTRY_STATE_NOT_LOADED
result = await hass.config_entries.flow.async_init(
"adguard",
data={
"addon": "AdGuard Home Addon",
"host": "mock-adguard-updated",
"port": "3000",
},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "existing_instance_updated"
async def test_hassio_update_instance_running(hass, aioclient_mock):
"""Test we only allow a single config flow."""
aioclient_mock.get(
"http://mock-adguard-updated:3000/control/status",
json={"version": "v0.99.0"},
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://mock-adguard:3000/control/status",
json={"version": "v0.99.0"},
headers={"Content-Type": CONTENT_TYPE_JSON},
)
entry = MockConfigEntry(
domain="adguard",
data={
"host": "mock-adguard",
"port": "3000",
"verify_ssl": False,
"username": None,
"password": None,
"ssl": False,
},
)
entry.add_to_hass(hass)
with patch.object(
hass.config_entries,
"async_forward_entry_setup",
return_value=True,
) as mock_load:
assert await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert len(mock_load.mock_calls) == 2
with patch.object(
hass.config_entries,
"async_forward_entry_unload",
return_value=True,
) as mock_unload, patch.object(
hass.config_entries,
"async_forward_entry_setup",
return_value=True,
) as mock_load:
result = await hass.config_entries.flow.async_init(
"adguard",
data={
"addon": "AdGuard Home Addon",
"host": "mock-adguard-updated",
"port": "3000",
},
context={"source": "hassio"},
)
assert len(mock_unload.mock_calls) == 2
assert len(mock_load.mock_calls) == 2
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "existing_instance_updated"
assert entry.data["host"] == "mock-adguard-updated"
async def test_hassio_confirm(hass, aioclient_mock):
"""Test we can finish a config flow."""
aioclient_mock.get(
"http://mock-adguard:3000/control/status",
json={"version": "v0.99.0"},
headers={"Content-Type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
"adguard",
data={"addon": "AdGuard Home Addon", "host": "mock-adguard", "port": 3000},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "hassio_confirm"
assert result["description_placeholders"] == {"addon": "AdGuard Home Addon"}
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "AdGuard Home Addon"
assert result["data"][CONF_HOST] == "mock-adguard"
assert result["data"][CONF_PASSWORD] is None
assert result["data"][CONF_PORT] == 3000
assert result["data"][CONF_SSL] is False
assert result["data"][CONF_USERNAME] is None
assert result["data"][CONF_VERIFY_SSL]
async def test_hassio_connection_error(hass, aioclient_mock):
"""Test we show Hass.io confirm form on AdGuard Home connection error."""
aioclient_mock.get(
"http://mock-adguard:3000/control/status", exc=aiohttp.ClientError
)
result = await hass.config_entries.flow.async_init(
"adguard",
data={"addon": "AdGuard Home Addon", "host": "mock-adguard", "port": 3000},
context={"source": "hassio"},
)
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "hassio_confirm"
assert result["errors"] == {"base": "cannot_connect"}
|
from __future__ import division
from builtins import range
import numpy as np
from .format_data import format_data as formatter
from .._shared.helpers import memoize
@memoize
def normalize(x, normalize='across', internal=False, format_data=True):
"""
Z-transform the columns or rows of an array, or list of arrays
This function normalizes the rows or columns of the input array(s). This
can be useful because data reduction and machine learning techniques are
sensitive to scaling differences between features. By default, the function
is set to normalize 'across' the columns of all lists, but it can also
normalize the columns 'within' each individual list, or alternatively, for
each row in the array.
Parameters
----------
x : Numpy array or list of arrays
This can either be a single array, or list of arrays
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
across lists (default). That is, the z-scores will be computed with
        respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
format_data : bool
Whether or not to first call the format_data function (default: True).
Returns
----------
normalized_x : Numpy array or list of arrays
An array or list of arrays where the columns or rows are z-scored. If
the input was a list, a list is returned. Otherwise, an array is
returned.
"""
assert normalize in ['across','within','row', False, None], "scale_type must be across, within, row or none."
if normalize in [False, None]:
return x
else:
if format_data:
x = formatter(x, ppca=True)
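        # z-score y against the mean/std of X; if y is constant (one unique
        # value), return zeros instead of dividing by a zero standard deviation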
zscore = lambda X, y: (y - np.mean(X)) / np.std(X) if len(set(y)) > 1 else np.zeros(y.shape)
if normalize == 'across':
            x_stacked = np.vstack(x)
normalized_x = [np.array([zscore(x_stacked[:,j], i[:,j]) for j in range(i.shape[1])]).T for i in x]
elif normalize == 'within':
normalized_x = [np.array([zscore(i[:,j], i[:,j]) for j in range(i.shape[1])]).T for i in x]
elif normalize == 'row':
normalized_x = [np.array([zscore(i[j,:], i[j,:]) for j in range(i.shape[0])]) for i in x]
        if internal or len(normalized_x) > 1:
return normalized_x
else:
return normalized_x[0]
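# Usage sketch (illustrative, not part of the module):
#
#     import numpy as np
#     a, b = np.random.rand(10, 3), np.random.rand(10, 3)
#     normalize([a, b])                       # z-score columns across both arrays
#     normalize([a, b], normalize='within')   # z-score columns within each array
#     normalize(a, normalize='row')           # z-score each row of a single array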
|
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
VOC_LABELS = {
'none': (0, 'Background'),
'aeroplane': (1, 'Vehicle'),
'bicycle': (2, 'Vehicle'),
'bird': (3, 'Animal'),
'boat': (4, 'Vehicle'),
'bottle': (5, 'Indoor'),
'bus': (6, 'Vehicle'),
'car': (7, 'Vehicle'),
'cat': (8, 'Animal'),
'chair': (9, 'Indoor'),
'cow': (10, 'Animal'),
'diningtable': (11, 'Indoor'),
'dog': (12, 'Animal'),
'horse': (13, 'Animal'),
'motorbike': (14, 'Vehicle'),
'person': (15, 'Person'),
'pottedplant': (16, 'Indoor'),
'sheep': (17, 'Animal'),
'sofa': (18, 'Indoor'),
'train': (19, 'Vehicle'),
'tvmonitor': (20, 'Indoor'),
}
def get_split(split_name, dataset_dir, file_pattern, reader,
              split_to_sizes, items_to_descriptions, num_classes):
    """Gets a dataset tuple with instructions for reading Pascal VOC dataset.
    Args:
      split_name: A trainval/test split name.
      dataset_dir: The base directory of the dataset sources.
      file_pattern: The file pattern to use when matching the dataset sources.
        It is assumed that the pattern contains a '%s' string so that the split
        name can be inserted.
      reader: The TensorFlow reader type.
      split_to_sizes: A dictionary mapping each split name to its number of samples.
      items_to_descriptions: A dictionary describing the items of the dataset.
      num_classes: The number of classes in the dataset.
    Returns:
      A `Dataset` namedtuple.
    Raises:
      ValueError: if `split_name` is not a valid trainval/test split.
    """
if split_name not in ['trainval', 'test']:
raise ValueError('split name %s was not recognized.' % split_name)
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
# Features in Pascal VOC TFRecords.
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
# else:
# labels_to_names = create_readable_names_for_imagenet_labels()
# dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=split_to_sizes[split_name],
items_to_descriptions=items_to_descriptions,
num_classes=num_classes,
labels_to_names=labels_to_names)
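# Usage sketch (illustrative; the file pattern, split sizes and descriptions
# below are assumptions for a converted Pascal VOC 2007 TFRecord set):
#
#     dataset = get_split(
#         'trainval', '/data/voc/tfrecords', 'voc_2007_%s_*.tfrecord',
#         reader=None,
#         split_to_sizes={'trainval': 5011, 'test': 4952},
#         items_to_descriptions={'image': 'A color image.'},
#         num_classes=20)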
|
import pytest
import sh
from molecule import config
from molecule.verifier.lint import yamllint
@pytest.fixture
def _patched_get_tests(mocker):
m = mocker.patch('molecule.verifier.lint.yamllint.Yamllint._get_tests')
m.return_value = ['test1', 'test2', 'test3']
return m
@pytest.fixture
def _verifier_lint_section_data():
return {
'verifier': {
'name': 'goss',
'lint': {
'name': 'yamllint',
'options': {
'foo': 'bar',
},
'env': {
'FOO': 'bar',
},
}
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(patched_config_validate, config_instance):
return yamllint.Yamllint(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
x = {
's': True,
}
assert x == _instance.default_options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_name_property(_instance):
assert 'yamllint' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_options_property(_instance):
x = {
's': True,
'foo': 'bar',
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {'debug': True}
x = {
's': True,
'foo': 'bar',
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_bake(_instance):
_instance._tests = ['test1', 'test2', 'test3']
_instance.bake()
x = [
str(sh.Command('yamllint')),
'-s',
'--foo=bar',
'test1',
'test2',
'test3',
]
result = str(_instance._yamllint_command).split()
assert sorted(x) == sorted(result)
def test_execute(patched_logger_info, patched_logger_success,
patched_run_command, _instance):
_instance._tests = ['test1', 'test2', 'test3']
_instance._yamllint_command = 'patched-command'
_instance.execute()
patched_run_command.assert_called_once_with('patched-command', debug=False)
msg = 'Executing Yamllint on files found in {}/...'.format(
_instance._config.verifier.directory)
patched_logger_info.assert_called_once_with(msg)
msg = 'Lint completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
_instance):
_instance._config.config['verifier']['lint']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, verifier_lint is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_does_not_execute_without_tests(patched_run_command,
patched_logger_warn, _instance):
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, no tests found.'
patched_logger_warn.assert_called_once_with(msg)
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_execute_bakes(patched_run_command, _instance):
_instance._tests = ['test1', 'test2', 'test3']
_instance.execute()
assert _instance._yamllint_command is not None
x = [
str(sh.Command('yamllint')),
'-s',
'--foo=bar',
'test1',
'test2',
'test3',
]
result = str(patched_run_command.mock_calls[0][1][0]).split()
assert sorted(x) == sorted(result)
def test_executes_catches_and_exits_return_code(patched_run_command,
_patched_get_tests, _instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(
sh.yamllint, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
|
import flatbuffers
class HelloNew(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsHelloNew(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = HelloNew()
x.Init(buf, n + offset)
return x
# HelloNew
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# HelloNew
def Roles(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .ClientRoles import ClientRoles
obj = ClientRoles()
obj.Init(self._tab.Bytes, x)
return obj
return None
# HelloNew
def Realm(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# HelloNew
def Authid(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# HelloNew
def Authrole(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# HelloNew
def Authmode(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# HelloNew
def Authfactor1Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# HelloNew
def Authfactor1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# HelloNew
def Authfactor2Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# HelloNew
def Authfactor2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# HelloNew
def Authfactor3Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# HelloNew
def Authfactor3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# HelloNew
def Resumable(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# HelloNew
def ResumeSession(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# HelloNew
def ResumeToken(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def HelloNewStart(builder): builder.StartObject(14)
def HelloNewAddRoles(builder, roles): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(roles), 0)
def HelloNewAddRealm(builder, realm): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(realm), 0)
def HelloNewAddAuthid(builder, authid): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(authid), 0)
def HelloNewAddAuthrole(builder, authrole): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(authrole), 0)
def HelloNewAddAuthmode(builder, authmode): builder.PrependUint8Slot(4, authmode, 0)
def HelloNewAddAuthfactor1Type(builder, authfactor1Type): builder.PrependUint8Slot(5, authfactor1Type, 0)
def HelloNewAddAuthfactor1(builder, authfactor1): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(authfactor1), 0)
def HelloNewAddAuthfactor2Type(builder, authfactor2Type): builder.PrependUint8Slot(7, authfactor2Type, 0)
def HelloNewAddAuthfactor2(builder, authfactor2): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(authfactor2), 0)
def HelloNewAddAuthfactor3Type(builder, authfactor3Type): builder.PrependUint8Slot(9, authfactor3Type, 0)
def HelloNewAddAuthfactor3(builder, authfactor3): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(authfactor3), 0)
def HelloNewAddResumable(builder, resumable): builder.PrependBoolSlot(11, resumable, 0)
def HelloNewAddResumeSession(builder, resumeSession): builder.PrependUint64Slot(12, resumeSession, 0)
def HelloNewAddResumeToken(builder, resumeToken): builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(resumeToken), 0)
def HelloNewEnd(builder): return builder.EndObject()
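# Hedged usage sketch, appended for illustration only (not part of the
# generated file): build a minimal HelloNew message with the helpers above
# and read it back. The realm/authid values are made up, and Roles is left
# unset so Roles() returns None.
if __name__ == '__main__':
    builder = flatbuffers.Builder(0)
    # Strings must be created before HelloNewStart().
    realm = builder.CreateString('realm1')
    authid = builder.CreateString('client1')
    HelloNewStart(builder)
    HelloNewAddRealm(builder, realm)
    HelloNewAddAuthid(builder, authid)
    HelloNewAddResumable(builder, True)
    builder.Finish(HelloNewEnd(builder))

    msg = HelloNew.GetRootAsHelloNew(builder.Output(), 0)
    assert msg.Realm() == b'realm1'
    assert msg.Authid() == b'client1'
    assert msg.Resumable() is True
    assert msg.Roles() is None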
|
from aioambient import Client
from aioambient.errors import AmbientError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import aiohttp_client
from .const import CONF_APP_KEY, DOMAIN # pylint: disable=unused-import
class AmbientStationFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an Ambient PWS config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
def __init__(self):
"""Initialize the config flow."""
self.data_schema = vol.Schema(
{vol.Required(CONF_API_KEY): str, vol.Required(CONF_APP_KEY): str}
)
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=self.data_schema,
errors=errors if errors else {},
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return await self._show_form()
await self.async_set_unique_id(user_input[CONF_APP_KEY])
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_API_KEY], user_input[CONF_APP_KEY], session=session
)
try:
devices = await client.api.get_devices()
except AmbientError:
return await self._show_form({"base": "invalid_key"})
if not devices:
return await self._show_form({"base": "no_devices"})
# The Application Key (which identifies each config entry) is too long
# to show nicely in the UI, so we take the first 12 characters (similar
# to how GitHub does it):
return self.async_create_entry(
title=user_input[CONF_APP_KEY][:12], data=user_input
)
|
from cerberus import Validator
from cerberus.tests import assert_fail, assert_normalized, assert_success
def test_allow_unknown_in_schema():
schema = {
'field': {
'type': 'dict',
'allow_unknown': True,
'schema': {'nested': {'type': 'string'}},
}
}
document = {'field': {'nested': 'foo', 'arb1': 'bar', 'arb2': 42}}
assert_success(document=document, schema=schema)
schema['field']['allow_unknown'] = {'type': 'string'}
assert_fail(document=document, schema=schema)
def test_allow_unknown_with_purge_unknown():
validator = Validator(purge_unknown=True)
schema = {'foo': {'type': 'dict', 'allow_unknown': True}}
document = {'foo': {'bar': True}, 'bar': 'foo'}
expected = {'foo': {'bar': True}}
assert_normalized(document, expected, schema, validator)
def test_allow_unknown_with_purge_unknown_subdocument():
validator = Validator(purge_unknown=True)
schema = {
'foo': {
'type': 'dict',
'schema': {'bar': {'type': 'string'}},
'allow_unknown': True,
}
}
document = {'foo': {'bar': 'baz', 'corge': False}, 'thud': 'xyzzy'}
expected = {'foo': {'bar': 'baz', 'corge': False}}
assert_normalized(document, expected, schema, validator)
def test_allow_unknown_without_schema():
# https://github.com/pyeve/cerberus/issues/302
v = Validator({'a': {'type': 'dict', 'allow_unknown': True}})
v({'a': {}})
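# Hedged sketch (illustrative addition, mirroring the cases above):
# allow_unknown can also be set on the validator itself as a rules set,
# so unknown top-level fields are validated against it rather than rejected.
def test_allow_unknown_as_validator_rules_set():
    v = Validator({'name': {'type': 'string'}})
    v.allow_unknown = {'type': 'integer'}
    assert v({'name': 'x', 'extra': 1})
    assert not v({'name': 'x', 'extra': 'not-an-int'})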
|
import tests
import unittest
import sys
from pyVim import connect
from pyVmomi import vim
if sys.version_info >= (3, 3):
from unittest.mock import patch, MagicMock
else:
from mock import patch, MagicMock
class ConnectionTests(tests.VCRTestBase):
@tests.VCRTestBase.my_vcr.use_cassette('basic_connection.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='none')
def test_basic_connection(self):
# see: http://python3porting.com/noconv.html
si = connect.Connect(host='vcsa',
user='my_user',
pwd='my_password')
cookie = si._stub.cookie
session_id = si.content.sessionManager.currentSession.key
# NOTE (hartsock): The cookie value should never change during
# a connected session. That should be verifiable in these tests.
self.assertEqual(cookie, si._stub.cookie)
# NOTE (hartsock): assertIsNotNone does not work in Python 2.6
self.assertTrue(session_id is not None)
self.assertEqual('52b5395a-85c2-9902-7835-13a9b77e1fec', session_id)
@tests.VCRTestBase.my_vcr.use_cassette('sspi_connection.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='none')
def test_sspi_connection(self):
# see: http://python3porting.com/noconv.html
si = connect.Connect(host='vcsa',
mechanism='sspi',
b64token='my_base64token')
cookie = si._stub.cookie
session_id = si.content.sessionManager.currentSession.key
# NOTE (hartsock): The cookie value should never change during
# a connected session. That should be verifiable in these tests.
self.assertEqual(cookie, si._stub.cookie)
# NOTE (hartsock): assertIsNotNone does not work in Python 2.6
self.assertTrue(session_id is not None)
self.assertEqual('52b5395a-85c2-9902-7835-13a9b77e1fec', session_id)
@tests.VCRTestBase.my_vcr.use_cassette('basic_connection_bad_password.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='none')
def test_basic_connection_bad_password(self):
def should_fail():
connect.Connect(host='vcsa',
user='my_user',
pwd='bad_password')
self.assertRaises(vim.fault.InvalidLogin, should_fail)
@tests.VCRTestBase.my_vcr.use_cassette('smart_connection.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='none')
def test_smart_connection(self):
# see: http://python3porting.com/noconv.html
si = connect.SmartConnect(host='vcsa',
user='my_user',
pwd='my_password')
session_id = si.content.sessionManager.currentSession.key
# NOTE (hartsock): assertIsNotNone does not work in Python 2.6
self.assertTrue(session_id is not None)
self.assertEqual('52ad453a-13a7-e8af-9186-a1b5c5ab85b7', session_id)
def test_disconnect_on_no_connection(self):
connect.Disconnect(None)
@tests.VCRTestBase.my_vcr.use_cassette('ssl_tunnel.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='none')
def test_ssl_tunnel(self):
connect.SoapStubAdapter('sdkTunnel', 8089, httpProxyHost='vcsa').GetConnection()
def test_ssl_tunnel_http_failure(self):
import socket
def should_fail():
conn = connect.SoapStubAdapter('vcsa', 80, httpProxyHost='unreachable').GetConnection()
conn.request('GET', '/')
conn.getresponse()
self.assertRaises((OSError, socket.gaierror), should_fail)
@patch('six.moves.http_client.HTTPSConnection')
def test_http_proxy_with_cert_file(self, hs):
conn = connect.SoapStubAdapter(
'sdkTunnel', 8089, httpProxyHost='vcsa',
certKeyFile='my_key_file', certFile='my_cert_file').GetConnection()
conn.request('GET', '/')
hs.assert_called_once_with('vcsa:80', cert_file='my_cert_file', key_file='my_key_file')
conn.set_tunnel.assert_called_once_with('sdkTunnel:8089')
@tests.VCRTestBase.my_vcr.use_cassette('http_proxy.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='once')
def test_http_proxy(self):
conn = connect.SoapStubAdapter(
'vcenter.test', httpProxyHost='my-http-proxy',
httpProxyPort=8080).GetConnection()
self.assertEqual(conn._tunnel_host, 'vcenter.test')
self.assertEqual(conn._tunnel_port, 443)
conn.request('GET', '/')
conn.getresponse()
if __name__ == '__main__':
unittest.main()
|
import glances_api
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from . import get_api
from .const import (
CONF_VERSION,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DEFAULT_VERSION,
DOMAIN,
SUPPORTED_VERSIONS,
)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME, default=DEFAULT_NAME): str,
vol.Required(CONF_HOST, default=DEFAULT_HOST): str,
vol.Optional(CONF_USERNAME): str,
vol.Optional(CONF_PASSWORD): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
vol.Required(CONF_VERSION, default=DEFAULT_VERSION): int,
vol.Optional(CONF_SSL, default=False): bool,
vol.Optional(CONF_VERIFY_SSL, default=False): bool,
}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect."""
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.data[CONF_HOST] == data[CONF_HOST]:
raise AlreadyConfigured
if data[CONF_VERSION] not in SUPPORTED_VERSIONS:
raise WrongVersion
try:
api = get_api(hass, data)
await api.get_data()
except glances_api.exceptions.GlancesApiConnectionError as err:
raise CannotConnect from err
class GlancesFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Glances config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return GlancesOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
await validate_input(self.hass, user_input)
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
except AlreadyConfigured:
return self.async_abort(reason="already_configured")
except CannotConnect:
errors["base"] = "cannot_connect"
except WrongVersion:
errors[CONF_VERSION] = "wrong_version"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, import_config):
"""Import from Glances sensor config."""
return await self.async_step_user(user_input=import_config)
class GlancesOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Glances client options."""
def __init__(self, config_entry):
"""Initialize Glances options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the Glances options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = {
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): int
}
return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class AlreadyConfigured(exceptions.HomeAssistantError):
"""Error to indicate host is already configured."""
class WrongVersion(exceptions.HomeAssistantError):
"""Error to indicate the selected version is wrong."""
|
import pytest
from homeassistant.auth.providers import homeassistant as hass_auth
from homeassistant.scripts import auth as script_auth
from tests.async_mock import Mock, patch
from tests.common import register_auth_provider
@pytest.fixture
def provider(hass):
"""Home Assistant auth provider."""
provider = hass.loop.run_until_complete(
register_auth_provider(hass, {"type": "homeassistant"})
)
hass.loop.run_until_complete(provider.async_initialize())
return provider
async def test_list_user(hass, provider, capsys):
"""Test we can list users."""
data = provider.data
data.add_auth("test-user", "test-pass")
data.add_auth("second-user", "second-pass")
await script_auth.list_users(hass, provider, None)
captured = capsys.readouterr()
assert captured.out == "\n".join(
["test-user", "second-user", "", "Total users: 2", ""]
)
async def test_add_user(hass, provider, capsys, hass_storage):
"""Test we can add a user."""
data = provider.data
await script_auth.add_user(
hass, provider, Mock(username="paulus", password="test-pass")
)
assert len(hass_storage[hass_auth.STORAGE_KEY]["data"]["users"]) == 1
captured = capsys.readouterr()
assert captured.out == "Auth created\n"
assert len(data.users) == 1
data.validate_login("paulus", "test-pass")
async def test_validate_login(hass, provider, capsys):
"""Test we can validate a user login."""
data = provider.data
data.add_auth("test-user", "test-pass")
await script_auth.validate_login(
hass, provider, Mock(username="test-user", password="test-pass")
)
captured = capsys.readouterr()
assert captured.out == "Auth valid\n"
await script_auth.validate_login(
hass, provider, Mock(username="test-user", password="invalid-pass")
)
captured = capsys.readouterr()
assert captured.out == "Auth invalid\n"
await script_auth.validate_login(
hass, provider, Mock(username="invalid-user", password="test-pass")
)
captured = capsys.readouterr()
assert captured.out == "Auth invalid\n"
async def test_change_password(hass, provider, capsys, hass_storage):
"""Test we can change a password."""
data = provider.data
data.add_auth("test-user", "test-pass")
await script_auth.change_password(
hass, provider, Mock(username="test-user", new_password="new-pass")
)
assert len(hass_storage[hass_auth.STORAGE_KEY]["data"]["users"]) == 1
captured = capsys.readouterr()
assert captured.out == "Password changed\n"
data.validate_login("test-user", "new-pass")
with pytest.raises(hass_auth.InvalidAuth):
data.validate_login("test-user", "test-pass")
async def test_change_password_invalid_user(hass, provider, capsys, hass_storage):
"""Test changing password of non-existing user."""
data = provider.data
data.add_auth("test-user", "test-pass")
await script_auth.change_password(
hass, provider, Mock(username="invalid-user", new_password="new-pass")
)
assert hass_auth.STORAGE_KEY not in hass_storage
captured = capsys.readouterr()
assert captured.out == "User not found\n"
data.validate_login("test-user", "test-pass")
with pytest.raises(hass_auth.InvalidAuth):
data.validate_login("invalid-user", "new-pass")
def test_parsing_args(loop):
"""Test we parse args correctly."""
called = False
async def mock_func(hass, provider, args2):
"""Mock function to be called."""
nonlocal called
called = True
assert provider.hass.config.config_dir == "/somewhere/config"
assert args2 is args
args = Mock(config="/somewhere/config", func=mock_func)
with patch("argparse.ArgumentParser.parse_args", return_value=args):
script_auth.run(None)
assert called, "Mock function did not get called"
|
import logging
from homeassistant.components import litejet
from homeassistant.components.switch import SwitchEntity
ATTR_NUMBER = "number"
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LiteJet switch platform."""
litejet_ = hass.data["litejet_system"]
devices = []
for i in litejet_.button_switches():
name = litejet_.get_switch_name(i)
if not litejet.is_ignored(hass, name):
devices.append(LiteJetSwitch(hass, litejet_, i, name))
add_entities(devices, True)
class LiteJetSwitch(SwitchEntity):
"""Representation of a single LiteJet switch."""
def __init__(self, hass, lj, i, name):
"""Initialize a LiteJet switch."""
self._hass = hass
self._lj = lj
self._index = i
self._state = False
self._name = name
lj.on_switch_pressed(i, self._on_switch_pressed)
lj.on_switch_released(i, self._on_switch_released)
def _on_switch_pressed(self):
_LOGGER.debug("Updating pressed for %s", self._name)
self._state = True
self.schedule_update_ha_state()
def _on_switch_released(self):
_LOGGER.debug("Updating released for %s", self._name)
self._state = False
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return if the switch is pressed."""
return self._state
@property
def should_poll(self):
"""Return that polling is not necessary."""
return False
@property
def device_state_attributes(self):
"""Return the device-specific state attributes."""
return {ATTR_NUMBER: self._index}
def turn_on(self, **kwargs):
"""Press the switch."""
self._lj.press_switch(self._index)
def turn_off(self, **kwargs):
"""Release the switch."""
self._lj.release_switch(self._index)
|
from pycoolmasternet_async import CoolMasterNet
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import CONF_HOST, CONF_PORT
# pylint: disable=unused-import
from .const import AVAILABLE_MODES, CONF_SUPPORTED_MODES, DEFAULT_PORT, DOMAIN
MODES_SCHEMA = {vol.Required(mode, default=True): bool for mode in AVAILABLE_MODES}
DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str, **MODES_SCHEMA})
async def _validate_connection(hass: core.HomeAssistant, host):
cool = CoolMasterNet(host, DEFAULT_PORT)
units = await cool.status()
return bool(units)
class CoolmasterConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Coolmaster config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@core.callback
def _async_get_entry(self, data):
supported_modes = [
key for (key, value) in data.items() if key in AVAILABLE_MODES and value
]
return self.async_create_entry(
title=data[CONF_HOST],
data={
CONF_HOST: data[CONF_HOST],
CONF_PORT: DEFAULT_PORT,
CONF_SUPPORTED_MODES: supported_modes,
},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is None:
return self.async_show_form(step_id="user", data_schema=DATA_SCHEMA)
errors = {}
host = user_input[CONF_HOST]
try:
result = await _validate_connection(self.hass, host)
if not result:
errors["base"] = "no_units"
except (OSError, ConnectionRefusedError, TimeoutError):
errors["base"] = "cannot_connect"
if errors:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
return self._async_get_entry(user_input)
|
import asyncio
from collections.abc import Iterable
import logging
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
ATTR_OPERATION_STATE = "operation_state"
MODE_TO_STATE = {
"auto": HVAC_MODE_HEAT_COOL,
"cool": HVAC_MODE_COOL,
"eco": HVAC_MODE_AUTO,
"rush hour": HVAC_MODE_AUTO,
"emergency heat": HVAC_MODE_HEAT,
"heat": HVAC_MODE_HEAT,
"off": HVAC_MODE_OFF,
}
STATE_TO_MODE = {
HVAC_MODE_HEAT_COOL: "auto",
HVAC_MODE_COOL: "cool",
HVAC_MODE_HEAT: "heat",
HVAC_MODE_OFF: "off",
}
OPERATING_STATE_TO_ACTION = {
"cooling": CURRENT_HVAC_COOL,
"fan only": CURRENT_HVAC_FAN,
"heating": CURRENT_HVAC_HEAT,
"idle": CURRENT_HVAC_IDLE,
"pending cool": CURRENT_HVAC_COOL,
"pending heat": CURRENT_HVAC_HEAT,
"vent economizer": CURRENT_HVAC_FAN,
}
AC_MODE_TO_STATE = {
"auto": HVAC_MODE_HEAT_COOL,
"cool": HVAC_MODE_COOL,
"dry": HVAC_MODE_DRY,
"coolClean": HVAC_MODE_COOL,
"dryClean": HVAC_MODE_DRY,
"heat": HVAC_MODE_HEAT,
"heatClean": HVAC_MODE_HEAT,
"fanOnly": HVAC_MODE_FAN_ONLY,
}
STATE_TO_AC_MODE = {
HVAC_MODE_HEAT_COOL: "auto",
HVAC_MODE_COOL: "cool",
HVAC_MODE_DRY: "dry",
HVAC_MODE_HEAT: "heat",
HVAC_MODE_FAN_ONLY: "fanOnly",
}
UNIT_MAP = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add climate entities for a config entry."""
ac_capabilities = [
Capability.air_conditioner_mode,
Capability.air_conditioner_fan_mode,
Capability.switch,
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
]
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
entities = []
for device in broker.devices.values():
if not broker.any_assigned(device.device_id, CLIMATE_DOMAIN):
continue
if all(capability in device.capabilities for capability in ac_capabilities):
entities.append(SmartThingsAirConditioner(device))
else:
entities.append(SmartThingsThermostat(device))
async_add_entities(entities, True)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
supported = [
Capability.air_conditioner_mode,
Capability.demand_response_load_control,
Capability.air_conditioner_fan_mode,
Capability.power_consumption_report,
Capability.relative_humidity_measurement,
Capability.switch,
Capability.temperature_measurement,
Capability.thermostat,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_fan_mode,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
Capability.thermostat_operating_state,
]
# Can have this legacy/deprecated capability
if Capability.thermostat in capabilities:
return supported
# Or must have all of these thermostat capabilities
thermostat_capabilities = [
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
]
if all(capability in capabilities for capability in thermostat_capabilities):
return supported
# Or must have all of these A/C capabilities
ac_capabilities = [
Capability.air_conditioner_mode,
Capability.air_conditioner_fan_mode,
Capability.switch,
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
]
if all(capability in capabilities for capability in ac_capabilities):
return supported
return None
class SmartThingsThermostat(SmartThingsEntity, ClimateEntity):
"""Define a SmartThings climate entities."""
def __init__(self, device):
"""Init the class."""
super().__init__(device)
self._supported_features = self._determine_features()
self._hvac_mode = None
self._hvac_modes = None
def _determine_features(self):
flags = SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE
if self._device.get_capability(
Capability.thermostat_fan_mode, Capability.thermostat
):
flags |= SUPPORT_FAN_MODE
return flags
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
await self._device.set_thermostat_fan_mode(fan_mode, set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
mode = STATE_TO_MODE[hvac_mode]
await self._device.set_thermostat_mode(mode, set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_set_temperature(self, **kwargs):
"""Set new operation mode and target temperatures."""
# Operation state
operation_state = kwargs.get(ATTR_HVAC_MODE)
if operation_state:
mode = STATE_TO_MODE[operation_state]
await self._device.set_thermostat_mode(mode, set_status=True)
await self.async_update()
# Heat/cool setpoint
heating_setpoint = None
cooling_setpoint = None
if self.hvac_mode == HVAC_MODE_HEAT:
heating_setpoint = kwargs.get(ATTR_TEMPERATURE)
elif self.hvac_mode == HVAC_MODE_COOL:
cooling_setpoint = kwargs.get(ATTR_TEMPERATURE)
else:
heating_setpoint = kwargs.get(ATTR_TARGET_TEMP_LOW)
cooling_setpoint = kwargs.get(ATTR_TARGET_TEMP_HIGH)
tasks = []
if heating_setpoint is not None:
tasks.append(
self._device.set_heating_setpoint(
round(heating_setpoint, 3), set_status=True
)
)
if cooling_setpoint is not None:
tasks.append(
self._device.set_cooling_setpoint(
round(cooling_setpoint, 3), set_status=True
)
)
await asyncio.gather(*tasks)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_update(self):
"""Update the attributes of the climate device."""
thermostat_mode = self._device.status.thermostat_mode
self._hvac_mode = MODE_TO_STATE.get(thermostat_mode)
if self._hvac_mode is None:
_LOGGER.debug(
"Device %s (%s) returned an invalid hvac mode: %s",
self._device.label,
self._device.device_id,
thermostat_mode,
)
modes = set()
supported_modes = self._device.status.supported_thermostat_modes
if isinstance(supported_modes, Iterable):
for mode in supported_modes:
state = MODE_TO_STATE.get(mode)
if state is not None:
modes.add(state)
else:
_LOGGER.debug(
"Device %s (%s) returned an invalid supported thermostat mode: %s",
self._device.label,
self._device.device_id,
mode,
)
else:
_LOGGER.debug(
"Device %s (%s) returned invalid supported thermostat modes: %s",
self._device.label,
self._device.device_id,
supported_modes,
)
self._hvac_modes = list(modes)
@property
def current_humidity(self):
"""Return the current humidity."""
return self._device.status.humidity
@property
def current_temperature(self):
"""Return the current temperature."""
return self._device.status.temperature
@property
def fan_mode(self):
"""Return the fan setting."""
return self._device.status.thermostat_fan_mode
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._device.status.supported_thermostat_fan_modes
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported."""
return OPERATING_STATE_TO_ACTION.get(
self._device.status.thermostat_operating_state
)
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._hvac_mode
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return self._hvac_modes
@property
def supported_features(self):
"""Return the supported features."""
return self._supported_features
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_COOL:
return self._device.status.cooling_setpoint
if self.hvac_mode == HVAC_MODE_HEAT:
return self._device.status.heating_setpoint
return None
@property
def target_temperature_high(self):
"""Return the highbound target temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return self._device.status.cooling_setpoint
return None
@property
def target_temperature_low(self):
"""Return the lowbound target temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return self._device.status.heating_setpoint
return None
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return UNIT_MAP.get(self._device.status.attributes[Attribute.temperature].unit)
class SmartThingsAirConditioner(SmartThingsEntity, ClimateEntity):
"""Define a SmartThings Air Conditioner."""
def __init__(self, device):
"""Init the class."""
super().__init__(device)
self._hvac_modes = None
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
await self._device.set_fan_mode(fan_mode, set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
if hvac_mode == HVAC_MODE_OFF:
await self.async_turn_off()
return
tasks = []
# Turn on the device if it's off before setting mode.
if not self._device.status.switch:
tasks.append(self._device.switch_on(set_status=True))
tasks.append(
self._device.set_air_conditioner_mode(
STATE_TO_AC_MODE[hvac_mode], set_status=True
)
)
await asyncio.gather(*tasks)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
tasks = []
# operation mode
operation_mode = kwargs.get(ATTR_HVAC_MODE)
if operation_mode:
if operation_mode == HVAC_MODE_OFF:
tasks.append(self._device.switch_off(set_status=True))
else:
if not self._device.status.switch:
tasks.append(self._device.switch_on(set_status=True))
tasks.append(self.async_set_hvac_mode(operation_mode))
# temperature
tasks.append(
self._device.set_cooling_setpoint(kwargs[ATTR_TEMPERATURE], set_status=True)
)
await asyncio.gather(*tasks)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_turn_on(self):
"""Turn device on."""
await self._device.switch_on(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_turn_off(self):
"""Turn device off."""
await self._device.switch_off(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_update(self):
"""Update the calculated fields of the AC."""
modes = {HVAC_MODE_OFF}
for mode in self._device.status.supported_ac_modes:
state = AC_MODE_TO_STATE.get(mode)
if state is not None:
modes.add(state)
else:
_LOGGER.debug(
"Device %s (%s) returned an invalid supported AC mode: %s",
self._device.label,
self._device.device_id,
mode,
)
self._hvac_modes = list(modes)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._device.status.temperature
@property
def device_state_attributes(self):
"""
        Return device-specific state attributes.
Include attributes from the Demand Response Load Control (drlc)
and Power Consumption capabilities.
"""
attributes = [
"drlc_status_duration",
"drlc_status_level",
"drlc_status_start",
"drlc_status_override",
"power_consumption_start",
"power_consumption_power",
"power_consumption_energy",
"power_consumption_end",
]
state_attributes = {}
for attribute in attributes:
value = getattr(self._device.status, attribute)
if value is not None:
state_attributes[attribute] = value
return state_attributes
@property
def fan_mode(self):
"""Return the fan setting."""
return self._device.status.fan_mode
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._device.status.supported_ac_fan_modes
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
if not self._device.status.switch:
return HVAC_MODE_OFF
return AC_MODE_TO_STATE.get(self._device.status.air_conditioner_mode)
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return self._hvac_modes
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._device.status.cooling_setpoint
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return UNIT_MAP.get(self._device.status.attributes[Attribute.temperature].unit)
|
import os
import pytest
from molecule import config
from molecule.verifier import goss
from molecule.verifier.lint import yamllint
@pytest.fixture
def _patched_ansible_verify(mocker):
m = mocker.patch('molecule.provisioner.ansible.Ansible.verify')
m.return_value = 'patched-ansible-verify-stdout'
return m
@pytest.fixture
def _patched_goss_get_tests(mocker):
m = mocker.patch('molecule.verifier.goss.Goss._get_tests')
m.return_value = [
'foo.py',
'bar.py',
]
return m
@pytest.fixture
def _verifier_section_data():
return {
'verifier': {
'name': 'goss',
'env': {
'FOO': 'bar',
},
'lint': {
'name': 'yamllint',
},
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(_verifier_section_data, patched_config_validate,
config_instance):
return goss.Goss(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
assert {} == _instance.default_options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_lint_property(_instance):
assert isinstance(_instance.lint, yamllint.Yamllint)
def test_name_property(_instance):
assert 'goss' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
def test_directory_property(_instance):
parts = _instance.directory.split(os.path.sep)
assert 'tests' == parts[-1]
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_options_property(_instance):
x = {}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {'debug': True}
x = {}
# Does nothing. The `goss` command does not support
# a `debug` flag.
assert x == _instance.options
def test_bake(_instance):
assert _instance.bake() is None
def test_execute(patched_logger_info, _patched_ansible_verify,
_patched_goss_get_tests, patched_logger_success, _instance):
_instance.execute()
_patched_ansible_verify.assert_called_once_with()
msg = 'Executing Goss tests found in {}/...'.format(_instance.directory)
patched_logger_info.assert_called_once_with(msg)
msg = 'Verifier completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_ansible_converge,
patched_logger_warn, _instance):
_instance._config.config['verifier']['enabled'] = False
_instance.execute()
assert not patched_ansible_converge.called
msg = 'Skipping, verifier is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_does_not_execute_without_tests(patched_ansible_converge,
patched_logger_warn, _instance):
_instance.execute()
assert not patched_ansible_converge.called
msg = 'Skipping, no tests found.'
patched_logger_warn.assert_called_once_with(msg)
def test_execute_bakes():
pass
|
import my_pypi_dependency
from homeassistant import config_entries
from homeassistant.helpers import config_entry_flow
from .const import DOMAIN
async def _async_has_devices(hass) -> bool:
"""Return if there are devices that can be discovered."""
# TODO Check if there are any devices that can be discovered in the network.
devices = await hass.async_add_executor_job(my_pypi_dependency.discover)
return len(devices) > 0
config_entry_flow.register_discovery_flow(
DOMAIN, "NEW_NAME", _async_has_devices, config_entries.CONN_CLASS_UNKNOWN
)
|
import base64
import io
import logging
import smart_open.bytebuffer
import smart_open.constants
import smart_open.utils
try:
import azure.storage.blob
import azure.core.exceptions
except ImportError:
MISSING_DEPS = True
logger = logging.getLogger(__name__)
_BINARY_TYPES = (bytes, bytearray, memoryview)
"""Allowed binary buffer types for writing to the underlying Azure Blob Storage stream"""
SCHEME = "azure"
"""Supported scheme for Azure Blob Storage in smart_open endpoint URL"""
_DEFAULT_MIN_PART_SIZE = 64 * 1024**2
"""Default minimum part size for Azure Cloud Storage multipart uploads is 64MB"""
DEFAULT_BUFFER_SIZE = 4 * 1024**2
"""Default buffer size for working with Azure Blob Storage is 4MB
https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs
"""
def parse_uri(uri_as_string):
sr = smart_open.utils.safe_urlsplit(uri_as_string)
assert sr.scheme == SCHEME
first = sr.netloc
second = sr.path.lstrip('/')
# https://docs.microsoft.com/en-us/rest/api/storageservices/working-with-the-root-container
if not second:
container_id = '$root'
blob_id = first
else:
container_id = first
blob_id = second
return dict(scheme=SCHEME, container_id=container_id, blob_id=blob_id)
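# Hedged illustration of the mapping above (example URIs, not an official spec):
#   parse_uri('azure://mycontainer/folder/blob.txt')
#     -> {'scheme': 'azure', 'container_id': 'mycontainer', 'blob_id': 'folder/blob.txt'}
#   parse_uri('azure://blob.txt')
#     -> {'scheme': 'azure', 'container_id': '$root', 'blob_id': 'blob.txt'}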
def open_uri(uri, mode, transport_params):
parsed_uri = parse_uri(uri)
kwargs = smart_open.utils.check_kwargs(open, transport_params)
return open(parsed_uri['container_id'], parsed_uri['blob_id'], mode, **kwargs)
def open(
container_id,
blob_id,
mode,
client=None, # type: azure.storage.blob.BlobServiceClient
buffer_size=DEFAULT_BUFFER_SIZE,
min_part_size=_DEFAULT_MIN_PART_SIZE
):
"""Open an Azure Blob Storage blob for reading or writing.
Parameters
----------
container_id: str
The name of the container this object resides in.
blob_id: str
The name of the blob within the bucket.
mode: str
The mode for opening the object. Must be either "rb" or "wb".
client: azure.storage.blob.BlobServiceClient
The Azure Blob Storage client to use when working with azure-storage-blob.
buffer_size: int, optional
The buffer size to use when performing I/O. For reading only.
min_part_size: int, optional
The minimum part size for multipart uploads. For writing only.
"""
if not client:
raise ValueError('you must specify the client to connect to Azure')
if mode == smart_open.constants.READ_BINARY:
return Reader(
container_id,
blob_id,
client,
buffer_size=buffer_size,
line_terminator=smart_open.constants.BINARY_NEWLINE,
)
elif mode == smart_open.constants.WRITE_BINARY:
return Writer(
container_id,
blob_id,
client,
min_part_size=min_part_size
)
else:
raise NotImplementedError('Azure Blob Storage support for mode %r not implemented' % mode)
class _RawReader(object):
"""Read an Azure Blob Storage file."""
def __init__(self, blob, size):
# type: (azure.storage.blob.BlobClient, int) -> None
self._blob = blob
self._size = size
self._position = 0
def seek(self, position):
"""Seek to the specified position (byte offset) in the Azure Blob Storage blob.
:param int position: The byte offset from the beginning of the blob.
Returns the position after seeking.
"""
self._position = position
return self._position
def read(self, size=-1):
if self._position >= self._size:
return b''
binary = self._download_blob_chunk(size)
self._position += len(binary)
return binary
def _download_blob_chunk(self, size):
if self._size == self._position:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
return b''
elif size == -1:
stream = self._blob.download_blob(offset=self._position)
else:
stream = self._blob.download_blob(offset=self._position, length=size)
if isinstance(stream, azure.storage.blob.StorageStreamDownloader):
binary = stream.readall()
else:
binary = stream.read()
return binary
class Reader(io.BufferedIOBase):
"""Reads bytes from Azure Blob Storage.
Implements the io.BufferedIOBase interface of the standard library.
:raises azure.core.exceptions.ResourceNotFoundError: Raised when the blob to read from does not exist.
"""
def __init__(
self,
container,
blob,
client, # type: azure.storage.blob.BlobServiceClient
buffer_size=DEFAULT_BUFFER_SIZE,
line_terminator=smart_open.constants.BINARY_NEWLINE,
):
self._container_client = client.get_container_client(container)
# type: azure.storage.blob.ContainerClient
self._blob = self._container_client.get_blob_client(blob)
if self._blob is None:
raise azure.core.exceptions.ResourceNotFoundError(
'blob %s not found in %s' % (blob, container)
)
try:
self._size = self._blob.get_blob_properties()['size']
except KeyError:
self._size = 0
self._raw_reader = _RawReader(self._blob, self._size)
self._position = 0
self._current_part = smart_open.bytebuffer.ByteBuffer(buffer_size)
self._line_terminator = line_terminator
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
#
# Override some methods from io.IOBase.
#
def close(self):
"""Flush and close this stream."""
logger.debug("close: called")
self._blob = None
self._raw_reader = None
def readable(self):
"""Return True if the stream can be read from."""
return True
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support."""
return True
#
# io.BufferedIOBase methods.
#
def detach(self):
"""Unsupported."""
raise io.UnsupportedOperation
def seek(self, offset, whence=smart_open.constants.WHENCE_START):
"""Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking."""
logger.debug('seeking to offset: %r whence: %r', offset, whence)
if whence not in smart_open.constants.WHENCE_CHOICES:
raise ValueError('invalid whence %i, expected one of %r' % (whence,
smart_open.constants.WHENCE_CHOICES))
if whence == smart_open.constants.WHENCE_START:
new_position = offset
elif whence == smart_open.constants.WHENCE_CURRENT:
new_position = self._position + offset
else:
new_position = self._size + offset
self._position = new_position
self._raw_reader.seek(new_position)
logger.debug('current_pos: %r', self._position)
self._current_part.empty()
return self._position
def tell(self):
"""Return the current position within the file."""
return self._position
def truncate(self, size=None):
"""Unsupported."""
raise io.UnsupportedOperation
def read(self, size=-1):
"""Read up to size bytes from the object and return them."""
if size == 0:
return b''
elif size < 0:
self._position = self._size
return self._read_from_buffer() + self._raw_reader.read()
#
# Return unused data first
#
if len(self._current_part) >= size:
return self._read_from_buffer(size)
if self._position == self._size:
return self._read_from_buffer()
self._fill_buffer()
return self._read_from_buffer(size)
def read1(self, size=-1):
"""This is the same as read()."""
return self.read(size=size)
def readinto(self, b):
"""Read up to len(b) bytes into b, and return the number of bytes read."""
data = self.read(len(b))
if not data:
return 0
b[:len(data)] = data
return len(data)
def readline(self, limit=-1):
"""Read up to and including the next newline. Returns the bytes read."""
if limit != -1:
raise NotImplementedError('limits other than -1 not implemented yet')
the_line = io.BytesIO()
while not (self._position == self._size and len(self._current_part) == 0):
#
# In the worst case, we're reading the unread part of self._current_part
# twice here, once in the if condition and once when calling index.
#
# This is sub-optimal, but better than the alternative: wrapping
# .index in a try..except, because that is slower.
#
remaining_buffer = self._current_part.peek()
if self._line_terminator in remaining_buffer:
next_newline = remaining_buffer.index(self._line_terminator)
the_line.write(self._read_from_buffer(next_newline + 1))
break
else:
the_line.write(self._read_from_buffer())
self._fill_buffer()
return the_line.getvalue()
#
# Internal methods.
#
def _read_from_buffer(self, size=-1):
"""Remove at most size bytes from our buffer and return them."""
# logger.debug('reading %r bytes from %r byte-long buffer', size, len(self._current_part))
size = size if size >= 0 else len(self._current_part)
part = self._current_part.read(size)
self._position += len(part)
# logger.debug('part: %r', part)
return part
def _fill_buffer(self, size=-1):
size = max(size, self._current_part._chunk_size)
while len(self._current_part) < size and not self._position == self._size:
bytes_read = self._current_part.fill(self._raw_reader)
if bytes_read == 0:
logger.debug('reached EOF while filling buffer')
return True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
    def __str__(self):
        return "(%s, %r, %r)" % (self.__class__.__name__,
                                 self._container_client.container_name,
                                 self._blob.blob_name)
def __repr__(self):
return "%s(container=%r, blob=%r)" % (
self.__class__.__name__, self._container_client.container_name, self._blob.blob_name,
)
class Writer(io.BufferedIOBase):
"""Writes bytes to Azure Blob Storage.
Implements the io.BufferedIOBase interface of the standard library."""
def __init__(
self,
container,
blob,
client, # type: azure.storage.blob.BlobServiceClient
min_part_size=_DEFAULT_MIN_PART_SIZE,
):
self._client = client
self._container_client = self._client.get_container_client(container)
# type: azure.storage.blob.ContainerClient
self._blob = self._container_client.get_blob_client(blob) # type: azure.storage.blob.BlobClient
self._min_part_size = min_part_size
self._total_size = 0
self._total_parts = 0
self._bytes_uploaded = 0
self._current_part = io.BytesIO()
self._block_list = []
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
def flush(self):
pass
#
# Override some methods from io.IOBase.
#
def close(self):
logger.debug("closing")
if not self.closed:
if self._current_part.tell() > 0:
self._upload_part()
self._blob.commit_block_list(self._block_list)
self._block_list = []
self._client = None
logger.debug("successfully closed")
@property
def closed(self):
return self._client is None
def writable(self):
"""Return True if the stream supports writing."""
return True
def tell(self):
"""Return the current stream position."""
return self._total_size
#
# io.BufferedIOBase methods.
#
def detach(self):
raise io.UnsupportedOperation("detach() not supported")
def write(self, b):
"""Write the given bytes (binary string) to the Azure Blob Storage file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away."""
if not isinstance(b, _BINARY_TYPES):
raise TypeError("input must be one of %r, got: %r" % (_BINARY_TYPES, type(b)))
self._current_part.write(b)
self._total_size += len(b)
if self._current_part.tell() >= self._min_part_size:
self._upload_part()
return len(b)
def _upload_part(self):
part_num = self._total_parts + 1
content_length = self._current_part.tell()
range_stop = self._bytes_uploaded + content_length - 1
""" # noqa: E501
block_id's must be base64 encoded, all the same length, and less than or equal to 64 bytes in size prior
to encoding.
https://docs.microsoft.com/en-us/python/api/azure-storage-blob/azure.storage.blob.blobclient?view=azure-python#stage-block-block-id--data--length-none----kwargs-
"""
zero_padded_part_num = str(part_num).zfill(64 // 2)
block_id = base64.b64encode(zero_padded_part_num.encode())
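        # e.g. part 1 -> '00000000000000000000000000000001' (32 chars) before
        # base64 encoding, so every block id has the same fixed length.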
self._current_part.seek(0)
self._blob.stage_block(block_id, self._current_part.read(content_length))
self._block_list.append(azure.storage.blob.BlobBlock(block_id=block_id))
logger.info(
"uploading part #%i, %i bytes (total %.3fGB)",
part_num, content_length, range_stop / 1024.0 ** 3,
)
self._total_parts += 1
self._bytes_uploaded += content_length
self._current_part = io.BytesIO(self._current_part.read())
self._current_part.seek(0, io.SEEK_END)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __str__(self):
return "(%s, %r, %r)" % (
self.__class__.__name__,
self._container_client.container_name,
self._blob.blob_name
)
def __repr__(self):
return "%s(container=%r, blob=%r, min_part_size=%r)" % (
self.__class__.__name__,
self._container_client.container_name,
self._blob.blob_name,
self._min_part_size
)
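# Hedged usage sketch, appended for illustration only (not part of the module):
# round-trip a small blob through Writer/Reader via the module-level open().
# The connection string and container name are placeholders you must supply,
# and the container is assumed to already exist.
if __name__ == '__main__':
    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string('<your-connection-string>')
    with open('mycontainer', 'hello.txt', 'wb', client=service) as fout:
        fout.write(b'hello azure\n')
    with open('mycontainer', 'hello.txt', 'rb', client=service) as fin:
        assert fin.read() == b'hello azure\n'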
|
import unittest
import re
import os.path
import codecs
from mock import MagicMock, call, patch
from uiautomator import AutomatorDevice, Selector
class TestDevice(unittest.TestCase):
def setUp(self):
self.device = AutomatorDevice()
self.device.server = MagicMock()
self.device.server.jsonrpc = MagicMock()
self.device.server.jsonrpc_wrap = MagicMock()
def test_info(self):
self.device.server.jsonrpc.deviceInfo = MagicMock()
self.device.server.jsonrpc.deviceInfo.return_value = {}
self.assertEqual(self.device.info, {})
self.device.server.jsonrpc.deviceInfo.assert_called_once_with()
def test_click(self):
self.device.server.jsonrpc.click = MagicMock()
self.device.server.jsonrpc.click.return_value = True
self.assertEqual(self.device.click(1, 2), True)
self.device.server.jsonrpc.click.assert_called_once_with(1, 2)
def test_swipe(self):
self.device.server.jsonrpc.swipe = MagicMock()
self.device.server.jsonrpc.swipe.return_value = True
self.assertEqual(self.device.swipe(1, 2, 3, 4, 100), True)
self.device.server.jsonrpc.swipe.assert_called_once_with(1, 2, 3, 4, 100)
def test_long_click(self):
self.device.server.jsonrpc.swipe = MagicMock()
self.device.server.jsonrpc.swipe.return_value = True
x, y = 100, 200
self.assertEqual(self.device.long_click(x, y), True)
self.device.server.jsonrpc.swipe.assert_called_once_with(x, y, x+1, y+1, 100)
def test_drag(self):
self.device.server.jsonrpc.drag = MagicMock()
self.device.server.jsonrpc.drag.return_value = True
self.assertEqual(self.device.drag(1, 2, 3, 4, 100), True)
self.device.server.jsonrpc.drag.assert_called_once_with(1, 2, 3, 4, 100)
def test_dump(self):
self.device.server.jsonrpc.dumpWindowHierarchy = MagicMock()
with codecs.open(os.path.join(os.path.dirname(__file__), "res", "layout.xml"), "r", encoding="utf8") as f:
xml = f.read()
self.device.server.jsonrpc.dumpWindowHierarchy.return_value = xml
self.assertEqual(self.device.dump("/tmp/test.xml"), xml)
self.device.server.jsonrpc.dumpWindowHierarchy.assert_called_once_with(True, None)
self.assertEqual(self.device.dump("/tmp/test.xml", False), xml)
raw_xml = "".join(re.split(r"\n[ ]*", xml))
self.device.server.jsonrpc.dumpWindowHierarchy.return_value = raw_xml
self.assertTrue("\n " in self.device.dump("/tmp/test.xml"))
def test_screenshot(self):
self.device.server.jsonrpc.takeScreenshot = MagicMock()
self.device.server.jsonrpc.takeScreenshot.return_value = "1.png"
self.device.server.adb.cmd = cmd = MagicMock()
self.device.server.screenshot = MagicMock()
self.device.server.screenshot.return_value = None
cmd.return_value.returncode = 0
self.assertEqual(self.device.screenshot("a.png", 1.0, 99), "a.png")
self.device.server.jsonrpc.takeScreenshot.assert_called_once_with("screenshot.png", 1.0, 99)
self.assertEqual(cmd.call_args_list, [call("pull", "1.png", "a.png"), call("shell", "rm", "1.png")])
self.device.server.jsonrpc.takeScreenshot.return_value = None
self.assertEqual(self.device.screenshot("a.png", 1.0, 100), None)
def test_freeze_rotation(self):
self.device.server.jsonrpc.freezeRotation = MagicMock()
self.device.freeze_rotation(True)
self.device.freeze_rotation(False)
self.assertEqual(self.device.server.jsonrpc.freezeRotation.call_args_list, [call(True), call(False)])
def test_orientation(self):
self.device.server.jsonrpc.deviceInfo = MagicMock()
orientation = {
0: "natural",
1: "left",
2: "upsidedown",
3: "right"
}
for i in range(4):
self.device.server.jsonrpc.deviceInfo.return_value = {"displayRotation": i}
self.assertEqual(self.device.orientation, orientation[i])
# set
orientations = [
(0, "natural", "n", 0),
(1, "left", "l", 90),
(2, "upsidedown", "u", 180),
(3, "right", "r", 270)
]
for values in orientations:
for value in values:
self.device.server.jsonrpc.setOrientation = MagicMock()
self.device.orientation = value
self.device.server.jsonrpc.setOrientation.assert_called_once_with(values[1])
with self.assertRaises(ValueError):
self.device.orientation = "invalid orientation"
def test_last_traversed_text(self):
self.device.server.jsonrpc.getLastTraversedText = MagicMock()
self.device.server.jsonrpc.getLastTraversedText.return_value = "abcdef"
self.assertEqual(self.device.last_traversed_text, "abcdef")
self.device.server.jsonrpc.getLastTraversedText.assert_called_once_with()
def test_clear_traversed_text(self):
self.device.server.jsonrpc.clearLastTraversedText = MagicMock()
self.device.clear_traversed_text()
self.device.server.jsonrpc.clearLastTraversedText.assert_called_once_with()
def test_open(self):
self.device.server.jsonrpc.openNotification = MagicMock()
self.device.open.notification()
self.device.server.jsonrpc.openNotification.assert_called_once_with()
self.device.server.jsonrpc.openQuickSettings = MagicMock()
self.device.open.quick_settings()
self.device.server.jsonrpc.openQuickSettings.assert_called_once_with()
def test_watchers(self):
names = ["a", "b", "c"]
self.device.server.jsonrpc.getWatchers = MagicMock()
self.device.server.jsonrpc.getWatchers.return_value = names
self.assertEqual(self.device.watchers, names)
self.device.server.jsonrpc.getWatchers.assert_called_once_with()
self.device.server.jsonrpc.hasAnyWatcherTriggered = MagicMock()
self.device.server.jsonrpc.hasAnyWatcherTriggered.return_value = True
self.assertEqual(self.device.watchers.triggered, True)
self.device.server.jsonrpc.hasAnyWatcherTriggered.assert_called_once_with()
self.device.server.jsonrpc.removeWatcher = MagicMock()
self.device.watchers.remove("a")
self.device.server.jsonrpc.removeWatcher.assert_called_once_with("a")
self.device.server.jsonrpc.removeWatcher = MagicMock()
self.device.watchers.remove()
self.assertEqual(self.device.server.jsonrpc.removeWatcher.call_args_list, [call(name) for name in names])
self.device.server.jsonrpc.resetWatcherTriggers = MagicMock()
self.device.watchers.reset()
self.device.server.jsonrpc.resetWatcherTriggers.assert_called_once_with()
self.device.server.jsonrpc.runWatchers = MagicMock()
self.device.watchers.run()
self.device.server.jsonrpc.runWatchers.assert_called_once_with()
def test_watcher(self):
self.device.server.jsonrpc.hasWatcherTriggered = MagicMock()
self.device.server.jsonrpc.hasWatcherTriggered.return_value = False
self.assertFalse(self.device.watcher("name").triggered)
self.device.server.jsonrpc.hasWatcherTriggered.assert_called_once_with("name")
self.device.server.jsonrpc.removeWatcher = MagicMock()
self.device.watcher("a").remove()
self.device.server.jsonrpc.removeWatcher.assert_called_once_with("a")
self.device.server.jsonrpc.registerClickUiObjectWatcher = MagicMock()
condition1 = {"text": "my text", "className": "android"}
condition2 = {"description": "my desc", "clickable": True}
target = {"className": "android.widget.Button", "text": "OK"}
self.device.watcher("watcher").when(**condition1).when(**condition2).click(**target)
self.device.server.jsonrpc.registerClickUiObjectWatcher.assert_called_once_with(
"watcher",
[Selector(**condition1), Selector(**condition2)],
Selector(**target)
)
self.device.server.jsonrpc.registerPressKeyskWatcher = MagicMock()
self.device.watcher("watcher2").when(**condition1).when(**condition2).press.back.home.power("menu")
self.device.server.jsonrpc.registerPressKeyskWatcher.assert_called_once_with(
"watcher2", [Selector(**condition1), Selector(**condition2)], ("back", "home", "power", "menu"))
def test_press(self):
key = ["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
self.device.server.jsonrpc.pressKey = MagicMock()
self.device.server.jsonrpc.pressKey.return_value = True
self.assertTrue(self.device.press.home())
self.device.server.jsonrpc.pressKey.return_value = False
self.assertFalse(self.device.press.back())
self.device.server.jsonrpc.pressKey.return_value = False
for k in key:
self.assertFalse(self.device.press(k))
self.assertEqual(self.device.server.jsonrpc.pressKey.call_args_list, [call("home"), call("back")] + [call(k) for k in key])
self.device.server.jsonrpc.pressKeyCode.return_value = True
self.assertTrue(self.device.press(1))
self.assertTrue(self.device.press(1, 2))
self.assertEqual(self.device.server.jsonrpc.pressKeyCode.call_args_list, [call(1), call(1, 2)])
def test_wakeup(self):
self.device.server.jsonrpc.wakeUp = MagicMock()
self.device.wakeup()
self.device.server.jsonrpc.wakeUp.assert_called_once_with()
self.device.server.jsonrpc.wakeUp = MagicMock()
self.device.screen.on()
self.device.server.jsonrpc.wakeUp.assert_called_once_with()
self.device.server.jsonrpc.wakeUp = MagicMock()
self.device.screen("on")
self.device.server.jsonrpc.wakeUp.assert_called_once_with()
def test_screen_status(self):
self.device.server.jsonrpc.deviceInfo = MagicMock()
self.device.server.jsonrpc.deviceInfo.return_value = {"screenOn": True}
self.assertTrue(self.device.screen == "on")
self.assertTrue(self.device.screen != "off")
self.device.server.jsonrpc.deviceInfo.return_value = {"screenOn": False}
self.assertTrue(self.device.screen == "off")
self.assertTrue(self.device.screen != "on")
def test_sleep(self):
self.device.server.jsonrpc.sleep = MagicMock()
self.device.sleep()
self.device.server.jsonrpc.sleep.assert_called_once_with()
self.device.server.jsonrpc.sleep = MagicMock()
self.device.screen.off()
self.device.server.jsonrpc.sleep.assert_called_once_with()
self.device.server.jsonrpc.sleep = MagicMock()
self.device.screen("off")
self.device.server.jsonrpc.sleep.assert_called_once_with()
def test_wait_idle(self):
self.device.server.jsonrpc_wrap.return_value.waitForIdle = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForIdle.return_value = True
self.assertTrue(self.device.wait.idle(timeout=10))
self.device.server.jsonrpc_wrap.return_value.waitForIdle.assert_called_once_with(10)
self.device.server.jsonrpc_wrap.return_value.waitForIdle = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForIdle.return_value = False
self.assertFalse(self.device.wait("idle", timeout=10))
self.device.server.jsonrpc_wrap.return_value.waitForIdle.assert_called_once_with(10)
def test_wait_update(self):
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.return_value = True
self.assertTrue(self.device.wait.update(timeout=10, package_name="android"))
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.assert_called_once_with("android", 10)
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.return_value = False
self.assertFalse(self.device.wait("update", timeout=100, package_name="android"))
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.assert_called_once_with("android", 100)
def test_get_info_attr(self):
info = {"test_a": 1, "test_b": "string", "displayWidth": 720, "displayHeight": 1024}
self.device.server.jsonrpc.deviceInfo = MagicMock()
self.device.server.jsonrpc.deviceInfo.return_value = info
for k in info:
self.assertEqual(getattr(self.device, k), info[k])
self.assertEqual(self.device.width, info["displayWidth"])
self.assertEqual(self.device.height, info["displayHeight"])
with self.assertRaises(AttributeError):
self.device.not_exists
def test_device_obj(self):
with patch("uiautomator.AutomatorDeviceObject") as AutomatorDeviceObject:
kwargs = {"text": "abc", "description": "description...", "clickable": True}
self.device(**kwargs)
AutomatorDeviceObject.assert_called_once_with(self.device, Selector(**kwargs))
with patch("uiautomator.AutomatorDeviceObject") as AutomatorDeviceObject:
AutomatorDeviceObject.return_value.exists = True
self.assertTrue(self.device.exists(clickable=True))
AutomatorDeviceObject.return_value.exists = False
self.assertFalse(self.device.exists(text="..."))
class TestDeviceWithSerial(unittest.TestCase):
def test_serial(self):
with patch('uiautomator.AutomatorServer') as AutomatorServer:
AutomatorDevice("abcdefhijklmn")
AutomatorServer.assert_called_once_with(serial="abcdefhijklmn", local_port=None, adb_server_host=None, adb_server_port=None)
|
from django.test import TestCase
from weblate.checks.consistency import PluralsCheck, SamePluralsCheck, TranslatedCheck
from weblate.checks.models import Check
from weblate.checks.tests.test_checks import MockUnit
from weblate.trans.models import Change
from weblate.trans.tests.test_views import ViewTestCase
class PluralsCheckTest(TestCase):
def setUp(self):
self.check = PluralsCheck()
def test_none(self):
self.assertFalse(
self.check.check_target(["string"], ["string"], MockUnit("plural_none"))
)
def test_empty(self):
self.assertFalse(
self.check.check_target(
["string", "plural"], ["", ""], MockUnit("plural_empty")
)
)
def test_hit(self):
self.assertTrue(
self.check.check_target(
["string", "plural"], ["string", ""], MockUnit("plural_partial_empty")
)
)
def test_good(self):
self.assertFalse(
self.check.check_target(
["string", "plural"],
["translation", "trplural"],
MockUnit("plural_good"),
)
)
class SamePluralsCheckTest(PluralsCheckTest):
def setUp(self):
self.check = SamePluralsCheck()
def test_hit(self):
self.assertTrue(
self.check.check_target(
["string", "plural"],
["string", "string"],
MockUnit("plural_partial_empty"),
)
)
class TranslatedCheckTest(ViewTestCase):
def setUp(self):
super().setUp()
self.check = TranslatedCheck()
def run_check(self):
unit = self.get_unit()
return self.check.check_target(
unit.get_source_plurals(), unit.get_target_plurals(), unit
)
def test_none(self):
self.assertFalse(self.run_check())
def test_translated(self):
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
self.assertFalse(self.run_check())
def test_untranslated(self):
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
self.edit_unit("Hello, world!\n", "")
self.assertTrue(self.run_check())
def test_source_change(self):
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
self.edit_unit("Hello, world!\n", "")
unit = self.get_unit()
unit.change_set.create(action=Change.ACTION_SOURCE_CHANGE)
self.assertFalse(self.run_check())
def test_get_description(self):
self.test_untranslated()
check = Check(unit=self.get_unit())
self.assertEqual(
self.check.get_description(check), 'Last translation was "Nazdar svete!\n".'
)
|
import codecs
import platform
import six
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
from colorama import init
from termcolor import colored
init(autoreset=True)
def is_supported():
"""Check whether operating system supports main symbols or not.
Returns
-------
boolean
Whether operating system supports main symbols or not
"""
os_arch = platform.system()
if os_arch != 'Windows':
return True
return False
def get_environment():
"""Get the environment in which halo is running
Returns
-------
str
Environment name
"""
try:
from IPython import get_ipython
except ImportError:
return 'terminal'
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole
return 'jupyter'
elif shell == 'TerminalInteractiveShell': # Terminal running IPython
return 'ipython'
else:
return 'terminal' # Other type (?)
except NameError:
return 'terminal'
def colored_frame(frame, color):
"""Color the frame with given color and returns.
Parameters
----------
frame : str
Frame to be colored
color : str
Color to be applied
Returns
-------
str
Colored frame
"""
return colored(frame, color, attrs=['bold'])
def is_text_type(text):
"""Check if given parameter is a string or not
Parameters
----------
text : *
Parameter to be checked for text type
Returns
-------
bool
Whether parameter is a string or not
"""
if isinstance(text, six.text_type) or isinstance(text, six.string_types):
return True
return False
def decode_utf_8_text(text):
"""Decode the text from utf-8 format
Parameters
----------
text : str
String to be decoded
Returns
-------
str
Decoded string
"""
try:
return codecs.decode(text, 'utf-8')
except (TypeError, ValueError):
return text
def encode_utf_8_text(text):
"""Encodes the text to utf-8 format
Parameters
----------
text : str
String to be encoded
Returns
-------
str
Encoded string
"""
try:
return codecs.encode(text, 'utf-8', 'ignore')
except (TypeError, ValueError):
return text
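# A minimal round-trip sketch for the two helpers above (the sample text is
# purely illustrative); both helpers fall back to returning their input
# unchanged when it cannot be converted:
def _utf_8_round_trip_example():
    encoded = encode_utf_8_text(u'spinner \u2714')  # text -> utf-8 bytes
    return decode_utf_8_text(encoded)  # bytes -> text again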
def get_terminal_columns():
"""Determine the amount of available columns in the terminal
Returns
-------
int
Terminal width
"""
terminal_size = get_terminal_size()
# If column size is 0 either we are not connected
# to a terminal or something else went wrong. Fallback to 80.
if terminal_size.columns == 0:
return 80
else:
return terminal_size.columns
|
import logging
from pyhap.const import CATEGORY_HUMIDIFIER
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
DOMAIN,
SERVICE_SET_HUMIDITY,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
PERCENTAGE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change_event
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_ACTIVE,
CHAR_CURRENT_HUMIDIFIER_DEHUMIDIFIER,
CHAR_CURRENT_HUMIDITY,
CHAR_DEHUMIDIFIER_THRESHOLD_HUMIDITY,
CHAR_HUMIDIFIER_THRESHOLD_HUMIDITY,
CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER,
CONF_LINKED_HUMIDITY_SENSOR,
PROP_MAX_VALUE,
PROP_MIN_STEP,
PROP_MIN_VALUE,
SERV_HUMIDIFIER_DEHUMIDIFIER,
)
_LOGGER = logging.getLogger(__name__)
HC_HUMIDIFIER = 1
HC_DEHUMIDIFIER = 2
HC_HASS_TO_HOMEKIT_DEVICE_CLASS = {
DEVICE_CLASS_HUMIDIFIER: HC_HUMIDIFIER,
DEVICE_CLASS_DEHUMIDIFIER: HC_DEHUMIDIFIER,
}
HC_HASS_TO_HOMEKIT_DEVICE_CLASS_NAME = {
DEVICE_CLASS_HUMIDIFIER: "Humidifier",
DEVICE_CLASS_DEHUMIDIFIER: "Dehumidifier",
}
HC_DEVICE_CLASS_TO_TARGET_CHAR = {
HC_HUMIDIFIER: CHAR_HUMIDIFIER_THRESHOLD_HUMIDITY,
HC_DEHUMIDIFIER: CHAR_DEHUMIDIFIER_THRESHOLD_HUMIDITY,
}
HC_STATE_INACTIVE = 0
HC_STATE_IDLE = 1
HC_STATE_HUMIDIFYING = 2
HC_STATE_DEHUMIDIFYING = 3
@TYPES.register("HumidifierDehumidifier")
class HumidifierDehumidifier(HomeAccessory):
"""Generate a HumidifierDehumidifier accessory for a humidifier."""
def __init__(self, *args):
"""Initialize a HumidifierDehumidifier accessory object."""
super().__init__(*args, category=CATEGORY_HUMIDIFIER)
self.chars = []
state = self.hass.states.get(self.entity_id)
device_class = state.attributes.get(ATTR_DEVICE_CLASS, DEVICE_CLASS_HUMIDIFIER)
self._hk_device_class = HC_HASS_TO_HOMEKIT_DEVICE_CLASS[device_class]
self._target_humidity_char_name = HC_DEVICE_CLASS_TO_TARGET_CHAR[
self._hk_device_class
]
self.chars.append(self._target_humidity_char_name)
serv_humidifier_dehumidifier = self.add_preload_service(
SERV_HUMIDIFIER_DEHUMIDIFIER, self.chars
)
# Current and target mode characteristics
self.char_current_humidifier_dehumidifier = (
serv_humidifier_dehumidifier.configure_char(
CHAR_CURRENT_HUMIDIFIER_DEHUMIDIFIER, value=0
)
)
self.char_target_humidifier_dehumidifier = (
serv_humidifier_dehumidifier.configure_char(
CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER,
value=self._hk_device_class,
valid_values={
HC_HASS_TO_HOMEKIT_DEVICE_CLASS_NAME[
device_class
]: self._hk_device_class
},
)
)
# Current and target humidity characteristics
self.char_current_humidity = serv_humidifier_dehumidifier.configure_char(
CHAR_CURRENT_HUMIDITY, value=0
)
max_humidity = state.attributes.get(ATTR_MAX_HUMIDITY, DEFAULT_MAX_HUMIDITY)
max_humidity = round(max_humidity)
max_humidity = min(max_humidity, 100)
min_humidity = state.attributes.get(ATTR_MIN_HUMIDITY, DEFAULT_MIN_HUMIDITY)
min_humidity = round(min_humidity)
min_humidity = max(min_humidity, 0)
self.char_target_humidity = serv_humidifier_dehumidifier.configure_char(
self._target_humidity_char_name,
value=45,
properties={
PROP_MIN_VALUE: min_humidity,
PROP_MAX_VALUE: max_humidity,
PROP_MIN_STEP: 1,
},
)
# Active/inactive characteristics
self.char_active = serv_humidifier_dehumidifier.configure_char(
CHAR_ACTIVE, value=False
)
self.async_update_state(state)
serv_humidifier_dehumidifier.setter_callback = self._set_chars
self.linked_humidity_sensor = self.config.get(CONF_LINKED_HUMIDITY_SENSOR)
if self.linked_humidity_sensor:
humidity_state = self.hass.states.get(self.linked_humidity_sensor)
if humidity_state:
self._async_update_current_humidity(humidity_state)
async def run_handler(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self.linked_humidity_sensor:
async_track_state_change_event(
self.hass,
[self.linked_humidity_sensor],
self.async_update_current_humidity_event,
)
await super().run_handler()
@callback
def async_update_current_humidity_event(self, event):
"""Handle state change event listener callback."""
self._async_update_current_humidity(event.data.get("new_state"))
@callback
def _async_update_current_humidity(self, new_state):
"""Handle linked humidity sensor state change to update HomeKit value."""
if new_state is None:
_LOGGER.error(
"%s: Unable to update from linked humidity sensor %s: the entity state is None",
self.entity_id,
self.linked_humidity_sensor,
)
return
try:
current_humidity = float(new_state.state)
if self.char_current_humidity.value != current_humidity:
_LOGGER.debug(
"%s: Linked humidity sensor %s changed to %d",
self.entity_id,
self.linked_humidity_sensor,
current_humidity,
)
self.char_current_humidity.set_value(current_humidity)
except ValueError as ex:
_LOGGER.error(
"%s: Unable to update from linked humidity sensor %s: %s",
self.entity_id,
self.linked_humidity_sensor,
ex,
)
def _set_chars(self, char_values):
_LOGGER.debug("HumidifierDehumidifier _set_chars: %s", char_values)
if CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER in char_values:
hk_value = char_values[CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER]
if self._hk_device_class != hk_value:
_LOGGER.error(
"%s is not supported", CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER
)
if CHAR_ACTIVE in char_values:
self.call_service(
DOMAIN,
SERVICE_TURN_ON if char_values[CHAR_ACTIVE] else SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: self.entity_id},
f"{CHAR_ACTIVE} to {char_values[CHAR_ACTIVE]}",
)
if self._target_humidity_char_name in char_values:
humidity = round(char_values[self._target_humidity_char_name])
self.call_service(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_ENTITY_ID: self.entity_id, ATTR_HUMIDITY: humidity},
f"{self._target_humidity_char_name} to "
f"{char_values[self._target_humidity_char_name]}{PERCENTAGE}",
)
@callback
def async_update_state(self, new_state):
"""Update state without rechecking the device features."""
is_active = new_state.state == STATE_ON
# Update active state
if self.char_active.value != is_active:
self.char_active.set_value(is_active)
# Set current state
if is_active:
if self._hk_device_class == HC_HUMIDIFIER:
current_state = HC_STATE_HUMIDIFYING
else:
current_state = HC_STATE_DEHUMIDIFYING
else:
current_state = HC_STATE_INACTIVE
if self.char_current_humidifier_dehumidifier.value != current_state:
self.char_current_humidifier_dehumidifier.set_value(current_state)
# Update target humidity
target_humidity = new_state.attributes.get(ATTR_HUMIDITY)
if isinstance(target_humidity, (int, float)):
if self.char_target_humidity.value != target_humidity:
self.char_target_humidity.set_value(target_humidity)
|
from homeassistant.components.group import (
DOMAIN,
GROUP_SCHEMA,
GroupIntegrationRegistry,
)
from homeassistant.config import GROUP_CONFIG_PATH
from homeassistant.const import SERVICE_RELOAD
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from . import EditKeyBasedConfigView
async def async_setup(hass):
"""Set up the Group config API."""
async def hook(action, config_key):
"""post_write_hook for Config View that reloads groups."""
await hass.services.async_call(DOMAIN, SERVICE_RELOAD)
hass.http.register_view(
EditKeyBasedConfigView(
"group",
"config",
GROUP_CONFIG_PATH,
cv.slug,
GROUP_SCHEMA,
post_write_hook=hook,
)
)
return True
@callback
def async_describe_on_off_states(
hass: HomeAssistantType, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
return
|
import numpy as np
import pytest
import tensornetwork as tn
from tensornetwork.block_sparse import (BlockSparseTensor, Index, BaseCharge,
U1Charge)
@pytest.mark.parametrize("num_charges", [1, 2])
def test_sparse_shape(num_charges):
np.random.seed(10)
dtype = np.float64
shape = [10, 11, 12, 13]
R = len(shape)
charges = [
BaseCharge(
np.random.randint(-5, 5, (shape[n], num_charges)),
charge_types=[U1Charge] * num_charges) for n in range(R)
]
  flows = list(np.full(R, fill_value=False, dtype=bool))
indices = [Index(charges[n], flows[n]) for n in range(R)]
a = BlockSparseTensor.random(indices=indices, dtype=dtype)
node = tn.Node(a, backend='symmetric')
for s1, s2 in zip(node.sparse_shape, indices):
assert s1 == s2
|
from typing import Dict
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_DISKS
from homeassistant.helpers.typing import HomeAssistantType
from . import SynologyDSMDeviceEntity, SynologyDSMEntity
from .const import (
DOMAIN,
SECURITY_BINARY_SENSORS,
STORAGE_DISK_BINARY_SENSORS,
SYNO_API,
UPGRADE_BINARY_SENSORS,
)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Synology NAS binary sensor."""
api = hass.data[DOMAIN][entry.unique_id][SYNO_API]
entities = [
SynoDSMSecurityBinarySensor(
api, sensor_type, SECURITY_BINARY_SENSORS[sensor_type]
)
for sensor_type in SECURITY_BINARY_SENSORS
]
entities += [
SynoDSMUpgradeBinarySensor(
api, sensor_type, UPGRADE_BINARY_SENSORS[sensor_type]
)
for sensor_type in UPGRADE_BINARY_SENSORS
]
# Handle all disks
if api.storage.disks_ids:
for disk in entry.data.get(CONF_DISKS, api.storage.disks_ids):
entities += [
SynoDSMStorageBinarySensor(
api, sensor_type, STORAGE_DISK_BINARY_SENSORS[sensor_type], disk
)
for sensor_type in STORAGE_DISK_BINARY_SENSORS
]
async_add_entities(entities)
class SynoDSMSecurityBinarySensor(SynologyDSMEntity, BinarySensorEntity):
"""Representation a Synology Security binary sensor."""
@property
def is_on(self) -> bool:
"""Return the state."""
return getattr(self._api.security, self.entity_type) != "safe"
@property
def available(self) -> bool:
"""Return True if entity is available."""
return bool(self._api.security)
@property
def device_state_attributes(self) -> Dict[str, str]:
"""Return security checks details."""
return self._api.security.status_by_check
class SynoDSMStorageBinarySensor(SynologyDSMDeviceEntity, BinarySensorEntity):
"""Representation a Synology Storage binary sensor."""
@property
def is_on(self) -> bool:
"""Return the state."""
return getattr(self._api.storage, self.entity_type)(self._device_id)
class SynoDSMUpgradeBinarySensor(SynologyDSMEntity, BinarySensorEntity):
"""Representation a Synology Upgrade binary sensor."""
@property
def is_on(self) -> bool:
"""Return the state."""
return getattr(self._api.upgrade, self.entity_type)
@property
def available(self) -> bool:
"""Return True if entity is available."""
return bool(self._api.upgrade)
|
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
        arranged. Must have the same elements as node_names, but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(n_node_names,)
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int64)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
raise ValueError('"group_boundaries" must have non-decreasing '
'values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
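# A minimal usage sketch (hypothetical node names): four nodes in two groups,
# separated by the default 10-degree "group_sep" at each boundary. The result
# is an array of angles in degrees, one per entry of node_names.
#
#     >>> names = ['a', 'b', 'c', 'd']
#     >>> circular_layout(names, names, group_boundaries=[0, 2])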
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolate connections around a single node when user left clicks a node.
On right click, resets all connections.
"""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of array | None
Two arrays with indices of connections for which the connections
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape (n_node_names,) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuple | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str | instance of matplotlib.colors.LinearSegmentedColormap
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : tuple, shape (2,)
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.figure.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | tuple, shape (3,)
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure handle.
axes : instance of matplotlib.projections.polar.PolarAxes
The subplot handle.
Notes
-----
    This code is based on a circle graph example by Nicolas P. Rougier.
By default, :func:`matplotlib.pyplot.savefig` does not take ``facecolor``
into account when saving, even if set when a figure is generated. This
can be addressed via, e.g.::
>>> fig.savefig(fname_fig, facecolor='black') # doctest:+SKIP
If ``facecolor`` is not set via :func:`matplotlib.pyplot.savefig`, the
figure labels, title, and legend may be cut off in the output figure.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
try:
spectral = plt.cm.spectral
except AttributeError:
spectral = plt.cm.Spectral
node_colors = [spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True)
axes.set_facecolor(facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
del con_abs
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int64)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
import os
import glob
import shutil
import stat
import sys
import tempfile
import time
import fnmatch
import errno
import string
import random
import subprocess
from os.path import exists, isdir, islink, basename, join
from six import string_types
from six.moves import range, input as raw_input
from logilab.common import STD_BLACKLIST, _handle_blacklist
from logilab.common.compat import str_to_bytes
from logilab.common.deprecation import deprecated
class tempdir(object):
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, exctype, value, traceback):
# rmtree in all cases
shutil.rmtree(self.path)
return traceback is None
class pushd(object):
def __init__(self, directory):
self.directory = directory
def __enter__(self):
self.cwd = os.getcwd()
os.chdir(self.directory)
return self.directory
def __exit__(self, exctype, value, traceback):
os.chdir(self.cwd)
def chown(path, login=None, group=None):
"""Same as `os.chown` function but accepting user login or group name as
argument. If login or group is omitted, it's left unchanged.
Note: you must own the file to chown it (or be root). Otherwise OSError is raised.
"""
if login is None:
uid = -1
else:
try:
uid = int(login)
except ValueError:
import pwd # Platforms: Unix
uid = pwd.getpwnam(login).pw_uid
if group is None:
gid = -1
else:
try:
gid = int(group)
except ValueError:
import grp
gid = grp.getgrnam(group).gr_gid
os.chown(path, uid, gid)
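# Hypothetical examples: names are resolved via the pwd/grp modules, while
# numeric ids (or strings of digits) are passed through unchanged:
#
#     >>> chown('/tmp/some-file', login='www-data')  # by user name
#     >>> chown('/tmp/some-file', group=1000)        # by numeric gid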
def mv(source, destination, _action=shutil.move):
"""A shell-like mv, supporting wildcards.
"""
sources = glob.glob(source)
if len(sources) > 1:
assert isdir(destination)
for filename in sources:
_action(filename, join(destination, basename(filename)))
else:
try:
source = sources[0]
except IndexError:
raise OSError('No file matching %s' % source)
if isdir(destination) and exists(destination):
destination = join(destination, basename(source))
try:
_action(source, destination)
except OSError as ex:
raise OSError('Unable to move %r to %r (%s)' % (
source, destination, ex))
def rm(*files):
"""A shell-like rm, supporting wildcards.
"""
for wfile in files:
for filename in glob.glob(wfile):
if islink(filename):
os.remove(filename)
elif isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
def cp(source, destination):
"""A shell-like cp, supporting wildcards.
"""
mv(source, destination, _action=shutil.copy)
def find(directory, exts, exclude=False, blacklist=STD_BLACKLIST):
"""Recursively find files ending with the given extensions from the directory.
:type directory: str
:param directory:
directory where the search should start
:type exts: basestring or list or tuple
:param exts:
      extension or list of extensions to search
    :type exclude: boolean
    :param exclude:
      if this argument is True, return files NOT ending with the given
extensions
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all matching files
"""
if isinstance(exts, string_types):
exts = (exts,)
if exclude:
def match(filename, exts):
for ext in exts:
if filename.endswith(ext):
return False
return True
else:
def match(filename, exts):
for ext in exts:
if filename.endswith(ext):
return True
return False
files = []
for dirpath, dirnames, filenames in os.walk(directory):
_handle_blacklist(blacklist, dirnames, filenames)
# don't append files if the directory is blacklisted
dirname = basename(dirpath)
if dirname in blacklist:
continue
files.extend([join(dirpath, f) for f in filenames if match(f, exts)])
return files
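# Illustrative call (the path is hypothetical): collect all Python sources
# under a tree, skipping blacklisted directories such as CVS or .svn:
#
#     >>> find('/path/to/project', '.py')
#
# With exclude=True the same call instead returns every file that does NOT
# end with the given extensions.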
def globfind(directory, pattern, blacklist=STD_BLACKLIST):
"""Recursively finds files matching glob `pattern` under `directory`.
This is an alternative to `logilab.common.shellutils.find`.
:type directory: str
:param directory:
directory where the search should start
:type pattern: basestring
:param pattern:
      the glob pattern (e.g. *.py, foo*.py, etc.)
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: iterator
:return:
iterator over the list of all matching files
"""
for curdir, dirnames, filenames in os.walk(directory):
_handle_blacklist(blacklist, dirnames, filenames)
for fname in fnmatch.filter(filenames, pattern):
yield join(curdir, fname)
def unzip(archive, destdir):
import zipfile
if not exists(destdir):
os.mkdir(destdir)
zfobj = zipfile.ZipFile(archive)
for name in zfobj.namelist():
if name.endswith('/'):
os.mkdir(join(destdir, name))
else:
outfile = open(join(destdir, name), 'wb')
outfile.write(zfobj.read(name))
outfile.close()
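# Note: for trusted archives the standard library can do the same in one
# call; a minimal equivalent sketch (archive and destdir as above):
#
#     >>> import zipfile
#     >>> zipfile.ZipFile(archive).extractall(destdir)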
class Execute:
"""This is a deadlock safe version of popen2 (no stdin), that returns
an object with errorlevel, out and err.
"""
def __init__(self, command):
cmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.out, self.err = cmd.communicate()
self.status = os.WEXITSTATUS(cmd.returncode)
Execute = deprecated('Use subprocess.Popen instead')(Execute)
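# A rough modern equivalent of the deprecated class above, per its own
# deprecation message (a sketch, not a drop-in replacement; note that
# Popen.returncode is already the exit status, so no WEXITSTATUS call
# is needed):
def _execute_with_subprocess(command):
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out, err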
class ProgressBar(object):
"""A simple text progression bar."""
def __init__(self, nbops, size=20, stream=sys.stdout, title=''):
if title:
self._fstr = '\r%s [%%-%ss]' % (title, int(size))
else:
self._fstr = '\r[%%-%ss]' % int(size)
self._stream = stream
self._total = nbops
self._size = size
self._current = 0
self._progress = 0
self._current_text = None
self._last_text_write_size = 0
def _get_text(self):
return self._current_text
def _set_text(self, text=None):
if text != self._current_text:
self._current_text = text
self.refresh()
def _del_text(self):
self.text = None
text = property(_get_text, _set_text, _del_text)
def update(self, offset=1, exact=False):
"""Move FORWARD to new cursor position (cursor will never go backward).
:offset: fraction of ``size``
:exact:
          - False: offset is relative to the current cursor position
          - True: offset is an absolute position
"""
if exact:
self._current = offset
else:
self._current += offset
progress = int((float(self._current)/float(self._total))*self._size)
if progress > self._progress:
self._progress = progress
self.refresh()
def refresh(self):
"""Refresh the progression bar display."""
self._stream.write(self._fstr % ('=' * min(self._progress, self._size)) )
if self._last_text_write_size or self._current_text:
template = ' %%-%is' % (self._last_text_write_size)
text = self._current_text
if text is None:
text = ''
self._stream.write(template % text)
self._last_text_write_size = len(text.rstrip())
self._stream.flush()
def finish(self):
self._stream.write('\n')
self._stream.flush()
class DummyProgressBar(object):
__slots__ = ('text',)
def refresh(self):
pass
def update(self):
pass
def finish(self):
pass
_MARKER = object()
class progress(object):
def __init__(self, nbops=_MARKER, size=_MARKER, stream=_MARKER, title=_MARKER, enabled=True):
self.nbops = nbops
self.size = size
self.stream = stream
self.title = title
self.enabled = enabled
def __enter__(self):
if self.enabled:
kwargs = {}
for attr in ('nbops', 'size', 'stream', 'title'):
value = getattr(self, attr)
if value is not _MARKER:
kwargs[attr] = value
self.pb = ProgressBar(**kwargs)
else:
self.pb = DummyProgressBar()
return self.pb
def __exit__(self, exc_type, exc_val, exc_tb):
self.pb.finish()
class RawInput(object):
def __init__(self, input=None, printer=None):
self._input = input or raw_input
self._print = printer
def ask(self, question, options, default):
assert default in options
choices = []
for option in options:
if option == default:
label = option[0].upper()
else:
label = option[0].lower()
if len(option) > 1:
label += '(%s)' % option[1:].lower()
choices.append((option, label))
prompt = "%s [%s]: " % (question,
'/'.join([opt[1] for opt in choices]))
tries = 3
while tries > 0:
answer = self._input(prompt).strip().lower()
if not answer:
return default
possible = [option for option, label in choices
if option.lower().startswith(answer)]
if len(possible) == 1:
return possible[0]
elif len(possible) == 0:
msg = '%s is not an option.' % answer
else:
msg = ('%s is an ambiguous answer, do you mean %s ?' % (
answer, ' or '.join(possible)))
if self._print:
self._print(msg)
else:
print(msg)
tries -= 1
raise Exception('unable to get a sensible answer')
def confirm(self, question, default_is_yes=True):
        default = 'y' if default_is_yes else 'n'
answer = self.ask(question, ('y', 'n'), default)
return answer == 'y'
ASK = RawInput()
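# Interactive usage sketch (question and options are hypothetical); the
# default option is capitalized in the prompt and chosen on empty input:
#
#     >>> ASK.ask('Overwrite file', ('yes', 'no', 'abort'), 'no')
#     # prompts: "Overwrite file [y(es)/N(o)/a(bort)]: "
#     >>> ASK.confirm('Continue')
#     # asks a y/n question and returns a bool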
def getlogin():
"""avoid using os.getlogin() because of strange tty / stdin problems
(man 3 getlogin)
Another solution would be to use $LOGNAME, $USER or $USERNAME
"""
if sys.platform != 'win32':
import pwd # Platforms: Unix
return pwd.getpwuid(os.getuid())[0]
else:
return os.environ['USERNAME']
def generate_password(length=8, vocab=string.ascii_letters + string.digits):
"""dumb password generation function"""
pwd = ''
for i in range(length):
pwd += random.choice(vocab)
return pwd
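# For anything security-sensitive, a hedged alternative sketch using the
# cryptographic RNG from the standard library (Python 3.6+) instead of the
# module-level ``random``:
def _generate_password_secure(length=8,
                              vocab=string.ascii_letters + string.digits):
    import secrets
    return ''.join(secrets.choice(vocab) for _ in range(length))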
|
import sys
import argparse
import openrazer.client
import openrazer.client.constants as c
def ripple_single_type() -> callable:
"""
    Creates a stateful callable that parses its first three arguments as int
    (R, G, B) and any later argument as float (the refresh rate)
:return: Function
:rtype: callable
"""
count = 0
def parse(arg_value):
nonlocal count
if count < 3:
count += 1
try:
return int(arg_value)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not an integer".format(arg_value))
try:
return float(arg_value)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not a float".format(arg_value))
return parse
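# The returned parser is stateful across calls, which matches how argparse
# applies a type converter to each of the nargs=4 values in turn; a sketch:
#
#     >>> parse = ripple_single_type()
#     >>> [parse(v) for v in ('255', '0', '128', '0.05')]
#     [255, 0, 128, 0.05]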
parser = argparse.ArgumentParser()
action = parser.add_mutually_exclusive_group(required=True)
action.add_argument('--breath-random', action='store_true')
action.add_argument('--breath-single', nargs=3, metavar=('R', 'G', 'B'), type=int)
action.add_argument('--breath-dual', nargs=6, metavar=('R1', 'G1', 'B1', 'R2', 'G2', 'B2'), type=int)
action.add_argument('--reactive', nargs=4, metavar=('TIME', 'R', 'G', 'B'), type=int)
action.add_argument('--spectrum', action='store_true')
action.add_argument('--static', nargs=3, metavar=('R', 'G', 'B'), type=int)
action.add_argument('--wave', metavar='DIRECTION', choices=('LEFT', 'RIGHT'), type=str)
action.add_argument('--ripple-single', nargs=4, metavar='R G B REFRESH_RATE', type=ripple_single_type())
action.add_argument('--ripple-random', metavar='REFRESH_RATE', type=float)
args = parser.parse_args()
device_manager = openrazer.client.DeviceManager()
keyboard = None
for device in device_manager.devices:
if device.type == 'keyboard':
keyboard = device
break
else:
print("Could not find suitable keyboard", file=sys.stderr)
sys.exit(1)
if args.breath_random:
if keyboard.fx.has("breath_random"):
keyboard.fx.breath_random()
else:
print("Keyboard doesn't support random breath mode", file=sys.stderr)
sys.exit(1)
elif args.breath_single is not None:
r, g, b = args.breath_single
assert 0 <= r <= 255, "Red component must be between 0-255 inclusive"
assert 0 <= g <= 255, "Green component must be between 0-255 inclusive"
assert 0 <= b <= 255, "Blue component must be between 0-255 inclusive"
if keyboard.fx.has("breath_single"):
keyboard.fx.breath_single(r, g, b)
else:
print("Keyboard doesn't support single breath mode", file=sys.stderr)
sys.exit(1)
elif args.breath_dual is not None:
r1, g1, b1, r2, g2, b2 = args.breath_dual
assert 0 <= r1 <= 255, "Red component must be between 0-255 inclusive"
assert 0 <= g1 <= 255, "Green component must be between 0-255 inclusive"
assert 0 <= b1 <= 255, "Blue component must be between 0-255 inclusive"
assert 0 <= r2 <= 255, "Red component must be between 0-255 inclusive"
assert 0 <= g2 <= 255, "Green component must be between 0-255 inclusive"
assert 0 <= b2 <= 255, "Blue component must be between 0-255 inclusive"
if keyboard.fx.has("breath_dual"):
keyboard.fx.breath_dual(r1, g1, b1, r2, g2, b2)
else:
print("Keyboard doesn't support dual breath mode", file=sys.stderr)
sys.exit(1)
elif args.reactive is not None:
t, r, g, b = args.reactive
assert t in (c.REACTIVE_500MS, c.REACTIVE_1000MS, c.REACTIVE_1500MS, c.REACTIVE_2000MS)
assert 0 <= r <= 255, "Red component must be between 0-255 inclusive"
assert 0 <= g <= 255, "Green component must be between 0-255 inclusive"
assert 0 <= b <= 255, "Blue component must be between 0-255 inclusive"
if keyboard.fx.has("reactive"):
keyboard.fx.reactive(t, r, g, b)
else:
print("Keyboard doesn't support reactive mode", file=sys.stderr)
sys.exit(1)
elif args.spectrum:
if keyboard.fx.has("spectrum"):
keyboard.fx.spectrum()
else:
print("Keyboard doesn't support spectrum mode", file=sys.stderr)
sys.exit(1)
elif args.static is not None:
r, g, b = args.static
assert 0 <= r <= 255, "Red component must be between 0-255 inclusive"
assert 0 <= g <= 255, "Green component must be between 0-255 inclusive"
assert 0 <= b <= 255, "Blue component must be between 0-255 inclusive"
if keyboard.fx.has("static"):
keyboard.fx.static(r, g, b)
else:
print("Keyboard doesn't support static mode", file=sys.stderr)
sys.exit(1)
elif args.wave is not None:
direction = args.wave
if direction == 'LEFT':
direction = c.WAVE_LEFT
else:
direction = c.WAVE_RIGHT
if keyboard.fx.has("wave"):
keyboard.fx.wave(direction)
else:
print("Keyboard doesn't support wave mode", file=sys.stderr)
sys.exit(1)
elif args.ripple_single is not None:
r, g, b, refresh_rate = args.ripple_single
assert 0 <= r <= 255, "Red component must be between 0-255 inclusive"
assert 0 <= g <= 255, "Green component must be between 0-255 inclusive"
assert 0 <= b <= 255, "Blue component must be between 0-255 inclusive"
    assert refresh_rate > 0, "Refresh rate must be positive"
if keyboard.fx.has("ripple"):
keyboard.fx.ripple(r, g, b, refresh_rate)
else:
print("Keyboard doesn't support static mode", file=sys.stderr)
sys.exit(1)
elif args.ripple_random is not None:
refresh_rate = args.ripple_random
    assert refresh_rate > 0, "Refresh rate must be positive"
if keyboard.fx.has("ripple"):
keyboard.fx.ripple_random(refresh_rate)
else:
print("Keyboard doesn't support static mode", file=sys.stderr)
sys.exit(1)
else:
# Logically impossible to reach here
print("Unknown option", file=sys.stderr)
sys.exit(1)
|
import cherrypy
from cherrypy.test import helper
class WSGI_VirtualHost_Test(helper.CPWebCase):
@staticmethod
def setup_server():
class ClassOfRoot(object):
def __init__(self, name):
self.name = name
@cherrypy.expose
def index(self):
return 'Welcome to the %s website!' % self.name
default = cherrypy.Application(None)
domains = {}
for year in range(1997, 2008):
app = cherrypy.Application(ClassOfRoot('Class of %s' % year))
domains['www.classof%s.example' % year] = app
cherrypy.tree.graft(cherrypy._cpwsgi.VirtualHost(default, domains))
def test_welcome(self):
if not cherrypy.server.using_wsgi:
return self.skip('skipped (not using WSGI)... ')
for year in range(1997, 2008):
self.getPage(
'/', headers=[('Host', 'www.classof%s.example' % year)])
self.assertBody('Welcome to the Class of %s website!' % year)
|
import posixpath
from absl import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import google_cloud_sdk
flags.DEFINE_string('nccl_version', '2.7.8-1',
'NCCL version to install. '
'Input "None" to bypass installation.')
flags.DEFINE_string('nccl_net_plugin', None, 'NCCL network plugin path')
flags.DEFINE_string('nccl_mpi', '/usr/bin/mpirun', 'MPI binary path')
flags.DEFINE_string('nccl_mpi_home', '/usr/lib/x86_64-linux-gnu/openmpi',
'MPI home')
flags.DEFINE_string('nccl_home', '$HOME/nccl/build', 'NCCL home')
FLAGS = flags.FLAGS
GIT_REPO = 'https://github.com/NVIDIA/nccl.git'
def _Build(vm):
"""Installs the NCCL package on the VM."""
vm.RemoteCommand('[ -d "nccl" ] || git clone {git_repo} --branch v{version}'
.format(git_repo=GIT_REPO, version=FLAGS.nccl_version))
cuda_home = cuda_toolkit.CUDA_HOME
vm.InstallPackages('build-essential devscripts debhelper fakeroot')
env_vars = {}
env_vars['PATH'] = (r'{cuda_bin_path}:$PATH'
.format(cuda_bin_path=posixpath.join(cuda_home, 'bin')))
env_vars['CUDA_HOME'] = (r'{cuda_home}'.format(cuda_home=cuda_home))
env_vars['LD_LIBRARY_PATH'] = (r'{lib_path}:$LD_LIBRARY_PATH'
.format(lib_path=posixpath.join(
cuda_home, 'lib64')))
vm.RemoteCommand('cd nccl && {env} make -j 20 pkg.debian.build'
.format(env=vm_util.DictionaryToEnvString(env_vars)))
def AptInstall(vm):
"""Installs the NCCL package on the VM."""
if FLAGS.nccl_version == 'None':
return
vm.Install('cuda_toolkit')
_Build(vm)
vm.InstallPackages('{build}libnccl2_{nccl}+cuda{cuda}_amd64.deb '
'{build}libnccl-dev_{nccl}+cuda{cuda}_amd64.deb'
.format(
build='./nccl/build/pkg/deb/',
nccl=FLAGS.nccl_version,
cuda=FLAGS.cuda_toolkit_version))
if FLAGS.nccl_net_plugin:
vm.Install('google_cloud_sdk')
vm.RemoteCommand('sudo {gsutil_path} cp {nccl_net_plugin_path} '
'/usr/lib/x86_64-linux-gnu/libnccl-net.so'.format(
gsutil_path=google_cloud_sdk.GSUTIL_PATH,
nccl_net_plugin_path=FLAGS.nccl_net_plugin))
else:
vm.RemoteCommand('sudo rm -rf /usr/lib/x86_64-linux-gnu/libnccl-net.so')
vm.RemoteCommand('sudo rm -rf /usr/local/nccl2') # Preexisting NCCL in DLVM
vm.RemoteCommand('sudo ldconfig') # Refresh LD cache
if FLAGS.aws_efa:
vm.InstallPackages('libudev-dev libtool autoconf')
vm.RemoteCommand('git clone https://github.com/aws/aws-ofi-nccl.git -b aws')
vm.RemoteCommand('cd aws-ofi-nccl && ./autogen.sh && ./configure '
'--with-mpi={mpi} '
'--with-libfabric=/opt/amazon/efa '
'--with-nccl={nccl} '
'--with-cuda={cuda} && sudo make && '
'sudo make install'.format(
mpi=FLAGS.nccl_mpi_home,
nccl=FLAGS.nccl_home,
cuda='/usr/local/cuda-{}'.format(
FLAGS.cuda_toolkit_version)))
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.signal import hilbert
from mne.connectivity import envelope_correlation
def _compute_corrs_orig(data):
# This is the version of the code by Sheraz and Denis.
# For this version (epochs, labels, time) must be -> (labels, time, epochs)
n_epochs, n_labels, _ = data.shape
corr = np.zeros((n_labels, n_labels))
for epoch_data in data:
for ii in range(n_labels):
for jj in range(n_labels):
# Get timeseries for each pair
x, y = epoch_data[ii], epoch_data[jj]
x_mag = np.abs(x)
x_conj_scaled = x.conj()
x_conj_scaled /= x_mag
# Calculate orthogonalization
y_orth_x = (y * x_conj_scaled).imag
y_orth_x_mag = np.abs(y_orth_x)
# Estimate correlation
corr[ii, jj] += np.abs(np.corrcoef(x_mag, y_orth_x_mag)[0, 1])
corr = (corr + corr.T) / (2. * n_epochs)
corr.flat[::n_labels + 1] = 0.
return corr
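# In symbols: for analytic signals x and y, the orthogonalized component is
# y_orth_x = Im(y * conj(x) / |x|), the part of y whose phase is orthogonal
# to x; the statistic is |corr(|x|, |y_orth_x|)|, accumulated over epochs,
# averaged, symmetrized, and zeroed on the diagonal. A shape sanity sketch:
#
#     >>> rng = np.random.RandomState(0)
#     >>> _compute_corrs_orig(hilbert(rng.randn(2, 4, 64), axis=-1)).shape
#     (4, 4)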
def test_envelope_correlation():
"""Test the envelope correlation function."""
rng = np.random.RandomState(0)
data = rng.randn(2, 4, 64)
data_hilbert = hilbert(data, axis=-1)
corr_orig = _compute_corrs_orig(data_hilbert)
assert (0 <= corr_orig).all()
assert (corr_orig < 1).all()
# using complex data
corr = envelope_correlation(data_hilbert)
assert_allclose(corr, corr_orig)
# using callable
corr = envelope_correlation(data_hilbert,
combine=lambda data: np.mean(data, axis=0))
assert_allclose(corr, corr_orig)
# do Hilbert internally, and don't combine
corr = envelope_correlation(data, combine=None)
assert corr.shape == (data.shape[0],) + corr_orig.shape
corr = np.mean(corr, axis=0)
assert_allclose(corr, corr_orig)
# degenerate
with pytest.raises(ValueError, match='float'):
envelope_correlation(data.astype(int))
with pytest.raises(ValueError, match='entry in data must be 2D'):
envelope_correlation(data[np.newaxis])
with pytest.raises(ValueError, match='n_nodes mismatch'):
envelope_correlation([rng.randn(2, 8), rng.randn(3, 8)])
with pytest.raises(ValueError, match='mean or callable'):
envelope_correlation(data, 1.)
with pytest.raises(ValueError, match='Combine option'):
envelope_correlation(data, 'foo')
with pytest.raises(ValueError, match='Invalid value.*orthogonalize.*'):
envelope_correlation(data, orthogonalize='foo')
corr_plain = envelope_correlation(data, combine=None, orthogonalize=False)
assert corr_plain.shape == (data.shape[0],) + corr_orig.shape
assert np.min(corr_plain) < 0
corr_plain_mean = np.mean(corr_plain, axis=0)
assert_allclose(np.diag(corr_plain_mean), 1)
np_corr = np.array([np.corrcoef(np.abs(x)) for x in data_hilbert])
assert_allclose(corr_plain, np_corr)
# check against FieldTrip, which uses the square-log-norm version
# from scipy.io import savemat
# savemat('data.mat', dict(data_hilbert=data_hilbert))
# matlab
# load data
# ft_connectivity_powcorr_ortho(reshape(data_hilbert(1,:,:), [4, 64]))
# ft_connectivity_powcorr_ortho(reshape(data_hilbert(2,:,:), [4, 64]))
ft_vals = np.array([
[[np.nan, 0.196734553900236, 0.063173148355451, -0.242638384630448],
[0.196734553900236, np.nan, 0.041799775495150, -0.088205187548542],
[0.063173148355451, 0.041799775495150, np.nan, 0.090331428512317],
[-0.242638384630448, -0.088205187548542, 0.090331428512317, np.nan]],
[[np.nan, -0.013270857462890, 0.185200598081295, 0.140284351572544],
[-0.013270857462890, np.nan, 0.150981508043722, -0.000671809276372],
[0.185200598081295, 0.150981508043722, np.nan, 0.137460244313337],
[0.140284351572544, -0.000671809276372, 0.137460244313337, np.nan]],
], float)
ft_vals[np.isnan(ft_vals)] = 0
corr_log = envelope_correlation(
data, combine=None, log=True, absolute=False)
assert_allclose(corr_log, ft_vals)
|
from homeassistant.components.accuweather.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import STATE_UNAVAILABLE
from tests.async_mock import patch
from tests.common import MockConfigEntry
from tests.components.accuweather import init_integration
async def test_async_setup_entry(hass):
"""Test a successful setup entry."""
await init_integration(hass)
state = hass.states.get("weather.home")
assert state is not None
assert state.state != STATE_UNAVAILABLE
assert state.state == "sunny"
async def test_config_not_ready(hass):
"""Test for setup failure if connection to AccuWeather is missing."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Home",
unique_id="0123456",
data={
"api_key": "32-character-string-1234567890qw",
"latitude": 55.55,
"longitude": 122.12,
"name": "Home",
},
)
with patch(
"homeassistant.components.accuweather.AccuWeather._async_get_data",
side_effect=ConnectionError(),
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass):
"""Test successful unload of entry."""
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
|
import functools
import os
import tempfile
from absl import flags
from perfkitbenchmarker import version
_PERFKITBENCHMARKER = 'perfkitbenchmarker'
_RUNS = 'runs'
_VERSIONS = 'versions'
_TEMP_DIR = os.path.join(tempfile.gettempdir(), _PERFKITBENCHMARKER)
flags.DEFINE_string('temp_dir', _TEMP_DIR, 'Temp directory PKB uses.')
FLAGS = flags.FLAGS
def GetAllRunsDirPath():
"""Gets path to the directory containing the states of all PKB runs."""
return os.path.join(FLAGS.temp_dir, _RUNS)
# Caching this will have the effect that even if the
# run_uri changes, the temp dir will stay the same.
@functools.lru_cache()
def GetRunDirPath():
"""Gets path to the directory containing files specific to a PKB run."""
return os.path.join(
FLAGS.temp_dir, _RUNS, str(flags.FLAGS.run_uri))
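# functools.lru_cache also exposes GetRunDirPath.cache_clear(), which tests
# can call if they change run_uri and need the path recomputed.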
def GetSshConnectionsDir():
"""Returns the directory for SSH ControlPaths (for connection reuse)."""
return os.path.join(GetRunDirPath(), 'ssh')
def GetVersionDirPath(version=version.VERSION):
"""Gets path to the directory containing files specific to a PKB version."""
return os.path.join(FLAGS.temp_dir, _VERSIONS, version)
def CreateTemporaryDirectories():
"""Creates the temporary sub-directories needed by the current run."""
for path in (GetRunDirPath(), GetVersionDirPath(), GetSshConnectionsDir()):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
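if __name__ == '__main__':
  # Minimal usage sketch, not part of PKB itself. It assumes absl flags are
  # parsed first; --run_uri is registered elsewhere in PKB, so only the
  # run-independent helpers are exercised here.
  FLAGS(['pkb'])
  print(GetAllRunsDirPath())
  print(GetVersionDirPath())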
|
import urllib
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.doorbird import CONF_CUSTOM_URL, CONF_TOKEN
from homeassistant.components.doorbird.const import CONF_EVENTS, DOMAIN
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry, init_recorder_component
VALID_CONFIG = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "friend",
CONF_PASSWORD: "password",
CONF_NAME: "mydoorbird",
}
def _get_mock_doorbirdapi_return_values(ready=None, info=None):
doorbirdapi_mock = MagicMock()
type(doorbirdapi_mock).ready = MagicMock(return_value=ready)
type(doorbirdapi_mock).info = MagicMock(return_value=info)
return doorbirdapi_mock
def _get_mock_doorbirdapi_side_effects(ready=None, info=None):
doorbirdapi_mock = MagicMock()
type(doorbirdapi_mock).ready = MagicMock(side_effect=ready)
type(doorbirdapi_mock).info = MagicMock(side_effect=info)
return doorbirdapi_mock
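# The helpers above differ only in how the mocked DoorBird behaves:
# _get_mock_doorbirdapi_return_values makes ready/info return the given
# objects, while _get_mock_doorbirdapi_side_effects makes them raise the
# given exception (or yield successive values), which the failure-path
# tests below rely on.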
async def test_user_form(hass):
"""Test we get the user form."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "macaddr"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "1.2.3.4"
assert result2["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import(hass):
"""Test we get the form with import source."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
import_config = VALID_CONFIG.copy()
import_config[CONF_EVENTS] = ["event1", "event2", "event3"]
import_config[CONF_TOKEN] = "imported_token"
import_config[
CONF_CUSTOM_URL
] = "http://legacy.custom.url/should/only/come/in/from/yaml"
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "macaddr"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch("homeassistant.components.logbook.async_setup", return_value=True), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=import_config,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "1.2.3.4"
assert result["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
"events": ["event1", "event2", "event3"],
"token": "imported_token",
# This will go away once we convert to cloud hooks
"hass_url_override": "http://legacy.custom.url/should/only/come/in/from/yaml",
}
# It is not possible to import options at this time
# so they end up in the config entry data and are
    # used as a fallback when they are not in options
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import_with_zeroconf_already_discovered(hass):
"""Test we get the form with import source."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
    # Running the zeroconf init will mark the unique id as having
    # a flow already in progress
zero_conf = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "1CCAE3DOORBIRD"},
"name": "Doorstation - abc123._axis-video._tcp.local.",
"host": "192.168.1.5",
},
)
assert zero_conf["type"] == data_entry_flow.RESULT_TYPE_FORM
assert zero_conf["step_id"] == "user"
assert zero_conf["errors"] == {}
import_config = VALID_CONFIG.copy()
import_config[CONF_EVENTS] = ["event1", "event2", "event3"]
import_config[CONF_TOKEN] = "imported_token"
import_config[
CONF_CUSTOM_URL
] = "http://legacy.custom.url/should/only/come/in/from/yaml"
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "1CCAE3DOORBIRD"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch("homeassistant.components.logbook.async_setup", return_value=True), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=import_config,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "1.2.3.4"
assert result["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
"events": ["event1", "event2", "event3"],
"token": "imported_token",
# This will go away once we convert to cloud hooks
"hass_url_override": "http://legacy.custom.url/should/only/come/in/from/yaml",
}
# It is not possible to import options at this time
# so they end up in the config entry data and are
    # used as a fallback when they are not in options
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_zeroconf_wrong_oui(hass):
"""Test we abort when we get the wrong OUI via zeroconf."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "notdoorbirdoui"},
"host": "192.168.1.8",
"name": "Doorstation - abc123._axis-video._tcp.local.",
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_doorbird_device"
async def test_form_zeroconf_link_local_ignored(hass):
"""Test we abort when we get a link local address via zeroconf."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "1CCAE3DOORBIRD"},
"host": "169.254.103.61",
"name": "Doorstation - abc123._axis-video._tcp.local.",
},
)
assert result["type"] == "abort"
assert result["reason"] == "link_local_address"
async def test_form_zeroconf_correct_oui(hass):
"""Test we can setup from zeroconf with the correct OUI source."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "1CCAE3DOORBIRD"},
"name": "Doorstation - abc123._axis-video._tcp.local.",
"host": "192.168.1.5",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "macaddr"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch("homeassistant.components.logbook.async_setup", return_value=True), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], VALID_CONFIG
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "1.2.3.4"
assert result2["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_cannot_connect(hass):
"""Test we handle cannot connect error."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
doorbirdapi = _get_mock_doorbirdapi_side_effects(ready=OSError)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_user_invalid_auth(hass):
"""Test we handle cannot invalid auth error."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_urllib_error = urllib.error.HTTPError(
"http://xyz.tld", 401, "login failed", {}, None
)
doorbirdapi = _get_mock_doorbirdapi_side_effects(ready=mock_urllib_error)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_options_flow(hass):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="abcde12345",
data=VALID_CONFIG,
options={CONF_EVENTS: ["event1", "event2", "event3"]},
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.doorbird.async_setup_entry", return_value=True
):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_EVENTS: "eventa, eventc, eventq"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {CONF_EVENTS: ["eventa", "eventc", "eventq"]}
|
import time
from kalliope.core.NeuronModule import NeuronModule
class Systemdate(NeuronModule):
def __init__(self, **kwargs):
        # Default the cache to False unless the user explicitly set it,
        # since caching is not necessary for this neuron.
        if kwargs.get("cache") is None:
            kwargs["cache"] = False
super(Systemdate, self).__init__(**kwargs)
# local time and date
hour = time.strftime("%H") # Hour (24-hour clock) as a decimal number [00,23].
minute = time.strftime("%M") # Minute as a decimal number [00,59].
weekday = time.strftime("%w") # Weekday as a decimal number [0(Sunday),6].
day_month = time.strftime("%d") # Day of the month as a decimal number [01,31].
month = time.strftime("%m") # Month as a decimal number [01,12].
year = time.strftime("%Y") # Year with century as a decimal number. E.g: 2016
self.message = {
"hours": hour,
"minutes": minute,
"weekday": weekday,
"month": month,
"day_month": day_month,
"year": year
}
self.say(self.message)
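if __name__ == "__main__":
    # Standalone sketch of the strftime formatting used above; it does not
    # instantiate Systemdate, since NeuronModule needs the Kalliope runtime.
    print({
        "hours": time.strftime("%H"),
        "minutes": time.strftime("%M"),
        "weekday": time.strftime("%w"),
        "day_month": time.strftime("%d"),
        "month": time.strftime("%m"),
        "year": time.strftime("%Y"),
    })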
|
from unittest import mock
from aiohomekit import AccessoryDisconnectedError
from aiohomekit.testing import FakePairing
from homeassistant.components.climate.const import (
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ENTRY_STATE_SETUP_RETRY
from tests.components.homekit_controller.common import (
Helper,
device_config_changed,
setup_accessories_from_file,
setup_test_accessories,
time_changed,
)
async def test_ecobee3_setup(hass):
"""Test that a Ecbobee 3 can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "ecobee3.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
climate = entity_registry.async_get("climate.homew")
assert climate.unique_id == "homekit-123456789012-16"
climate_helper = Helper(
hass, "climate.homew", pairing, accessories[0], config_entry
)
climate_state = await climate_helper.poll_and_get_state()
assert climate_state.attributes["friendly_name"] == "HomeW"
assert climate_state.attributes["supported_features"] == (
SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_HUMIDITY
)
assert climate_state.attributes["hvac_modes"] == [
"off",
"heat",
"cool",
"heat_cool",
]
assert climate_state.attributes["min_temp"] == 7.2
assert climate_state.attributes["max_temp"] == 33.3
assert climate_state.attributes["min_humidity"] == 20
assert climate_state.attributes["max_humidity"] == 50
occ1 = entity_registry.async_get("binary_sensor.kitchen")
assert occ1.unique_id == "homekit-AB1C-56"
occ1_helper = Helper(
hass, "binary_sensor.kitchen", pairing, accessories[0], config_entry
)
occ1_state = await occ1_helper.poll_and_get_state()
assert occ1_state.attributes["friendly_name"] == "Kitchen"
occ2 = entity_registry.async_get("binary_sensor.porch")
assert occ2.unique_id == "homekit-AB2C-56"
occ3 = entity_registry.async_get("binary_sensor.basement")
assert occ3.unique_id == "homekit-AB3C-56"
device_registry = await hass.helpers.device_registry.async_get_registry()
climate_device = device_registry.async_get(climate.device_id)
assert climate_device.manufacturer == "ecobee Inc."
assert climate_device.name == "HomeW"
assert climate_device.model == "ecobee3"
assert climate_device.sw_version == "4.2.394"
assert climate_device.via_device_id is None
# Check that an attached sensor has its own device entity that
# is linked to the bridge
sensor_device = device_registry.async_get(occ1.device_id)
assert sensor_device.manufacturer == "ecobee Inc."
assert sensor_device.name == "Kitchen"
assert sensor_device.model == "REMOTE SENSOR"
assert sensor_device.sw_version == "1.0.0"
assert sensor_device.via_device_id == climate_device.id
async def test_ecobee3_setup_from_cache(hass, hass_storage):
"""Test that Ecbobee can be correctly setup from its cached entity map."""
accessories = await setup_accessories_from_file(hass, "ecobee3.json")
hass_storage["homekit_controller-entity-map"] = {
"version": 1,
"data": {
"pairings": {
"00:00:00:00:00:00": {
"config_num": 1,
"accessories": [
a.to_accessory_and_service_list() for a in accessories
],
}
}
},
}
await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
climate = entity_registry.async_get("climate.homew")
assert climate.unique_id == "homekit-123456789012-16"
occ1 = entity_registry.async_get("binary_sensor.kitchen")
assert occ1.unique_id == "homekit-AB1C-56"
occ2 = entity_registry.async_get("binary_sensor.porch")
assert occ2.unique_id == "homekit-AB2C-56"
occ3 = entity_registry.async_get("binary_sensor.basement")
assert occ3.unique_id == "homekit-AB3C-56"
async def test_ecobee3_setup_connection_failure(hass):
"""Test that Ecbobee can be correctly setup from its cached entity map."""
accessories = await setup_accessories_from_file(hass, "ecobee3.json")
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Test that the connection fails during initial setup.
# No entities should be created.
list_accessories = "list_accessories_and_characteristics"
with mock.patch.object(FakePairing, list_accessories) as laac:
laac.side_effect = AccessoryDisconnectedError("Connection failed")
# If there is no cached entity map and the accessory connection is
# failing then we have to fail the config entry setup.
config_entry, pairing = await setup_test_accessories(hass, accessories)
assert config_entry.state == ENTRY_STATE_SETUP_RETRY
climate = entity_registry.async_get("climate.homew")
assert climate is None
    # When the accessory raises ConfigEntryNotReady, HA will retry - let's
    # make sure no cruft left behind earlier causes conflicts now that the
    # setup succeeds.
# We just advance time by 5 minutes so that the retry happens, rather
# than manually invoking async_setup_entry.
await time_changed(hass, 5 * 60)
climate = entity_registry.async_get("climate.homew")
assert climate.unique_id == "homekit-123456789012-16"
occ1 = entity_registry.async_get("binary_sensor.kitchen")
assert occ1.unique_id == "homekit-AB1C-56"
occ2 = entity_registry.async_get("binary_sensor.porch")
assert occ2.unique_id == "homekit-AB2C-56"
occ3 = entity_registry.async_get("binary_sensor.basement")
assert occ3.unique_id == "homekit-AB3C-56"
async def test_ecobee3_add_sensors_at_runtime(hass):
"""Test that new sensors are automatically added."""
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Set up a base Ecobee 3 with no additional sensors.
# There shouldn't be any entities but climate visible.
accessories = await setup_accessories_from_file(hass, "ecobee3_no_sensors.json")
await setup_test_accessories(hass, accessories)
climate = entity_registry.async_get("climate.homew")
assert climate.unique_id == "homekit-123456789012-16"
occ1 = entity_registry.async_get("binary_sensor.kitchen")
assert occ1 is None
occ2 = entity_registry.async_get("binary_sensor.porch")
assert occ2 is None
occ3 = entity_registry.async_get("binary_sensor.basement")
assert occ3 is None
    # Now add 3 new sensors at runtime - the sensors should appear and the
    # climate entity shouldn't be duplicated.
accessories = await setup_accessories_from_file(hass, "ecobee3.json")
await device_config_changed(hass, accessories)
occ1 = entity_registry.async_get("binary_sensor.kitchen")
assert occ1.unique_id == "homekit-AB1C-56"
occ2 = entity_registry.async_get("binary_sensor.porch")
assert occ2.unique_id == "homekit-AB2C-56"
occ3 = entity_registry.async_get("binary_sensor.basement")
assert occ3.unique_id == "homekit-AB3C-56"
|
import logging
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import SUPPORT_ALARM_ARM_AWAY
from homeassistant.const import (
ATTR_ATTRIBUTION,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_DISARMED,
)
from .const import DEFAULT_ATTRIBUTION, DOMAIN
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:security"
async def async_setup_entry(hass, config, async_add_entities):
"""Set up the Blink Alarm Control Panels."""
data = hass.data[DOMAIN][config.entry_id]
sync_modules = []
for sync_name, sync_module in data.sync.items():
sync_modules.append(BlinkSyncModule(data, sync_name, sync_module))
async_add_entities(sync_modules)
class BlinkSyncModule(AlarmControlPanelEntity):
"""Representation of a Blink Alarm Control Panel."""
def __init__(self, data, name, sync):
"""Initialize the alarm control panel."""
self.data = data
self.sync = sync
self._name = name
self._state = None
@property
def unique_id(self):
"""Return the unique id for the sync module."""
return self.sync.serial
@property
def icon(self):
"""Return icon."""
return ICON
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_AWAY
@property
def name(self):
"""Return the name of the panel."""
return f"{DOMAIN} {self._name}"
@property
def device_state_attributes(self):
"""Return the state attributes."""
attr = self.sync.attributes
attr["network_info"] = self.data.networks
attr["associated_cameras"] = list(self.sync.cameras)
attr[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION
return attr
def update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating Blink Alarm Control Panel %s", self._name)
self.data.refresh()
mode = self.sync.arm
if mode:
self._state = STATE_ALARM_ARMED_AWAY
else:
self._state = STATE_ALARM_DISARMED
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.sync.arm = False
self.sync.refresh()
def alarm_arm_away(self, code=None):
"""Send arm command."""
self.sync.arm = True
self.sync.refresh()
|
from __future__ import print_function, division
import warnings
from abc import abstractmethod
import datetime
from plumbum.lib import six
from plumbum.cli.termsize import get_terminal_size
import sys
class ProgressBase(six.ABC):
"""Base class for progress bars. Customize for types of progress bars.
:param iterator: The iterator to wrap with a progress bar
:param length: The length of the iterator (will use ``__len__`` if None)
:param timer: Try to time the completion status of the iterator
:param body: True if the slow portion occurs outside the iterator (in a loop, for example)
:param has_output: True if the iteration body produces output to the screen (forces rewrite off)
:param clear: Clear the progress bar afterwards, if applicable.
"""
def __init__(self,
iterator=None,
length=None,
timer=True,
body=False,
has_output=False,
clear=True):
        if length is None and iterator is None:
            raise TypeError("Expected either an iterator or a length")
        if length is None:
            length = len(iterator)
        elif iterator is None:
            iterator = range(length)
self.length = length
self.iterator = iterator
self.timer = timer
self.body = body
self.has_output = has_output
self.clear = clear
def __len__(self):
return self.length
def __iter__(self):
self.start()
return self
@abstractmethod
def start(self):
"""This should initialize the progress bar and the iterator"""
self.iter = iter(self.iterator)
self.value = -1 if self.body else 0
self._start_time = datetime.datetime.now()
def __next__(self):
try:
rval = next(self.iter)
self.increment()
except StopIteration:
self.done()
raise
return rval
def next(self):
return self.__next__()
@property
def value(self):
"""This is the current value, as a property so setting it can be customized"""
return self._value
@value.setter
def value(self, val):
self._value = val
@abstractmethod
def display(self):
"""Called to update the progress bar"""
pass
def increment(self):
"""Sets next value and displays the bar"""
self.value += 1
self.display()
def time_remaining(self):
"""Get the time remaining for the progress bar, guesses"""
if self.value < 1:
return None, None
elapsed_time = datetime.datetime.now() - self._start_time
time_each = (elapsed_time.days * 24 * 60 * 60 + elapsed_time.seconds +
elapsed_time.microseconds / 1000000.0) / self.value
time_remaining = time_each * (self.length - self.value)
return elapsed_time, datetime.timedelta(0, time_remaining, 0)
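    # Worked example: 5 of 20 items done after 60 s gives time_each = 12 s,
    # so time_remaining = 12 s * (20 - 5) = 180 s, returned as a timedelta.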
def str_time_remaining(self):
"""Returns a string version of time remaining"""
if self.value < 1:
return "Starting... "
else:
elapsed_time, time_remaining = list(
map(str, self.time_remaining()))
return "{0} completed, {1} remaining".format(
elapsed_time.split('.')[0],
time_remaining.split('.')[0])
@abstractmethod
def done(self):
"""Is called when the iterator is done."""
pass
@classmethod
def range(cls, *value, **kargs):
"""Fast shortcut to create a range based progress bar, assumes work done in body"""
return cls(range(*value), body=True, **kargs)
@classmethod
def wrap(cls, iterator, length=None, **kargs):
"""Shortcut to wrap an iterator that does not do all the work internally"""
return cls(iterator, length, body=True, **kargs)
class Progress(ProgressBase):
def start(self):
super(Progress, self).start()
self.display()
def done(self):
self.value = self.length
self.display()
if self.clear and not self.has_output:
print("\r", len(str(self)) * " ", "\r", end='', sep='')
else:
print()
def __str__(self):
width = get_terminal_size(default=(0, 0))[0]
if self.length == 0:
self.width = 0
return "0/0 complete"
percent = max(self.value, 0) / self.length
ending = ' ' + (self.str_time_remaining()
if self.timer else '{0} of {1} complete'.format(
self.value, self.length))
if width - len(ending) < 10 or self.has_output:
self.width = 0
if self.timer:
return "{0:.0%} complete: {1}".format(
percent, self.str_time_remaining())
else:
return "{0:.0%} complete".format(percent)
else:
self.width = width - len(ending) - 2 - 1
nstars = int(percent * self.width)
pbar = '[' + '*' * nstars + ' ' * (
self.width - nstars) + ']' + ending
str_percent = ' {0:.0%} '.format(percent)
return pbar[:self.width // 2 -
2] + str_percent + pbar[self.width // 2 +
len(str_percent) - 2:]
def display(self):
disptxt = str(self)
if self.width == 0 or self.has_output:
print(disptxt)
else:
print("\r", end='')
print(disptxt, end='')
sys.stdout.flush()
class ProgressIPy(ProgressBase): # pragma: no cover
HTMLBOX = '<div class="widget-hbox widget-progress"><div class="widget-label" style="display:block;">{0}</div></div>'
def __init__(self, *args, **kargs):
# Ipython gives warnings when using widgets about the API potentially changing
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from ipywidgets import IntProgress, HTML, HBox # type: ignore
except ImportError: # Support IPython < 4.0
from IPython.html.widgets import IntProgress, HTML, HBox # type: ignore
super(ProgressIPy, self).__init__(*args, **kargs)
self.prog = IntProgress(max=self.length)
self._label = HTML()
self._box = HBox((self.prog, self._label))
def start(self):
from IPython.display import display # type: ignore
display(self._box)
super(ProgressIPy, self).start()
@property
def value(self):
"""This is the current value, -1 allowed (automatically fixed for display)"""
return self._value
@value.setter
def value(self, val):
self._value = val
self.prog.value = max(val, 0)
self.prog.description = "{0:.2%}".format(self.value / self.length)
if self.timer and val > 0:
self._label.value = self.HTMLBOX.format(self.str_time_remaining())
def display(self):
pass
def done(self):
if self.clear:
self._box.close()
class ProgressAuto(ProgressBase):
"""Automatically selects the best progress bar (IPython HTML or text). Does not work with qtconsole
(as that is correctly identified as identical to notebook, since the kernel is the same); it will still
    iterate, but no graphical indication will be displayed.
:param iterator: The iterator to wrap with a progress bar
:param length: The length of the iterator (will use ``__len__`` if None)
:param timer: Try to time the completion status of the iterator
:param body: True if the slow portion occurs outside the iterator (in a loop, for example)
"""
def __new__(cls, *args, **kargs):
"""Uses the generator trick that if a cls instance is returned, the __init__ method is not called."""
try: # pragma: no cover
__IPYTHON__
try:
from traitlets import TraitError # type: ignore
except ImportError: # Support for IPython < 4.0
from IPython.utils.traitlets import TraitError # type: ignore
try:
return ProgressIPy(*args, **kargs)
except TraitError:
raise NameError()
except (NameError, ImportError):
return Progress(*args, **kargs)
ProgressAuto.register(ProgressIPy)
ProgressAuto.register(Progress)
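# Registering the concrete classes as virtual subclasses keeps
# isinstance(bar, ProgressAuto) working for whichever class __new__ returned.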
def main():
import time
tst = Progress.range(20)
for i in tst:
time.sleep(1)
if __name__ == '__main__':
main()
|
import pytest
import voluptuous as vol
from homeassistant import data_entry_flow
from homeassistant.components import mqtt
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture(autouse=True)
def mock_finish_setup():
"""Mock out the finish setup method."""
with patch(
"homeassistant.components.mqtt.MQTT.async_connect", return_value=True
) as mock_finish:
yield mock_finish
@pytest.fixture
def mock_try_connection():
"""Mock the try connection method."""
with patch("homeassistant.components.mqtt.config_flow.try_connection") as mock_try:
yield mock_try
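# try_connection is patched at the config_flow module level, so the user flow
# and the options flow tests below share the same mock.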
async def test_user_connection_works(hass, mock_try_connection, mock_finish_setup):
"""Test we can finish a config flow."""
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": "user"}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"broker": "127.0.0.1"}
)
assert result["type"] == "create_entry"
assert result["result"].data == {
"broker": "127.0.0.1",
"port": 1883,
"discovery": True,
}
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry got setup
assert len(mock_finish_setup.mock_calls) == 1
async def test_user_connection_fails(hass, mock_try_connection, mock_finish_setup):
"""Test if connection cannot be made."""
mock_try_connection.return_value = False
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": "user"}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"broker": "127.0.0.1"}
)
assert result["type"] == "form"
assert result["errors"]["base"] == "cannot_connect"
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry did not setup
assert len(mock_finish_setup.mock_calls) == 0
async def test_manual_config_set(hass, mock_try_connection, mock_finish_setup):
"""Test we ignore entry if manual config available."""
assert await async_setup_component(hass, "mqtt", {"mqtt": {"broker": "bla"}})
await hass.async_block_till_done()
assert len(mock_finish_setup.mock_calls) == 1
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": "user"}
)
assert result["type"] == "abort"
async def test_user_single_instance(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(domain="mqtt").add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_hassio_single_instance(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(domain="mqtt").add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": "hassio"}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_hassio_confirm(hass, mock_try_connection, mock_finish_setup):
"""Test we can finish a config flow."""
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
"mqtt",
data={
"addon": "Mock Addon",
"host": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
context={"source": "hassio"},
)
assert result["type"] == "form"
assert result["step_id"] == "hassio_confirm"
assert result["description_placeholders"] == {"addon": "Mock Addon"}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"discovery": True}
)
assert result["type"] == "create_entry"
assert result["result"].data == {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"discovery": True,
}
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry got setup
assert len(mock_finish_setup.mock_calls) == 1
async def test_option_flow(hass, mqtt_mock, mock_try_connection):
"""Test config flow options."""
mock_try_connection.return_value = True
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mqtt_mock.async_connect.reset_mock()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 0
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: True,
"birth_enable": True,
"birth_topic": "ha_state/online",
"birth_payload": "online",
"birth_qos": 1,
"birth_retain": True,
"will_enable": True,
"will_topic": "ha_state/offline",
"will_payload": "offline",
"will_qos": 2,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] is None
assert config_entry.data == {
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
mqtt.CONF_DISCOVERY: True,
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/online",
mqtt.ATTR_PAYLOAD: "online",
mqtt.ATTR_QOS: 1,
mqtt.ATTR_RETAIN: True,
},
mqtt.CONF_WILL_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/offline",
mqtt.ATTR_PAYLOAD: "offline",
mqtt.ATTR_QOS: 2,
mqtt.ATTR_RETAIN: True,
},
}
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 1
async def test_disable_birth_will(hass, mqtt_mock, mock_try_connection):
"""Test disabling birth and will."""
mock_try_connection.return_value = True
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mqtt_mock.async_connect.reset_mock()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 0
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: True,
"birth_enable": False,
"birth_topic": "ha_state/online",
"birth_payload": "online",
"birth_qos": 1,
"birth_retain": True,
"will_enable": False,
"will_topic": "ha_state/offline",
"will_payload": "offline",
"will_qos": 2,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] is None
assert config_entry.data == {
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
mqtt.CONF_DISCOVERY: True,
mqtt.CONF_BIRTH_MESSAGE: {},
mqtt.CONF_WILL_MESSAGE: {},
}
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 1
def get_default(schema, key):
"""Get default value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.default == vol.UNDEFINED:
return None
return k.default()
def get_suggested(schema, key):
"""Get suggested value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.description is None or "suggested_value" not in k.description:
return None
return k.description["suggested_value"]
async def test_option_flow_default_suggested_values(
hass, mqtt_mock, mock_try_connection
):
"""Test config flow options has default/suggested values."""
mock_try_connection.return_value = True
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
mqtt.CONF_DISCOVERY: True,
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/online",
mqtt.ATTR_PAYLOAD: "online",
mqtt.ATTR_QOS: 1,
mqtt.ATTR_RETAIN: True,
},
mqtt.CONF_WILL_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/offline",
mqtt.ATTR_PAYLOAD: "offline",
mqtt.ATTR_QOS: 2,
mqtt.ATTR_RETAIN: False,
},
}
# Test default/suggested values from config
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
defaults = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
suggested = {
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "us3r",
mqtt.CONF_PASSWORD: "p4ss",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
defaults = {
mqtt.CONF_DISCOVERY: True,
"birth_qos": 1,
"birth_retain": True,
"will_qos": 2,
"will_retain": False,
}
suggested = {
"birth_topic": "ha_state/online",
"birth_payload": "online",
"will_topic": "ha_state/offline",
"will_payload": "offline",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: False,
"birth_topic": "ha_state/onl1ne",
"birth_payload": "onl1ne",
"birth_qos": 2,
"birth_retain": False,
"will_topic": "ha_state/offl1ne",
"will_payload": "offl1ne",
"will_qos": 1,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
# Test updated default/suggested values from config
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
defaults = {
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
}
suggested = {
mqtt.CONF_USERNAME: "us3r",
mqtt.CONF_PASSWORD: "p4ss",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "another-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
defaults = {
mqtt.CONF_DISCOVERY: False,
"birth_qos": 2,
"birth_retain": False,
"will_qos": 1,
"will_retain": True,
}
suggested = {
"birth_topic": "ha_state/onl1ne",
"birth_payload": "onl1ne",
"will_topic": "ha_state/offl1ne",
"will_payload": "offl1ne",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: True,
"birth_topic": "ha_state/onl1ne",
"birth_payload": "onl1ne",
"birth_qos": 2,
"birth_retain": False,
"will_topic": "ha_state/offl1ne",
"will_payload": "offl1ne",
"will_qos": 1,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_options_user_connection_fails(hass, mock_try_connection):
"""Test if connection cannot be made."""
config_entry = MockConfigEntry(domain=mqtt.DOMAIN)
config_entry.add_to_hass(hass)
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mock_try_connection.return_value = False
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "bad-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == "form"
assert result["errors"]["base"] == "cannot_connect"
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry did not update
assert config_entry.data == {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
async def test_options_bad_birth_message_fails(hass, mock_try_connection):
"""Test bad birth message."""
config_entry = MockConfigEntry(domain=mqtt.DOMAIN)
config_entry.add_to_hass(hass)
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mock_try_connection.return_value = True
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "another-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == "form"
assert result["step_id"] == "options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"birth_topic": "ha_state/online/#"},
)
assert result["type"] == "form"
assert result["errors"]["base"] == "bad_birth"
# Check config entry did not update
assert config_entry.data == {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
async def test_options_bad_will_message_fails(hass, mock_try_connection):
"""Test bad will message."""
config_entry = MockConfigEntry(domain=mqtt.DOMAIN)
config_entry.add_to_hass(hass)
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mock_try_connection.return_value = True
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "another-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == "form"
assert result["step_id"] == "options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"will_topic": "ha_state/offline/#"},
)
assert result["type"] == "form"
assert result["errors"]["base"] == "bad_will"
# Check config entry did not update
assert config_entry.data == {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
|
import pytest
from qstrader.asset.cash import Cash
@pytest.mark.parametrize(
'currency,expected',
[
('USD', 'USD'),
('GBP', 'GBP'),
('EUR', 'EUR')
]
)
def test_cash(currency, expected):
"""
Tests that the Cash asset is correctly instantiated.
"""
cash = Cash(currency)
assert cash.cash_like
assert cash.currency == expected
|
import json
from libpurecool.const import (
FanPower,
FanSpeed,
FanState,
FocusMode,
HeatMode,
HeatState,
HeatTarget,
)
from libpurecool.dyson_pure_hotcool import DysonPureHotCool
from libpurecool.dyson_pure_hotcool_link import DysonPureHotCoolLink
from libpurecool.dyson_pure_state import DysonPureHotCoolState
from libpurecool.dyson_pure_state_v2 import DysonPureHotCoolV2State
import pytest
from homeassistant.components.climate import (
DOMAIN,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.climate.const import (
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
)
from homeassistant.components.dyson import CONF_LANGUAGE, DOMAIN as DYSON_DOMAIN
from homeassistant.components.dyson.climate import FAN_DIFFUSE, FAN_FOCUS, SUPPORT_FLAGS
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
CONF_DEVICES,
CONF_PASSWORD,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.setup import async_setup_component
from .common import load_mock_device
from tests.async_mock import Mock, call, patch
class MockDysonState(DysonPureHotCoolState):
"""Mock Dyson state."""
# pylint: disable=super-init-not-called
def __init__(self):
"""Create new Mock Dyson State."""
def __repr__(self):
"""Mock repr because original one fails since constructor not called."""
return "<MockDysonState>"
def _get_config():
"""Return a config dictionary."""
return {
DYSON_DOMAIN: {
CONF_USERNAME: "email",
CONF_PASSWORD: "password",
CONF_LANGUAGE: "GB",
CONF_DEVICES: [
{"device_id": "XX-XXXXX-XX", "device_ip": "192.168.0.1"},
{"device_id": "YY-YYYYY-YY", "device_ip": "192.168.0.2"},
],
}
}
def _get_dyson_purehotcool_device():
"""Return a valid device as provided by the Dyson web services."""
device = Mock(spec=DysonPureHotCool)
load_mock_device(device)
device.name = "Living room"
device.state.heat_target = "0000"
device.state.heat_mode = HeatMode.HEAT_OFF.value
device.state.fan_power = FanPower.POWER_OFF.value
device.environmental_state.humidity = 42
device.environmental_state.temperature = 298
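    # The environmental temperature is in kelvin; HA reports 298 K as 25 °C.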
return device
def _get_device_off():
"""Return a device with state off."""
device = Mock(spec=DysonPureHotCoolLink)
load_mock_device(device)
return device
def _get_device_cool():
"""Return a device with state of cooling."""
device = Mock(spec=DysonPureHotCoolLink)
load_mock_device(device)
device.state.focus_mode = FocusMode.FOCUS_OFF.value
device.state.heat_target = HeatTarget.celsius(12)
device.state.heat_mode = HeatMode.HEAT_OFF.value
device.state.heat_state = HeatState.HEAT_STATE_OFF.value
return device
def _get_device_heat_on():
"""Return a device with state of heating."""
device = Mock(spec=DysonPureHotCoolLink)
load_mock_device(device)
device.serial = "YY-YYYYY-YY"
device.state.heat_target = HeatTarget.celsius(23)
device.state.heat_mode = HeatMode.HEAT_ON.value
device.state.heat_state = HeatState.HEAT_STATE_ON.value
device.environmental_state.temperature = 289
device.environmental_state.humidity = 53
return device
@pytest.fixture(autouse=True)
def patch_platforms_fixture():
"""Only set up the climate platform for the climate tests."""
with patch("homeassistant.components.dyson.DYSON_PLATFORMS", new=[DOMAIN]):
yield
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_heat_on()],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_pure_hot_cool_link_set_mode(mocked_login, mocked_devices, hass):
"""Test set climate mode."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
device = mocked_devices.return_value[0]
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_HVAC_MODE: HVAC_MODE_HEAT},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(heat_mode=HeatMode.HEAT_ON)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_HVAC_MODE: HVAC_MODE_COOL},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(heat_mode=HeatMode.HEAT_OFF)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_heat_on()],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_pure_hot_cool_link_set_fan(mocked_login, mocked_devices, hass):
"""Test set climate fan."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
device = mocked_devices.return_value[0]
device.temp_unit = TEMP_CELSIUS
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_FAN_MODE: FAN_FOCUS},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(focus_mode=FocusMode.FOCUS_ON)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_FAN_MODE: FAN_DIFFUSE},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(focus_mode=FocusMode.FOCUS_OFF)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_heat_on()],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_pure_hot_cool_link_state(mocked_login, mocked_devices, hass):
"""Test set climate temperature."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_FLAGS
assert state.attributes[ATTR_TEMPERATURE] == 23
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 289 - 273
assert state.attributes[ATTR_CURRENT_HUMIDITY] == 53
assert state.state == HVAC_MODE_HEAT
assert len(state.attributes[ATTR_HVAC_MODES]) == 2
assert HVAC_MODE_HEAT in state.attributes[ATTR_HVAC_MODES]
assert HVAC_MODE_COOL in state.attributes[ATTR_HVAC_MODES]
assert len(state.attributes[ATTR_FAN_MODES]) == 2
assert FAN_FOCUS in state.attributes[ATTR_FAN_MODES]
assert FAN_DIFFUSE in state.attributes[ATTR_FAN_MODES]
device = mocked_devices.return_value[0]
update_callback = device.add_message_listener.call_args[0][0]
device.state.focus_mode = FocusMode.FOCUS_ON.value
await hass.async_add_executor_job(update_callback, MockDysonState())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.attributes[ATTR_FAN_MODE] == FAN_FOCUS
device.state.focus_mode = FocusMode.FOCUS_OFF.value
await hass.async_add_executor_job(update_callback, MockDysonState())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.attributes[ATTR_FAN_MODE] == FAN_DIFFUSE
device.state.heat_mode = HeatMode.HEAT_ON.value
device.state.heat_state = HeatState.HEAT_STATE_OFF.value
await hass.async_add_executor_job(update_callback, MockDysonState())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
device.environmental_state.humidity = 0
await hass.async_add_executor_job(update_callback, MockDysonState())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.attributes.get(ATTR_CURRENT_HUMIDITY) is None
device.environmental_state = None
await hass.async_add_executor_job(update_callback, MockDysonState())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.attributes.get(ATTR_CURRENT_HUMIDITY) is None
device.state.heat_mode = HeatMode.HEAT_OFF.value
device.state.heat_state = HeatState.HEAT_STATE_OFF.value
await hass.async_add_executor_job(update_callback, MockDysonState())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_name")
assert state.state == HVAC_MODE_COOL
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_setup_component_without_devices(mocked_login, mocked_devices, hass):
"""Test setup component with no devices."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
entity_ids = hass.states.async_entity_ids(DOMAIN)
assert not entity_ids
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_heat_on()],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_dyson_set_temperature(mocked_login, mocked_devices, hass):
"""Test set climate temperature."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
device = mocked_devices.return_value[0]
device.temp_unit = TEMP_CELSIUS
# Without correct target temp.
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.temp_name",
ATTR_TARGET_TEMP_HIGH: 25.0,
ATTR_TARGET_TEMP_LOW: 15.0,
},
True,
)
set_config = device.set_configuration
assert set_config.call_count == 0
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_TEMPERATURE: 23},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(
heat_mode=HeatMode.HEAT_ON, heat_target=HeatTarget.celsius(23)
)
# Should clip the target temperature between 1 and 37 inclusive.
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_TEMPERATURE: 50},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(
heat_mode=HeatMode.HEAT_ON, heat_target=HeatTarget.celsius(37)
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_TEMPERATURE: -5},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(
heat_mode=HeatMode.HEAT_ON, heat_target=HeatTarget.celsius(1)
)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_cool()],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_dyson_set_temperature_when_cooling_mode(
mocked_login, mocked_devices, hass
):
"""Test set climate temperature when heating is off."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
device = mocked_devices.return_value[0]
device.temp_unit = TEMP_CELSIUS
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.temp_name", ATTR_TEMPERATURE: 23},
True,
)
set_config = device.set_configuration
assert set_config.call_args == call(
heat_mode=HeatMode.HEAT_ON, heat_target=HeatTarget.celsius(23)
)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_heat_on(), _get_device_cool()],
)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
async def test_setup_component_with_parent_discovery(
mocked_login, mocked_devices, hass
):
"""Test setup_component using discovery."""
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
entity_ids = hass.states.async_entity_ids(DOMAIN)
assert len(entity_ids) == 2
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_component_setup_only_once(devices, login, hass):
"""Test if entities are created only once."""
config = _get_config()
await async_setup_component(hass, DYSON_DOMAIN, config)
await hass.async_block_till_done()
entity_ids = hass.states.async_entity_ids(DOMAIN)
assert len(entity_ids) == 1
state = hass.states.get(entity_ids[0])
assert state.name == "Living room"
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_device_off()],
)
async def test_purehotcoollink_component_setup_only_once(devices, login, hass):
"""Test if entities are created only once."""
config = _get_config()
await async_setup_component(hass, DYSON_DOMAIN, config)
await hass.async_block_till_done()
entity_ids = hass.states.async_entity_ids(DOMAIN)
assert len(entity_ids) == 1
state = hass.states.get(entity_ids[0])
assert state.name == "Temp Name"
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_update_state(devices, login, hass):
"""Test state update."""
device = devices.return_value[0]
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
event = {
"msg": "CURRENT-STATE",
"product-state": {
"fpwr": "ON",
"fdir": "OFF",
"auto": "OFF",
"oscs": "ON",
"oson": "ON",
"nmod": "OFF",
"rhtm": "ON",
"fnst": "FAN",
"ercd": "11E1",
"wacd": "NONE",
"nmdv": "0004",
"fnsp": "0002",
"bril": "0002",
"corf": "ON",
"cflr": "0085",
"hflr": "0095",
"sltm": "OFF",
"osal": "0045",
"osau": "0095",
"ancp": "CUST",
"tilt": "OK",
"hsta": "HEAT",
"hmax": "2986",
"hmod": "HEAT",
},
}
device.state = DysonPureHotCoolV2State(json.dumps(event))
update_callback = device.add_message_listener.call_args[0][0]
await hass.async_add_executor_job(update_callback, device.state)
await hass.async_block_till_done()
state = hass.states.get("climate.living_room")
attributes = state.attributes
assert attributes[ATTR_TEMPERATURE] == 25
assert attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_empty_env_attributes(devices, login, hass):
"""Test empty environmental state update."""
device = devices.return_value[0]
device.environmental_state.temperature = 0
device.environmental_state.humidity = None
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.living_room")
attributes = state.attributes
assert ATTR_CURRENT_HUMIDITY not in attributes
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_fan_state_off(devices, login, hass):
"""Test device fan state off."""
device = devices.return_value[0]
device.state.fan_state = FanState.FAN_OFF.value
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.living_room")
attributes = state.attributes
assert attributes[ATTR_FAN_MODE] == FAN_OFF
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_hvac_action_cool(devices, login, hass):
"""Test device HVAC action cool."""
device = devices.return_value[0]
device.state.fan_power = FanPower.POWER_ON.value
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.living_room")
attributes = state.attributes
assert attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_hvac_action_idle(devices, login, hass):
"""Test device HVAC action idle."""
device = devices.return_value[0]
device.state.fan_power = FanPower.POWER_ON.value
device.state.heat_mode = HeatMode.HEAT_ON.value
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.living_room")
attributes = state.attributes
assert attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_set_temperature(devices, login, hass):
"""Test set temperature."""
device = devices.return_value[0]
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.living_room")
attributes = state.attributes
min_temp = attributes[ATTR_MIN_TEMP]
max_temp = attributes[ATTR_MAX_TEMP]
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.bed_room", ATTR_TEMPERATURE: 23},
True,
)
device.set_heat_target.assert_not_called()
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_TEMPERATURE: 23},
True,
)
assert device.set_heat_target.call_count == 1
device.set_heat_target.assert_called_with("2960")
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_TEMPERATURE: min_temp - 1},
True,
)
assert device.set_heat_target.call_count == 2
device.set_heat_target.assert_called_with(HeatTarget.celsius(min_temp))
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_TEMPERATURE: max_temp + 1},
True,
)
assert device.set_heat_target.call_count == 3
device.set_heat_target.assert_called_with(HeatTarget.celsius(max_temp))
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_set_fan_mode(devices, login, hass):
"""Test set fan mode."""
device = devices.return_value[0]
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.bed_room", ATTR_FAN_MODE: FAN_OFF},
True,
)
device.turn_off.assert_not_called()
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_FAN_MODE: FAN_OFF},
True,
)
assert device.turn_off.call_count == 1
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_FAN_MODE: FAN_LOW},
True,
)
assert device.set_fan_speed.call_count == 1
device.set_fan_speed.assert_called_with(FanSpeed.FAN_SPEED_4)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_FAN_MODE: FAN_MEDIUM},
True,
)
assert device.set_fan_speed.call_count == 2
device.set_fan_speed.assert_called_with(FanSpeed.FAN_SPEED_7)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_FAN_MODE: FAN_HIGH},
True,
)
assert device.set_fan_speed.call_count == 3
device.set_fan_speed.assert_called_with(FanSpeed.FAN_SPEED_10)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_FAN_MODE: FAN_AUTO},
True,
)
assert device.set_fan_speed.call_count == 4
device.set_fan_speed.assert_called_with(FanSpeed.FAN_SPEED_AUTO)
@patch("homeassistant.components.dyson.DysonAccount.login", return_value=True)
@patch(
"homeassistant.components.dyson.DysonAccount.devices",
return_value=[_get_dyson_purehotcool_device()],
)
async def test_purehotcool_set_hvac_mode(devices, login, hass):
"""Test set HVAC mode."""
device = devices.return_value[0]
await async_setup_component(hass, DYSON_DOMAIN, _get_config())
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.bed_room", ATTR_HVAC_MODE: HVAC_MODE_OFF},
True,
)
device.turn_off.assert_not_called()
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_HVAC_MODE: HVAC_MODE_OFF},
True,
)
assert device.turn_off.call_count == 1
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_HVAC_MODE: HVAC_MODE_HEAT},
True,
)
assert device.turn_on.call_count == 1
assert device.enable_heat_mode.call_count == 1
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.living_room", ATTR_HVAC_MODE: HVAC_MODE_COOL},
True,
)
assert device.turn_on.call_count == 2
assert device.disable_heat_mode.call_count == 1
|
import weakref
from ._action import BaseDescriptor
def emitter(func):
""" Decorator to turn a method of a Component into an
:class:`Emitter <flexx.event.Emitter>`.
An emitter makes it easy to emit specific events, and is also a
placeholder for documenting an event.
    .. code-block:: python

        class MyObject(event.Component):

            @emitter
            def spam(self, v):
                return dict(value=v)

        m = MyObject()
        m.spam(42)  # emit the spam event
The method being decorated can have any number of arguments, and
should return a dictionary that represents the event to generate.
The method's docstring is used as the emitter's docstring.
"""
if not callable(func):
raise TypeError('The event.emitter() decorator needs a function.')
if getattr(func, '__self__', None) is not None: # builtin funcs have __self__
raise TypeError('Invalid use of emitter decorator.')
return EmitterDescriptor(func, func.__name__, func.__doc__)
class EmitterDescriptor(BaseDescriptor):
""" Placeholder for documentation and easy emitting of the event.
"""
def __init__(self, func, name, doc):
self._func = func
self._name = name
self.__doc__ = self._format_doc('emitter', name, doc, func)
def __get__(self, instance, owner):
if instance is None:
return self
private_name = '_' + self._name + '_emitter'
try:
emitter = getattr(instance, private_name)
except AttributeError:
emitter = Emitter(instance, self._func, self._name, self.__doc__)
setattr(instance, private_name, emitter)
emitter._use_once(self._func) # make super() work, see _action.py
return emitter
class Emitter:
""" Emitter objects are wrappers around Component methods. They take
care of emitting an event when called and function as a placeholder
for documenting an event. This class should not be instantiated
directly; use ``event.emitter()`` instead.
"""
def __init__(self, ob, func, name, doc):
assert callable(func)
# Store func, name, and docstring (e.g. for sphinx docs)
self._ob1 = weakref.ref(ob)
self._func = func
self._func_once = func
self._name = name
self.__doc__ = doc
def __repr__(self):
cname = self.__class__.__name__
return '<%s %r at 0x%x>' % (cname, self._name, id(self))
def _use_once(self, func):
""" To support super().
"""
self._func_once = func
def __call__(self, *args):
""" Emit the event.
"""
func = self._func_once
self._func_once = self._func
ob = self._ob1()
if ob is not None:
ev = func(ob, *args)
if ev is not None:
ob.emit(self._name, ev)
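# A minimal usage sketch (illustrative only, not part of this module),
# assuming flexx's ``event.Component`` and ``event.reaction`` behave as
# documented: calling the decorated method emits the returned dict as an event.
#
#     from flexx import event
#
#     class Temperature(event.Component):
#
#         @event.emitter
#         def too_hot(self, temp):
#             return dict(temp=temp)
#
#         @event.reaction('too_hot')
#         def on_too_hot(self, *events):
#             print('warning: temperature is', events[-1].temp)
#
#     t = Temperature()
#     t.too_hot(101)  # emits a 'too_hot' event, handled by on_too_hot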
|
from bson.objectid import ObjectId
from flask import Blueprint, request
from app.commons import build_response
from app.intents.models import Intent
train = Blueprint('train_blueprint', __name__,
url_prefix='/train')
@train.route('/<story_id>/data', methods=['POST'])
def save_training_data(story_id):
"""
Save training data for given story
:param story_id:
:return:
"""
story = Intent.objects.get(id=ObjectId(story_id))
story.trainingData = request.json
story.save()
return build_response.sent_ok()
@train.route('/<story_id>/data', methods=['GET'])
def get_training_data(story_id):
"""
retrieve training data for a given story
:param story_id:
:return:
"""
story = Intent.objects.get(id=ObjectId(story_id))
return build_response.build_json(story.trainingData)
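# Usage sketch (hypothetical client calls; the base URL and story_id are
# placeholders, the paths come from the routes above):
#
#   curl -X POST http://localhost:8080/train/<story_id>/data \
#        -H 'Content-Type: application/json' \
#        -d '[{"text": "hello", "intent": "greet"}]'
#   curl http://localhost:8080/train/<story_id>/data
#
# The POST body is stored verbatim in Intent.trainingData; the GET returns it.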
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import re
from absl import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import sample
GIT_REPO = 'https://github.com/RedisLabs/memtier_benchmark'
GIT_TAG = '1.2.15'
LIBEVENT_TAR = 'libevent-2.0.21-stable.tar.gz'
LIBEVENT_URL = 'https://github.com/downloads/libevent/libevent/' + LIBEVENT_TAR
LIBEVENT_DIR = '%s/libevent-2.0.21-stable' % linux_packages.INSTALL_DIR
MEMTIER_DIR = '%s/memtier_benchmark' % linux_packages.INSTALL_DIR
APT_PACKAGES = ('autoconf automake libpcre3-dev '
'libevent-dev pkg-config zlib1g-dev')
YUM_PACKAGES = 'zlib-devel pcre-devel libmemcached-devel'
MEMTIER_RESULTS = 'memtier_results'
FLAGS = flags.FLAGS
flags.DEFINE_enum('memtier_protocol', 'memcache_binary',
['memcache_binary', 'redis', 'memcache_text'],
'Protocol to use. Supported protocols are redis, '
'memcache_text, and memcache_binary. '
'Defaults to memcache_binary.')
flags.DEFINE_integer('memtier_run_count', 1,
'Number of full-test iterations to perform. '
'Defaults to 1.')
flags.DEFINE_integer('memtier_run_duration', None,
'Duration for each client count in seconds. '
'By default, test length is set '
'by memtier_requests, the number of requests sent by each '
'client. By specifying run_duration, key space remains '
'the same (from 1 to memtier_requests), but test stops '
'once run_duration is passed. '
'Total test duration = run_duration * runs * '
'len(memtier_clients).')
flags.DEFINE_integer('memtier_requests', 10000,
'Number of total requests per client. Defaults to 10000.')
flags.DEFINE_list('memtier_clients', [50],
'Comma separated list of number of clients per thread. '
'Specify more than 1 value to vary the number of clients. '
'Defaults to [50].')
flags.DEFINE_list('memtier_threads', [4],
'Number of threads. Defaults to 4.')
flags.DEFINE_integer('memtier_ratio', 9,
                     'Set:Get ratio. Defaults to 9 Gets per Set '
                     '(1 Set to 9 Gets in 10 total requests).')
flags.DEFINE_integer('memtier_data_size', 32,
'Object data size. Defaults to 32 bytes.')
flags.DEFINE_string('memtier_key_pattern', 'R:R',
'Set:Get key pattern. G for Gaussian distribution, R for '
'uniform Random, S for Sequential. Defaults to R:R.')
flags.DEFINE_list('memtier_pipeline', [1],
'Number of pipelines to use for memtier. Defaults to 1, '
'i.e. no pipelining.')
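# With the default flag values above, Run() below assembles a command line
# along these lines (server address and port are placeholders; output is
# redirected to the MEMTIER_RESULTS file):
#
#   memtier_benchmark -s <server_ip> -p <port> -P memcache_binary \
#     --run-count 1 --clients 50 --threads 4 --ratio 1:9 --data-size 32 \
#     --key-pattern R:R --pipeline 1 --key-minimum 1 --key-maximum 10000 \
#     --random-data --requests 10000 > memtier_results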
def YumInstall(vm):
"""Installs the memtier package on the VM."""
vm.Install('build_tools')
vm.InstallPackages(YUM_PACKAGES)
vm.Install('wget')
vm.RemoteCommand('wget {0} -P {1}'.format(LIBEVENT_URL,
linux_packages.INSTALL_DIR))
vm.RemoteCommand('cd {0} && tar xvzf {1}'.format(linux_packages.INSTALL_DIR,
LIBEVENT_TAR))
vm.RemoteCommand('cd {0} && ./configure && sudo make install'.format(
LIBEVENT_DIR))
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, MEMTIER_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(MEMTIER_DIR, GIT_TAG))
pkg_config = 'PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:${PKG_CONFIG_PATH}'
vm.RemoteCommand('cd {0} && autoreconf -ivf && {1} ./configure && '
'sudo make install'.format(MEMTIER_DIR, pkg_config))
def AptInstall(vm):
"""Installs the memtier package on the VM."""
vm.Install('build_tools')
vm.InstallPackages(APT_PACKAGES)
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, MEMTIER_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(MEMTIER_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && autoreconf -ivf && ./configure && '
'sudo make install'.format(MEMTIER_DIR))
def _Uninstall(vm):
"""Uninstalls the memtier package on the VM."""
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MEMTIER_DIR))
def YumUninstall(vm):
"""Uninstalls the memtier package on the VM."""
_Uninstall(vm)
def AptUninstall(vm):
"""Uninstalls the memtier package on the VM."""
_Uninstall(vm)
def Load(client_vm, server_ip, server_port):
"""Preload the server with data."""
cmd = [
'memtier_benchmark',
'-s', server_ip,
'-p', str(server_port),
'-P', FLAGS.memtier_protocol,
'--clients', '1',
'--threads', '1',
      '--ratio', '1:0',  # all Sets and no Gets: a pure write preload
'--data-size', str(FLAGS.memtier_data_size),
'--pipeline', '100',
'--key-minimum', '1',
'--key-maximum', str(FLAGS.memtier_requests),
      '-n', 'allkeys']  # write each key in [1, memtier_requests] exactly once
client_vm.RemoteCommand(' '.join(cmd))
def RunOverAllThreadsAndPipelines(client_vm, server_ip, server_port):
"""Runs memtier over all pipeline and thread combinations."""
samples = []
for pipeline in FLAGS.memtier_pipeline:
for client_thread in FLAGS.memtier_threads:
      logging.info(
          'Start benchmarking memcached using memtier:\n'
          '\tmemtier threads: %s\n'
          '\tmemtier pipeline: %s',
          client_thread, pipeline)
tmp_samples = Run(
client_vm, server_ip, server_port,
client_thread, pipeline)
samples.extend(tmp_samples)
return samples
def Run(vm, server_ip, server_port, threads, pipeline):
"""Runs the memtier benchmark on the vm."""
memtier_ratio = '1:{0}'.format(FLAGS.memtier_ratio)
samples = []
for client_count in FLAGS.memtier_clients:
vm.RemoteCommand('rm -f {0}'.format(MEMTIER_RESULTS))
cmd = [
'memtier_benchmark',
'-s', server_ip,
'-p', str(server_port),
'-P', FLAGS.memtier_protocol,
'--run-count', str(FLAGS.memtier_run_count),
'--clients', str(client_count),
'--threads', str(threads),
'--ratio', memtier_ratio,
'--data-size', str(FLAGS.memtier_data_size),
'--key-pattern', FLAGS.memtier_key_pattern,
'--pipeline', str(pipeline),
'--key-minimum', '1',
'--key-maximum', str(FLAGS.memtier_requests),
'--random-data']
if FLAGS.memtier_run_duration:
cmd.extend(['--test-time', str(FLAGS.memtier_run_duration)])
else:
cmd.extend(['--requests', str(FLAGS.memtier_requests)])
cmd.extend(['>', MEMTIER_RESULTS])
vm.RemoteCommand(' '.join(cmd))
results, _ = vm.RemoteCommand('cat {0}'.format(MEMTIER_RESULTS))
metadata = GetMetadata(threads, pipeline)
metadata['memtier_clients'] = client_count
samples.extend(ParseResults(results, metadata))
return samples
def GetMetadata(threads, pipeline):
"""Metadata for memtier test."""
meta = {'memtier_protocol': FLAGS.memtier_protocol,
'memtier_run_count': FLAGS.memtier_run_count,
'memtier_requests': FLAGS.memtier_requests,
'memtier_threads': threads,
'memtier_ratio': FLAGS.memtier_ratio,
'memtier_data_size': FLAGS.memtier_data_size,
'memtier_key_pattern': FLAGS.memtier_key_pattern,
'memtier_pipeline': pipeline,
'memtier_version': GIT_TAG}
if FLAGS.memtier_run_duration:
meta['memtier_run_duration'] = FLAGS.memtier_run_duration
return meta
def ParseResults(memtier_results, meta):
"""Parse memtier_benchmark result textfile into samples.
Args:
memtier_results: Text output of running Memtier benchmark.
meta: metadata associated with the results.
  Yields:
    sample.Sample instances.

  Example memtier_benchmark output. Note that Hits/sec and Misses/sec are
  reported incorrectly for versions 1.2.8+ due to a known bug:
  https://github.com/RedisLabs/memtier_benchmark/issues/46
4 Threads
50 Connections per thread
20 Seconds
Type Ops/sec Hits/sec Misses/sec Latency KB/sec
------------------------------------------------------------------------
Sets 4005.50 --- --- 4.50600 308.00
Gets 40001.05 0.00 40001.05 4.54300 1519.00
Totals 44006.55 0.00 40001.05 4.54000 1828.00
Request Latency Distribution
Type <= msec Percent
------------------------------------------------------------------------
SET 0 9.33
SET 1 71.07
...
SET 33 100.00
SET 36 100.00
---
GET 0 10.09
GET 1 70.88
..
GET 40 100.00
GET 41 100.00
"""
set_histogram = []
get_histogram = []
total_requests = FLAGS.memtier_requests
approx_total_sets = round(float(total_requests) / (FLAGS.memtier_ratio + 1))
last_total_sets = 0
approx_total_gets = total_requests - approx_total_sets
last_total_gets = 0
for raw_line in memtier_results.splitlines():
line = raw_line.strip()
if re.match(r'^Totals', line):
_, ops, _, _, _, kilobyte = line.split()
yield sample.Sample('Ops Throughput', float(ops), 'ops/s', meta)
yield sample.Sample('KB Throughput', float(kilobyte), 'KB/s', meta)
last_total_sets = _ParseLine(
r'^SET',
line,
approx_total_sets,
last_total_sets,
set_histogram)
last_total_gets = _ParseLine(
r'^GET',
line,
approx_total_gets,
last_total_gets,
get_histogram)
for name, histogram in [('get', get_histogram), ('set', set_histogram)]:
hist_meta = meta.copy()
hist_meta.update({'histogram': json.dumps(histogram)})
yield sample.Sample('{0} latency histogram'.format(name), 0, '', hist_meta)
def _ParseLine(pattern, line, approx_total, last_total, histogram):
"""Helper function to parse an output line."""
if not re.match(pattern, line):
return last_total
_, msec, percent = line.split()
counts = _ConvertPercentToAbsolute(approx_total, float(percent))
bucket_counts = int(round(counts - last_total))
if bucket_counts > 0:
histogram.append({'microsec': float(msec) * 1000,
'count': bucket_counts})
return counts
def _ConvertPercentToAbsolute(total_value, percent):
"""Given total value and a 100-based percentage, returns the actual value."""
return percent / 100 * total_value
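# A small worked example (illustrative only): feeding two cumulative SET
# latency lines from the ParseResults docstring through _ParseLine converts
# cumulative percentages into absolute per-bucket counts. Assuming roughly
# 1000 total SET requests:
#
#   histogram, last = [], 0
#   last = _ParseLine(r'^SET', 'SET 0 9.33', 1000, last, histogram)
#   last = _ParseLine(r'^SET', 'SET 1 71.07', 1000, last, histogram)
#   # histogram == [{'microsec': 0.0, 'count': 93},
#   #               {'microsec': 1000.0, 'count': 617}]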
|
from httpobs.conf import API_ALLOW_VERBOSE_STATS_FROM_PUBLIC, API_COOLDOWN
from httpobs.scanner import STATES
from httpobs.scanner.grader import get_score_description, GRADES
from httpobs.scanner.utils import valid_hostname
from httpobs.website import add_response_headers, sanitized_api_response
from flask import Blueprint, jsonify, make_response, request
from werkzeug.http import http_date
import httpobs.database as database
import json
import os.path
api = Blueprint('api', __name__)
# TODO: Implement API to write public and private headers to the database
@api.route('/api/v1/analyze', methods=['GET', 'OPTIONS', 'POST'])
@add_response_headers(cors=True)
@sanitized_api_response
def api_post_scan_hostname():
# TODO: Allow people to accidentally use https://mozilla.org and convert to mozilla.org
# Get the hostname
hostname = request.args.get('host', '').lower()
# Fail if it's not a valid hostname (not in DNS, not a real hostname, etc.)
    ip = valid_hostname(hostname) is None
hostname = valid_hostname(hostname) or valid_hostname('www.' + hostname) # prepend www. if necessary
if ip:
return {
'error': 'invalid-hostname-ip',
'text': 'Cannot scan IP addresses',
}
elif not hostname:
return {
'error': 'invalid-hostname',
'text': '{hostname} is an invalid hostname'.format(hostname=request.args.get('host', '')),
}
# Get the site's id number
try:
site_id = database.select_site_id(hostname)
except IOError:
return {
'error': 'database-down',
'text': 'Unable to connect to database',
}
# Next, let's see if there's a recent scan; if there was a recent scan, let's just return it
# Setting rescan shortens what "recent" means
    rescan = request.form.get('rescan', 'false') == 'true'
if rescan:
row = database.select_scan_recent_scan(site_id, API_COOLDOWN)
else:
row = database.select_scan_recent_scan(site_id)
# Otherwise, let's queue up the scan
if not row:
hidden = request.form.get('hidden', 'false')
# Begin the dispatch process if it was a POST
if request.method == 'POST':
row = database.insert_scan(site_id, hidden=hidden)
else:
return {
'error': 'recent-scan-not-found',
'text': 'Recently completed scan for {hostname} not found'.format(
hostname=request.args.get('host', ''))
}
# If there was a rescan attempt and it returned a row, it's because the rescan was done within the cooldown window
elif rescan and request.method == 'POST':
return {
'error': 'rescan-attempt-too-soon',
'text': '{hostname} is on temporary cooldown'.format(hostname=request.args.get('host', ''))
}
# Return the scan row
return row
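# Example invocations of the analyze endpoint (hypothetical base URL; the path
# and parameters come from the route above):
#
#   # Return a recently completed scan, if one exists:
#   curl 'https://observatory.example/api/v1/analyze?host=mozilla.org'
#
#   # Queue a new scan (POST), subject to the rescan cooldown:
#   curl -X POST 'https://observatory.example/api/v1/analyze?host=mozilla.org' \
#        -d 'rescan=true&hidden=false'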
# TODO: Deprecate this and replace with __stats__ once website is updated
@api.route('/api/v1/getGradeDistribution', methods=['GET', 'OPTIONS'])
@add_response_headers(cors=True)
def api_get_grade_totals():
totals = database.select_star_from('grade_distribution')
# If a grade isn't in the database, return it with quantity 0
totals = {grade: totals.get(grade, 0) for grade in GRADES}
return jsonify(totals)
@api.route('/api/v1/getHostHistory', methods=['GET', 'OPTIONS'])
@add_response_headers(cors=True)
def api_get_host_history():
# Get the hostname
hostname = request.args.get('host', '').lower()
# Fail if it's not a valid hostname (not in DNS, not a real hostname, etc.)
hostname = valid_hostname(hostname) or valid_hostname('www.' + hostname) # prepend www. if necessary
if not hostname:
return jsonify({'error': '{hostname} is an invalid hostname'.format(hostname=request.args.get('host', ''))})
# Get the site's id number
try:
site_id = database.select_site_id(hostname)
except IOError:
return jsonify({'error': 'Unable to connect to database'})
# Get the host history
history = database.select_scan_host_history(site_id)
# Gracefully handle when there's no history
if not history:
return jsonify({'error': 'No history found'})
# Prune for when the score doesn't change; thanks to chuck for the elegant list comprehension
    pruned_history = [v for k, v in enumerate(history)
                      if k == 0 or history[k].get('score') != history[k - 1].get('score')]
# Return the host history
return jsonify(pruned_history)
@api.route('/api/v1/getRecentScans', methods=['GET', 'OPTIONS'])
@add_response_headers(cors=True)
def api_get_recent_scans():
try:
# Get the min and max scores, if they're there
min_score = int(request.args.get('min', 0))
max_score = int(request.args.get('max', 1000))
num_scans = int(request.args.get('num', 10))
min_score = max(0, min_score)
max_score = min(1000, max_score)
num_scans = min(25, num_scans)
except ValueError:
return {'error': 'invalid-parameters'}
return jsonify(database.select_scan_recent_finished_scans(num_scans=num_scans,
min_score=min_score,
max_score=max_score))
# TODO: Deprecate
@api.route('/api/v1/getScannerStates', methods=['GET', 'OPTIONS'])
@add_response_headers(cors=True)
def api_get_scanner_states():
stats = database.select_scan_scanner_statistics(verbose=True)
return jsonify({state: stats['states'].get(state, 0) for state in STATES})
@api.route('/api/v1/__stats__', methods=['GET', 'OPTIONS'])
@add_response_headers(cors=True)
def api_get_scanner_stats():
    pretty = request.args.get('pretty', '').lower() == 'true'
    verbose = request.args.get('verbose', '').lower() == 'true'
# Disallow verbose stat requests from the public if this setting is set
if verbose and not API_ALLOW_VERBOSE_STATS_FROM_PUBLIC:
        verbose = request.access_route[0] == '127.0.0.1'
# Get the scanner statistics from the backend database, defaulting to the quick stats only
stats = database.select_scan_scanner_statistics(verbose)
# If a grade isn't in the database, return it with quantity 0
grade_distribution = {grade: stats['grade_distribution'].get(grade, 0) for grade in GRADES}
grade_distribution_all_scans = {grade: stats['grade_distribution_all_scans'].get(grade, 0) for grade in GRADES}
# Get the number of grade improvements
grade_improvements_all = stats['scan_score_difference_distribution_summation']
# Make sure we only list the ones that are improvements, with a maximum of 5 letter grades
grade_improvements = {k: 0 for k in range(0, 6)}
for k, v in grade_improvements_all.items():
grade_improvements[min(5, max(0, int(k / 20)))] += v
# Convert all the datetimes to HTTP strings
stats['most_recent_scan_datetime'] = http_date(stats['most_recent_scan_datetime'].utctimetuple())
stats['recent_scans'] = {http_date(i.utctimetuple()): v for i, v in stats['recent_scans']}
resp = make_response(json.dumps({
'gradeDistribution': {
'latest': grade_distribution,
'all': grade_distribution_all_scans,
},
'gradeImprovements': grade_improvements,
'misc': {
'mostRecentScanDate': stats['most_recent_scan_datetime'],
'numHoursWithoutScansInLast24Hours': 24 - len(stats['recent_scans']) if verbose else -1,
'numImprovedSites': sum([v for k, v in grade_improvements_all.items() if k > 0]),
'numScans': stats['scan_count'],
'numScansLast24Hours': sum(stats['recent_scans'].values()) if verbose else -1,
'numSuccessfulScans': sum(grade_distribution_all_scans.values()),
'numUniqueSites': sum(grade_improvements_all.values())
},
'recent': {
'scans': {
'best': database.select_scan_recent_finished_scans(13, 90, 1000), # 13, as there are 13 grades
'recent': database.select_scan_recent_finished_scans(13, 0, 1000), # 13, as there are 13 grades
'worst': database.select_scan_recent_finished_scans(13, 0, 20), # 13, as there are 13 grades
'numPerHourLast24Hours': stats['recent_scans'],
},
},
'states': {state: stats['states'].get(state, 0) for state in STATES},
}, indent=4 if pretty else None, sort_keys=pretty, default=str))
resp.mimetype = 'application/json'
return resp
@api.route('/api/v1/getScanResults', methods=['GET', 'OPTIONS'])
@add_response_headers(cors=True)
@sanitized_api_response
def api_get_scan_results():
scan_id = request.args.get('scan')
if not scan_id:
return {'error': 'scan-not-found'}
# Check for invalid scan_id numbers
try:
scan_id = int(scan_id)
# <3 :atoll
if scan_id < 1 or scan_id > 2147483646: # the first rule of autoincrement club
raise ValueError
except ValueError:
return {'error': 'invalid-scan-id'}
# Get all the test results for the given scan id
tests = dict(database.select_test_results(scan_id))
# For each test, get the test score description and add that in
for test in tests:
tests[test]['score_description'] = get_score_description(tests[test]['result'])
return tests
@api.route('/contribute.json', methods=['GET'])
@add_response_headers()
def contribute_json():
__dirname = os.path.abspath(os.path.dirname(__file__))
__filename = os.path.join(__dirname, '..', 'docs', 'contribute.json')
# Return the included contribute.json file
try:
with open(__filename, 'r') as f:
resp = make_response(f.read())
resp.mimetype = 'application/json'
return resp
    except OSError:  # contribute.json missing or unreadable
return jsonify({'error': 'no-contribute-json'})
|