import datetime
from typing import Union
from py17track.package import Package
import pytest
from homeassistant.components.seventeentrack.sensor import (
CONF_SHOW_ARCHIVED,
CONF_SHOW_DELIVERED,
)
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from homeassistant.util import utcnow
from tests.async_mock import MagicMock, patch
from tests.common import async_fire_time_changed
VALID_CONFIG_MINIMAL = {
"sensor": {
"platform": "seventeentrack",
CONF_USERNAME: "test",
CONF_PASSWORD: "test",
}
}
INVALID_CONFIG = {"sensor": {"platform": "seventeentrack", "boom": "test"}}
VALID_CONFIG_FULL = {
"sensor": {
"platform": "seventeentrack",
CONF_USERNAME: "test",
CONF_PASSWORD: "test",
CONF_SHOW_ARCHIVED: True,
CONF_SHOW_DELIVERED: True,
}
}
VALID_CONFIG_FULL_NO_DELIVERED = {
"sensor": {
"platform": "seventeentrack",
CONF_USERNAME: "test",
CONF_PASSWORD: "test",
CONF_SHOW_ARCHIVED: False,
CONF_SHOW_DELIVERED: False,
}
}
DEFAULT_SUMMARY = {
"Not Found": 0,
"In Transit": 0,
"Expired": 0,
"Ready to be Picked Up": 0,
"Undelivered": 0,
"Delivered": 0,
"Returned": 0,
}
NEW_SUMMARY_DATA = {
"Not Found": 1,
"In Transit": 1,
"Expired": 1,
"Ready to be Picked Up": 1,
"Undelivered": 1,
"Delivered": 1,
"Returned": 1,
}
class ClientMock:
"""Mock the py17track client to inject the ProfileMock."""
def __init__(self, websession) -> None:
"""Mock the profile."""
self.profile = ProfileMock()
class ProfileMock:
"""ProfileMock will mock data coming from 17track."""
package_list = []
login_result = True
summary_data = DEFAULT_SUMMARY
account_id = "123"
@classmethod
def reset(cls):
"""Reset data to defaults."""
cls.package_list = []
cls.login_result = True
cls.summary_data = DEFAULT_SUMMARY
cls.account_id = "123"
def __init__(self) -> None:
"""Override Account id."""
self.account_id = self.__class__.account_id
async def login(self, email: str, password: str) -> bool:
"""Login mock."""
return self.__class__.login_result
async def packages(
self, package_state: Union[int, str] = "", show_archived: bool = False
) -> list:
"""Packages mock."""
return self.__class__.package_list[:]
async def summary(self, show_archived: bool = False) -> dict:
"""Summary mock."""
return self.__class__.summary_data
@pytest.fixture(autouse=True, name="mock_client")
def fixture_mock_client():
"""Mock py17track client."""
with patch(
"homeassistant.components.seventeentrack.sensor.SeventeenTrackClient",
new=ClientMock,
):
yield
ProfileMock.reset()
async def _setup_seventeentrack(hass, config=None, summary_data=None):
"""Set up component using config."""
if not config:
config = VALID_CONFIG_MINIMAL
if not summary_data:
summary_data = {}
ProfileMock.summary_data = summary_data
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
async def _goto_future(hass, future=None):
"""Move to future."""
if not future:
future = utcnow() + datetime.timedelta(minutes=10)
with patch("homeassistant.util.utcnow", return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
async def test_full_valid_config(hass):
"""Ensure everything starts correctly."""
assert await async_setup_component(hass, "sensor", VALID_CONFIG_FULL)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == len(ProfileMock.summary_data.keys())
async def test_valid_config(hass):
"""Ensure everything starts correctly."""
assert await async_setup_component(hass, "sensor", VALID_CONFIG_MINIMAL)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == len(ProfileMock.summary_data.keys())
async def test_invalid_config(hass):
"""Ensure nothing is created when config is wrong."""
assert await async_setup_component(hass, "sensor", INVALID_CONFIG)
assert not hass.states.async_entity_ids()
async def test_add_package(hass):
"""Ensure package is added correctly when user add a new package."""
package = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
package2 = Package(
"789", 206, "friendly name 2", "info text 2", "location 2", 206, 2
)
ProfileMock.package_list = [package, package2]
await _goto_future(hass)
assert hass.states.get("sensor.seventeentrack_package_789") is not None
assert len(hass.states.async_entity_ids()) == 2
async def test_remove_package(hass):
"""Ensure entity is not there anymore if package is not there."""
package1 = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2
)
package2 = Package(
"789", 206, "friendly name 2", "info text 2", "location 2", 206, 2
)
ProfileMock.package_list = [package1, package2]
await _setup_seventeentrack(hass)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert hass.states.get("sensor.seventeentrack_package_789") is not None
assert len(hass.states.async_entity_ids()) == 2
ProfileMock.package_list = [package2]
await _goto_future(hass)
assert hass.states.get("sensor.seventeentrack_package_456") is None
assert hass.states.get("sensor.seventeentrack_package_789") is not None
assert len(hass.states.async_entity_ids()) == 1
async def test_friendly_name_changed(hass):
"""Test friendly name change."""
package = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
package = Package(
"456", 206, "friendly name 2", "info text 1", "location 1", 206, 2
)
ProfileMock.package_list = [package]
await _goto_future(hass)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
entity = hass.data["entity_components"]["sensor"].get_entity(
"sensor.seventeentrack_package_456"
)
assert entity.name == "Seventeentrack Package: friendly name 2"
assert len(hass.states.async_entity_ids()) == 1
async def test_delivered_not_shown(hass):
"""Ensure delivered packages are not shown."""
package = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2, 40
)
ProfileMock.package_list = [package]
hass.components.persistent_notification = MagicMock()
await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)
await _goto_future(hass)
assert not hass.states.async_entity_ids()
hass.components.persistent_notification.create.assert_called()
async def test_delivered_shown(hass):
"""Ensure delivered packages are show when user choose to show them."""
package = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2, 40
)
ProfileMock.package_list = [package]
hass.components.persistent_notification = MagicMock()
await _setup_seventeentrack(hass, VALID_CONFIG_FULL)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
hass.components.persistent_notification.create.assert_not_called()
async def test_becomes_delivered_not_shown_notification(hass):
"""Ensure notification is triggered when package becomes delivered."""
package = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
package_delivered = Package(
"456", 206, "friendly name 1", "info text 1", "location 1", 206, 2, 40
)
ProfileMock.package_list = [package_delivered]
hass.components.persistent_notification = MagicMock()
await _goto_future(hass)
hass.components.persistent_notification.create.assert_called()
assert not hass.states.async_entity_ids()
async def test_summary_correctly_updated(hass):
"""Ensure summary entities are not duplicated."""
await _setup_seventeentrack(hass, summary_data=DEFAULT_SUMMARY)
assert len(hass.states.async_entity_ids()) == 7
for state in hass.states.async_all():
assert state.state == "0"
ProfileMock.summary_data = NEW_SUMMARY_DATA
await _goto_future(hass)
assert len(hass.states.async_entity_ids()) == 7
for state in hass.states.async_all():
assert state.state == "1"
|
from test.common import test_dict
from box import Box, ConfigBox
class TestConfigBox:
def test_config_box(self):
g = {
"b0": "no",
"b1": "yes",
"b2": "True",
"b3": "false",
"b4": True,
"i0": "34",
"f0": "5.5",
"f1": "3.333",
"l0": "4,5,6,7,8",
"l1": "[2 3 4 5 6]",
}
cns = ConfigBox(bb=g)
assert cns.bb.list("l1", spliter=" ") == ["2", "3", "4", "5", "6"]
assert cns.bb.list("l0", mod=lambda x: int(x)) == [4, 5, 6, 7, 8]
assert not cns.bb.bool("b0")
assert cns.bb.bool("b1")
assert cns.bb.bool("b2")
assert not cns.bb.bool("b3")
assert cns.bb.int("i0") == 34
assert cns.bb.float("f0") == 5.5
assert cns.bb.float("f1") == 3.333
assert cns.bb.getboolean("b4"), cns.bb.getboolean("b4")
assert cns.bb.getfloat("f0") == 5.5
assert cns.bb.getint("i0") == 34
assert cns.bb.getint("Hello!", 5) == 5
assert cns.bb.getfloat("Wooo", 4.4) == 4.4
assert cns.bb.getboolean("huh", True) is True
assert cns.bb.list("Waaaa", [1]) == [1]
assert repr(cns).startswith("<ConfigBox")
def test_dir(self):
b = ConfigBox(test_dict)
for item in ("to_yaml", "to_dict", "to_json", "int", "list", "float"):
assert item in dir(b)
def test_config_default(self):
bx4 = Box(default_box=True, default_box_attr=ConfigBox)
assert isinstance(bx4.bbbbb, ConfigBox)
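# Hedged usage sketch (doctest-style; the keys and values below are illustrative,
# not from the original suite): ConfigBox keeps values as strings and coerces
# them on access, mirroring the assertions above.
#
#   >>> cfg = ConfigBox({"server": {"port": "8080", "debug": "yes"}})
#   >>> cfg.server.int("port")
#   8080
#   >>> cfg.server.bool("debug")
#   True
#   >>> cfg.server.getint("missing", 42)  # default when the key is absent
#   42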
|
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
from rest_framework import serializers
from shop.conf import app_settings
class SerializeFormAsTextField(serializers.SerializerMethodField):
def __init__(self, form_class_name, **kwargs):
try:
self.form_class = import_string(app_settings.SHOP_CASCADE_FORMS[form_class_name])
except ImportError:
msg = "Can not import Form class. Please check your settings directive SHOP_CASCADE_FORMS['{}']."
raise ImproperlyConfigured(msg.format(form_class_name))
super().__init__(**kwargs)
def to_representation(self, value):
method = getattr(self.parent, self.method_name)
try:
return method(self.form_class, value)
except AttributeError:
return
class CheckoutSerializer(serializers.Serializer):
"""
Serializer to digest a summary of data required for the checkout.
"""
customer_tag = SerializeFormAsTextField('CustomerForm')
shipping_address_tag = SerializeFormAsTextField('ShippingAddressForm')
billing_address_tag = SerializeFormAsTextField('BillingAddressForm')
shipping_method_tag = SerializeFormAsTextField('ShippingMethodForm')
payment_method_tag = SerializeFormAsTextField('PaymentMethodForm')
extra_annotation_tag = SerializeFormAsTextField('ExtraAnnotationForm')
def get_customer_tag(self, form_class, cart):
form = form_class(instance=cart.customer)
return form.as_text()
def get_shipping_address_tag(self, form_class, cart):
form = form_class(instance=cart.shipping_address, cart=cart)
return form.as_text()
def get_billing_address_tag(self, form_class, cart):
form = form_class(instance=cart.billing_address, cart=cart)
return form.as_text()
def get_shipping_method_tag(self, form_class, cart):
form = form_class(initial=cart.extra, cart=cart)
return form.as_text()
def get_payment_method_tag(self, form_class, cart):
form = form_class(initial=cart.extra, cart=cart)
return form.as_text()
def get_extra_annotation_tag(self, form_class, cart):
form = form_class(initial=cart.extra, cart=cart)
return form.as_text()
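# Hedged illustration (the field below is hypothetical, not part of the original):
# each SerializeFormAsTextField resolves its form class from
# app_settings.SHOP_CASCADE_FORMS[<form_class_name>] when the serializer class is
# defined, and SerializerMethodField then delegates rendering to the parent's
# matching ``get_<field_name>`` method, e.g.:
#
#   class ExampleCheckoutSerializer(CheckoutSerializer):
#       newsletter_tag = SerializeFormAsTextField('NewsletterForm')
#
#       def get_newsletter_tag(self, form_class, cart):
#           return form_class(initial=cart.extra, cart=cart).as_text()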
|
import importlib.machinery
import importlib.util
import sys
from unittest import mock
import pytest
@pytest.fixture(autouse=True)
def default_icon_theme():
# Our tests need to run on a system with no default display, so all
# our display-specific get_default() stuff will break.
from gi.repository import Gtk
with mock.patch(
'gi.repository.Gtk.IconTheme.get_default',
mock.Mock(spec=Gtk.IconTheme.get_default)):
yield
@pytest.fixture(autouse=True)
def template_resources():
import gi # noqa: F401
with mock.patch(
'gi._gtktemplate.validate_resource_path',
mock.Mock(return_value=True)):
yield
def import_meld_conf():
loader = importlib.machinery.SourceFileLoader(
'meld.conf', './meld/conf.py.in')
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
import meld
meld.conf = mod
sys.modules['meld.conf'] = mod
import_meld_conf()
|
import os
from subprocess import CalledProcessError
import mock
import pytest
from paasta_tools.apply_external_resources import main
@pytest.fixture
def mock_run():
with mock.patch(
"paasta_tools.apply_external_resources.run", autospec=True
) as mock_runner:
yield mock_runner
@pytest.fixture(autouse=True)
def setup_external_files(fs):
fs.create_file(
"/external_resources/00-common/10-foo/10-deployment.yaml", contents="foo: bar",
)
fs.create_file(
"/external_resources/00-common/10-foo/20-service.yaml", contents="fizz: buzz",
)
fs.create_file(
"/external_resources/20-common/10-foo/20-deployment.yaml", contents="baz: biz",
)
fs.create_file(
"/external_resources/.applied/00-common/10-foo/10-deployment.yaml",
contents="foo: bar",
)
fs.create_file(
"/external_resources/.applied/00-common/10-foo/20-service.yaml",
contents="fizz: buzz",
)
fs.create_file(
"/external_resources/.applied/20-common/10-foo/20-deployment.yaml",
contents="baz: biz",
)
def test_no_changes(mock_run):
assert main("/external_resources") == 0
assert mock_run.call_count == 0
def test_resources_added_in_order(mock_run, fs):
fs.create_file(
"/external_resources/00-common/10-foo/30-hpa.yaml", contents="blah: blah",
)
fs.create_file(
"/external_resources/00-common/10-foo/40-service.yaml", contents="blah: blah",
)
fs.create_file(
"/external_resources/00-common/30-foo/10-deployment.yaml",
contents="blah: blah",
)
assert main("/external_resources") == 0
assert mock_run.call_args_list == [
mock.call(
[
"kubectl",
"apply",
"-f",
"/external_resources/00-common/10-foo/30-hpa.yaml",
],
check=True,
),
mock.call(
[
"kubectl",
"apply",
"-f",
"/external_resources/00-common/10-foo/40-service.yaml",
],
check=True,
),
mock.call(
[
"kubectl",
"apply",
"-f",
"/external_resources/00-common/30-foo/10-deployment.yaml",
],
check=True,
),
]
assert os.path.exists("/external_resources/.applied/00-common/10-foo/30-hpa.yaml")
assert os.path.exists(
"/external_resources/.applied/00-common/10-foo/40-service.yaml"
)
assert os.path.exists(
"/external_resources/.applied/00-common/30-foo/10-deployment.yaml"
)
def test_resources_deleted_in_reverse_order(mock_run, fs):
fs.create_file(
"/external_resources/.applied/00-common/10-foo/30-hpa.yaml",
contents="blah: blah",
)
fs.create_file(
"/external_resources/.applied/00-common/10-foo/40-service.yaml",
contents="blah: blah",
)
assert main("/external_resources") == 0
assert mock_run.call_args_list == [
mock.call(
[
"kubectl",
"delete",
"--ignore-not-found=true",
"-f",
"/external_resources/.applied/00-common/10-foo/40-service.yaml",
],
check=True,
),
mock.call(
[
"kubectl",
"delete",
"--ignore-not-found=true",
"-f",
"/external_resources/.applied/00-common/10-foo/30-hpa.yaml",
],
check=True,
),
]
assert not os.path.exists(
"/external_resources/.applied/00-common/10-foo/30-hpa.yaml"
)
assert not os.path.exists(
"/external_resources/.applied/00-common/10-foo/40-service.yaml"
)
def test_kubectl_fails(mock_run, fs):
mock_run.side_effect = [CalledProcessError(cmd="kubectl", returncode=1), None]
fs.create_file(
"/external_resources/00-common/10-foo/30-hpa.yaml", contents="blah: blah",
)
fs.create_file(
"/external_resources/00-common/10-foo/40-service.yaml", contents="blah: blah",
)
assert main("/external_resources") == 1
assert not os.path.exists(
"/external_resources/.applied/00-common/10-foo/30-hpa.yaml"
)
assert os.path.exists(
"/external_resources/.applied/00-common/10-foo/40-service.yaml"
)
|
import json
import datetime
from django.test import TestCase
from django.utils import timezone
from optional_django.env import DJANGO_CONFIGURED
from react.render import render_component
from react import conf
from .settings import Components
class TestDjangoIntegration(TestCase):
__test__ = DJANGO_CONFIGURED
def test_can_serialize_datetime_values_in_props(self):
component = render_component(
Components.HELLO_WORLD_JSX,
{
'name': 'world!',
'datetime': datetime.datetime(2015, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
'date': datetime.date(2015, 1, 2),
'time': datetime.time(3, 4, 5),
},
)
deserialized = json.loads(component.props)
self.assertEqual(
deserialized,
{
'name': 'world!',
'datetime': '2015-01-02T03:04:05Z',
'date': '2015-01-02',
'time': '03:04:05',
}
)
def test_relative_paths_are_resolved_via_the_static_file_finder(self):
component = render_component(Components.DJANGO_REL_PATH, to_static_markup=True)
self.assertEqual(str(component), '<span>You found me.</span>')
def test_django_settings_are_proxied(self):
self.assertEqual(conf.settings.RENDER, True)
with self.settings(REACT={'RENDER': False}):
self.assertEqual(conf.settings.RENDER, False)
|
import json
from behave import given
from behave import then
from behave import when
from paasta_tools.frameworks.task_store import MesosTaskParameters
from paasta_tools.frameworks.task_store import ZKTaskStore
@given("a ZKTaskStore")
def given_a_zktaskstore(context):
context.task_store = ZKTaskStore(
service_name="service",
instance_name="instance",
system_paasta_config=context.system_paasta_config,
framework_id="testing_zk_task_store",
)
# clean up any old data
for path in context.task_store.zk_client.get_children("/"):
context.task_store.zk_client.delete(path)
@then("get_all_tasks should return {return_json}")
def then_get_all_tasks_should_return(context, return_json):
all_tasks = context.task_store.get_all_tasks()
expected_tasks = {
k: MesosTaskParameters(**v) for k, v in json.loads(return_json).items()
}
assert all_tasks == expected_tasks
@when('we overwrite_task with task_id "{task_id}" and params {params_json}')
def when_we_overwrite_task(context, task_id, params_json):
context.task_store.overwrite_task(
task_id, MesosTaskParameters(**json.loads(params_json))
)
|
import logging
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv
# Reuse data and API logic from the sensor implementation
from .sensor import (
ATTRIBUTION,
CONF_STATION_ID,
ZamgData,
closest_station,
zamg_stations,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION_ID): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ZAMG weather platform."""
name = config.get(CONF_NAME)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
station_id = config.get(CONF_STATION_ID) or closest_station(
latitude, longitude, hass.config.config_dir
)
if station_id not in zamg_stations(hass.config.config_dir):
_LOGGER.error(
"Configured ZAMG %s (%s) is not a known station",
CONF_STATION_ID,
station_id,
)
return False
probe = ZamgData(station_id=station_id)
try:
probe.update()
except (ValueError, TypeError) as err:
_LOGGER.error("Received error from ZAMG: %s", err)
return False
add_entities([ZamgWeather(probe, name)], True)
class ZamgWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, zamg_data, stationname=None):
"""Initialise the platform with a data instance and station name."""
self.zamg_data = zamg_data
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
return (
self.stationname
or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}"
)
@property
def condition(self):
"""Return the current condition."""
return None
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def temperature(self):
"""Return the platform temperature."""
return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE)
@property
def humidity(self):
"""Return the humidity."""
return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY)
@property
def wind_speed(self):
"""Return the wind speed."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING)
def update(self):
"""Update current conditions."""
self.zamg_data.update()
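# Hedged example configuration for this platform (station id and name are
# illustrative; if station_id is omitted, the station closest to the configured
# coordinates is used):
#
# weather:
#   - platform: zamg
#     name: Home
#     station_id: "11035"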
|
from homeassistant.components.switch import SwitchEntity
from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR
from .entity import JuiceNetDevice
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the JuiceNet switches."""
entities = []
juicenet_data = hass.data[DOMAIN][config_entry.entry_id]
api = juicenet_data[JUICENET_API]
coordinator = juicenet_data[JUICENET_COORDINATOR]
for device in api.devices:
entities.append(JuiceNetChargeNowSwitch(device, coordinator))
async_add_entities(entities)
class JuiceNetChargeNowSwitch(JuiceNetDevice, SwitchEntity):
"""Implementation of a JuiceNet switch."""
def __init__(self, device, coordinator):
"""Initialise the switch."""
super().__init__(device, "charge_now", coordinator)
@property
def name(self):
"""Return the name of the device."""
return f"{self.device.name} Charge Now"
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.override_time != 0
async def async_turn_on(self, **kwargs):
"""Charge now."""
await self.device.set_override(True)
async def async_turn_off(self, **kwargs):
"""Don't charge now."""
await self.device.set_override(False)
|
import numpy as np
from . import dtypes, nputils
def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):
"""Wrapper to apply bottleneck moving window funcs on dask arrays"""
import dask.array as da
dtype, fill_value = dtypes.maybe_promote(a.dtype)
a = a.astype(dtype)
# inputs for overlap
if axis < 0:
axis = a.ndim + axis
depth = {d: 0 for d in range(a.ndim)}
depth[axis] = (window + 1) // 2
boundary = {d: fill_value for d in range(a.ndim)}
# Create overlap array.
ag = da.overlap.overlap(a, depth=depth, boundary=boundary)
# apply rolling func
out = da.map_blocks(
moving_func, ag, window, min_count=min_count, axis=axis, dtype=a.dtype
)
# trim array
result = da.overlap.trim_internal(out, depth)
return result
def rolling_window(a, axis, window, center, fill_value):
"""Dask's equivalence to np.utils.rolling_window"""
import dask.array as da
if not hasattr(axis, "__len__"):
axis = [axis]
window = [window]
center = [center]
orig_shape = a.shape
depth = {d: 0 for d in range(a.ndim)}
offset = [0] * a.ndim
drop_size = [0] * a.ndim
pad_size = [0] * a.ndim
for ax, win, cent in zip(axis, window, center):
if ax < 0:
ax = a.ndim + ax
depth[ax] = int(win / 2)
# For evenly sized window, we need to crop the first point of each block.
offset[ax] = 1 if win % 2 == 0 else 0
if depth[ax] > min(a.chunks[ax]):
raise ValueError(
"For window size %d, every chunk should be larger than %d, "
"but the smallest chunk size is %d. Rechunk your array\n"
"with a larger chunk size or a chunk size that\n"
"more evenly divides the shape of your array."
% (win, depth[ax], min(a.chunks[ax]))
)
# Although da.overlap pads values to boundaries of the array,
# the size of the generated array is smaller than what we want
# if center == False.
if cent:
start = int(win / 2) # 10 -> 5, 9 -> 4
end = win - 1 - start
else:
start, end = win - 1, 0
pad_size[ax] = max(start, end) + offset[ax] - depth[ax]
drop_size[ax] = 0
# pad_size becomes more than 0 when the overlapped array is smaller than
# needed. In this case, we need to enlarge the original array by padding
# before overlapping.
if pad_size[ax] > 0:
if pad_size[ax] < depth[ax]:
# overlapping requires each chunk larger than depth. If pad_size is
# smaller than the depth, we enlarge this and truncate it later.
drop_size[ax] = depth[ax] - pad_size[ax]
pad_size[ax] = depth[ax]
# TODO maybe following two lines can be summarized.
a = da.pad(
a, [(p, 0) for p in pad_size], mode="constant", constant_values=fill_value
)
boundary = {d: fill_value for d in range(a.ndim)}
# create overlap arrays
ag = da.overlap.overlap(a, depth=depth, boundary=boundary)
def func(x, window, axis):
x = np.asarray(x)
index = [slice(None)] * x.ndim
for ax, win in zip(axis, window):
x = nputils._rolling_window(x, win, ax)
index[ax] = slice(offset[ax], None)
return x[tuple(index)]
chunks = list(a.chunks) + window
new_axis = [a.ndim + i for i in range(len(axis))]
out = da.map_blocks(
func,
ag,
dtype=a.dtype,
new_axis=new_axis,
chunks=chunks,
window=window,
axis=axis,
)
# crop boundary.
index = [slice(None)] * a.ndim
for ax in axis:
index[ax] = slice(drop_size[ax], drop_size[ax] + orig_shape[ax])
return out[tuple(index)]
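# Hedged usage sketch (doctest-style, not part of the original module): given a
# 1-D dask array whose chunks are larger than half the window, a trailing
# "window" dimension is appended while the original length is preserved.
#
#   >>> import dask.array as da
#   >>> arr = da.ones(10, chunks=5)
#   >>> rolling_window(arr, axis=0, window=3, center=True, fill_value=np.nan).shape
#   (10, 3)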
def least_squares(lhs, rhs, rcond=None, skipna=False):
import dask.array as da
lhs_da = da.from_array(lhs, chunks=(rhs.chunks[0], lhs.shape[1]))
if skipna:
added_dim = rhs.ndim == 1
if added_dim:
rhs = rhs.reshape(rhs.shape[0], 1)
results = da.apply_along_axis(
nputils._nanpolyfit_1d,
0,
rhs,
lhs_da,
dtype=float,
shape=(lhs.shape[1] + 1,),
rcond=rcond,
)
coeffs = results[:-1, ...]
residuals = results[-1, ...]
if added_dim:
coeffs = coeffs.reshape(coeffs.shape[0])
residuals = residuals.reshape(residuals.shape[0])
else:
# Residuals here are (1, 1) but should be (K,) as rhs is (N, K)
# See issue dask/dask#6516
coeffs, residuals, _, _ = da.linalg.lstsq(lhs_da, rhs)
return coeffs, residuals
|
import numbers
import numpy as np
from .options import OPTIONS, _get_keep_attrs
from .pycompat import dask_array_type
from .utils import not_implemented
class SupportsArithmetic:
"""Base class for xarray types that support arithmetic.
Used by Dataset, DataArray, Variable and GroupBy.
"""
__slots__ = ()
# TODO: implement special methods for arithmetic here rather than injecting
# them in xarray/core/ops.py. Ideally, do so by inheriting from
# numpy.lib.mixins.NDArrayOperatorsMixin.
# TODO: allow extending this with some sort of registration system
_HANDLED_TYPES = (
np.ndarray,
np.generic,
numbers.Number,
bytes,
str,
) + dask_array_type
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
from .computation import apply_ufunc
# See the docstring example for numpy.lib.mixins.NDArrayOperatorsMixin.
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (SupportsArithmetic,)):
return NotImplemented
if ufunc.signature is not None:
raise NotImplementedError(
"{} not supported: xarray objects do not directly implement "
"generalized ufuncs. Instead, use xarray.apply_ufunc or "
"explicitly convert to xarray objects to NumPy arrays "
"(e.g., with `.values`).".format(ufunc)
)
if method != "__call__":
# TODO: support other methods, e.g., reduce and accumulate.
raise NotImplementedError(
"{} method for ufunc {} is not implemented on xarray objects, "
"which currently only support the __call__ method. As an "
"alternative, consider explicitly converting xarray objects "
"to NumPy arrays (e.g., with `.values`).".format(method, ufunc)
)
if any(isinstance(o, SupportsArithmetic) for o in out):
# TODO: implement this with logic like _inplace_binary_op. This
# will be necessary to use NDArrayOperatorsMixin.
raise NotImplementedError(
"xarray objects are not yet supported in the `out` argument "
"for ufuncs. As an alternative, consider explicitly "
"converting xarray objects to NumPy arrays (e.g., with "
"`.values`)."
)
join = dataset_join = OPTIONS["arithmetic_join"]
return apply_ufunc(
ufunc,
*inputs,
input_core_dims=((),) * ufunc.nin,
output_core_dims=((),) * ufunc.nout,
join=join,
dataset_join=dataset_join,
dataset_fill_value=np.nan,
kwargs=kwargs,
dask="allowed",
keep_attrs=_get_keep_attrs(default=True),
)
# this has no runtime function - these are listed so IDEs know these
# methods are defined and don't warn on these operations
__lt__ = (
__le__
) = (
__ge__
) = (
__gt__
) = (
__add__
) = (
__sub__
) = (
__mul__
) = (
__truediv__
) = (
__floordiv__
) = (
__mod__
) = (
__pow__
) = __and__ = __xor__ = __or__ = __div__ = __eq__ = __ne__ = not_implemented
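# Hedged illustration (not part of the original module): with __array_ufunc__ in
# place, applying a NumPy ufunc to an xarray object is routed through
# apply_ufunc, so labels are aligned using OPTIONS["arithmetic_join"] and attrs
# are kept by default, e.g.:
#
#   >>> import numpy as np, xarray as xr
#   >>> da = xr.DataArray([0.0, 1.0], dims="x", attrs={"units": "m"})
#   >>> np.sin(da)  # dispatched via SupportsArithmetic.__array_ufunc__
#   <xarray.DataArray (x: 2)> ...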
|
from babelfish import LanguageReverseConverter
from ..exceptions import ConfigurationError
class LegendasTVConverter(LanguageReverseConverter):
def __init__(self):
self.from_legendastv = {1: ('por', 'BR'), 2: ('eng',), 3: ('spa',), 4: ('fra',), 5: ('deu',), 6: ('jpn',),
7: ('dan',), 8: ('nor',), 9: ('swe',), 10: ('por',), 11: ('ara',), 12: ('ces',),
13: ('zho',), 14: ('kor',), 15: ('bul',), 16: ('ita',), 17: ('pol',)}
self.to_legendastv = {v: k for k, v in self.from_legendastv.items()}
self.codes = set(self.from_legendastv.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country) in self.to_legendastv:
return self.to_legendastv[(alpha3, country)]
if (alpha3,) in self.to_legendastv:
return self.to_legendastv[(alpha3,)]
raise ConfigurationError('Unsupported language code for legendastv: %s, %s, %s' % (alpha3, country, script))
def reverse(self, legendastv):
if legendastv in self.from_legendastv:
return self.from_legendastv[legendastv]
raise ConfigurationError('Unsupported language number for legendastv: %s' % legendastv)
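# Hedged usage sketch (derived from the mapping above):
#
#   >>> converter = LegendasTVConverter()
#   >>> converter.convert('por', 'BR')
#   1
#   >>> converter.convert('eng')
#   2
#   >>> converter.reverse(10)
#   ('por',)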
|
import os
import sys
from invoke import task
from ._config import ROOT_DIR, NAME
@task
def lint(ctx):
""" alias for "invoke test --style"
"""
test_style()
@task(optional=['unit', 'style'],
help=dict(unit='run unit tests (pytest) on given subdir (default ".")',
style='run style tests (flake8) on given subdir (default ".")',
cover='show test coverage'))
def test(ctx, unit='', style='', cover=False):
""" run tests (unit, style)
"""
if not (unit or style or cover):
sys.exit('Test task needs --unit, --style or --cover')
if unit:
test_unit('.' if not isinstance(unit, str) else unit)
if style:
test_style('.' if not isinstance(style, str) else style)
if cover:
show_coverage_html()
def test_unit(rel_path='.'):
# Ensure we have pytest
try:
import pytest # noqa
except ImportError:
sys.exit('Cannot do unit tests, pytest not installed')
# Get path to test
py2 = sys.version_info[0] == 2
rel_path = 'flexx_legacy/' + rel_path if py2 else 'flexx/' + rel_path
test_path = os.path.join(ROOT_DIR, rel_path)
# Import flexx, from installed, or from ROOT_DIR
if py2 or os.getenv('TEST_INSTALL', '').lower() in ('1', 'yes', 'true'):
if ROOT_DIR in sys.path:
sys.path.remove(ROOT_DIR)
os.chdir(os.path.expanduser('~'))
m = __import__(NAME)
assert ROOT_DIR not in os.path.abspath(m.__path__[0])
else:
os.chdir(ROOT_DIR)
m = __import__(NAME)
assert ROOT_DIR in os.path.abspath(m.__path__[0])
# Start tests
_enable_faulthandler()
try:
res = pytest.main(['--cov', NAME, '--cov-config=.coveragerc',
'--cov-report=term', '--cov-report=html', test_path])
sys.exit(res)
finally:
m = __import__(NAME)
print('Unit tests were performed on', str(m))
def show_coverage_term():
from coverage import coverage
cov = coverage(auto_data=False, branch=True, data_suffix=None,
source=[NAME]) # should match testing/_coverage.py
cov.load()
cov.report()
def show_coverage_html():
import webbrowser
from coverage import coverage
print('Generating HTML...')
os.chdir(ROOT_DIR)
cov = coverage(auto_data=False, branch=True, data_suffix=None,
source=[NAME]) # should match testing/_coverage.py
cov.load()
cov.html_report()
print('Done, launching browser.')
fname = os.path.join(os.getcwd(), 'htmlcov', 'index.html')
if not os.path.isfile(fname):
raise IOError('Generated file not found: %s' % fname)
webbrowser.open_new_tab(fname)
def test_style(rel_path='.'):
# Ensure we have flake8
try:
import flake8 # noqa
from flake8.main.application import Application
except ImportError as err:
sys.exit('Cannot do style test: ' + str(err))
# Prepare
os.chdir(ROOT_DIR)
if rel_path in ('', '.'):
sys.argv[1:] = ['flexx', 'flexxamples']
else:
sys.argv[1:] = ['flexx/' + rel_path]
# Do test
print('Running flake8 tests ...')
app = Application()
app.run()
# Report
nerrors = app.result_count
if nerrors:
print('Arg! Found %i style errors.' % nerrors)
else:
print('Hooray, no style errors found!')
# Exit (will exit(1) if errors)
app.exit()
def _enable_faulthandler():
""" Enable faulthandler (if we can), so that we get tracebacks
on segfaults.
"""
try:
import faulthandler
faulthandler.enable()
print('Faulthandler enabled')
except Exception:
print('Could not enable faulthandler')
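# Hedged CLI usage sketch (based on the task definitions above):
#
#   invoke lint                # alias for "invoke test --style"
#   invoke test --unit         # run pytest over the whole package
#   invoke test --style ui     # run flake8 on a subdirectory, here "flexx/ui"
#   invoke test --cover        # open the HTML coverage report in a browser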
|
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_CURRENCY, ENERGY_KILO_WATT_HOUR, POWER_WATT
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "https://engage.efergy.com/mobile_proxy/"
CONF_APPTOKEN = "app_token"
CONF_UTC_OFFSET = "utc_offset"
CONF_MONITORED_VARIABLES = "monitored_variables"
CONF_SENSOR_TYPE = "type"
CONF_PERIOD = "period"
CONF_INSTANT = "instant_readings"
CONF_AMOUNT = "amount"
CONF_BUDGET = "budget"
CONF_COST = "cost"
CONF_CURRENT_VALUES = "current_values"
DEFAULT_PERIOD = "year"
DEFAULT_UTC_OFFSET = "0"
SENSOR_TYPES = {
CONF_INSTANT: ["Energy Usage", POWER_WATT],
CONF_AMOUNT: ["Energy Consumed", ENERGY_KILO_WATT_HOUR],
CONF_BUDGET: ["Energy Budget", None],
CONF_COST: ["Energy Cost", None],
CONF_CURRENT_VALUES: ["Per-Device Usage", POWER_WATT],
}
TYPES_SCHEMA = vol.In(SENSOR_TYPES)
SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_SENSOR_TYPE): TYPES_SCHEMA,
vol.Optional(CONF_CURRENCY, default=""): cv.string,
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_APPTOKEN): cv.string,
vol.Optional(CONF_UTC_OFFSET, default=DEFAULT_UTC_OFFSET): cv.string,
vol.Required(CONF_MONITORED_VARIABLES): [SENSORS_SCHEMA],
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Efergy sensor."""
app_token = config.get(CONF_APPTOKEN)
utc_offset = str(config.get(CONF_UTC_OFFSET))
dev = []
for variable in config[CONF_MONITORED_VARIABLES]:
if variable[CONF_SENSOR_TYPE] == CONF_CURRENT_VALUES:
url_string = f"{_RESOURCE}getCurrentValuesSummary?token={app_token}"
response = requests.get(url_string, timeout=10)
for sensor in response.json():
sid = sensor["sid"]
dev.append(
EfergySensor(
variable[CONF_SENSOR_TYPE],
app_token,
utc_offset,
variable[CONF_PERIOD],
variable[CONF_CURRENCY],
sid,
)
)
dev.append(
EfergySensor(
variable[CONF_SENSOR_TYPE],
app_token,
utc_offset,
variable[CONF_PERIOD],
variable[CONF_CURRENCY],
)
)
add_entities(dev, True)
class EfergySensor(Entity):
"""Implementation of an Efergy sensor."""
def __init__(self, sensor_type, app_token, utc_offset, period, currency, sid=None):
"""Initialize the sensor."""
self.sid = sid
if sid:
self._name = f"efergy_{sid}"
else:
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self.app_token = app_token
self.utc_offset = utc_offset
self._state = None
self.period = period
self.currency = currency
if self.type == "cost":
self._unit_of_measurement = f"{self.currency}/{self.period}"
else:
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the Efergy monitor data from the web service."""
try:
if self.type == "instant_readings":
url_string = f"{_RESOURCE}getInstant?token={self.app_token}"
response = requests.get(url_string, timeout=10)
self._state = response.json()["reading"]
elif self.type == "amount":
url_string = f"{_RESOURCE}getEnergy?token={self.app_token}&offset={self.utc_offset}&period={self.period}"
response = requests.get(url_string, timeout=10)
self._state = response.json()["sum"]
elif self.type == "budget":
url_string = f"{_RESOURCE}getBudget?token={self.app_token}"
response = requests.get(url_string, timeout=10)
self._state = response.json()["status"]
elif self.type == "cost":
url_string = f"{_RESOURCE}getCost?token={self.app_token}&offset={self.utc_offset}&period={self.period}"
response = requests.get(url_string, timeout=10)
self._state = response.json()["sum"]
elif self.type == "current_values":
url_string = (
f"{_RESOURCE}getCurrentValuesSummary?token={self.app_token}"
)
response = requests.get(url_string, timeout=10)
for sensor in response.json():
if self.sid == sensor["sid"]:
measurement = next(iter(sensor["data"][0].values()))
self._state = measurement
else:
self._state = None
except (requests.RequestException, ValueError, KeyError):
_LOGGER.warning("Could not update status for %s", self.name)
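# Hedged example configuration matching PLATFORM_SCHEMA above (token and values
# are illustrative):
#
# sensor:
#   - platform: efergy
#     app_token: APP_TOKEN
#     utc_offset: "60"
#     monitored_variables:
#       - type: instant_readings
#       - type: cost
#         period: day
#         currency: $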
|
import logging
from collections import Counter
import numpy as np
import plotly.graph_objs as go
from gensim.models.poincare import PoincareKeyedVectors
logger = logging.getLogger(__name__)
def poincare_2d_visualization(model, tree, figure_title, num_nodes=50, show_node_labels=()):
"""Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.
Parameters
----------
model : :class:`~gensim.models.poincare.PoincareModel`
The model to visualize, model size must be 2.
tree : set
Set of tuples containing the direct edges present in the original dataset.
figure_title : str
Title of the plotted figure.
num_nodes : int or None
Number of nodes for which edges are to be plotted.
If `None`, all edges are plotted.
Limiting this is helpful when the dataset is large, to avoid a messy plot.
show_node_labels : iterable
Iterable of nodes for which to show labels by default.
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot.
"""
vectors = model.kv.syn0
if vectors.shape[1] != 2:
raise ValueError('Can only plot 2-D vectors')
node_labels = model.kv.index_to_key
nodes_x = list(vectors[:, 0])
nodes_y = list(vectors[:, 1])
nodes = go.Scatter(
x=nodes_x, y=nodes_y,
mode='markers',
marker=dict(color='rgb(30, 100, 200)'),
text=node_labels,
textposition='bottom center'
)
nodes_x, nodes_y, node_labels = [], [], []
for node in show_node_labels:
vector = model.kv[node]
nodes_x.append(vector[0])
nodes_y.append(vector[1])
node_labels.append(node)
nodes_with_labels = go.Scatter(
x=nodes_x, y=nodes_y,
mode='markers+text',
marker=dict(color='rgb(200, 100, 200)'),
text=node_labels,
textposition='bottom center'
)
node_out_degrees = Counter(hypernym_pair[1] for hypernym_pair in tree)
if num_nodes is None:
chosen_nodes = list(node_out_degrees.keys())
else:
chosen_nodes = list(sorted(node_out_degrees.keys(), key=lambda k: -node_out_degrees[k]))[:num_nodes]
edges_x = []
edges_y = []
for u, v in tree:
if not(u in chosen_nodes or v in chosen_nodes):
continue
vector_u = model.kv[u]
vector_v = model.kv[v]
edges_x += [vector_u[0], vector_v[0], None]
edges_y += [vector_u[1], vector_v[1], None]
edges = go.Scatter(
x=edges_x, y=edges_y, mode="lines", hoverinfo='none',
line=dict(color='rgb(50,50,50)', width=1))
layout = go.Layout(
title=figure_title, showlegend=False, hovermode='closest', width=800, height=800)
return go.Figure(data=[edges, nodes, nodes_with_labels], layout=layout)
def poincare_distance_heatmap(origin_point, x_range=(-1.0, 1.0), y_range=(-1.0, 1.0), num_points=100):
"""Create a heatmap of Poincare distances from `origin_point` for each point (x, y),
where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.
Parameters
----------
origin_point : tuple (int, int)
(x, y) from which distances are to be measured and plotted.
x_range : tuple (int, int)
Range for x-axis from which to choose `num_points` points.
y_range : tuple (int, int)
Range for y-axis from which to choose `num_points` points.
num_points : int
Number of points to choose from `x_range` and `y_range`.
Notes
-----
Points outside the unit circle are ignored, since the Poincare distance is defined
only for points inside the circle boundaries (exclusive of the boundary).
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot
"""
epsilon = 1e-8 # Can't choose (-1.0, -1.0) or (1.0, 1.0), distance undefined
x_range, y_range = list(x_range), list(y_range)
if x_range[0] == -1.0 and y_range[0] == -1.0:
x_range[0] += epsilon
y_range[0] += epsilon
if x_range[1] == 1.0 and y_range[1] == 1.0:
x_range[1] -= epsilon
y_range[1] -= epsilon
x_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
y_axis_values = np.linspace(y_range[0], y_range[1], num=num_points)
x, y = np.meshgrid(x_axis_values, y_axis_values)
all_points = np.dstack((x, y)).swapaxes(1, 2).swapaxes(0, 1).reshape(2, num_points ** 2).T
norms = np.linalg.norm(all_points, axis=1)
all_points = all_points[norms < 1]
origin_point = np.array(origin_point)
all_distances = PoincareKeyedVectors.poincare_dists(origin_point, all_points)
distances = go.Scatter(
x=all_points[:, 0],
y=all_points[:, 1],
mode='markers',
marker=dict(
size='9',
color=all_distances,
colorscale='Viridis',
showscale=True,
colorbar=go.ColorBar(
title='Poincare Distance'
),
),
text=[
'Distance from (%.2f, %.2f): %.2f' % (origin_point[0], origin_point[1], d)
for d in all_distances],
name='', # To avoid the default 'trace 0'
)
origin = go.Scatter(
x=[origin_point[0]],
y=[origin_point[1]],
name='Distance from (%.2f, %.2f)' % (origin_point[0], origin_point[1]),
mode='markers+text',
marker=dict(
size='10',
color='rgb(200, 50, 50)'
)
)
layout = go.Layout(
width=900,
height=800,
showlegend=False,
title='Poincare Distances from (%.2f, %.2f)' % (origin_point[0], origin_point[1]),
hovermode='closest',
)
return go.Figure(data=[distances, origin], layout=layout)
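# Hedged usage sketch (relations are illustrative; assumes a PoincareModel
# trained with size=2, and imports PoincareModel only for the example):
#
#   >>> from gensim.models.poincare import PoincareModel
#   >>> relations = [('kangaroo', 'marsupial'), ('marsupial', 'mammal')]
#   >>> model = PoincareModel(relations, size=2)
#   >>> model.train(epochs=50)
#   >>> fig = poincare_2d_visualization(model, set(relations), 'Poincare demo', num_nodes=None)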
|
from homeassistant.components.powerwall.const import DOMAIN
from homeassistant.const import PERCENTAGE
from homeassistant.setup import async_setup_component
from .mocks import _mock_get_config, _mock_powerwall_with_fixtures
from tests.async_mock import patch
async def test_sensors(hass):
"""Test creation of the sensors."""
mock_powerwall = await _mock_powerwall_with_fixtures(hass)
with patch(
"homeassistant.components.powerwall.config_flow.Powerwall",
return_value=mock_powerwall,
), patch(
"homeassistant.components.powerwall.Powerwall", return_value=mock_powerwall
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
device_registry = await hass.helpers.device_registry.async_get_registry()
reg_device = device_registry.async_get_device(
identifiers={("powerwall", "TG0123456789AB_TG9876543210BA")},
connections=set(),
)
assert reg_device.model == "PowerWall 2 (GW1)"
assert reg_device.sw_version == "1.45.1"
assert reg_device.manufacturer == "Tesla"
assert reg_device.name == "MySite"
state = hass.states.get("sensor.powerwall_site_now")
assert state.state == "0.032"
expected_attributes = {
"frequency": 60,
"energy_exported_(in_kW)": 10429.5,
"energy_imported_(in_kW)": 4824.2,
"instant_average_voltage": 120.7,
"unit_of_measurement": "kW",
"friendly_name": "Powerwall Site Now",
"device_class": "power",
"is_active": False,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
for key, value in expected_attributes.items():
assert state.attributes[key] == value
state = hass.states.get("sensor.powerwall_load_now")
assert state.state == "1.971"
expected_attributes = {
"frequency": 60,
"energy_exported_(in_kW)": 1056.8,
"energy_imported_(in_kW)": 4693.0,
"instant_average_voltage": 120.7,
"unit_of_measurement": "kW",
"friendly_name": "Powerwall Load Now",
"device_class": "power",
"is_active": True,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
for key, value in expected_attributes.items():
assert state.attributes[key] == value
state = hass.states.get("sensor.powerwall_battery_now")
assert state.state == "-8.55"
expected_attributes = {
"frequency": 60.0,
"energy_exported_(in_kW)": 3620.0,
"energy_imported_(in_kW)": 4216.2,
"instant_average_voltage": 240.6,
"unit_of_measurement": "kW",
"friendly_name": "Powerwall Battery Now",
"device_class": "power",
"is_active": True,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
for key, value in expected_attributes.items():
assert state.attributes[key] == value
state = hass.states.get("sensor.powerwall_solar_now")
assert state.state == "10.49"
expected_attributes = {
"frequency": 60,
"energy_exported_(in_kW)": 9864.2,
"energy_imported_(in_kW)": 28.2,
"instant_average_voltage": 120.7,
"unit_of_measurement": "kW",
"friendly_name": "Powerwall Solar Now",
"device_class": "power",
"is_active": True,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
for key, value in expected_attributes.items():
assert state.attributes[key] == value
state = hass.states.get("sensor.powerwall_charge")
assert state.state == "47"
expected_attributes = {
"unit_of_measurement": PERCENTAGE,
"friendly_name": "Powerwall Charge",
"device_class": "battery",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
for key, value in expected_attributes.items():
assert state.attributes[key] == value
|
from typing import Dict, List
import voluptuous as vol
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN
from .const import (
CONDITION_ARMED_AWAY,
CONDITION_ARMED_CUSTOM_BYPASS,
CONDITION_ARMED_HOME,
CONDITION_ARMED_NIGHT,
CONDITION_DISARMED,
CONDITION_TRIGGERED,
)
CONDITION_TYPES = {
CONDITION_TRIGGERED,
CONDITION_DISARMED,
CONDITION_ARMED_HOME,
CONDITION_ARMED_AWAY,
CONDITION_ARMED_NIGHT,
CONDITION_ARMED_CUSTOM_BYPASS,
}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions for Alarm control panel devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
# We need a state or else we can't populate the different armed conditions
if state is None:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
# Add conditions for each entity that belongs to this integration
conditions += [
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_DISARMED,
},
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_TRIGGERED,
},
]
if supported_features & SUPPORT_ALARM_ARM_HOME:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_HOME,
}
)
if supported_features & SUPPORT_ALARM_ARM_AWAY:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_AWAY,
}
)
if supported_features & SUPPORT_ALARM_ARM_NIGHT:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_NIGHT,
}
)
if supported_features & SUPPORT_ALARM_ARM_CUSTOM_BYPASS:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_CUSTOM_BYPASS,
}
)
return conditions
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == CONDITION_TRIGGERED:
state = STATE_ALARM_TRIGGERED
elif config[CONF_TYPE] == CONDITION_DISARMED:
state = STATE_ALARM_DISARMED
elif config[CONF_TYPE] == CONDITION_ARMED_HOME:
state = STATE_ALARM_ARMED_HOME
elif config[CONF_TYPE] == CONDITION_ARMED_AWAY:
state = STATE_ALARM_ARMED_AWAY
elif config[CONF_TYPE] == CONDITION_ARMED_NIGHT:
state = STATE_ALARM_ARMED_NIGHT
elif config[CONF_TYPE] == CONDITION_ARMED_CUSTOM_BYPASS:
state = STATE_ALARM_ARMED_CUSTOM_BYPASS
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
|
import requests
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_WINDOW,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICES
from .const import CONF_CONNECTIONS, DOMAIN as FRITZBOX_DOMAIN, LOGGER
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Fritzbox binary sensor from config_entry."""
entities = []
devices = hass.data[FRITZBOX_DOMAIN][CONF_DEVICES]
fritz = hass.data[FRITZBOX_DOMAIN][CONF_CONNECTIONS][config_entry.entry_id]
for device in await hass.async_add_executor_job(fritz.get_devices):
if device.has_alarm and device.ain not in devices:
entities.append(FritzboxBinarySensor(device, fritz))
devices.add(device.ain)
async_add_entities(entities, True)
class FritzboxBinarySensor(BinarySensorEntity):
"""Representation of a binary Fritzbox device."""
def __init__(self, device, fritz):
"""Initialize the Fritzbox binary sensor."""
self._device = device
self._fritz = fritz
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": self.name,
"identifiers": {(FRITZBOX_DOMAIN, self._device.ain)},
"manufacturer": self._device.manufacturer,
"model": self._device.productname,
"sw_version": self._device.fw_version,
}
@property
def unique_id(self):
"""Return the unique ID of the device."""
return self._device.ain
@property
def name(self):
"""Return the name of the entity."""
return self._device.name
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_WINDOW
@property
def is_on(self):
"""Return true if sensor is on."""
if not self._device.present:
return False
return self._device.alert_state
def update(self):
"""Get latest data from the Fritzbox."""
try:
self._device.update()
except requests.exceptions.HTTPError as ex:
LOGGER.warning("Connection error: %s", ex)
self._fritz.login()
|
import logging
from pyrainbird import RainbirdController
import voluptuous as vol
from homeassistant.components import binary_sensor, sensor, switch
from homeassistant.const import (
CONF_FRIENDLY_NAME,
CONF_HOST,
CONF_PASSWORD,
CONF_TRIGGER_TIME,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
CONF_ZONES = "zones"
SUPPORTED_PLATFORMS = [switch.DOMAIN, sensor.DOMAIN, binary_sensor.DOMAIN]
_LOGGER = logging.getLogger(__name__)
RAINBIRD_CONTROLLER = "controller"
DATA_RAINBIRD = "rainbird"
DOMAIN = "rainbird"
SENSOR_TYPE_RAINDELAY = "raindelay"
SENSOR_TYPE_RAINSENSOR = "rainsensor"
# sensor_type [ description, unit, icon ]
SENSOR_TYPES = {
SENSOR_TYPE_RAINSENSOR: ["Rainsensor", None, "mdi:water"],
SENSOR_TYPE_RAINDELAY: ["Raindelay", None, "mdi:water-off"],
}
TRIGGER_TIME_SCHEMA = vol.All(
cv.time_period, cv.positive_timedelta, lambda td: (td.total_seconds() // 60)
)
ZONE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_TRIGGER_TIME): TRIGGER_TIME_SCHEMA,
}
)
CONTROLLER_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_TRIGGER_TIME): TRIGGER_TIME_SCHEMA,
vol.Optional(CONF_ZONES): vol.Schema({cv.positive_int: ZONE_SCHEMA}),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Rain Bird component."""
hass.data[DATA_RAINBIRD] = []
success = False
for controller_config in config[DOMAIN]:
success = success or _setup_controller(hass, controller_config, config)
return success
def _setup_controller(hass, controller_config, config):
"""Set up a controller."""
server = controller_config[CONF_HOST]
password = controller_config[CONF_PASSWORD]
controller = RainbirdController(server, password)
position = len(hass.data[DATA_RAINBIRD])
try:
controller.get_serial_number()
except Exception as exc: # pylint: disable=broad-except
_LOGGER.error("Unable to setup controller: %s", exc)
return False
hass.data[DATA_RAINBIRD].append(controller)
_LOGGER.debug("Rain Bird Controller %d set to: %s", position, server)
for platform in SUPPORTED_PLATFORMS:
discovery.load_platform(
hass,
platform,
DOMAIN,
{RAINBIRD_CONTROLLER: position, **controller_config},
config,
)
return True
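# Hedged example configuration matching CONFIG_SCHEMA above (host, password and
# trigger times are illustrative):
#
# rainbird:
#   - host: 192.168.1.10
#     password: !secret rainbird_password
#     trigger_time: "00:06"
#     zones:
#       1:
#         friendly_name: Front lawn
#         trigger_time: "00:10"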
|
import glob
import os.path
import pathlib
import platform
import sys
import sysconfig
from cx_Freeze import Executable, setup
def get_non_python_libs():
"""Returns list of tuples containing extra dependencies required to run
meld on current platform.
Every pair corresponds to a single library file.
First tuple item is path in local filesystem during build.
Second tuple item correspond to path expected in meld installation
relative to meld prefix.
Note that for returned dynamic libraries and executables dependencies
are expected to be resolved by caller, for example by cx_freeze.
"""
local_bin = os.path.join(sys.prefix, "bin")
inst_root = [] # local paths of files to put at the frozen root
inst_lib = [] # local paths of files to put in the frozen 'lib' subdir
if 'mingw' in sysconfig.get_platform():
# dll imported by dll dependencies expected to be auto-resolved later
inst_root = [os.path.join(local_bin, 'libgtksourceview-4-0.dll')]
# required for communicating multiple instances
inst_lib.append(os.path.join(local_bin, 'gdbus.exe'))
# gspawn-helper is needed for Gtk.show_uri function
if platform.architecture()[0] == '32bit':
inst_lib.append(os.path.join(local_bin, 'gspawn-win32-helper.exe'))
else:
inst_lib.append(os.path.join(local_bin, 'gspawn-win64-helper.exe'))
return [
(f, os.path.basename(f)) for f in inst_root
] + [
(f, os.path.join('lib', os.path.basename(f))) for f in inst_lib
]
gtk_data_dirs = [
'etc/fonts',
'etc/gtk-3.0',
'lib/gdk-pixbuf-2.0',
'lib/girepository-1.0',
'share/fontconfig',
'share/glib-2.0',
'share/gtksourceview-4',
'share/icons',
]
gtk_data_files = []
for data_dir in gtk_data_dirs:
local_data_dir = os.path.join(sys.prefix, data_dir)
for local_data_subdir, dirs, files in os.walk(local_data_dir):
data_subdir = os.path.relpath(local_data_subdir, local_data_dir)
gtk_data_files.append((
os.path.join(data_dir, data_subdir),
[os.path.join(local_data_subdir, file) for file in files]
))
manually_added_libs = {
# add libgdk_pixbuf-2.0-0.dll manually to forbid auto-pulling of gdiplus.dll
"libgdk_pixbuf-2.0-0.dll": os.path.join(sys.prefix, 'bin'),
# libcroco and librsvg are needed for SVG loading in gdkpixbuf
"libcroco-0.6-3.dll": os.path.join(sys.prefix, 'bin'),
"librsvg-2-2.dll": os.path.join(sys.prefix, 'bin'),
}
for lib, possible_path in manually_added_libs.items():
local_lib = os.path.join(possible_path, lib)
if os.path.isfile(local_lib):
gtk_data_files.append((os.path.dirname(lib), [local_lib]))
build_exe_options = {
"includes": ["gi"],
"excludes": ["tkinter"],
"packages": ["gi", "weakref"],
"include_files": get_non_python_libs(),
"bin_excludes": list(manually_added_libs.keys()),
"zip_exclude_packages": [],
"zip_include_packages": ["*"],
}
# Create our registry key, and fill with install directory and exe
registry_table = [
('MeldKLM', 2, r'SOFTWARE\Meld', '*', None, 'TARGETDIR'),
('MeldInstallDir', 2, r'SOFTWARE\Meld', 'InstallDir', '[TARGETDIR]', 'TARGETDIR'),
('MeldExecutable', 2, r'SOFTWARE\Meld', 'Executable', '[TARGETDIR]Meld.exe', 'TARGETDIR'),
]
# Provide the locator and app search to give MSI the existing install directory
# for future upgrades
reg_locator_table = [
('MeldInstallDirLocate', 2, r'SOFTWARE\Meld', 'InstallDir', 0)
]
app_search_table = [('TARGETDIR', 'MeldInstallDirLocate')]
msi_data = {
'Registry': registry_table,
'RegLocator': reg_locator_table,
'AppSearch': app_search_table
}
bdist_msi_options = {
"upgrade_code": "{1d303789-b4e2-4d6e-9515-c301e155cd50}",
"data": msi_data,
}
executable_options = {
"script": "bin/meld",
"icon": "data/icons/org.gnome.meld.ico",
}
if 'mingw' in sysconfig.get_platform():
executable_options.update({
"base": "Win32GUI", # comment to build console version to see stderr
"targetName": "Meld.exe",
"shortcutName": "Meld",
"shortcutDir": "ProgramMenuFolder",
})
# Copy conf.py in place if necessary
base_path = pathlib.Path(__file__).parent
conf_path = base_path / 'meld' / 'conf.py'
if not conf_path.exists():
import shutil
shutil.copyfile(conf_path.with_suffix('.py.in'), conf_path)
import meld.build_helpers # noqa: E402
import meld.conf # noqa: E402
setup(
name="Meld",
version=meld.conf.__version__,
description='Visual diff and merge tool',
author='The Meld project',
author_email='[email protected]',
maintainer='Kai Willadsen',
url='http://meldmerge.org',
license='GPLv2+',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Desktop Environment :: Gnome',
'Topic :: Software Development',
'Topic :: Software Development :: Version Control',
],
keywords=['diff', 'merge'],
options={
"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options,
# cx_freeze + bdist_dumb fails on non-empty prefix
"install": {"prefix": "."},
        # frozen binary doesn't use source files; they are only kept for humans
"install_lib": {"compile": False},
},
executables=[
Executable(**executable_options),
],
packages=[
'meld',
'meld.matchers',
'meld.ui',
'meld.vc',
],
package_data={
'meld': ['README', 'COPYING', 'NEWS'],
'meld.vc': ['README', 'COPYING'],
},
scripts=['bin/meld'],
data_files=[
('share/man/man1',
['data/meld.1']
),
('share/doc/meld-' + meld.conf.__version__,
['COPYING', 'NEWS']
),
('share/meld/icons',
glob.glob("data/icons/*.png") +
glob.glob("data/icons/COPYING*")
),
('share/meld/styles',
glob.glob("data/styles/*.xml")
),
('share/meld/ui',
glob.glob("data/ui/*.ui") + glob.glob("data/ui/*.xml")
),
] + gtk_data_files,
cmdclass={
"build_i18n": meld.build_helpers.build_i18n,
"build_help": meld.build_helpers.build_help,
"build_icons": meld.build_helpers.build_icons,
"build_data": meld.build_helpers.build_data,
}
)
|
from __future__ import division
import numpy as np
import os
from six.moves.urllib import request
import unittest
from chainer import testing
from chainercv.evaluations import calc_instance_segmentation_voc_prec_rec
from chainercv.evaluations import eval_instance_segmentation_voc
@testing.parameterize(*(
testing.product_dict(
[{
'pred_masks': [
[[[False, False], [True, True]],
[[True, True], [False, True]],
[[True, False], [True, True]]]
],
'pred_labels': [
[0, 0, 0],
],
'pred_scores': [
[0.8, 0.9, 1],
],
'gt_masks': [
[[[True, True], [False, False]]],
],
'gt_labels': [
[0],
],
}],
[
{
'iou_thresh': 0.5,
'prec': [
[0., 1 / 2, 1 / 3],
],
'rec': [
[0., 1., 1.],
],
},
{
'iou_thresh': 0.97,
'prec': [
[0., 0., 0.],
],
'rec': [
[0., 0., 0.],
],
},
]
) +
[
{
'pred_masks': [
[[[False, False], [True, True]],
[[True, True], [False, False]]],
[[[True, True], [True, True]],
[[True, True], [False, True]],
[[True, False], [True, True]]],
],
'pred_labels': [
[0, 0],
[0, 2, 2],
],
'pred_scores': [
[1, 0.9],
[0.7, 0.6, 0.8],
],
'gt_masks': [
[[[False, True], [True, True]],
[[True, True], [True, False]]],
[[[True, False], [False, True]]],
],
'gt_labels': [
[0, 0],
[2],
],
'iou_thresh': 0.5,
'prec': [
[1., 1., 2 / 3],
None,
[1., 0.5],
],
'rec': [
[1 / 2, 1., 1.],
None,
[1., 1.],
],
},
{
'pred_masks': [
[[[False, True], [True, False]],
[[True, False], [False, True]],
[[True, False], [True, False]]]
],
'pred_labels': [
[0, 0, 0],
],
'pred_scores': [
[0.8, 0.9, 1],
],
'gt_masks': [
[[[False, True], [True, True]],
[[True, True], [False, True]]]
],
'gt_labels': [
[0, 0],
],
'iou_thresh': 0.5,
'prec': [
[0, 1 / 2, 2 / 3],
],
'rec': [
[0, 1 / 2, 1.],
],
},
]
))
class TestCalcInstanceSegmentationVOCPrecRec(unittest.TestCase):
def setUp(self):
self.pred_masks = (np.array(mask) for mask in self.pred_masks)
self.pred_labels = (np.array(label) for label in self.pred_labels)
self.pred_scores = (np.array(score) for score in self.pred_scores)
self.gt_masks = (np.array(mask) for mask in self.gt_masks)
self.gt_labels = (np.array(label) for label in self.gt_labels)
def test_calc_instance_segmentation_voc_prec_rec(self):
prec, rec = calc_instance_segmentation_voc_prec_rec(
self.pred_masks, self.pred_labels, self.pred_scores,
self.gt_masks, self.gt_labels,
iou_thresh=self.iou_thresh)
self.assertEqual(len(prec), len(self.prec))
for prec_l, expected_prec_l in zip(prec, self.prec):
if prec_l is None and expected_prec_l is None:
continue
np.testing.assert_equal(prec_l, expected_prec_l)
self.assertEqual(len(rec), len(self.rec))
for rec_l, expected_rec_l in zip(rec, self.rec):
if rec_l is None and expected_rec_l is None:
continue
np.testing.assert_equal(rec_l, expected_rec_l)
class TestEvalInstanceSegmentationVOCAP(unittest.TestCase):
@classmethod
def setUpClass(cls):
base_url = 'https://chainercv-models.preferred.jp/tests'
cls.dataset = np.load(request.urlretrieve(os.path.join(
base_url,
'eval_instance_segmentation_voc_dataset_2018_04_04.npz'))[0],
allow_pickle=True,
encoding='latin1')
cls.result = np.load(request.urlretrieve(os.path.join(
base_url,
'eval_instance_segmentation_voc_result_2018_04_04.npz'))[0],
allow_pickle=True,
encoding='latin1')
def test_eval_instance_segmentation_voc(self):
pred_masks = self.result['masks']
pred_labels = self.result['labels']
pred_scores = self.result['scores']
gt_masks = self.dataset['masks']
gt_labels = self.dataset['labels']
result = eval_instance_segmentation_voc(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels,
use_07_metric=True)
# calculated from original python code
expected = [
0.159091,
0.945455,
0.679545,
0.378293,
0.430303,
1.000000,
0.581055,
0.905195,
0.415757,
0.909091,
1.000000,
0.697256,
0.856061,
0.681818,
0.793274,
0.362141,
0.948052,
0.545455,
0.840909,
0.618182
]
np.testing.assert_almost_equal(result['ap'], expected, decimal=5)
np.testing.assert_almost_equal(
result['map'], np.nanmean(expected), decimal=5)
testing.run_module(__name__, __file__)
|
from __future__ import print_function
import dedupe
import unittest
import codecs
import json
import sys
class SerializerTest(unittest.TestCase):
def test_writeTraining(self):
if sys.version < '3':
from StringIO import StringIO
output = StringIO()
encoded_file = codecs.EncodedFile(output,
data_encoding='utf8',
file_encoding='ascii')
else:
from io import StringIO
encoded_file = StringIO()
training_pairs = {u"distinct": [[{u'bar': frozenset([u'barë']),
'baz': [1, 2],
'bang': [1, 2],
u'foo': u'baz'},
{u'foo': u'baz'}]],
u"match": []}
json.dump(training_pairs,
encoded_file,
default=dedupe.serializer._to_json,
ensure_ascii=True)
encoded_file.seek(0)
loaded_training_pairs = json.load(encoded_file,
cls=dedupe.serializer.dedupe_decoder)
assert loaded_training_pairs["distinct"][0][0] ==\
dict(training_pairs["distinct"][0][0])
assert isinstance(loaded_training_pairs["distinct"][0][0]["bar"],
frozenset)
deduper = dedupe.Dedupe([{'field': 'foo', 'type': 'String'}])
deduper.classifier.cv = False
encoded_file.seek(0)
deduper._read_training(encoded_file)
print(deduper.training_pairs)
print(training_pairs)
assert deduper.training_pairs == training_pairs
encoded_file.close()
if __name__ == "__main__":
unittest.main()
|
import logging
import re
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
# Version of python to match with pip.
_EXPECTED_PIP_PYTHON_VERSION = 2
# Where the correct pip file could be found
_KNOWN_PIP_PATHS = [
# Some OSes install to /usr/bin/pip2.
'/usr/bin/pip{}'.format(_EXPECTED_PIP_PYTHON_VERSION),
# Sometimes pip is available to the PKB user and not root.
'/usr/local/bin/pip{}'.format(_EXPECTED_PIP_PYTHON_VERSION),
'/usr/local/bin/pip'
]
# Regex to match the output of "pip --version"
_PIP_VERSION_RE = re.compile(r'^pip (?P<pip_version>\S+) '
r'from (?P<pip_path>.*?)\s+'
r'\(python (?P<python_version>\S+)\)$')
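# For reference, a typical "pip --version" line this regex is expected to match
# (illustrative): "pip 9.0.1 from /usr/lib/python2.7/dist-packages (python 2.7)",
# yielding pip_version='9.0.1', pip_path='/usr/lib/python2.7/dist-packages'
# and python_version='2.7'.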
# Path to use when pip is not in the PATH or is tied to a different Python version
_DEFAULT_PIP_PATH = '/usr/bin/pip'
# Symlink command for putting pip in the path
_SYMLINK_COMMAND = 'sudo ln -s {} ' + _DEFAULT_PIP_PATH
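# For example, _SYMLINK_COMMAND.format('/usr/local/bin/pip2') expands to
# 'sudo ln -s /usr/local/bin/pip2 /usr/bin/pip'.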
def Install(vm, package_name='python-pip'):
"""Install pip on the VM."""
vm.InstallPackages(package_name)
# Make sure pip is available as the PKB user and as root.
_MakePipSymlink(vm, as_root=False)
_MakePipSymlink(vm, as_root=True)
if vm.PYTHON_PIP_PACKAGE_VERSION:
vm.RemoteCommand(
'sudo pip install --upgrade '
'--force-reinstall pip=={0}'.format(vm.PYTHON_PIP_PACKAGE_VERSION))
else:
vm.RemoteCommand('sudo pip install -U pip') # Make pip upgrade pip
vm.RemoteCommand('mkdir -p {0} && pip freeze > {0}/requirements.txt'.format(
linux_packages.INSTALL_DIR))
def YumInstall(vm):
"""Installs the pip package on the VM."""
vm.InstallEpelRepo()
Install(vm, vm.PYTHON_PACKAGE + '-pip')
def SwupdInstall(vm):
"""Installs the pip package on the VM."""
vm.InstallPackages('which')
package_name = 'python-basic'
Install(vm, package_name)
def Uninstall(vm):
"""Uninstalls the pip package on the VM."""
vm.RemoteCommand('pip freeze | grep --fixed-strings --line-regexp '
'--invert-match --file {0}/requirements.txt | '
'xargs --no-run-if-empty sudo pip uninstall -y'.format(
linux_packages.INSTALL_DIR))
def _MakePipSymlink(vm, as_root=False):
"""If needed makes a symlink at /usr/bin/pip for correct pip version.
Args:
vm: Virtual Machine to run on.
as_root: Whether to run the commands as root.
"""
  # First check whether the pip already in the PATH is acceptable.
major_version, python_version, pip_path = PythonVersionForPip(vm, as_root)
if major_version == _EXPECTED_PIP_PYTHON_VERSION:
logging.info('Good: "pip" (root=%s) in PATH is %s and is for python %s',
as_root, pip_path, python_version)
return
if pip_path == _DEFAULT_PIP_PATH:
    # Only remove /usr/bin/pip itself, as the symlink will be created there later.
vm.RemoteCommand('sudo rm {}'.format(pip_path))
for path in _KNOWN_PIP_PATHS:
if vm.TryRemoteCommand('ls {}'.format(path)):
vm.RemoteCommand(_SYMLINK_COMMAND.format(path))
break
major_version, python_version, pip_path = PythonVersionForPip(vm, as_root)
if major_version != _EXPECTED_PIP_PYTHON_VERSION:
raise errors.Setup.InvalidConfigurationError(
'"pip" {} (root={}) uses python {}'.format(pip_path, as_root,
python_version))
def PythonVersionForPip(vm, as_root):
"""Returns tuple about the "pip" command in the path at the given location.
Args:
vm: Virtual Machine to run on.
as_root: Whether to run the commands as root.
Returns:
Tuple of (python major version, python version, pip path) or (None, None,
None) if not found.
"""
cmd_prefix = 'sudo ' if as_root else ''
real_pip_path, _, exit_code = vm.RemoteCommandWithReturnCode(
cmd_prefix + 'which pip', ignore_failure=True)
if exit_code:
return None, None, None
pip_text, _ = vm.RemoteCommand(cmd_prefix + 'pip --version')
m = _PIP_VERSION_RE.match(pip_text)
if not m:
raise ValueError('{} --version "{}" does not match expected "{}"'.format(
real_pip_path, pip_text, _PIP_VERSION_RE.pattern))
python_version = m.group('python_version')
major_python_version = int(re.search(r'^(\d+)', python_version).group(1))
return major_python_version, python_version, real_pip_path.strip()
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.water_heater import DOMAIN
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a water_heater."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "water_heater.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "water_heater.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_action(hass):
"""Test for turn_on and turn_off actions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_off",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "water_heater.entity",
"type": "turn_off",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_on",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "water_heater.entity",
"type": "turn_on",
},
},
]
},
)
turn_off_calls = async_mock_service(hass, "water_heater", "turn_off")
turn_on_calls = async_mock_service(hass, "water_heater", "turn_on")
hass.bus.async_fire("test_event_turn_off")
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
assert len(turn_on_calls) == 0
hass.bus.async_fire("test_event_turn_on")
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
assert len(turn_on_calls) == 1
|
import typing
import keras
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class DRMMTKS(BaseModel):
"""
DRMMTKS Model.
Examples:
>>> model = DRMMTKS()
>>> model.params['embedding_input_dim'] = 10000
>>> model.params['embedding_output_dim'] = 100
>>> model.params['top_k'] = 20
>>> model.params['mlp_num_layers'] = 1
>>> model.params['mlp_num_units'] = 5
>>> model.params['mlp_num_fan_out'] = 1
>>> model.params['mlp_activation_func'] = 'tanh'
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=True
)
params.add(Param(name='mask_value', value=-1,
desc="The value to be masked from inputs."))
params['input_shapes'] = [(5,), (300,)]
params.add(Param(
'top_k', value=10,
hyper_space=hyper_spaces.quniform(low=2, high=100),
desc="Size of top-k pooling layer."
))
return params
def build(self):
"""Build model structure."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# K = size of top-k
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
query, doc = self._make_inputs()
embedding = self._make_embedding_layer()
# Process left input.
# shape = [B, L, D]
embed_query = embedding(query)
# shape = [B, R, D]
embed_doc = embedding(doc)
# shape = [B, L]
atten_mask = tf.not_equal(query, self._params['mask_value'])
# shape = [B, L]
atten_mask = tf.cast(atten_mask, keras.backend.floatx())
# shape = [B, L, 1]
atten_mask = tf.expand_dims(atten_mask, axis=2)
# shape = [B, L, 1]
attention_probs = self.attention_layer(embed_query, atten_mask)
# Matching histogram of top-k
# shape = [B, L, R]
matching_matrix = keras.layers.Dot(axes=[2, 2], normalize=True)(
[embed_query,
embed_doc])
# shape = [B, L, K]
effective_top_k = min(self._params['top_k'],
self.params['input_shapes'][0][0],
self.params['input_shapes'][1][0])
matching_topk = keras.layers.Lambda(
lambda x: tf.nn.top_k(x, k=effective_top_k, sorted=True)[0]
)(matching_matrix)
# Process right input.
# shape = [B, L, 1]
dense_output = self._make_multi_layer_perceptron_layer()(matching_topk)
# shape = [B, 1, 1]
dot_score = keras.layers.Dot(axes=[1, 1])(
[attention_probs, dense_output])
flatten_score = keras.layers.Flatten()(dot_score)
x_out = self._make_output_layer()(flatten_score)
self._backend = keras.Model(inputs=[query, doc], outputs=x_out)
@classmethod
def attention_layer(cls, attention_input: typing.Any,
attention_mask: typing.Any = None
) -> keras.layers.Layer:
"""
Performs attention on the input.
:param attention_input: The input tensor for attention layer.
:param attention_mask: A tensor to mask the invalid values.
:return: The masked output tensor.
"""
# shape = [B, L, 1]
dense_input = keras.layers.Dense(1, use_bias=False)(attention_input)
if attention_mask is not None:
# Since attention_mask is 1.0 for positions we want to attend and
# 0.0 for masked positions, this operation will create a tensor
# which is 0.0 for positions we want to attend and -10000.0 for
# masked positions.
# shape = [B, L, 1]
dense_input = keras.layers.Lambda(
lambda x: x + (1.0 - attention_mask) * -10000.0,
name="attention_mask"
)(dense_input)
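            # Illustratively: where attention_mask == 1.0 the logits pass through
            # unchanged, and where it is 0.0 they become x - 10000.0, which the
            # softmax below maps to an (effectively) zero probability.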
# shape = [B, L, 1]
attention_probs = keras.layers.Lambda(
lambda x: tf.nn.softmax(x, axis=1),
output_shape=lambda s: (s[0], s[1], s[2]),
name="attention_probs"
)(dense_input)
return attention_probs
|
import logging
import mimetypes
from pushbullet import InvalidKeyError, PushBullet, PushError
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_URL = "url"
ATTR_FILE = "file"
ATTR_FILE_URL = "file_url"
ATTR_LIST = "list"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_API_KEY): cv.string})
def get_service(hass, config, discovery_info=None):
"""Get the Pushbullet notification service."""
try:
pushbullet = PushBullet(config[CONF_API_KEY])
except InvalidKeyError:
_LOGGER.error("Wrong API key supplied")
return None
return PushBulletNotificationService(pushbullet)
class PushBulletNotificationService(BaseNotificationService):
"""Implement the notification service for Pushbullet."""
def __init__(self, pb):
"""Initialize the service."""
self.pushbullet = pb
self.pbtargets = {}
self.refresh()
def refresh(self):
"""Refresh devices, contacts, etc.
        pbtargets stores all targets available from this Pushbullet instance
        in a dict. These are Pushbullet objects! It sacrifices a bit of
        memory for faster processing in send_message.
        As of September 2015, contacts were replaced by chats. This is not
        implemented in the module yet.
"""
self.pushbullet.refresh()
self.pbtargets = {
"device": {tgt.nickname.lower(): tgt for tgt in self.pushbullet.devices},
"channel": {
tgt.channel_tag.lower(): tgt for tgt in self.pushbullet.channels
},
}
def send_message(self, message=None, **kwargs):
"""Send a message to a specified target.
        If no target is specified, a 'normal' push will be sent to all devices
        linked to the Pushbullet account.
        Email targets are special; they are assumed to always exist, and we use
        a dedicated call which doesn't require a push object.
"""
targets = kwargs.get(ATTR_TARGET)
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA)
refreshed = False
if not targets:
# Backward compatibility, notify all devices in own account.
self._push_data(message, title, data, self.pushbullet)
_LOGGER.info("Sent notification to self")
return
# Main loop, process all targets specified.
for target in targets:
try:
ttype, tname = target.split("/", 1)
except ValueError:
_LOGGER.error("Invalid target syntax: %s", target)
continue
# Target is email, send directly, don't use a target object.
# This also seems to work to send to all devices in own account.
if ttype == "email":
self._push_data(message, title, data, self.pushbullet, email=tname)
_LOGGER.info("Sent notification to email %s", tname)
continue
# Target is sms, send directly, don't use a target object.
if ttype == "sms":
self._push_data(
message, title, data, self.pushbullet, phonenumber=tname
)
_LOGGER.info("Sent sms notification to %s", tname)
continue
            # Refresh if the name is not found. Until a periodic refresh
            # solution exists in the component, this is a poor man's refresh.
if ttype not in self.pbtargets:
_LOGGER.error("Invalid target syntax: %s", target)
continue
tname = tname.lower()
if tname not in self.pbtargets[ttype] and not refreshed:
self.refresh()
refreshed = True
# Attempt push_note on a dict value. Keys are types & target
# name. Dict pbtargets has all *actual* targets.
try:
self._push_data(message, title, data, self.pbtargets[ttype][tname])
_LOGGER.info("Sent notification to %s/%s", ttype, tname)
except KeyError:
_LOGGER.error("No such target: %s/%s", ttype, tname)
continue
def _push_data(self, message, title, data, pusher, email=None, phonenumber=None):
"""Create the message content."""
if data is None:
data = {}
data_list = data.get(ATTR_LIST)
url = data.get(ATTR_URL)
filepath = data.get(ATTR_FILE)
file_url = data.get(ATTR_FILE_URL)
try:
email_kwargs = {}
if email:
email_kwargs["email"] = email
if phonenumber:
device = pusher.devices[0]
pusher.push_sms(device, phonenumber, message)
elif url:
pusher.push_link(title, url, body=message, **email_kwargs)
elif filepath:
if not self.hass.config.is_allowed_path(filepath):
_LOGGER.error("Filepath is not valid or allowed")
return
with open(filepath, "rb") as fileh:
filedata = self.pushbullet.upload_file(fileh, filepath)
if filedata.get("file_type") == "application/x-empty":
_LOGGER.error("Can not send an empty file")
return
filedata.update(email_kwargs)
pusher.push_file(title=title, body=message, **filedata)
elif file_url:
if not file_url.startswith("http"):
_LOGGER.error("URL should start with http or https")
return
pusher.push_file(
title=title,
body=message,
file_name=file_url,
file_url=file_url,
file_type=(mimetypes.guess_type(file_url)[0]),
**email_kwargs,
)
elif data_list:
pusher.push_list(title, data_list, **email_kwargs)
else:
pusher.push_note(title, message, **email_kwargs)
except PushError as err:
_LOGGER.error("Notify failed: %s", err)
|
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from elephas.spark_model import SparkModel
from elephas.utils.rdd_utils import to_simple_rdd
from pyspark import SparkContext, SparkConf
import pytest
@pytest.mark.usefixtures("spark_context")
def test_sync_mode(spark_context):
# Define basic parameters
batch_size = 64
nb_classes = 10
epochs = 10
# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(128, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1)
model.compile(sgd, 'categorical_crossentropy', ['acc'])
# Build RDD from numpy features and labels
rdd = to_simple_rdd(spark_context, x_train, y_train)
# Initialize SparkModel from Keras model and Spark context
spark_model = SparkModel(model, mode='synchronous')
# Train Spark model
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size,
verbose=2, validation_split=0.1)
# Evaluate Spark model by evaluating the underlying model
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
assert score[1] >= 0.70
if __name__ == '__main__':
pytest.main([__file__])
|
import sys
import pandas as pd
from scattertext.Corpus import Corpus
from scattertext.indexstore.IndexStore import IndexStore
class ParsedCorpus(Corpus):
def __init__(self,
df,
X,
mX,
y,
term_idx_store,
category_idx_store,
metadata_idx_store,
parsed_col,
category_col,
unigram_frequency_path=None):
'''
Parameters
----------
        df pd.DataFrame, contains parsed_col and metadata
        X csr_matrix
        mX csr_matrix
        y np.array
        term_idx_store IndexStore
        category_idx_store IndexStore
        metadata_idx_store IndexStore
        parsed_col str, column in df containing parsed documents
        category_col str, column in df containing category
        unigram_frequency_path str, None by default, path of unigram counts file
'''
self._df = df
self._parsed_col = parsed_col
self._category_col = category_col
Corpus.__init__(self, X, mX, y, term_idx_store, category_idx_store,
metadata_idx_store,
self._df[self._parsed_col],
unigram_frequency_path)
def get_texts(self):
'''
Returns
-------
pd.Series, all raw documents
'''
if sys.version_info[0] == 2:
return self._df[self._parsed_col]
return self._df[self._parsed_col].apply(str)
def get_df(self):
'''
Returns
-------
pd.DataFrame
'''
return self._df
def get_field(self, field):
'''
Parameters
----------
field: str, field name
Returns
-------
pd.Series, all members of field
'''
return self._df[field]
def get_parsed_docs(self):
'''
Returns
-------
        pd.Series, Doc representations of texts.
'''
return self._df[self._parsed_col]
def search(self, ngram):
'''
Parameters
----------
ngram, str or unicode, string to search for
Returns
-------
pd.DataFrame, {self._parsed_col: <matching texts>, self._category_col: <corresponding categories>, ...}
'''
mask = self._document_index_mask(ngram)
return self._df[mask]
def _document_index_mask(self, ngram):
idx = self._term_idx_store.getidxstrict(ngram.lower())
mask = (self._X[:, idx] > 0).todense().A1
return mask
def term_group_freq_df(self, group_col):
# type: (str) -> pd.DataFrame
'''
        Returns a dataframe indexed on the number of groups a term occurred in.
Parameters
----------
group_col
Returns
-------
pd.DataFrame
'''
group_idx_store = IndexStore()
X = self._X
group_idx_to_cat_idx, row_group_cat \
= self._get_group_docids_and_index_store(X, group_col, group_idx_store)
newX = self._change_document_type_in_matrix(X, row_group_cat)
newX = self._make_all_positive_data_ones(newX)
category_row = newX.tocoo().row
for group_idx, cat_idx in group_idx_to_cat_idx.items():
category_row[category_row == group_idx] = cat_idx
catX = self._change_document_type_in_matrix(newX, category_row)
return self._term_freq_df_from_matrix(catX)
def _make_new_term_doc_matrix(self,
new_X=None,
new_mX=None,
new_y=None,
new_term_idx_store=None,
new_category_idx_store=None,
new_metadata_idx_store=None,
new_y_mask=None):
return ParsedCorpus(
X=new_X if new_X is not None else self._X,
mX=new_mX if new_mX is not None else self._mX,
y=new_y if new_y is not None else self._y,
parsed_col=self._parsed_col,
category_col=self._category_col,
term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
category_idx_store=new_category_idx_store if new_category_idx_store is not None else self._category_idx_store,
metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
df=self._df[new_y_mask] if new_y_mask is not None else self._df,
unigram_frequency_path=self._unigram_frequency_path
)
def _get_group_docids_and_index_store(self, X, group_col, group_idx_store):
row_group_cat = X.tocoo().row
group_idx_to_cat_idx = {}
for doc_idx, row in self._df.iterrows():
group_idx = group_idx_store.getidx(row[group_col] + '-' + row[self._category_col])
row_group_cat[row_group_cat == doc_idx] = group_idx
group_idx_to_cat_idx[group_idx] = self._y[doc_idx]
return group_idx_to_cat_idx, row_group_cat
|
from lemur import database
from lemur.users.models import User
def create(username, password, email, active, profile_picture, roles):
"""
Create a new user
:param username:
:param password:
:param email:
:param active:
:param profile_picture:
:param roles:
:return:
"""
user = User(
password=password,
username=username,
email=email,
active=active,
profile_picture=profile_picture,
)
user.roles = roles
return database.create(user)
def update(user_id, username, email, active, profile_picture, roles):
"""
Updates an existing user
:param user_id:
:param username:
:param email:
:param active:
:param profile_picture:
:param roles:
:return:
"""
user = get(user_id)
user.username = username
user.email = email
user.active = active
user.profile_picture = profile_picture
update_roles(user, roles)
return database.update(user)
def update_roles(user, roles):
"""
    Replaces the roles with new ones. This will detect
    when roles are added as well as when roles are
    removed.
:param user:
:param roles:
"""
    # Iterate over a copy so that removing roles does not skip items.
    for ur in list(user.roles):
        for r in roles:
            if r.id == ur.id:
                break
        else:
            user.roles.remove(ur)
for r in roles:
for ur in user.roles:
if r.id == ur.id:
break
else:
user.roles.append(r)
def get(user_id):
"""
Retrieve a user from the database
:param user_id:
:return:
"""
return database.get(User, user_id)
def get_by_email(email):
"""
Retrieve a user from the database by their email address
:param email:
:return:
"""
return database.get(User, email, field="email")
def get_by_username(username):
"""
Retrieve a user from the database by their username
:param username:
:return:
"""
return database.get(User, username, field="username")
def get_all():
"""
Retrieve all users from the database.
:return:
"""
query = database.session_query(User)
return database.find_all(query, User, {}).all()
def render(args):
"""
Helper that paginates and filters data when requested
    through the REST API
:param args:
:return:
"""
query = database.session_query(User)
filt = args.pop("filter")
if filt:
terms = filt.split(";")
query = database.filter(query, User, terms)
return database.sort_and_page(query, User, args)
|
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola import utils
from nikola.plugin_categories import RestExtension
# WARNING: the directive name is post-list
# (with a DASH instead of an UNDERSCORE)
class Plugin(RestExtension):
"""Plugin for reST post-list directive."""
name = "rest_post_list"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
directives.register_directive('post-list', PostListDirective)
directives.register_directive('post_list', PostListDirective)
PostListDirective.site = site
return super().set_site(site)
class PostListDirective(Directive):
"""Provide a reStructuredText directive to create a list of posts."""
option_spec = {
'start': int,
'stop': int,
'reverse': directives.flag,
'sort': directives.unchanged,
'tags': directives.unchanged,
'require_all_tags': directives.flag,
'categories': directives.unchanged,
'sections': directives.unchanged,
'slugs': directives.unchanged,
'post_type': directives.unchanged,
'type': directives.unchanged,
'lang': directives.unchanged,
'template': directives.path,
'id': directives.unchanged,
'date': directives.unchanged,
}
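    # A minimal usage sketch in a reST source file, using the options declared
    # above (illustrative):
    #
    #   .. post-list::
    #      :start: 0
    #      :stop: 5
    #      :tags: python
    #
    # Both ``post-list`` and ``post_list`` are registered, but the dashed form
    # is the canonical one (see the warning at the top of this module).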
def run(self):
"""Run post-list directive."""
start = self.options.get('start')
stop = self.options.get('stop')
reverse = self.options.get('reverse', False)
tags = self.options.get('tags')
require_all_tags = 'require_all_tags' in self.options
categories = self.options.get('categories')
sections = self.options.get('sections')
slugs = self.options.get('slugs')
post_type = self.options.get('post_type')
type = self.options.get('type', False)
lang = self.options.get('lang', utils.LocaleBorg().current_lang)
template = self.options.get('template', 'post_list_directive.tmpl')
sort = self.options.get('sort')
date = self.options.get('date')
filename = self.state.document.settings._nikola_source_path
output, deps = self.site.plugin_manager.getPluginByName(
'post_list', 'ShortcodePlugin').plugin_object.handler(
start,
stop,
reverse,
tags,
require_all_tags,
categories,
sections,
slugs,
post_type,
type,
lang,
template,
sort,
state=self.state,
site=self.site,
date=date,
filename=filename)
self.state.document.settings.record_dependencies.add(
"####MAGIC####TIMELINE")
for d in deps:
self.state.document.settings.record_dependencies.add(d)
if output:
return [nodes.raw('', output, format='html')]
else:
return []
|
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.contrib import admin
from django.contrib.sites.models import Site
from django.utils.translation import gettext_lazy as _
try:
from django_elasticsearch_dsl.registries import registry as elasticsearch_registry
except ImportError:
elasticsearch_registry = type('DocumentRegistry', (), {'get_documents': lambda *args: []})()
from adminsortable2.admin import SortableInlineAdminMixin
from cms.models import Page
from shop.models.related import ProductPageModel, ProductImageModel
class ProductImageInline(SortableInlineAdminMixin, admin.StackedInline):
model = ProductImageModel
extra = 1
ordering = ['order']
def _find_catalog_list_apphook():
from shop.cms_apphooks import CatalogListCMSApp
from cms.apphook_pool import apphook_pool
for name, app in apphook_pool.apps.items():
if isinstance(app, CatalogListCMSApp):
return name
else:
raise ImproperlyConfigured("You must register a CMS apphook of type `CatalogListCMSApp`.")
class CategoryModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
        if Site.objects.count() >= 2:
            page_sitename = str(Site.objects.filter(djangocms_nodes=obj.node_id).first().name)
return '{} | {}'.format(str(obj), page_sitename)
else:
return str(obj)
class CMSPageAsCategoryMixin:
"""
Add this mixin class to the ModelAdmin class for products wishing to be assigned to djangoCMS
pages when used as categories.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(self.model, 'cms_pages'):
raise ImproperlyConfigured("Product model requires a field named `cms_pages`")
def get_fieldsets(self, request, obj=None):
fieldsets = list(super().get_fieldsets(request, obj=obj))
fieldsets.append((_("Categories"), {'fields': ('cms_pages',)}),)
return fieldsets
def get_fields(self, request, obj=None):
# In ``get_fieldsets()``, ``cms_pages`` is added, so remove it from ``fields`` to
# avoid showing it twice.
fields = list(super().get_fields(request, obj))
try:
fields.remove('cms_pages')
except ValueError:
pass
return fields
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'cms_pages':
# restrict many-to-many field for cms_pages to ProductApp only
limit_choices_to = {
'publisher_is_draft': False,
'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook()),
}
queryset = Page.objects.filter(**limit_choices_to)
widget = admin.widgets.FilteredSelectMultiple(_("CMS Pages"), False)
required = not db_field.blank
field = CategoryModelMultipleChoiceField(queryset=queryset, widget=widget, required=required)
return field
return super().formfield_for_manytomany(db_field, request, **kwargs)
def save_related(self, request, form, formsets, change):
old_cms_pages = form.instance.cms_pages.all()
new_cms_pages = form.cleaned_data.pop('cms_pages')
# remove old
for page in old_cms_pages:
if page not in new_cms_pages:
for pp in ProductPageModel.objects.filter(product=form.instance, page=page):
pp.delete()
# add new
for page in new_cms_pages:
if page not in old_cms_pages:
ProductPageModel.objects.create(product=form.instance, page=page)
return super().save_related(request, form, formsets, change)
class SearchProductIndexMixin:
"""
If Elasticsearch is used to create a full text search index, add this mixin class to Django's
``ModelAdmin`` backend for the corresponding product model.
"""
def save_model(self, request, product, form, change):
super().save_model(request, product, form, change)
if change:
product.update_search_index()
def delete_model(self, request, product):
product.active = False
product.update_search_index()
super().delete_model(request, product)
class InvalidateProductCacheMixin:
"""
If Redis caching is used to create a HTML snippets for product representation, add this mixin
class to Django's ``ModelAdmin`` backend for the corresponding product model.
"""
def save_model(self, request, product, form, change):
if change:
product.invalidate_cache()
return super().save_model(request, product, form, change)
def delete_model(self, request, product):
product.invalidate_cache()
super().delete_model(request, product)
class UnitPriceMixin:
def get_list_display(self, request):
list_display = list(super().get_list_display(request))
list_display.append('get_unit_price')
return list_display
def get_unit_price(self, obj):
return str(obj.unit_price)
get_unit_price.short_description = _("Unit Price")
class CMSPageFilter(admin.SimpleListFilter):
title = _("Category")
parameter_name = 'category'
def lookups(self, request, model_admin):
limit_choices_to = {
'publisher_is_draft': False,
'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook())
}
queryset = Page.objects.filter(**limit_choices_to)
return [(page.id, page.get_title()) for page in queryset]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(cms_pages__id=self.value())
|
import contextlib
import keyword
import pkgutil
from importlib import import_module, invalidate_caches
from importlib.machinery import ModuleSpec
from pathlib import Path
from typing import Union, List, Optional
import redbot.cogs
from redbot.core.utils import deduplicate_iterables
import discord
from . import checks, commands
from .config import Config
from .i18n import Translator, cog_i18n
from .data_manager import cog_data_path
from .utils.chat_formatting import box, pagify
__all__ = ["CogManager"]
class NoSuchCog(ImportError):
"""Thrown when a cog is missing.
Different from ImportError because some ImportErrors can happen inside cogs.
"""
class CogManager:
"""Directory manager for Red's cogs.
This module allows you to load cogs from multiple directories and even from
outside the bot directory. You may also set a directory for downloader to
install new cogs to, the default being the :code:`cogs/` folder in the root
bot directory.
"""
CORE_PATH = Path(redbot.cogs.__path__[0])
def __init__(self):
self.config = Config.get_conf(self, 2938473984732, True)
tmp_cog_install_path = cog_data_path(self) / "cogs"
tmp_cog_install_path.mkdir(parents=True, exist_ok=True)
self.config.register_global(paths=[], install_path=str(tmp_cog_install_path))
async def paths(self) -> List[Path]:
"""Get all currently valid path directories, in order of priority
Returns
-------
List[pathlib.Path]
A list of paths where cog packages can be found. The
install path is highest priority, followed by the
user-defined paths, and the core path has the lowest
priority.
"""
return deduplicate_iterables(
[await self.install_path()], await self.user_defined_paths(), [self.CORE_PATH]
)
async def install_path(self) -> Path:
"""Get the install path for 3rd party cogs.
Returns
-------
pathlib.Path
The path to the directory where 3rd party cogs are stored.
"""
return Path(await self.config.install_path()).resolve()
async def user_defined_paths(self) -> List[Path]:
"""Get a list of user-defined cog paths.
All paths will be absolute and unique, in order of priority.
Returns
-------
List[pathlib.Path]
A list of user-defined paths.
"""
return list(map(Path, deduplicate_iterables(await self.config.paths())))
async def set_install_path(self, path: Path) -> Path:
"""Set the install path for 3rd party cogs.
Note
----
The bot will not remember your old cog install path which means
that **all previously installed cogs** will no longer be found.
Parameters
----------
path : pathlib.Path
The new directory for cog installs.
Returns
-------
pathlib.Path
Absolute path to the new install directory.
Raises
------
ValueError
If :code:`path` is not an existing directory.
"""
if not path.is_dir():
raise ValueError("The install path must be an existing directory.")
resolved = path.resolve()
await self.config.install_path.set(str(resolved))
return resolved
@staticmethod
def _ensure_path_obj(path: Union[Path, str]) -> Path:
"""Guarantee an object will be a path object.
Parameters
----------
path : `pathlib.Path` or `str`
Returns
-------
pathlib.Path
"""
return Path(path)
async def add_path(self, path: Union[Path, str]) -> None:
"""Add a cog path to current list.
This will ignore duplicates.
Parameters
----------
path : `pathlib.Path` or `str`
Path to add.
Raises
------
ValueError
If :code:`path` does not resolve to an existing directory.
"""
path = self._ensure_path_obj(path)
        # This makes the path absolute; it may break if a bot install
        # changes OS/computer.
path = path.resolve()
if not path.is_dir():
raise ValueError("'{}' is not a valid directory.".format(path))
if path == await self.install_path():
raise ValueError("Cannot add the install path as an additional path.")
if path == self.CORE_PATH:
raise ValueError("Cannot add the core path as an additional path.")
current_paths = await self.user_defined_paths()
if path not in current_paths:
current_paths.append(path)
await self.set_paths(current_paths)
async def remove_path(self, path: Union[Path, str]) -> None:
"""Remove a path from the current paths list.
Parameters
----------
path : `pathlib.Path` or `str`
Path to remove.
"""
path = self._ensure_path_obj(path)
paths = await self.user_defined_paths()
paths.remove(path)
await self.set_paths(paths)
async def set_paths(self, paths_: List[Path]):
"""Set the current paths list.
Parameters
----------
paths_ : `list` of `pathlib.Path`
List of paths to set.
"""
str_paths = list(map(str, paths_))
await self.config.paths.set(str_paths)
async def _find_ext_cog(self, name: str) -> ModuleSpec:
"""
Attempts to find a spec for a third party installed cog.
Parameters
----------
name : str
Name of the cog package to look for.
Returns
-------
importlib.machinery.ModuleSpec
Module spec to be used for cog loading.
Raises
------
NoSuchCog
When no cog with the requested name was found.
"""
if not name.isidentifier() or keyword.iskeyword(name):
# reject package names that can't be valid python identifiers
raise NoSuchCog(
f"No 3rd party module by the name of '{name}' was found in any available path.",
name=name,
)
real_paths = list(map(str, [await self.install_path()] + await self.user_defined_paths()))
for finder, module_name, _ in pkgutil.iter_modules(real_paths):
if name == module_name:
spec = finder.find_spec(name)
if spec:
return spec
raise NoSuchCog(
f"No 3rd party module by the name of '{name}' was found in any available path.",
name=name,
)
@staticmethod
async def _find_core_cog(name: str) -> ModuleSpec:
"""
Attempts to find a spec for a core cog.
Parameters
----------
name : str
Returns
-------
importlib.machinery.ModuleSpec
Raises
------
RuntimeError
When no matching spec can be found.
"""
real_name = ".{}".format(name)
package = "redbot.cogs"
try:
mod = import_module(real_name, package=package)
except ImportError as e:
if e.name == package + real_name:
raise NoSuchCog(
"No core cog by the name of '{}' could be found.".format(name),
path=e.path,
name=e.name,
) from e
raise
return mod.__spec__
# noinspection PyUnreachableCode
async def find_cog(self, name: str) -> Optional[ModuleSpec]:
"""Find a cog in the list of available paths.
Parameters
----------
name : str
Name of the cog to find.
Returns
-------
Optional[importlib.machinery.ModuleSpec]
A module spec to be used for specialized cog loading, if found.
"""
with contextlib.suppress(NoSuchCog):
return await self._find_ext_cog(name)
with contextlib.suppress(NoSuchCog):
return await self._find_core_cog(name)
async def available_modules(self) -> List[str]:
"""Finds the names of all available modules to load."""
paths = list(map(str, await self.paths()))
ret = []
for finder, module_name, _ in pkgutil.iter_modules(paths):
# reject package names that can't be valid python identifiers
if module_name.isidentifier() and not keyword.iskeyword(module_name):
ret.append(module_name)
return ret
@staticmethod
def invalidate_caches():
"""Re-evaluate modules in the py cache.
This is an alias for an importlib internal and should be called
any time that a new module has been installed to a cog directory.
"""
invalidate_caches()
_ = Translator("CogManagerUI", __file__)
@cog_i18n(_)
class CogManagerUI(commands.Cog):
"""Commands to interface with Red's cog manager."""
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete (Core Config is handled in a bot method ) """
return
@commands.command()
@checks.is_owner()
async def paths(self, ctx: commands.Context):
"""
Lists current cog paths in order of priority.
"""
cog_mgr = ctx.bot._cog_mgr
install_path = await cog_mgr.install_path()
core_path = cog_mgr.CORE_PATH
cog_paths = await cog_mgr.user_defined_paths()
msg = _("Install Path: {install_path}\nCore Path: {core_path}\n\n").format(
install_path=install_path, core_path=core_path
)
partial = []
for i, p in enumerate(cog_paths, start=1):
partial.append("{}. {}".format(i, p))
msg += "\n".join(partial)
await ctx.send(box(msg))
@commands.command()
@checks.is_owner()
async def addpath(self, ctx: commands.Context, path: Path):
"""
Add a path to the list of available cog paths.
"""
if not path.is_dir():
await ctx.send(_("That path does not exist or does not point to a valid directory."))
return
try:
await ctx.bot._cog_mgr.add_path(path)
except ValueError as e:
await ctx.send(str(e))
else:
await ctx.send(_("Path successfully added."))
@commands.command()
@checks.is_owner()
async def removepath(self, ctx: commands.Context, path_number: int):
"""
Removes a path from the available cog paths given the `path_number` from `[p]paths`.
"""
path_number -= 1
if path_number < 0:
await ctx.send(_("Path numbers must be positive."))
return
cog_paths = await ctx.bot._cog_mgr.user_defined_paths()
try:
to_remove = cog_paths.pop(path_number)
except IndexError:
await ctx.send(_("That is an invalid path number."))
return
await ctx.bot._cog_mgr.remove_path(to_remove)
await ctx.send(_("Path successfully removed."))
@commands.command()
@checks.is_owner()
async def reorderpath(self, ctx: commands.Context, from_: int, to: int):
"""
Reorders paths internally to allow discovery of different cogs.
"""
# Doing this because in the paths command they're 1 indexed
from_ -= 1
to -= 1
if from_ < 0 or to < 0:
await ctx.send(_("Path numbers must be positive."))
return
all_paths = await ctx.bot._cog_mgr.user_defined_paths()
try:
to_move = all_paths.pop(from_)
except IndexError:
await ctx.send(_("Invalid 'from' index."))
return
try:
all_paths.insert(to, to_move)
except IndexError:
await ctx.send(_("Invalid 'to' index."))
return
await ctx.bot._cog_mgr.set_paths(all_paths)
await ctx.send(_("Paths reordered."))
@commands.command()
@checks.is_owner()
async def installpath(self, ctx: commands.Context, path: Path = None):
"""
Returns the current install path or sets it if one is provided.
The provided path must be absolute or relative to the bot's
directory and it must already exist.
No installed cogs will be transferred in the process.
"""
if path:
if not path.is_absolute():
path = (ctx.bot._main_dir / path).resolve()
try:
await ctx.bot._cog_mgr.set_install_path(path)
except ValueError:
await ctx.send(_("That path does not exist."))
return
install_path = await ctx.bot._cog_mgr.install_path()
await ctx.send(
_("The bot will install new cogs to the `{}` directory.").format(install_path)
)
@commands.command()
@checks.is_owner()
async def cogs(self, ctx: commands.Context):
"""
Lists all loaded and available cogs.
"""
loaded = set(ctx.bot.extensions.keys())
all_cogs = set(await ctx.bot._cog_mgr.available_modules())
unloaded = all_cogs - loaded
loaded = sorted(list(loaded), key=str.lower)
unloaded = sorted(list(unloaded), key=str.lower)
if await ctx.embed_requested():
loaded = _("**{} loaded:**\n").format(len(loaded)) + ", ".join(loaded)
unloaded = _("**{} unloaded:**\n").format(len(unloaded)) + ", ".join(unloaded)
for page in pagify(loaded, delims=[", ", "\n"], page_length=1800):
if page.startswith(", "):
page = page[2:]
e = discord.Embed(description=page, colour=discord.Colour.dark_green())
await ctx.send(embed=e)
for page in pagify(unloaded, delims=[", ", "\n"], page_length=1800):
if page.startswith(", "):
page = page[2:]
e = discord.Embed(description=page, colour=discord.Colour.dark_red())
await ctx.send(embed=e)
else:
loaded_count = _("**{} loaded:**\n").format(len(loaded))
loaded = ", ".join(loaded)
unloaded_count = _("**{} unloaded:**\n").format(len(unloaded))
unloaded = ", ".join(unloaded)
loaded_count_sent = False
unloaded_count_sent = False
for page in pagify(loaded, delims=[", ", "\n"], page_length=1800):
if page.startswith(", "):
page = page[2:]
if not loaded_count_sent:
await ctx.send(loaded_count + box(page, lang="css"))
loaded_count_sent = True
else:
await ctx.send(box(page, lang="css"))
for page in pagify(unloaded, delims=[", ", "\n"], page_length=1800):
if page.startswith(", "):
page = page[2:]
if not unloaded_count_sent:
await ctx.send(unloaded_count + box(page, lang="ldif"))
unloaded_count_sent = True
else:
await ctx.send(box(page, lang="ldif"))
|
import os
from flask import Flask, render_template, request, redirect
from react.render import render_component
DEBUG = True
app = Flask(__name__)
app.debug = DEBUG
comments = []
@app.route('/')
def index():
rendered = render_component(
os.path.join(os.getcwd(), 'static', 'js', 'CommentBox.jsx'),
{
'comments': comments,
'url': '/comment/',
},
to_static_markup=True,
)
return render_template('index.html', rendered=rendered)
@app.route('/comment/', methods=('POST',))
def comment():
comments.append({
'author': request.form['author'],
'text': request.form['text'],
})
return redirect('/')
if __name__ == '__main__':
app.run()
|
import copy
import logging
from openzwavemqtt.const import (
EVENT_INSTANCE_STATUS_CHANGED,
EVENT_VALUE_CHANGED,
OZW_READY_STATES,
CommandClass,
ValueIndex,
)
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.value import OZWValue
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from . import const
from .const import DOMAIN, PLATFORMS
from .discovery import check_node_schema, check_value_schema
_LOGGER = logging.getLogger(__name__)
OZW_READY_STATES_VALUES = {st.value for st in OZW_READY_STATES}
class ZWaveDeviceEntityValues:
"""Manages entity access to the underlying Z-Wave value objects."""
def __init__(self, hass, options, schema, primary_value):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._entity_created = False
self._schema = copy.deepcopy(schema)
self._values = {}
self.options = options
# Go through values listed in the discovery schema, initialize them,
# and add a check to the schema to make sure the Instance matches.
for name, disc_settings in self._schema[const.DISC_VALUES].items():
self._values[name] = None
disc_settings[const.DISC_INSTANCE] = (primary_value.instance,)
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
def async_setup(self):
"""Set up values instance."""
        # Check values that have already been discovered for the node
        # and see if they match the schema and need to be added to the entity.
for value in self._node.values():
self.async_check_value(value)
# Check if all the _required_ values in the schema are present and
# create the entity.
self._async_check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values.get(name, None)
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def __contains__(self, name):
"""Check if the specified name/key exists in the values."""
return name in self._values
@callback
def async_check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
# Make sure the node matches the schema for this entity.
if not check_node_schema(value.node, self._schema):
return
# Go through the possible values for this entity defined by the schema.
for name in self._values:
# Skip if it's already been added.
if self._values[name] is not None:
continue
# Skip if the value doesn't match the schema.
if not check_value_schema(value, self._schema[const.DISC_VALUES][name]):
continue
# Add value to mapping.
self._values[name] = value
# If the entity has already been created, notify it of the new value.
if self._entity_created:
async_dispatcher_send(
self._hass, f"{DOMAIN}_{self.values_id}_value_added"
)
# Check if entity has all required values and create the entity if needed.
self._async_check_entity_ready()
@callback
def _async_check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
# Abort if the entity has already been created
if self._entity_created:
return
# Go through values defined in the schema and abort if a required value is missing.
for name, disc_settings in self._schema[const.DISC_VALUES].items():
if self._values[name] is None and not disc_settings.get(
const.DISC_OPTIONAL
):
return
# We have all the required values, so create the entity.
component = self._schema[const.DISC_COMPONENT]
_LOGGER.debug(
"Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Index=%s, Value type=%s, "
"Genre=%s as %s",
self._node.node_id,
self._node.node_generic,
self._node.node_specific,
self.primary.command_class,
self.primary.index,
self.primary.type,
self.primary.genre,
component,
)
self._entity_created = True
if component in PLATFORMS:
async_dispatcher_send(self._hass, f"{DOMAIN}_new_{component}", self)
@property
def values_id(self):
"""Identification for this values collection."""
return create_value_id(self.primary)
class ZWaveDeviceEntity(Entity):
"""Generic Entity Class for a Z-Wave Device."""
def __init__(self, values):
"""Initialize a generic Z-Wave device entity."""
self.values = values
self.options = values.options
@callback
def on_value_update(self):
"""Call when a value is added/updated in the entity EntityValues Collection.
To be overridden by platforms needing this event.
"""
async def async_added_to_hass(self):
"""Call when entity is added."""
# Add dispatcher and OZW listeners callbacks.
# Add to on_remove so they will be cleaned up on entity removal.
self.async_on_remove(
self.options.listen(EVENT_VALUE_CHANGED, self._value_changed)
)
self.async_on_remove(
self.options.listen(EVENT_INSTANCE_STATUS_CHANGED, self._instance_updated)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, const.SIGNAL_DELETE_ENTITY, self._delete_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{DOMAIN}_{self.values.values_id}_value_added",
self._value_added,
)
)
@property
def device_info(self):
"""Return device information for the device registry."""
node = self.values.primary.node
node_instance = self.values.primary.instance
dev_id = create_device_id(node, self.values.primary.instance)
node_firmware = node.get_value(
CommandClass.VERSION, ValueIndex.VERSION_APPLICATION
)
device_info = {
"identifiers": {(DOMAIN, dev_id)},
"name": create_device_name(node),
"manufacturer": node.node_manufacturer_name,
"model": node.node_product_name,
}
if node_firmware is not None:
device_info["sw_version"] = node_firmware.value
# device with multiple instances is split up into virtual devices for each instance
if node_instance > 1:
parent_dev_id = create_device_id(node)
device_info["name"] += f" - Instance {node_instance}"
device_info["via_device"] = (DOMAIN, parent_dev_id)
return device_info
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {const.ATTR_NODE_ID: self.values.primary.node.node_id}
@property
def name(self):
"""Return the name of the entity."""
node = self.values.primary.node
return f"{create_device_name(node)}: {self.values.primary.label}"
@property
def unique_id(self):
"""Return the unique_id of the entity."""
return self.values.values_id
@property
def available(self) -> bool:
"""Return entity availability."""
# Use OZW Daemon status for availability.
instance_status = self.values.primary.ozw_instance.get_status()
return instance_status and instance_status.status in OZW_READY_STATES_VALUES
@callback
def _value_changed(self, value):
"""Call when a value from ZWaveDeviceEntityValues is changed.
Should not be overridden by subclasses.
"""
if value.value_id_key in (v.value_id_key for v in self.values if v):
self.on_value_update()
self.async_write_ha_state()
@callback
def _value_added(self):
"""Call when a value from ZWaveDeviceEntityValues is added.
Should not be overridden by subclasses.
"""
self.on_value_update()
@callback
def _instance_updated(self, new_status):
"""Call when the instance status changes.
Should not be overridden by subclasses.
"""
self.on_value_update()
self.async_write_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
async def _delete_callback(self, values_id):
"""Remove this entity."""
if not self.values:
return # race condition: delete already requested
if values_id == self.values.values_id:
await self.async_remove()
def create_device_name(node: OZWNode):
"""Generate sensible (short) default device name from a OZWNode."""
# Prefer custom name set by OZWAdmin if present
if node.node_name:
return node.node_name
# Prefer short devicename from metadata if present
if node.meta_data and node.meta_data.get("Name"):
return node.meta_data["Name"]
# Fallback to productname or devicetype strings
if node.node_product_name:
return node.node_product_name
if node.node_device_type_string:
return node.node_device_type_string
if node.node_specific_string:
return node.node_specific_string
# Last resort: use Node id (should never happen, but just in case)
return f"Node {node.id}"
def create_device_id(node: OZWNode, node_instance: int = 1):
"""Generate unique device_id from a OZWNode."""
ozw_instance = node.parent.id
dev_id = f"{ozw_instance}.{node.node_id}.{node_instance}"
return dev_id
def create_value_id(value: OZWValue):
"""Generate unique value_id from an OZWValue."""
# [OZW_INSTANCE_ID]-[NODE_ID]-[VALUE_ID_KEY]
return f"{value.node.parent.id}-{value.node.id}-{value.value_id_key}"
|
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import hashlib
import itertools
import math
import time
import typing
import six
from requests import Session
from ._impl import json
from ._utils import get_shared_data
if typing.TYPE_CHECKING:
from typing import Any, Dict, Iterator, Iterable, Optional, Text
__all__ = [
"PageIterator",
"HashtagIterator",
"ProfileIterator",
]
@six.add_metaclass(abc.ABCMeta)
class PageIterator(typing.Iterator[typing.Dict[typing.Text, typing.Any]]):
"""An abstract Instagram page iterator.
"""
PAGE_SIZE = 50
INTERVAL = 2
_BASE_URL = "https://www.instagram.com/graphql/query/"
_section_generic = NotImplemented # type: Text
_section_media = NotImplemented # type: Text
_URL = NotImplemented # type: Text
def __init__(self, session, rhx):
# type: (Session, Text) -> None
self._finished = False
self._cursor = None # type: Optional[Text]
self._current_page = 0
self._data_it = iter(self._page_loader(session, rhx))
@abc.abstractmethod
def _getparams(self, cursor):
# type: (Optional[Text]) -> Text
return NotImplemented
def _page_loader(self, session, rhx):
# type: (Session, Text) -> Iterable[Dict[Text, Dict[Text, Any]]]
while True:
# Cache cursor for later
cursor = self._cursor
# Query data
try:
# Prepare the query
params = self._getparams(cursor)
json_params = json.dumps(params, separators=(',', ':'))
magic = "{}:{}".format(rhx, json_params)
session.headers['x-instagram-gis'] = hashlib.md5(magic.encode('utf-8')).hexdigest()
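# The "x-instagram-gis" header is the md5 hex digest of "<rhx_gis>:<variables JSON>",
# which Instagram's GraphQL endpoint reportedly used to validate queries at the time.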
url = self._URL.format(json_params)
# Query the server for data
with session.get(url) as res:
self._last_page = data = res.json()
# Yield that same data until cursor is updated
while self._cursor == cursor:
yield data['data']
except KeyError as e:
if data.get('message') == 'rate limited':
raise RuntimeError("Query rate exceeded (wait before next run)")
time.sleep(10)
# Sleep before next query
time.sleep(self.INTERVAL)
def __length_hint__(self):
# type: () -> int
try:
data = next(self._data_it)
c = data[self._section_generic][self._section_media]['count']
total = int(math.ceil(c / self.PAGE_SIZE))
except (StopIteration, TypeError):
total = 0
return total - self._current_page
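# Worked example (hypothetical numbers, Python 3 true division): a media count of
# 120 with PAGE_SIZE = 50 gives ceil(120 / 50) = 3 total pages, so after one page
# has been consumed the hint above evaluates to 3 - 1 = 2.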
def __iter__(self):
return self
def __next__(self):
if self._finished:
raise StopIteration
data = next(self._data_it)
try:
media_info = data[self._section_generic][self._section_media]
except (TypeError, KeyError):
self._finished = True
raise StopIteration
if not media_info['page_info']['has_next_page']:
self._finished = True
elif not media_info['edges']:
self._finished = True
raise StopIteration
else:
self._cursor = media_info['page_info']['end_cursor']
self._current_page += 1
return data[self._section_generic]
if six.PY2:
next = __next__
class HashtagIterator(PageIterator):
"""An iterator over the pages refering to a specific hashtag.
"""
_QUERY_ID = "17882293912014529"
_URL = "{}?query_id={}&variables={{}}".format(PageIterator._BASE_URL, _QUERY_ID)
_section_generic = "hashtag"
_section_media = "edge_hashtag_to_media"
def __init__(self, hashtag, session, rhx):
super(HashtagIterator, self).__init__(session, rhx)
self.hashtag = hashtag
def _getparams(self, cursor):
return {
"tag_name": self.hashtag,
"first": self.PAGE_SIZE,
"after": cursor
}
def __next__(self):
item = super(HashtagIterator, self).__next__()
for media in item[self._section_media].get("edges", []):
media["node"].setdefault(
"__typename",
"GraphVideo" if media["node"].get("is_video", False) else "GraphImage"
)
return item
if six.PY2:
next = __next__
class ProfileIterator(PageIterator):
"""An iterator over the pages of a user profile.
"""
_QUERY_HASH = "42323d64886122307be10013ad2dcc44"
#_QUERY_HASH = "472f257a40c653c64c666ce877d59d2b"
_URL = "{}?query_hash={}&variables={{}}".format(PageIterator._BASE_URL, _QUERY_HASH)
_section_generic = "user"
_section_media = "edge_owner_to_timeline_media"
@classmethod
def _user_data(cls, username, session):
url = "https://www.instagram.com/{}/".format(username)
try:
with session.get(url) as res:
return get_shared_data(res.text)
except (ValueError, AttributeError):
raise ValueError("user not found: '{}'".format(username))
@classmethod
def from_username(cls, username, session):
user_data = cls._user_data(username, session)
if 'ProfilePage' not in user_data['entry_data']:
raise ValueError("user not found: '{}'".format(username))
data = user_data['entry_data']['ProfilePage'][0]['graphql']['user']
if data['is_private'] and not data['followed_by_viewer']:
con_id = next((c.value for c in session.cookies if c.name == "ds_user_id"), None)
if con_id != data['id']:
raise RuntimeError("user '{}' is private".format(username))
return cls(data['id'], session, user_data.get('rhx_gis', ''))
def __init__(self, owner_id, session, rhx):
super(ProfileIterator, self).__init__(session, rhx)
self.owner_id = owner_id
def _getparams(self, cursor):
return {
"id": self.owner_id,
"first": self.PAGE_SIZE,
"after": cursor,
}
|
import pandas as pd
import pytz
from qstrader.system.rebalance.rebalance import Rebalance
class DailyRebalance(Rebalance):
"""
Generates a list of rebalance timestamps for pre- or post-market,
for all business days (Monday-Friday) between two dates.
Does not take into account holiday calendars.
All timestamps produced are set to UTC.
Parameters
----------
start_date : `pd.Timestamp`
The starting timestamp of the rebalance range.
end_date : `pd.Timestamp`
The ending timestamp of the rebalance range.
pre_market : `Boolean`, optional
Whether to carry out the rebalance at market open (True) or
at market close (False). Defaults to False.
"""
def __init__(
self,
start_date,
end_date,
pre_market=False
):
self.start_date = start_date
self.end_date = end_date
self.market_time = self._set_market_time(pre_market)
self.rebalances = self._generate_rebalances()
def _set_market_time(self, pre_market):
"""
Determines whether to use market open or market close
as the rebalance time.
Parameters
----------
pre_market : `Boolean`
Whether to use market open or market close
as the rebalance time.
Returns
-------
`str`
The string representation of the market time.
"""
return "14:30:00" if pre_market else "21:00:00"
def _generate_rebalances(self):
"""
Output the rebalance timestamp list.
Returns
-------
`list[pd.Timestamp]`
The list of rebalance timestamps.
"""
rebalance_dates = pd.bdate_range(
start=self.start_date, end=self.end_date,
)
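# Illustrative example: a range from 2020-01-01 to 2020-01-07 yields the business
# days 01, 02, 03, 06 and 07 (the weekend is skipped, holidays are not).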
rebalance_times = [
pd.Timestamp(
"%s %s" % (date, self.market_time), tz=pytz.utc
)
for date in rebalance_dates
]
return rebalance_times
|
import mock
from pytest import fixture
from pytest import raises
from paasta_tools.secret_providers.vault import SecretProvider
@fixture
def mock_secret_provider():
with mock.patch(
"paasta_tools.secret_providers.vault.SecretProvider.get_vault_ecosystems_for_clusters",
autospec=True,
return_value=["devc"],
), mock.patch(
"paasta_tools.secret_providers.vault.get_vault_client", autospec=True
):
return SecretProvider(
soa_dir="/nail/blah",
service_name="universe",
cluster_names=["mesosstage"],
vault_auth_method="token",
)
def test_secret_provider(mock_secret_provider):
assert mock_secret_provider.ecosystems == ["devc"]
assert mock_secret_provider.clients["devc"]
def test_decrypt_environment(mock_secret_provider):
with mock.patch(
"paasta_tools.secret_providers.vault.get_secret_name_from_ref", autospec=True
) as mock_get_secret_name_from_ref, mock.patch(
"paasta_tools.secret_providers.vault.get_plaintext", autospec=False
) as mock_get_plaintext:
mock_get_plaintext.return_value = b"SECRETSQUIRREL"
mock_env = {
"MY_VAR": "SECRET(test-secret)",
"ANOTHER_VAR": "SECRET(another-secret)",
}
mock_get_secret_name_from_ref.return_value = "secret_name"
ret = mock_secret_provider.decrypt_environment(
environment=mock_env, some="kwarg"
)
mock_get_secret_name_from_ref.assert_has_calls(
[mock.call("SECRET(test-secret)"), mock.call("SECRET(another-secret)")]
)
expected = {"MY_VAR": "SECRETSQUIRREL", "ANOTHER_VAR": "SECRETSQUIRREL"}
assert ret == expected
def test_get_vault_ecosystems_for_clusters(mock_secret_provider):
mock_secret_provider.cluster_names = ["mesosstage", "devc", "prod"]
mock_secret_provider.vault_cluster_config = {
"mesosstage": "devc",
"devc": "devc",
"prod": "prod",
}
assert sorted(mock_secret_provider.get_vault_ecosystems_for_clusters()) == sorted(
["devc", "prod"]
)
mock_secret_provider.cluster_names = ["mesosstage", "devc", "prod1"]
with raises(KeyError):
mock_secret_provider.get_vault_ecosystems_for_clusters()
def test_write_secret(mock_secret_provider):
with mock.patch(
"paasta_tools.secret_providers.vault.TempGpgKeyring", autospec=False
), mock.patch(
"paasta_tools.secret_providers.vault.encrypt_secret", autospec=False
) as mock_encrypt_secret:
mock_secret_provider.write_secret(
action="add",
secret_name="mysecret",
plaintext=b"SECRETSQUIRREL",
cross_environment_motivation="because ...",
)
mock_encrypt_secret.assert_called_with(
client=mock_secret_provider.clients["devc"],
action="add",
ecosystem="devc",
secret_name="mysecret",
plaintext=b"SECRETSQUIRREL",
service_name="universe",
soa_dir="/nail/blah",
transit_key="paasta",
cross_environment_motivation="because ...",
)
mock_secret_provider.encryption_key = "special-key"
mock_secret_provider.write_secret(
action="add", secret_name="mysecret", plaintext=b"SECRETSQUIRREL"
)
mock_encrypt_secret.assert_called_with(
client=mock_secret_provider.clients["devc"],
action="add",
ecosystem="devc",
secret_name="mysecret",
plaintext=b"SECRETSQUIRREL",
service_name="universe",
soa_dir="/nail/blah",
transit_key="special-key",
cross_environment_motivation=None,
)
def test_decrypt_secret(mock_secret_provider):
with mock.patch(
"paasta_tools.secret_providers.vault.get_plaintext", autospec=False
) as mock_get_plaintext:
mock_get_plaintext.return_value = b"SECRETSQUIRREL"
assert mock_secret_provider.decrypt_secret("mysecret") == "SECRETSQUIRREL"
mock_get_plaintext.assert_called_with(
client=mock_secret_provider.clients["devc"],
path="/nail/blah/universe/secrets/mysecret.json",
env="devc",
cache_enabled=False,
cache_key=None,
cache_dir=None,
context="universe",
rescue_failures=False,
)
def test_decrypt_secret_raw(mock_secret_provider):
with mock.patch(
"paasta_tools.secret_providers.vault.get_plaintext", autospec=False
) as mock_get_plaintext:
mock_get_plaintext.return_value = b"SECRETSQUIRREL"
assert mock_secret_provider.decrypt_secret_raw("mysecret") == b"SECRETSQUIRREL"
mock_get_plaintext.assert_called_with(
client=mock_secret_provider.clients["devc"],
path="/nail/blah/universe/secrets/mysecret.json",
env="devc",
cache_enabled=False,
cache_key=None,
cache_dir=None,
context="universe",
rescue_failures=False,
)
def test_get_secret_signature_from_data(mock_secret_provider):
with mock.patch(
"paasta_tools.secret_providers.vault.get_plaintext", autospec=False
):
assert not mock_secret_provider.get_secret_signature_from_data(
{"environments": {"devc": {}}}
)
assert (
mock_secret_provider.get_secret_signature_from_data(
{"environments": {"devc": {"signature": "abc"}}}
)
== "abc"
)
def test_get_secret_signature_from_data_missing(mock_secret_provider):
mock_secret_provider.cluster_names = ["mesosstage", "devc", "prod"]
mock_secret_provider.vault_cluster_config = {
"mesosstage": "devc",
"devc": "devc",
"prod": "prod",
}
with mock.patch(
"paasta_tools.secret_providers.vault.get_plaintext", autospec=False
):
# Should not raise errors
assert not mock_secret_provider.get_secret_signature_from_data(
{"environments": {"westeros": {}}}
)
def test_renew_issue_cert(mock_secret_provider):
with mock.patch(
"paasta_tools.secret_providers.vault.do_cert_renew", autospec=True
) as mock_do_cert_renew:
mock_secret_provider.renew_issue_cert("paasta", "30m")
assert mock_do_cert_renew.called
|
from lemur.plugins.bases import IssuerPlugin, SourcePlugin
import requests
from lemur.plugins import lemur_adcs as ADCS
from certsrv import Certsrv
from OpenSSL import crypto
from flask import current_app
class ADCSIssuerPlugin(IssuerPlugin):
title = "ADCS"
slug = "adcs-issuer"
description = "Enables the creation of certificates by ADCS (Active Directory Certificate Services)"
version = ADCS.VERSION
author = "sirferl"
author_url = "https://github.com/sirferl/lemur"
def __init__(self, *args, **kwargs):
"""Initialize the issuer with the appropriate details."""
self.session = requests.Session()
super(ADCSIssuerPlugin, self).__init__(*args, **kwargs)
@staticmethod
def create_authority(options):
"""Create an authority.
Creates an authority. This authority is then used by Lemur to
allow a user to specify which Certificate Authority they want
to sign their certificate with.
:param options:
:return:
"""
adcs_root = current_app.config.get("ADCS_ROOT")
adcs_issuing = current_app.config.get("ADCS_ISSUING")
role = {"username": "", "password": "", "name": "adcs"}
return adcs_root, adcs_issuing, [role]
def create_certificate(self, csr, issuer_options):
adcs_server = current_app.config.get("ADCS_SERVER")
adcs_user = current_app.config.get("ADCS_USER")
adcs_pwd = current_app.config.get("ADCS_PWD")
adcs_auth_method = current_app.config.get("ADCS_AUTH_METHOD")
# if there is a config variable ADCS_TEMPLATE_<upper(authority.name)> take the value as Cert template
# else default to ADCS_TEMPLATE to be compatible with former versions
authority = issuer_options.get("authority").name.upper()
adcs_template = current_app.config.get("ADCS_TEMPLATE_{0}".format(authority), current_app.config.get("ADCS_TEMPLATE"))
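# Example (hypothetical config): for an authority named "InternalCA" the key
# ADCS_TEMPLATE_INTERNALCA is looked up first, with ADCS_TEMPLATE as the fallback.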
ca_server = Certsrv(
adcs_server, adcs_user, adcs_pwd, auth_method=adcs_auth_method
)
current_app.logger.info("Requesting CSR: {0}".format(csr))
current_app.logger.info("Issuer options: {0}".format(issuer_options))
cert = (
ca_server.get_cert(csr, adcs_template, encoding="b64")
.decode("utf-8")
.replace("\r\n", "\n")
)
chain = (
ca_server.get_ca_cert(encoding="b64").decode("utf-8").replace("\r\n", "\n")
)
return cert, chain, None
def revoke_certificate(self, certificate, reason):
raise NotImplementedError("Not implemented\n", self, certificate, reason)
def get_ordered_certificate(self, order_id):
raise NotImplementedError("Not implemented\n", self, order_id)
def canceled_ordered_certificate(self, pending_cert, **kwargs):
raise NotImplementedError("Not implemented\n", self, pending_cert, **kwargs)
class ADCSSourcePlugin(SourcePlugin):
title = "ADCS"
slug = "adcs-source"
description = "Enables the collecion of certificates"
version = ADCS.VERSION
author = "sirferl"
author_url = "https://github.com/sirferl/lemur"
def get_certificates(self, options, **kwargs):
adcs_server = current_app.config.get("ADCS_SERVER")
adcs_user = current_app.config.get("ADCS_USER")
adcs_pwd = current_app.config.get("ADCS_PWD")
adcs_auth_method = current_app.config.get("ADCS_AUTH_METHOD")
adcs_start = current_app.config.get("ADCS_START")
adcs_stop = current_app.config.get("ADCS_STOP")
ca_server = Certsrv(
adcs_server, adcs_user, adcs_pwd, auth_method=adcs_auth_method
)
out_certlist = []
for id in range(adcs_start, adcs_stop):
try:
cert = (
ca_server.get_existing_cert(id, encoding="b64")
.decode("utf-8")
.replace("\r\n", "\n")
)
except Exception as err:
if "{0}".format(err).find("CERTSRV_E_PROPERTY_EMPTY"):
# this error indicates end of certificate list(?), so we stop
break
else:
# We do nothing in case there is no certificate returned for other reasons
current_app.logger.info("Error with id {0}: {1}".format(id, err))
else:
# we have a certificate
pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
# loop through extensions to see if we find "TLS Web Server Authentication"
for e_id in range(pubkey.get_extension_count()):
try:
extension = "{0}".format(pubkey.get_extension(e_id))
except Exception:
extensionn = ""
if extension.find("TLS Web Server Authentication") != -1:
out_certlist.append(
{"name": format(pubkey.get_subject().CN), "body": cert}
)
break
return out_certlist
def get_endpoints(self, options, **kwargs):
# There are no endpoints in the ADCS
raise NotImplementedError("Not implemented\n", self, options, **kwargs)
|
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TIME_DAYS,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_SOURCE_ID = "source"
CONF_SOURCE_SENSOR = "source"
CONF_ROUND_DIGITS = "round"
CONF_UNIT_PREFIX = "unit_prefix"
CONF_UNIT_TIME = "unit_time"
CONF_UNIT_OF_MEASUREMENT = "unit"
CONF_METHOD = "method"
TRAPEZOIDAL_METHOD = "trapezoidal"
LEFT_METHOD = "left"
RIGHT_METHOD = "right"
INTEGRATION_METHOD = [TRAPEZOIDAL_METHOD, LEFT_METHOD, RIGHT_METHOD]
# SI Metric prefixes
UNIT_PREFIXES = {None: 1, "k": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12}
# SI Time prefixes
UNIT_TIME = {
TIME_SECONDS: 1,
TIME_MINUTES: 60,
TIME_HOURS: 60 * 60,
TIME_DAYS: 24 * 60 * 60,
}
ICON = "mdi:chart-histogram"
DEFAULT_ROUND = 3
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int),
vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES),
vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_METHOD, default=TRAPEZOIDAL_METHOD): vol.In(
INTEGRATION_METHOD
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the integration sensor."""
integral = IntegrationSensor(
config[CONF_SOURCE_SENSOR],
config.get(CONF_NAME),
config[CONF_ROUND_DIGITS],
config[CONF_UNIT_PREFIX],
config[CONF_UNIT_TIME],
config.get(CONF_UNIT_OF_MEASUREMENT),
config[CONF_METHOD],
)
async_add_entities([integral])
class IntegrationSensor(RestoreEntity):
"""Representation of an integration sensor."""
def __init__(
self,
source_entity,
name,
round_digits,
unit_prefix,
unit_time,
unit_of_measurement,
integration_method,
):
"""Initialize the integration sensor."""
self._sensor_source_id = source_entity
self._round_digits = round_digits
self._state = 0
self._method = integration_method
self._name = name if name is not None else f"{source_entity} integral"
if unit_of_measurement is None:
self._unit_template = (
f"{'' if unit_prefix is None else unit_prefix}{{}}{unit_time}"
)
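# e.g. a source sensor reporting in "W" with unit_prefix "k" and unit_time "h"
# later yields "kWh" once the source unit is known (illustrative combination).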
# we postpone the definition of unit_of_measurement to later
self._unit_of_measurement = None
else:
self._unit_of_measurement = unit_of_measurement
self._unit_prefix = UNIT_PREFIXES[unit_prefix]
self._unit_time = UNIT_TIME[unit_time]
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
try:
self._state = Decimal(state.state)
except (DecimalException, ValueError) as err:
_LOGGER.warning("Could not restore last state: %s", err)
@callback
def calc_integration(event):
"""Handle the sensor state changes."""
old_state = event.data.get("old_state")
new_state = event.data.get("new_state")
if (
old_state is None
or old_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
or new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
):
return
if self._unit_of_measurement is None:
unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
self._unit_of_measurement = self._unit_template.format(
"" if unit is None else unit
)
try:
# Compute the integration increment as a Riemann sum over the elapsed interval.
area = 0
elapsed_time = (
new_state.last_updated - old_state.last_updated
).total_seconds()
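# Riemann sum increment for the elapsed interval, depending on the method:
#   trapezoidal: (old + new) / 2 * dt
#   left:        old * dt
#   right:       new * dt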
if self._method == TRAPEZOIDAL_METHOD:
area = (
(Decimal(new_state.state) + Decimal(old_state.state))
* Decimal(elapsed_time)
/ 2
)
elif self._method == LEFT_METHOD:
area = Decimal(old_state.state) * Decimal(elapsed_time)
elif self._method == RIGHT_METHOD:
area = Decimal(new_state.state) * Decimal(elapsed_time)
integral = area / (self._unit_prefix * self._unit_time)
assert isinstance(integral, Decimal)
except ValueError as err:
_LOGGER.warning("While calculating integration: %s", err)
except DecimalException as err:
_LOGGER.warning(
"Invalid state (%s > %s): %s", old_state.state, new_state.state, err
)
except AssertionError as err:
_LOGGER.error("Could not calculate integral: %s", err)
else:
self._state += integral
self.async_write_ha_state()
async_track_state_change_event(
self.hass, [self._sensor_source_id], calc_integration
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return round(self._state, self._round_digits)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_SOURCE_ID: self._sensor_source_id}
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
|
from unittest import TestCase
import numpy as np
from scattertext.termscoring.ScaledFScore import ScaledFScore, ScaledFScorePresets
class TestScaledFScore(TestCase):
def test_get_scores(self):
cat_counts, not_cat_counts = self._get_counts()
scores = ScaledFScore.get_scores(cat_counts, not_cat_counts, beta=1.)
np.testing.assert_almost_equal(scores,
np.array([0.2689108, 0., 0.2689108, 0.1266617, 1.,
0.5, 0.5590517, 0.5, 0.5, 0.5720015]))
def test_get_scores_zero_all_same(self):
cat_counts = np.array([0, 0, 0, 0, 0, 0, 1, 2])
not_cat_counts = np.array([1, 1, 2, 1, 1, 1, 1, 2])
scores = ScaledFScore.get_scores(cat_counts, not_cat_counts)
np.testing.assert_almost_equal(scores, [0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 1.])
def test_score_difference(self):
cat_counts = np.array([0, 0, 0, 0, 0, 0, 1, 2])
not_cat_counts = np.array([1, 1, 2, 1, 1, 1, 1, 2])
scores = ScaledFScorePresets(use_score_difference=True).get_scores(cat_counts, not_cat_counts)
np.testing.assert_almost_equal(scores, [0.4857218, 0.4857218, 0.1970024, 0.4857218, 0.4857218, 0.4857218,
0.8548192, 0.90317])
def test_get_scores_zero_median(self):
cat_counts = np.array([0, 0, 0, 0, 0, 0, 1, 2])
not_cat_counts = np.array([1, 1, 2, 1, 1, 1, 1, 3])
ScaledFScore.get_scores(cat_counts, not_cat_counts)
def get_scores_for_category(self):
cat_counts, not_cat_counts = self._get_counts()
scores = ScaledFScore.get_scores_for_category(cat_counts, not_cat_counts)
np.testing.assert_almost_equal(scores,
[0.23991183969723384, 0.24969810634506373, 0.23991183969723384,
0.27646711056272855, 0.92885244834997516, 0.42010144843632563,
0.49166017105966719, 0.0, 0.0, 0.50262304057984664])
def _get_counts(self):
cat_counts = np.array([1, 5, 1, 9, 100, 1, 1, 0, 0, 2])
not_cat_counts = np.array([100, 510, 100, 199, 0, 1, 0, 1, 1, 0])
return cat_counts, not_cat_counts
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch, Mock
from diamond.collector import Collector
from nvidia_gpu import NvidiaGPUCollector
##########################################################################
class TestNvidiaGPUCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NvidiaGPUCollector', {
})
self.collector = NvidiaGPUCollector(config, None)
def test_import(self):
self.assertTrue(NvidiaGPUCollector)
@patch.object(Collector, 'publish')
def test_should_publish_gpu_stat(self, publish_mock):
output_mock = Mock(
return_value=(self.getFixture('nvidia_smi').getvalue(), '')
)
collector_mock = patch.object(
NvidiaGPUCollector,
'run_command',
output_mock
)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'gpu_0.memory.total': 4095,
'gpu_0.memory.used': 2670,
'gpu_0.memory.free': 1425,
'gpu_0.utilization.gpu': 0,
'gpu_0.utilization.memory': 0,
'gpu_0.temperature.gpu': 53,
'gpu_1.memory.total': 4095,
'gpu_1.memory.used': 2670,
'gpu_1.memory.free': 1425,
'gpu_1.utilization.gpu': 0,
'gpu_1.utilization.memory': 0,
'gpu_1.temperature.gpu': 44,
'gpu_2.memory.total': 4095,
'gpu_2.memory.used': 1437,
'gpu_2.memory.free': 2658,
'gpu_2.utilization.gpu': 0,
'gpu_2.utilization.memory': 0,
'gpu_2.temperature.gpu': 48,
'gpu_3.memory.total': 4095,
'gpu_3.memory.used': 1437,
'gpu_3.memory.free': 2658,
'gpu_3.utilization.gpu': 0,
'gpu_3.utilization.memory': 0,
'gpu_3.temperature.gpu': 44
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from ipaddress import ip_address
from typing import List, Optional
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.auth.models import User
from homeassistant.core import callback
from homeassistant.util.logging import async_create_catching_coro
from .const import (
DEFAULT_ALEXA_REPORT_STATE,
DEFAULT_EXPOSED_DOMAINS,
DEFAULT_GOOGLE_REPORT_STATE,
DOMAIN,
PREF_ALEXA_DEFAULT_EXPOSE,
PREF_ALEXA_ENTITY_CONFIGS,
PREF_ALEXA_REPORT_STATE,
PREF_ALIASES,
PREF_CLOUD_USER,
PREF_CLOUDHOOKS,
PREF_DISABLE_2FA,
PREF_ENABLE_ALEXA,
PREF_ENABLE_GOOGLE,
PREF_ENABLE_REMOTE,
PREF_GOOGLE_DEFAULT_EXPOSE,
PREF_GOOGLE_ENTITY_CONFIGS,
PREF_GOOGLE_LOCAL_WEBHOOK_ID,
PREF_GOOGLE_REPORT_STATE,
PREF_GOOGLE_SECURE_DEVICES_PIN,
PREF_OVERRIDE_NAME,
PREF_SHOULD_EXPOSE,
PREF_USERNAME,
InvalidTrustedNetworks,
InvalidTrustedProxies,
)
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
_UNDEF = object()
class CloudPreferences:
"""Handle cloud preferences."""
def __init__(self, hass):
"""Initialize cloud prefs."""
self._hass = hass
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._prefs = None
self._listeners = []
async def async_initialize(self):
"""Finish initializing the preferences."""
prefs = await self._store.async_load()
if prefs is None:
prefs = self._empty_config("")
self._prefs = prefs
if PREF_GOOGLE_LOCAL_WEBHOOK_ID not in self._prefs:
await self._save_prefs(
{
**self._prefs,
PREF_GOOGLE_LOCAL_WEBHOOK_ID: self._hass.components.webhook.async_generate_id(),
}
)
@callback
def async_listen_updates(self, listener):
"""Listen for updates to the preferences."""
self._listeners.append(listener)
async def async_update(
self,
*,
google_enabled=_UNDEF,
alexa_enabled=_UNDEF,
remote_enabled=_UNDEF,
google_secure_devices_pin=_UNDEF,
cloudhooks=_UNDEF,
cloud_user=_UNDEF,
google_entity_configs=_UNDEF,
alexa_entity_configs=_UNDEF,
alexa_report_state=_UNDEF,
google_report_state=_UNDEF,
alexa_default_expose=_UNDEF,
google_default_expose=_UNDEF,
):
"""Update user preferences."""
prefs = {**self._prefs}
for key, value in (
(PREF_ENABLE_GOOGLE, google_enabled),
(PREF_ENABLE_ALEXA, alexa_enabled),
(PREF_ENABLE_REMOTE, remote_enabled),
(PREF_GOOGLE_SECURE_DEVICES_PIN, google_secure_devices_pin),
(PREF_CLOUDHOOKS, cloudhooks),
(PREF_CLOUD_USER, cloud_user),
(PREF_GOOGLE_ENTITY_CONFIGS, google_entity_configs),
(PREF_ALEXA_ENTITY_CONFIGS, alexa_entity_configs),
(PREF_ALEXA_REPORT_STATE, alexa_report_state),
(PREF_GOOGLE_REPORT_STATE, google_report_state),
(PREF_ALEXA_DEFAULT_EXPOSE, alexa_default_expose),
(PREF_GOOGLE_DEFAULT_EXPOSE, google_default_expose),
):
if value is not _UNDEF:
prefs[key] = value
if remote_enabled is True and self._has_local_trusted_network:
prefs[PREF_ENABLE_REMOTE] = False
raise InvalidTrustedNetworks
if remote_enabled is True and self._has_local_trusted_proxies:
prefs[PREF_ENABLE_REMOTE] = False
raise InvalidTrustedProxies
await self._save_prefs(prefs)
async def async_update_google_entity_config(
self,
*,
entity_id,
override_name=_UNDEF,
disable_2fa=_UNDEF,
aliases=_UNDEF,
should_expose=_UNDEF,
):
"""Update config for a Google entity."""
entities = self.google_entity_configs
entity = entities.get(entity_id, {})
changes = {}
for key, value in (
(PREF_OVERRIDE_NAME, override_name),
(PREF_DISABLE_2FA, disable_2fa),
(PREF_ALIASES, aliases),
(PREF_SHOULD_EXPOSE, should_expose),
):
if value is not _UNDEF:
changes[key] = value
if not changes:
return
updated_entity = {**entity, **changes}
updated_entities = {**entities, entity_id: updated_entity}
await self.async_update(google_entity_configs=updated_entities)
async def async_update_alexa_entity_config(
self, *, entity_id, should_expose=_UNDEF
):
"""Update config for an Alexa entity."""
entities = self.alexa_entity_configs
entity = entities.get(entity_id, {})
changes = {}
for key, value in ((PREF_SHOULD_EXPOSE, should_expose),):
if value is not _UNDEF:
changes[key] = value
if not changes:
return
updated_entity = {**entity, **changes}
updated_entities = {**entities, entity_id: updated_entity}
await self.async_update(alexa_entity_configs=updated_entities)
async def async_set_username(self, username):
"""Set the username that is logged in."""
# Logging out.
if username is None:
user = await self._load_cloud_user()
if user is not None:
await self._hass.auth.async_remove_user(user)
await self._save_prefs({**self._prefs, PREF_CLOUD_USER: None})
return
cur_username = self._prefs.get(PREF_USERNAME)
if cur_username == username:
return
if cur_username is None:
await self._save_prefs({**self._prefs, PREF_USERNAME: username})
else:
await self._save_prefs(self._empty_config(username))
def as_dict(self):
"""Return dictionary version."""
return {
PREF_ALEXA_DEFAULT_EXPOSE: self.alexa_default_expose,
PREF_ALEXA_ENTITY_CONFIGS: self.alexa_entity_configs,
PREF_ALEXA_REPORT_STATE: self.alexa_report_state,
PREF_CLOUDHOOKS: self.cloudhooks,
PREF_ENABLE_ALEXA: self.alexa_enabled,
PREF_ENABLE_GOOGLE: self.google_enabled,
PREF_ENABLE_REMOTE: self.remote_enabled,
PREF_GOOGLE_DEFAULT_EXPOSE: self.google_default_expose,
PREF_GOOGLE_ENTITY_CONFIGS: self.google_entity_configs,
PREF_GOOGLE_REPORT_STATE: self.google_report_state,
PREF_GOOGLE_SECURE_DEVICES_PIN: self.google_secure_devices_pin,
}
@property
def remote_enabled(self):
"""Return if remote is enabled on start."""
enabled = self._prefs.get(PREF_ENABLE_REMOTE, False)
if not enabled:
return False
if self._has_local_trusted_network or self._has_local_trusted_proxies:
return False
return True
@property
def alexa_enabled(self):
"""Return if Alexa is enabled."""
return self._prefs[PREF_ENABLE_ALEXA]
@property
def alexa_report_state(self):
"""Return if Alexa report state is enabled."""
return self._prefs.get(PREF_ALEXA_REPORT_STATE, DEFAULT_ALEXA_REPORT_STATE)
@property
def alexa_default_expose(self) -> Optional[List[str]]:
"""Return array of entity domains that are exposed by default to Alexa.
Can return None, in which case it should, for backwards compatibility, be interpreted as allowing all domains.
"""
return self._prefs.get(PREF_ALEXA_DEFAULT_EXPOSE)
@property
def alexa_entity_configs(self):
"""Return Alexa Entity configurations."""
return self._prefs.get(PREF_ALEXA_ENTITY_CONFIGS, {})
@property
def google_enabled(self):
"""Return if Google is enabled."""
return self._prefs[PREF_ENABLE_GOOGLE]
@property
def google_report_state(self):
"""Return if Google report state is enabled."""
return self._prefs.get(PREF_GOOGLE_REPORT_STATE, DEFAULT_GOOGLE_REPORT_STATE)
@property
def google_secure_devices_pin(self):
"""Return if Google is allowed to unlock locks."""
return self._prefs.get(PREF_GOOGLE_SECURE_DEVICES_PIN)
@property
def google_entity_configs(self):
"""Return Google Entity configurations."""
return self._prefs.get(PREF_GOOGLE_ENTITY_CONFIGS, {})
@property
def google_local_webhook_id(self):
"""Return Google webhook ID to receive local messages."""
return self._prefs[PREF_GOOGLE_LOCAL_WEBHOOK_ID]
@property
def google_default_expose(self) -> Optional[List[str]]:
"""Return array of entity domains that are exposed by default to Google.
Can return None, in which case it should, for backwards compatibility, be interpreted as allowing all domains.
"""
return self._prefs.get(PREF_GOOGLE_DEFAULT_EXPOSE)
@property
def cloudhooks(self):
"""Return the published cloud webhooks."""
return self._prefs.get(PREF_CLOUDHOOKS, {})
async def get_cloud_user(self) -> str:
"""Return ID from Home Assistant Cloud system user."""
user = await self._load_cloud_user()
if user:
return user.id
user = await self._hass.auth.async_create_system_user(
"Home Assistant Cloud", [GROUP_ID_ADMIN]
)
await self.async_update(cloud_user=user.id)
return user.id
async def _load_cloud_user(self) -> Optional[User]:
"""Load cloud user if available."""
user_id = self._prefs.get(PREF_CLOUD_USER)
if user_id is None:
return None
# Fetch the user. It can happen that the user no longer exists if
# an image was restored without restoring the cloud prefs.
return await self._hass.auth.async_get_user(user_id)
@property
def _has_local_trusted_network(self) -> bool:
"""Return if we allow localhost to bypass auth."""
local4 = ip_address("127.0.0.1")
local6 = ip_address("::1")
for prv in self._hass.auth.auth_providers:
if prv.type != "trusted_networks":
continue
for network in prv.trusted_networks:
if local4 in network or local6 in network:
return True
return False
@property
def _has_local_trusted_proxies(self) -> bool:
"""Return if we allow localhost to be a proxy and use its data."""
if not hasattr(self._hass, "http"):
return False
local4 = ip_address("127.0.0.1")
local6 = ip_address("::1")
if any(
local4 in nwk or local6 in nwk for nwk in self._hass.http.trusted_proxies
):
return True
return False
async def _save_prefs(self, prefs):
"""Save preferences to disk."""
self._prefs = prefs
await self._store.async_save(self._prefs)
for listener in self._listeners:
self._hass.async_create_task(async_create_catching_coro(listener(self)))
@callback
def _empty_config(self, username):
"""Return an empty config."""
return {
PREF_ALEXA_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_ALEXA_ENTITY_CONFIGS: {},
PREF_CLOUD_USER: None,
PREF_CLOUDHOOKS: {},
PREF_ENABLE_ALEXA: True,
PREF_ENABLE_GOOGLE: True,
PREF_ENABLE_REMOTE: False,
PREF_GOOGLE_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_GOOGLE_ENTITY_CONFIGS: {},
PREF_GOOGLE_LOCAL_WEBHOOK_ID: self._hass.components.webhook.async_generate_id(),
PREF_GOOGLE_SECURE_DEVICES_PIN: None,
PREF_USERNAME: username,
}
|
import unittest
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import coco_semantic_segmentation_label_names
from chainercv.datasets import COCOSemanticSegmentationDataset
from chainercv.utils import assert_is_semantic_segmentation_dataset
@testing.parameterize(
{'split': 'train'},
{'split': 'val'},
)
class TestCOCOSemanticSegmentationDataset(unittest.TestCase):
def setUp(self):
self.dataset = COCOSemanticSegmentationDataset(split=self.split)
@attr.slow
def test_coco_semantic_segmentation_dataset(self):
assert_is_semantic_segmentation_dataset(
self.dataset,
len(coco_semantic_segmentation_label_names),
n_example=10)
testing.run_module(__name__, __file__)
|
from typing import List
from aiohttp import StreamReader
from homeassistant.components.stt import Provider, SpeechMetadata, SpeechResult
from homeassistant.components.stt.const import (
AudioBitRates,
AudioChannels,
AudioCodecs,
AudioFormats,
AudioSampleRates,
SpeechResultState,
)
SUPPORT_LANGUAGES = ["en", "de"]
async def async_get_engine(hass, config, discovery_info=None):
"""Set up Demo speech component."""
return DemoProvider()
class DemoProvider(Provider):
"""Demo speech API provider."""
@property
def supported_languages(self) -> List[str]:
"""Return a list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_formats(self) -> List[AudioFormats]:
"""Return a list of supported formats."""
return [AudioFormats.WAV]
@property
def supported_codecs(self) -> List[AudioCodecs]:
"""Return a list of supported codecs."""
return [AudioCodecs.PCM]
@property
def supported_bit_rates(self) -> List[AudioBitRates]:
"""Return a list of supported bit rates."""
return [AudioBitRates.BITRATE_16]
@property
def supported_sample_rates(self) -> List[AudioSampleRates]:
"""Return a list of supported sample rates."""
return [AudioSampleRates.SAMPLERATE_16000, AudioSampleRates.SAMPLERATE_44100]
@property
def supported_channels(self) -> List[AudioChannels]:
"""Return a list of supported channels."""
return [AudioChannels.CHANNEL_STEREO]
async def async_process_audio_stream(
self, metadata: SpeechMetadata, stream: StreamReader
) -> SpeechResult:
"""Process an audio stream to STT service."""
# Read available data
async for _ in stream.iter_chunked(4096):
pass
return SpeechResult("Turn the Kitchen Lights on", SpeechResultState.SUCCESS)
|
from typing import MutableSequence
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject
from qutebrowser.utils import usertypes, log, standarddir, objreg
from qutebrowser.misc import lineparser
class HistoryEmptyError(Exception):
"""Raised when the history is empty."""
class HistoryEndReachedError(Exception):
"""Raised when the end of the history is reached."""
class History(QObject):
"""Command history.
Attributes:
history: A list of executed commands, with newer commands at the end.
_tmphist: Temporary history for history browsing (as NeighborList)
Signals:
changed: Emitted when an entry was added to the history.
"""
changed = pyqtSignal()
def __init__(self, *, history=None, parent=None):
"""Constructor.
Args:
history: The initial history to set.
"""
super().__init__(parent)
self._tmphist = None
if history is None:
self.history: MutableSequence[str] = []
else:
self.history = history
def __getitem__(self, idx):
return self.history[idx]
def is_browsing(self):
"""Check _tmphist to see if we're browsing."""
return self._tmphist is not None
def start(self, text):
"""Start browsing to the history.
Called when the user presses the up/down key and wasn't browsing the
history already.
Args:
text: The preset text.
"""
log.misc.debug("Preset text: '{}'".format(text))
if text:
items: MutableSequence[str] = [
e for e in self.history
if e.startswith(text)]
else:
items = self.history
if not items:
raise HistoryEmptyError
self._tmphist = usertypes.NeighborList(items)
return self._tmphist.lastitem()
@pyqtSlot()
def stop(self):
"""Stop browsing the history."""
self._tmphist = None
def previtem(self):
"""Get the previous item in the temp history.
start() needs to be called before calling this.
"""
if not self.is_browsing():
raise ValueError("Currently not browsing history")
assert self._tmphist is not None
try:
return self._tmphist.previtem()
except IndexError:
raise HistoryEndReachedError
def nextitem(self):
"""Get the next item in the temp history.
start() needs to be called before calling this.
"""
if not self.is_browsing():
raise ValueError("Currently not browsing history")
assert self._tmphist is not None
try:
return self._tmphist.nextitem()
except IndexError:
raise HistoryEndReachedError
def append(self, text):
"""Append a new item to the history.
Args:
text: The text to append.
"""
if not self.history or text != self.history[-1]:
self.history.append(text)
self.changed.emit()
def init():
"""Initialize the LimitLineParser storing the history."""
save_manager = objreg.get('save-manager')
command_history = lineparser.LimitLineParser(
standarddir.data(), 'cmd-history',
limit='completion.cmd_history_max_items')
objreg.register('command-history', command_history)
save_manager.add_saveable('command-history', command_history.save,
command_history.changed)
|
import pandas as pd
import pytest
import pytz
from qstrader.system.rebalance.end_of_month import EndOfMonthRebalance
@pytest.mark.parametrize(
"start_date,end_date,pre_market,expected_dates,expected_time",
[
(
'2020-03-11', '2020-12-31', False, [
'2020-03-31', '2020-04-30', '2020-05-29', '2020-06-30',
'2020-07-31', '2020-08-31', '2020-09-30', '2020-10-30',
'2020-11-30', '2020-12-31'
], '21:00:00'
),
(
'2019-12-26', '2020-09-01', True, [
'2019-12-31', '2020-01-31', '2020-02-28', '2020-03-31',
'2020-04-30', '2020-05-29', '2020-06-30', '2020-07-31',
'2020-08-31'
], '14:30:00'
)
]
)
def test_monthly_rebalance(
start_date, end_date, pre_market, expected_dates, expected_time
):
"""
Checks that the end of month (business day) rebalance provides
the correct datetimes for the provided range.
"""
sd = pd.Timestamp(start_date, tz=pytz.UTC)
ed = pd.Timestamp(end_date, tz=pytz.UTC)
reb = EndOfMonthRebalance(
start_dt=sd, end_dt=ed, pre_market=pre_market
)
actual_datetimes = reb._generate_rebalances()
expected_datetimes = [
pd.Timestamp('%s %s' % (expected_date, expected_time), tz=pytz.UTC)
for expected_date in expected_dates
]
assert actual_datetimes == expected_datetimes
|
import os
from homeassistant.components.folder.sensor import CONF_FOLDER_PATHS
from homeassistant.setup import async_setup_component
CWD = os.path.join(os.path.dirname(__file__))
TEST_FOLDER = "test_folder"
TEST_DIR = os.path.join(CWD, TEST_FOLDER)
TEST_TXT = "mock_test_folder.txt"
TEST_FILE = os.path.join(TEST_DIR, TEST_TXT)
def create_file(path):
"""Create a test file."""
with open(path, "w") as test_file:
test_file.write("test")
def remove_test_file():
"""Remove test file."""
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
os.rmdir(TEST_DIR)
async def test_invalid_path(hass):
"""Test that an invalid path is caught."""
config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: "invalid_path"}}
assert await async_setup_component(hass, "sensor", config)
assert len(hass.states.async_entity_ids()) == 0
async def test_valid_path(hass):
"""Test for a valid path."""
if not os.path.isdir(TEST_DIR):
os.mkdir(TEST_DIR)
create_file(TEST_FILE)
hass.config.allowlist_external_dirs = {TEST_DIR}
config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: TEST_DIR}}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
state = hass.states.get("sensor.test_folder")
assert state.state == "0.0"
assert state.attributes.get("number_of_files") == 1
remove_test_file()
|
from celery import Celery
from celery.exceptions import (
SoftTimeLimitExceeded,
TimeLimitExceeded,
WorkerLostError,
WorkerShutdown,
WorkerTerminate)
from httpobs.conf import DEVELOPMENT_MODE
from httpobs.database import (insert_test_results,
select_site_headers,
update_scan_state)
from httpobs.scanner import celeryconfig, STATE_ABORTED, STATE_FAILED, STATE_RUNNING
from httpobs.scanner.analyzer import tests
from httpobs.scanner.retriever import retrieve_all
from httpobs.scanner.utils import sanitize_headers
import sys
# Create the scanner task queue
scanner = Celery()
scanner.config_from_object(celeryconfig)
@scanner.task()
def scan(hostname: str, site_id: int, scan_id: int):
try:
# Once celery kicks off the task, let's update the scan state from PENDING to RUNNING
update_scan_state(scan_id, STATE_RUNNING)
# Get the site's cookies and headers
headers = select_site_headers(hostname)
# Attempt to retrieve all the resources
reqs = retrieve_all(hostname, cookies=headers['cookies'], headers=headers['headers'])
# If we can't connect at all, let's abort the test
if reqs['responses']['auto'] is None:
update_scan_state(scan_id, STATE_FAILED, error='site down')
return
# Execute each test, replacing the underscores in the function name with dashes in the test name
# TODO: Get overridden expectations
insert_test_results(site_id,
scan_id,
[test(reqs) for test in tests],
sanitize_headers(reqs['responses']['auto'].headers),
reqs['responses']['auto'].status_code)
# catch the celery timeout, which will almost certainly occur in retrieve_all()
except SoftTimeLimitExceeded:
update_scan_state(scan_id, STATE_ABORTED, error='site unresponsive')
except (TimeLimitExceeded, WorkerLostError, WorkerShutdown, WorkerTerminate):
raise
# the database is down, oh no!
except IOError:
print('database down, aborting scan on {hostname}'.format(hostname=hostname), file=sys.stderr)
except:
# TODO: have more specific error messages
e = sys.exc_info()[1] # get the error message
# If we are unsuccessful, close out the scan in the database
update_scan_state(scan_id, STATE_FAILED, error=repr(e))
# Print the exception to stderr if we're in dev
if DEVELOPMENT_MODE:
import traceback
print('Error detected in scan for: ' + hostname)
traceback.print_exc(file=sys.stderr)
|
import os
import pytest
import coverage
from coverage.backward import StringIO
from coverage.debug import filter_text, info_formatter, info_header, short_id, short_stack
from coverage.debug import clipped_repr
from coverage.env import C_TRACER
from tests.coveragetest import CoverageTest
from tests.helpers import re_line, re_lines
class InfoFormatterTest(CoverageTest):
"""Tests of debug.info_formatter."""
run_in_temp_dir = False
def test_info_formatter(self):
lines = list(info_formatter([
('x', 'hello there'),
('very long label', ['one element']),
('regular', ['abc', 'def', 'ghi', 'jkl']),
('nothing', []),
]))
expected = [
' x: hello there',
' very long label: one element',
' regular: abc',
' def',
' ghi',
' jkl',
' nothing: -none-',
]
self.assertEqual(expected, lines)
def test_info_formatter_with_generator(self):
lines = list(info_formatter(('info%d' % i, i) for i in range(3)))
expected = [
' info0: 0',
' info1: 1',
' info2: 2',
]
self.assertEqual(expected, lines)
def test_too_long_label(self):
with self.assertRaises(AssertionError):
list(info_formatter([('this label is way too long and will not fit', 23)]))
@pytest.mark.parametrize("label, header", [
("x", "-- x ---------------------------------------------------------"),
("hello there", "-- hello there -----------------------------------------------"),
])
def test_info_header(label, header):
assert info_header(label) == header
@pytest.mark.parametrize("id64, id16", [
(0x1234, 0x1234),
(0x12340000, 0x1234),
(0xA5A55A5A, 0xFFFF),
(0x1234cba956780fed, 0x8008),
])
def test_short_id(id64, id16):
assert short_id(id64) == id16
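# The expected values are consistent with short_id() XOR-folding the four 16-bit
# chunks of the 64-bit id into one 16-bit value, e.g. 0xA5A5 ^ 0x5A5A == 0xFFFF.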
@pytest.mark.parametrize("text, numchars, result", [
("hello", 10, "'hello'"),
("0123456789abcdefghijklmnopqrstuvwxyz", 15, "'01234...vwxyz'"),
])
def test_clipped_repr(text, numchars, result):
assert clipped_repr(text, numchars) == result
@pytest.mark.parametrize("text, filters, result", [
("hello", [], "hello"),
("hello\n", [], "hello\n"),
("hello\nhello\n", [], "hello\nhello\n"),
("hello\nbye\n", [lambda x: "="+x], "=hello\n=bye\n"),
("hello\nbye\n", [lambda x: "="+x, lambda x: x+"\ndone\n"], "=hello\ndone\n=bye\ndone\n"),
])
def test_filter_text(text, filters, result):
assert filter_text(text, filters) == result
class DebugTraceTest(CoverageTest):
"""Tests of debug output."""
def f1_debug_output(self, debug):
"""Runs some code with `debug` option, returns the debug output."""
# Make code to run.
self.make_file("f1.py", """\
def f1(x):
return x+1
for i in range(5):
f1(i)
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=debug)
cov._debug_file = debug_out
self.start_import_stop(cov, "f1")
cov.save()
out_lines = debug_out.getvalue()
return out_lines
def test_debug_no_trace(self):
out_lines = self.f1_debug_output([])
# We should have no output at all.
self.assertFalse(out_lines)
def test_debug_trace(self):
out_lines = self.f1_debug_output(["trace"])
# We should have a line like "Tracing 'f1.py'"
self.assertIn("Tracing 'f1.py'", out_lines)
# We should have lines like "Not tracing 'collector.py'..."
coverage_lines = re_lines(
out_lines,
r"^Not tracing .*: is part of coverage.py$"
)
self.assertTrue(coverage_lines)
def test_debug_trace_pid(self):
out_lines = self.f1_debug_output(["trace", "pid"])
# Now our lines are always prefixed with the process id.
pid_prefix = r"^%5d\.[0-9a-f]{4}: " % os.getpid()
pid_lines = re_lines(out_lines, pid_prefix)
self.assertEqual(pid_lines, out_lines)
# We still have some tracing, and some not tracing.
self.assertTrue(re_lines(out_lines, pid_prefix + "Tracing "))
self.assertTrue(re_lines(out_lines, pid_prefix + "Not tracing "))
def test_debug_callers(self):
out_lines = self.f1_debug_output(["pid", "dataop", "dataio", "callers"])
print(out_lines)
# For every real message, there should be a stack trace with a line like
# "f1_debug_output : /Users/ned/coverage/tests/test_debug.py @71"
real_messages = re_lines(out_lines, r":\d+", match=False).splitlines()
frame_pattern = r"\s+f1_debug_output : .*tests[/\\]test_debug.py:\d+$"
frames = re_lines(out_lines, frame_pattern).splitlines()
self.assertEqual(len(real_messages), len(frames))
last_line = out_lines.splitlines()[-1]
# The details of what to expect on the stack are empirical, and can change
# as the code changes. This test is here to ensure that the debug code
# continues working. It's ok to adjust these details over time.
self.assertRegex(real_messages[-1], r"^\s*\d+\.\w{4}: Adding file tracers: 0 files")
self.assertRegex(last_line, r"\s+add_file_tracers : .*coverage[/\\]sqldata.py:\d+$")
def test_debug_config(self):
out_lines = self.f1_debug_output(["config"])
labels = """
attempted_config_files branch config_files_read config_file cover_pylib data_file
debug exclude_list extra_css html_dir html_title ignore_errors
run_include run_omit parallel partial_always_list partial_list paths
precision show_missing source timid xml_output
report_include report_omit
""".split()
for label in labels:
label_pat = r"^\s*%s: " % label
self.assertEqual(
len(re_lines(out_lines, label_pat).splitlines()),
1,
msg="Incorrect lines for %r" % label,
)
def test_debug_sys(self):
out_lines = self.f1_debug_output(["sys"])
labels = """
version coverage cover_paths pylib_paths tracer configs_attempted config_file
configs_read data_file python platform implementation executable
pid cwd path environment command_line cover_match pylib_match
""".split()
for label in labels:
label_pat = r"^\s*%s: " % label
self.assertEqual(
len(re_lines(out_lines, label_pat).splitlines()),
1,
msg="Incorrect lines for %r" % label,
)
def test_debug_sys_ctracer(self):
out_lines = self.f1_debug_output(["sys"])
tracer_line = re_line(out_lines, r"CTracer:").strip()
if C_TRACER:
expected = "CTracer: available"
else:
expected = "CTracer: unavailable"
self.assertEqual(expected, tracer_line)
def f_one(*args, **kwargs):
"""First of the chain of functions for testing `short_stack`."""
return f_two(*args, **kwargs)
def f_two(*args, **kwargs):
"""Second of the chain of functions for testing `short_stack`."""
return f_three(*args, **kwargs)
def f_three(*args, **kwargs):
"""Third of the chain of functions for testing `short_stack`."""
return short_stack(*args, **kwargs)
class ShortStackTest(CoverageTest):
"""Tests of coverage.debug.short_stack."""
run_in_temp_dir = False
def test_short_stack(self):
stack = f_one().splitlines()
self.assertGreater(len(stack), 10)
self.assertIn("f_three", stack[-1])
self.assertIn("f_two", stack[-2])
self.assertIn("f_one", stack[-3])
def test_short_stack_limit(self):
stack = f_one(limit=5).splitlines()
self.assertEqual(len(stack), 5)
def test_short_stack_skip(self):
stack = f_one(skip=1).splitlines()
self.assertIn("f_two", stack[-1])
|
import numpy as np
import hypertools._shared.helpers as helpers
def test_center():
assert np.array_equal(helpers.center([np.array([[0,0,0],[1,1,1]])]),[np.array([[-0.5,-0.5,-0.5],[0.5,0.5,0.5]])])
def test_group_by_category_ints():
assert helpers.group_by_category([1, 1, 2, 3])==[0, 0, 1, 2]
def test_group_by_category_str():
assert helpers.group_by_category(['a', 'a', 'c', 'b'])==[0, 0, 1, 2]
def test_vals2colors_list():
assert np.allclose(helpers.vals2colors([0, .5, 1]),[(0.9629680891964629, 0.9860207612456747, 0.9360092272202999), (0.7944636678200693, 0.9194156093810073, 0.7700884275278739), (0.4740484429065744, 0.7953863898500577, 0.7713956170703576)])
def test_vals2colors_list_of_lists():
assert np.allclose(helpers.vals2colors([[0],[.5],[1]]),[(0.9629680891964629, 0.9860207612456747, 0.9360092272202999), (0.7944636678200693, 0.9194156093810073, 0.7700884275278739), (0.4740484429065744, 0.7953863898500577, 0.7713956170703576)])
def test_vals2bins():
assert helpers.vals2bins([0,1,2])==[0, 33, 66]
def test_interp_array():
assert np.allclose(helpers.interp_array(np.array([1,2,3])),np.linspace(1,2.9,20))
def test_interp_array_list():
assert np.allclose(helpers.interp_array_list(np.array([[1,2,3],[1,2,3]])),[np.linspace(1,2.9,20)] * 2)
def test_interp_array_list_interpval():
assert helpers.interp_array_list([np.array([[1,2,3],[1,2,3],[1,2,3]])],interp_val=10)[0].shape[0]==20
# def test_check_data_list_of_arrays():
# helpers.check_data([np.random.random((3,3))]*2)=='list'
#
# def test_check_data_list_of_other():
# with pytest.raises(ValueError) as e_info:
# helpers.check_data([1,2,3])
#
# def test_check_data_array():
# helpers.check_data(np.array([[0,1,2],[1,2,3]]))=='array'
#
# def test_check_data_df():
# helpers.check_data(pd.DataFrame([0,1,2]))=='df'
#
# def test_check_data_df_list():
# helpers.check_data([pd.DataFrame([0,1,2]),pd.DataFrame([0,1,2])])=='dflist'
#
# def test_check_data_int():
# with pytest.raises(Exception) as e_info:
# helpers.check_data(int(1))
#
# def test_check_data_str():
# with pytest.raises(Exception) as e_info:
# helpers.check_data(str(1))
def test_parse_args_array():
x = [np.random.random((3,3))]
args=('o',)
assert helpers.parse_args(x, args)==[('o',)]
def test_parse_args_list():
x = [np.random.random((3,3))]*2
args=('o',)
assert helpers.parse_args(x, args)==[('o',),('o',)]
def test_parse_kwargs_array():
x = [np.random.random((3,3))]
kwargs={'label': ['Group A']}
assert helpers.parse_kwargs(x, kwargs)==[{'label': 'Group A'}]
def test_parse_kwargs_list():
x = [np.random.random((3,3))]*2
kwargs={'label': ['Group A', 'Group B']}
assert helpers.parse_kwargs(x, kwargs)==[{'label': 'Group A'}, {'label': 'Group B'}]
def test_reshape_data():
x = [[1,2],[3,4]]*2
labels = ['a','b','a','b']
assert np.array_equal(helpers.reshape_data(x, labels, labels)[0],[np.array([[1,2],[1,2]]),np.array([[3,4],[3,4]])])
|
from django.views.generic import TemplateView
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
class Sitemap(TemplateView):
"""
Sitemap view of the Weblog.
"""
template_name = 'zinnia/sitemap.html'
def get_context_data(self, **kwargs):
"""
Populate the context of the template
with all published entries and all the categories.
"""
context = super(Sitemap, self).get_context_data(**kwargs)
context.update(
{'entries': Entry.published.all(),
'categories': Category.published.all(),
'authors': Author.published.all()}
)
return context
|
from babelfish import LanguageReverseConverter
from ..exceptions import ConfigurationError
class ShooterConverter(LanguageReverseConverter):
def __init__(self):
self.from_shooter = {'chn': ('zho',), 'eng': ('eng',)}
self.to_shooter = {v: k for k, v in self.from_shooter.items()}
self.codes = set(self.from_shooter.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3,) in self.to_shooter:
return self.to_shooter[(alpha3,)]
raise ConfigurationError('Unsupported language for shooter: %s, %s, %s' % (alpha3, country, script))
def reverse(self, shooter):
if shooter in self.from_shooter:
return self.from_shooter[shooter]
raise ConfigurationError('Unsupported language code for shooter: %s' % shooter)
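# Minimal usage sketch (not part of the original module), assuming the
# converter is instantiated directly rather than through babelfish's
# converter registry.
if __name__ == '__main__':
    converter = ShooterConverter()
    print(converter.convert('zho'))   # -> 'chn'
    print(converter.reverse('chn'))   # -> ('zho',)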
|
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainercv.links.model.faster_rcnn.faster_rcnn import FasterRCNN
from chainercv.links.model.faster_rcnn.region_proposal_network import \
RegionProposalNetwork
from chainercv.links.model.vgg.vgg16 import VGG16
from chainercv import utils
class FasterRCNNVGG16(FasterRCNN):
"""Faster R-CNN based on VGG-16.
When you specify the path of a pre-trained chainer model serialized as
a :obj:`.npz` file in the constructor, this chain model automatically
initializes all the parameters with it.
    When a string in the prespecified set is provided, a pretrained model is
    loaded from weights distributed on the Internet.
    The supported pretrained models are as follows:
* :obj:`voc07`: Loads weights trained with the trainval split of \
PASCAL VOC2007 Detection Dataset.
    * :obj:`imagenet`: Loads weights trained with ImageNet Classification \
task for the feature extractor and the head modules. \
Weights that do not have a corresponding layer in VGG-16 \
will be randomly initialized.
For descriptions on the interface of this model, please refer to
:class:`~chainercv.links.model.faster_rcnn.FasterRCNN`.
:class:`~chainercv.links.model.faster_rcnn.FasterRCNNVGG16`
    supports finer control over random initialization of weights via the
    arguments :obj:`vgg_initialW`, :obj:`rpn_initialW`, :obj:`loc_initialW` and
    :obj:`score_initialW`.
    Each accepts a callable that takes an array and edits its values.
If :obj:`None` is passed as an initializer, the default initializer is
used.
Args:
n_fg_class (int): The number of classes excluding the background.
pretrained_model (string): The destination of the pre-trained
chainer model serialized as a :obj:`.npz` file.
If this is one of the strings described
above, it automatically loads weights stored under a directory
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
where :obj:`$CHAINER_DATASET_ROOT` is set as
:obj:`$HOME/.chainer/dataset` unless you specify another value
by modifying the environment variable.
min_size (int): A preprocessing parameter for :meth:`prepare`.
max_size (int): A preprocessing parameter for :meth:`prepare`.
        ratios (list of floats): Ratios of width to height of
            the anchors.
        anchor_scales (list of numbers): Areas of the anchors.
            Each area is the product of the square of an element in
:obj:`anchor_scales` and the original area of the reference
window.
vgg_initialW (callable): Initializer for the layers corresponding to
the VGG-16 layers.
rpn_initialW (callable): Initializer for Region Proposal Network
layers.
loc_initialW (callable): Initializer for the localization head.
score_initialW (callable): Initializer for the score head.
        proposal_creator_params (dict): Keyword parameters for
:class:`~chainercv.links.model.faster_rcnn.ProposalCreator`.
"""
_models = {
'voc07': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'faster_rcnn_vgg16_voc07_trained_2018_06_01.npz',
'cv2': True
},
'voc0712': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'faster_rcnn_vgg16_voc0712_trained_2017_07_21.npz',
'cv2': True
},
}
feat_stride = 16
def __init__(self,
n_fg_class=None,
pretrained_model=None,
min_size=600, max_size=1000,
ratios=[0.5, 1, 2], anchor_scales=[8, 16, 32],
vgg_initialW=None, rpn_initialW=None,
loc_initialW=None, score_initialW=None,
proposal_creator_params={}):
param, path = utils.prepare_pretrained_model(
{'n_fg_class': n_fg_class}, pretrained_model, self._models)
if loc_initialW is None:
loc_initialW = chainer.initializers.Normal(0.001)
if score_initialW is None:
score_initialW = chainer.initializers.Normal(0.01)
if rpn_initialW is None:
rpn_initialW = chainer.initializers.Normal(0.01)
if vgg_initialW is None and pretrained_model:
vgg_initialW = chainer.initializers.constant.Zero()
extractor = VGG16(initialW=vgg_initialW)
extractor.pick = 'conv5_3'
# Delete all layers after conv5_3.
extractor.remove_unused()
rpn = RegionProposalNetwork(
512, 512,
ratios=ratios,
anchor_scales=anchor_scales,
feat_stride=self.feat_stride,
initialW=rpn_initialW,
proposal_creator_params=proposal_creator_params,
)
head = VGG16RoIHead(
param['n_fg_class'] + 1,
roi_size=7, spatial_scale=1. / self.feat_stride,
vgg_initialW=vgg_initialW,
loc_initialW=loc_initialW,
score_initialW=score_initialW
)
super(FasterRCNNVGG16, self).__init__(
extractor,
rpn,
head,
mean=np.array([122.7717, 115.9465, 102.9801],
dtype=np.float32)[:, None, None],
min_size=min_size,
max_size=max_size
)
if path == 'imagenet':
self._copy_imagenet_pretrained_vgg16()
elif path:
chainer.serializers.load_npz(path, self)
def _copy_imagenet_pretrained_vgg16(self):
pretrained_model = VGG16(pretrained_model='imagenet')
self.extractor.conv1_1.copyparams(pretrained_model.conv1_1)
self.extractor.conv1_2.copyparams(pretrained_model.conv1_2)
self.extractor.conv2_1.copyparams(pretrained_model.conv2_1)
self.extractor.conv2_2.copyparams(pretrained_model.conv2_2)
self.extractor.conv3_1.copyparams(pretrained_model.conv3_1)
self.extractor.conv3_2.copyparams(pretrained_model.conv3_2)
self.extractor.conv3_3.copyparams(pretrained_model.conv3_3)
self.extractor.conv4_1.copyparams(pretrained_model.conv4_1)
self.extractor.conv4_2.copyparams(pretrained_model.conv4_2)
self.extractor.conv4_3.copyparams(pretrained_model.conv4_3)
self.extractor.conv5_1.copyparams(pretrained_model.conv5_1)
self.extractor.conv5_2.copyparams(pretrained_model.conv5_2)
self.extractor.conv5_3.copyparams(pretrained_model.conv5_3)
self.head.fc6.copyparams(pretrained_model.fc6)
self.head.fc7.copyparams(pretrained_model.fc7)
class VGG16RoIHead(chainer.Chain):
"""Faster R-CNN Head for VGG-16 based implementation.
This class is used as a head for Faster R-CNN.
This outputs class-wise localizations and classification based on feature
maps in the given RoIs.
Args:
n_class (int): The number of classes possibly including the background.
roi_size (int): Height and width of the feature maps after RoI-pooling.
        spatial_scale (float): Scale by which the RoIs are resized.
vgg_initialW (callable): Initializer for the layers corresponding to
the VGG-16 layers.
loc_initialW (callable): Initializer for the localization head.
score_initialW (callable): Initializer for the score head.
"""
def __init__(self, n_class, roi_size, spatial_scale,
vgg_initialW=None, loc_initialW=None, score_initialW=None):
# n_class includes the background
super(VGG16RoIHead, self).__init__()
with self.init_scope():
self.fc6 = L.Linear(25088, 4096, initialW=vgg_initialW)
self.fc7 = L.Linear(4096, 4096, initialW=vgg_initialW)
self.cls_loc = L.Linear(4096, n_class * 4, initialW=loc_initialW)
self.score = L.Linear(4096, n_class, initialW=score_initialW)
self.n_class = n_class
self.roi_size = roi_size
self.spatial_scale = spatial_scale
def forward(self, x, rois, roi_indices):
"""Forward the chain.
We assume that there are :math:`N` batches.
Args:
x (~chainer.Variable): 4D image variable.
rois (array): A bounding box array containing coordinates of
proposal boxes. This is a concatenation of bounding box
arrays from multiple images in the batch.
Its shape is :math:`(R', 4)`. Given :math:`R_i` proposed
RoIs from the :math:`i` th image,
:math:`R' = \\sum _{i=1} ^ N R_i`.
            roi_indices (array): An array containing indices of the images to
                which the bounding boxes correspond. Its shape is :math:`(R',)`.
"""
roi_indices = roi_indices.astype(np.float32)
indices_and_rois = self.xp.concatenate(
(roi_indices[:, None], rois), axis=1)
pool = _roi_pooling_2d_yx(
x, indices_and_rois, self.roi_size, self.roi_size,
self.spatial_scale)
fc6 = F.relu(self.fc6(pool))
fc7 = F.relu(self.fc7(fc6))
roi_cls_locs = self.cls_loc(fc7)
roi_scores = self.score(fc7)
return roi_cls_locs, roi_scores
def _roi_pooling_2d_yx(x, indices_and_rois, outh, outw, spatial_scale):
xy_indices_and_rois = indices_and_rois[:, [0, 2, 1, 4, 3]]
pool = F.roi_pooling_2d(
x, xy_indices_and_rois, outh, outw, spatial_scale)
return pool
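# Illustrative usage sketch (not part of chainercv). It assumes network access
# to download the 'voc07' weights; the random CHW float32 image is only a
# placeholder input. FasterRCNN.predict returns per-image bounding boxes,
# labels and scores.
if __name__ == '__main__':
    model = FasterRCNNVGG16(pretrained_model='voc07')
    img = np.random.uniform(0, 255, size=(3, 480, 640)).astype(np.float32)
    bboxes, labels, scores = model.predict([img])
    print('detections:', len(bboxes[0]))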
|
import concurrent.futures
import json
import logging
from pathlib import Path
from types import SimpleNamespace
from typing import List, MutableMapping, Optional
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from redbot.core.utils.dbtools import APSWConnectionWrapper
from ..audio_logging import debug_exc_log
from ..sql_statements import (
HANDLE_DISCORD_DATA_DELETION_QUERY,
PLAYLIST_CREATE_INDEX,
PLAYLIST_CREATE_TABLE,
PLAYLIST_DELETE,
PLAYLIST_DELETE_SCHEDULED,
PLAYLIST_DELETE_SCOPE,
PLAYLIST_FETCH,
PLAYLIST_FETCH_ALL,
PLAYLIST_FETCH_ALL_CONVERTER,
PLAYLIST_FETCH_ALL_WITH_FILTER,
PLAYLIST_UPSERT,
PRAGMA_FETCH_user_version,
PRAGMA_SET_journal_mode,
PRAGMA_SET_read_uncommitted,
PRAGMA_SET_temp_store,
PRAGMA_SET_user_version,
)
from ..utils import PlaylistScope
from .api_utils import PlaylistFetchResult
log = logging.getLogger("red.cogs.Audio.api.Playlists")
_ = Translator("Audio", Path(__file__))
class PlaylistWrapper:
def __init__(self, bot: Red, config: Config, conn: APSWConnectionWrapper):
self.bot = bot
self.database = conn
self.config = config
self.statement = SimpleNamespace()
self.statement.pragma_temp_store = PRAGMA_SET_temp_store
self.statement.pragma_journal_mode = PRAGMA_SET_journal_mode
self.statement.pragma_read_uncommitted = PRAGMA_SET_read_uncommitted
self.statement.set_user_version = PRAGMA_SET_user_version
self.statement.get_user_version = PRAGMA_FETCH_user_version
self.statement.create_table = PLAYLIST_CREATE_TABLE
self.statement.create_index = PLAYLIST_CREATE_INDEX
self.statement.upsert = PLAYLIST_UPSERT
self.statement.delete = PLAYLIST_DELETE
self.statement.delete_scope = PLAYLIST_DELETE_SCOPE
self.statement.delete_scheduled = PLAYLIST_DELETE_SCHEDULED
self.statement.get_one = PLAYLIST_FETCH
self.statement.get_all = PLAYLIST_FETCH_ALL
self.statement.get_all_with_filter = PLAYLIST_FETCH_ALL_WITH_FILTER
self.statement.get_all_converter = PLAYLIST_FETCH_ALL_CONVERTER
self.statement.drop_user_playlists = HANDLE_DISCORD_DATA_DELETION_QUERY
async def init(self) -> None:
"""Initialize the Playlist table."""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.database.cursor().execute, self.statement.pragma_temp_store)
executor.submit(self.database.cursor().execute, self.statement.pragma_journal_mode)
executor.submit(self.database.cursor().execute, self.statement.pragma_read_uncommitted)
executor.submit(self.database.cursor().execute, self.statement.create_table)
executor.submit(self.database.cursor().execute, self.statement.create_index)
@staticmethod
def get_scope_type(scope: str) -> int:
"""Convert a scope to a numerical identifier."""
if scope == PlaylistScope.GLOBAL.value:
table = 1
elif scope == PlaylistScope.USER.value:
table = 3
else:
table = 2
return table
async def fetch(self, scope: str, playlist_id: int, scope_id: int) -> PlaylistFetchResult:
"""Fetch a single playlist."""
scope_type = self.get_scope_type(scope)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
for future in concurrent.futures.as_completed(
[
executor.submit(
self.database.cursor().execute,
self.statement.get_one,
(
{
"playlist_id": playlist_id,
"scope_id": scope_id,
"scope_type": scope_type,
}
),
)
]
):
try:
row_result = future.result()
except Exception as exc:
                    debug_exc_log(log, exc, "Failed to complete playlist fetch from database")
row = row_result.fetchone()
if row:
row = PlaylistFetchResult(*row)
return row
async def fetch_all(
self, scope: str, scope_id: int, author_id=None
) -> List[PlaylistFetchResult]:
"""Fetch all playlists."""
scope_type = self.get_scope_type(scope)
output = []
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
if author_id is not None:
for future in concurrent.futures.as_completed(
[
executor.submit(
self.database.cursor().execute,
self.statement.get_all_with_filter,
(
{
"scope_type": scope_type,
"scope_id": scope_id,
"author_id": author_id,
}
),
)
]
):
try:
row_result = future.result()
except Exception as exc:
                        debug_exc_log(log, exc, "Failed to complete playlist fetch from database")
return []
else:
for future in concurrent.futures.as_completed(
[
executor.submit(
self.database.cursor().execute,
self.statement.get_all,
({"scope_type": scope_type, "scope_id": scope_id}),
)
]
):
try:
row_result = future.result()
except Exception as exc:
                        debug_exc_log(log, exc, "Failed to complete playlist fetch from database")
return []
async for row in AsyncIter(row_result):
output.append(PlaylistFetchResult(*row))
return output
async def fetch_all_converter(
self, scope: str, playlist_name, playlist_id
) -> List[PlaylistFetchResult]:
"""Fetch all playlists with the specified filter."""
scope_type = self.get_scope_type(scope)
try:
playlist_id = int(playlist_id)
except Exception as exc:
debug_exc_log(log, exc, "Failed converting playlist_id to int")
playlist_id = -1
output = []
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
for future in concurrent.futures.as_completed(
[
executor.submit(
self.database.cursor().execute,
self.statement.get_all_converter,
(
{
"scope_type": scope_type,
"playlist_name": playlist_name,
"playlist_id": playlist_id,
}
),
)
]
):
try:
row_result = future.result()
except Exception as exc:
                    debug_exc_log(log, exc, "Failed to complete fetch from database")
async for row in AsyncIter(row_result):
output.append(PlaylistFetchResult(*row))
return output
async def delete(self, scope: str, playlist_id: int, scope_id: int):
"""Deletes a single playlists."""
scope_type = self.get_scope_type(scope)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute,
self.statement.delete,
({"playlist_id": playlist_id, "scope_id": scope_id, "scope_type": scope_type}),
)
async def delete_scheduled(self):
"""Clean up database from all deleted playlists."""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.database.cursor().execute, self.statement.delete_scheduled)
async def drop(self, scope: str):
"""Delete all playlists in a scope."""
scope_type = self.get_scope_type(scope)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute,
self.statement.delete_scope,
({"scope_type": scope_type}),
)
async def create_table(self):
"""Create the playlist table."""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.database.cursor().execute, PLAYLIST_CREATE_TABLE)
async def upsert(
self,
scope: str,
playlist_id: int,
playlist_name: str,
scope_id: int,
author_id: int,
playlist_url: Optional[str],
tracks: List[MutableMapping],
):
"""Insert or update a playlist into the database."""
scope_type = self.get_scope_type(scope)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute,
self.statement.upsert,
{
"scope_type": str(scope_type),
"playlist_id": int(playlist_id),
"playlist_name": str(playlist_name),
"scope_id": int(scope_id),
"author_id": int(author_id),
"playlist_url": playlist_url,
"tracks": json.dumps(tracks),
},
)
async def handle_playlist_user_id_deletion(self, user_id: int):
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(
self.database.cursor().execute,
self.statement.drop_user_playlists,
{"user_id": user_id},
)
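# Hedged usage sketch (not part of the Audio cog). It assumes a Red bot
# instance, its Config, and an APSWConnectionWrapper for the playlist database
# already exist and are passed in; only methods defined above are used.
async def _example_playlist_roundtrip(bot: Red, config: Config, conn: APSWConnectionWrapper):
    wrapper = PlaylistWrapper(bot, config, conn)
    await wrapper.init()
    await wrapper.upsert(
        scope=PlaylistScope.GLOBAL.value,
        playlist_id=1,
        playlist_name="Example playlist",
        scope_id=0,
        author_id=1234,
        playlist_url=None,
        tracks=[],
    )
    # Fetch the playlist back as a PlaylistFetchResult.
    return await wrapper.fetch(PlaylistScope.GLOBAL.value, 1, 0)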
|
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from . import DOMAIN
ACTION_TYPES = {"turn_on", "turn_off"}
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(ACTION_TYPES),
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
}
)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions for Water Heater devices."""
registry = await entity_registry.async_get_registry(hass)
actions = []
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_on",
}
)
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_off",
}
)
return actions
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
) -> None:
"""Execute a device action."""
config = ACTION_SCHEMA(config)
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
if config[CONF_TYPE] == "turn_on":
service = SERVICE_TURN_ON
elif config[CONF_TYPE] == "turn_off":
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, blocking=True, context=context
)
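# Hedged example (not part of the integration): a device action config of the
# shape ACTION_SCHEMA above accepts. The device id and entity id are
# placeholders.
EXAMPLE_TURN_ON_ACTION = {
    CONF_DEVICE_ID: "abcdef0123456789",
    CONF_DOMAIN: DOMAIN,
    CONF_ENTITY_ID: "water_heater.bathroom",
    CONF_TYPE: "turn_on",
}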
|
import json
import pytest
from lemur.api_keys.views import * # noqa
from .vectors import (
VALID_ADMIN_API_TOKEN,
VALID_ADMIN_HEADER_TOKEN,
VALID_USER_HEADER_TOKEN,
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_api_key_list_get(client, token, status):
assert client.get(api.url_for(ApiKeyList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_api_key_list_post_invalid(client, token, status):
assert (
client.post(api.url_for(ApiKeyList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,user_id,status",
[
(VALID_USER_HEADER_TOKEN, 1, 200),
(VALID_ADMIN_HEADER_TOKEN, 2, 200),
(VALID_ADMIN_API_TOKEN, 2, 200),
("", 0, 401),
],
)
def test_api_key_list_post_valid_self(client, user_id, token, status):
assert (
client.post(
api.url_for(ApiKeyList),
data=json.dumps(
{
"name": "a test token",
"user": {
"id": user_id,
"username": "example",
"email": "[email protected]",
},
"ttl": -1,
}
),
headers=token,
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_api_key_list_post_valid_no_permission(client, token, status):
assert (
client.post(
api.url_for(ApiKeyList),
data=json.dumps(
{
"name": "a test token",
"user": {
"id": 2,
"username": "example",
"email": "[email protected]",
},
"ttl": -1,
}
),
headers=token,
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_api_key_list_patch(client, token, status):
assert (
client.patch(api.url_for(ApiKeyList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_api_key_list_delete(client, token, status):
assert client.delete(api.url_for(ApiKeyList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_user_api_key_list_get(client, token, status):
assert (
client.get(api.url_for(ApiKeyUserList, user_id=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_user_api_key_list_post_invalid(client, token, status):
assert (
client.post(
api.url_for(ApiKeyUserList, user_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,user_id,status",
[
(VALID_USER_HEADER_TOKEN, 1, 200),
(VALID_ADMIN_HEADER_TOKEN, 2, 200),
(VALID_ADMIN_API_TOKEN, 2, 200),
("", 0, 401),
],
)
def test_user_api_key_list_post_valid_self(client, user_id, token, status):
assert (
client.post(
api.url_for(ApiKeyUserList, user_id=1),
data=json.dumps(
{"name": "a test token", "user": {"id": user_id}, "ttl": -1}
),
headers=token,
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_user_api_key_list_post_valid_no_permission(client, token, status):
assert (
client.post(
api.url_for(ApiKeyUserList, user_id=2),
data=json.dumps({"name": "a test token", "user": {"id": 2}, "ttl": -1}),
headers=token,
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_api_key_list_patch(client, token, status):
assert (
client.patch(
api.url_for(ApiKeyUserList, user_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_api_key_list_delete(client, token, status):
assert (
client.delete(api.url_for(ApiKeyUserList, user_id=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
@pytest.mark.skip(
reason="no way of getting an actual user onto the access key to generate a jwt"
)
def test_api_key_get(client, token, status):
assert client.get(api.url_for(ApiKeys, aid=1), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_api_key_post(client, token, status):
assert client.post(api.url_for(ApiKeys, aid=1), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_api_key_patch(client, token, status):
assert (
client.patch(api.url_for(ApiKeys, aid=1), headers=token).status_code == status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
@pytest.mark.skip(
reason="no way of getting an actual user onto the access key to generate a jwt"
)
def test_api_key_put_permissions(client, token, status):
assert (
client.put(
api.url_for(ApiKeys, aid=1),
data=json.dumps({"name": "Test", "revoked": False, "ttl": -1}),
headers=token,
).status_code
== status
)
# This test works while the other doesn't because the schema allows user id to be null.
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_api_key_described_get(client, token, status):
assert (
client.get(api.url_for(ApiKeysDescribed, aid=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
@pytest.mark.skip(
reason="no way of getting an actual user onto the access key to generate a jwt"
)
def test_user_api_key_get(client, token, status):
assert (
client.get(api.url_for(UserApiKeys, uid=1, aid=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_api_key_post(client, token, status):
assert (
client.post(
api.url_for(UserApiKeys, uid=2, aid=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_api_key_patch(client, token, status):
assert (
client.patch(
api.url_for(UserApiKeys, uid=2, aid=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
@pytest.mark.skip(
reason="no way of getting an actual user onto the access key to generate a jwt"
)
def test_user_api_key_put_permissions(client, token, status):
assert (
client.put(
api.url_for(UserApiKeys, uid=2, aid=1),
data=json.dumps({"name": "Test", "revoked": False, "ttl": -1}),
headers=token,
).status_code
== status
)
|
import copy
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.dpb_service import BaseDpbService
BENCHMARK_NAME = 'dpb_testdfsio_benchmark'
BENCHMARK_CONFIG = """
dpb_testdfsio_benchmark:
description: Run testdfsio on dataproc and emr
dpb_service:
service_type: dataproc
worker_group:
vm_spec:
GCP:
machine_type: n1-standard-4
AWS:
machine_type: m4.xlarge
disk_spec:
GCP:
disk_size: 1500
disk_type: pd-standard
AWS:
disk_size: 1500
disk_type: gp2
worker_count: 2
"""
flags.DEFINE_enum('dfsio_fs', BaseDpbService.GCS_FS,
[BaseDpbService.GCS_FS, BaseDpbService.S3_FS,
BaseDpbService.HDFS_FS],
'File System to use in the dfsio operations')
flags.DEFINE_list(
'dfsio_file_sizes_list', [1], 'A list of file sizes to use for each of the'
' dfsio files.')
flags.DEFINE_list(
'dfsio_num_files_list', [4], 'A list of number of dfsio files to use'
' during individual runs.')
FLAGS = flags.FLAGS
SUPPORTED_DPB_BACKENDS = [dpb_service.DATAPROC, dpb_service.EMR]
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: The config used to construct the BenchmarkSpec.
Raises:
InvalidValue: On encountering invalid configuration.
"""
dpb_service_type = benchmark_config.dpb_service.service_type
if dpb_service_type not in SUPPORTED_DPB_BACKENDS:
raise errors.Config.InvalidValue('Invalid backend for dfsio. Not in:{}'.
format(str(SUPPORTED_DPB_BACKENDS)))
def Prepare(benchmark_spec):
if FLAGS.dfsio_fs != BaseDpbService.HDFS_FS:
benchmark_spec.dpb_service.CreateBucket(benchmark_spec.uuid.split('-')[0])
def Run(benchmark_spec):
"""Runs testdfsio benchmark and reports the results.
Args:
benchmark_spec: Spec needed to run the testdfsio benchmark
Returns:
A list of samples
"""
service = benchmark_spec.dpb_service
source = '{}'.format(benchmark_spec.uuid.split('-')[0])
if FLAGS.dfsio_fs != BaseDpbService.HDFS_FS:
source = '{}://{}'.format(FLAGS.dfsio_fs, source)
source_dir = '{}{}'.format(source, '/dfsio')
results = []
for file_size in FLAGS.dfsio_file_sizes_list:
for num_files in FLAGS.dfsio_num_files_list:
metadata = copy.copy(service.GetMetadata())
metadata.update({'dfsio_fs': FLAGS.dfsio_fs})
metadata.update({'dfsio_num_files': num_files})
metadata.update({'dfsio_file_size_mbs': file_size})
if FLAGS.zones:
zone = FLAGS.zones[0]
region = zone.rsplit('-', 1)[0]
metadata.update({'regional': True})
metadata.update({'region': region})
elif FLAGS.cloud == 'AWS':
metadata.update({'regional': True})
metadata.update({'region': 'aws_default'})
# This order is important. Write generates the data for read and clean
# deletes it for the next write.
for command in ('write', 'read', 'clean'):
args = [
'-' + command, '-nrFiles',
str(num_files), '-fileSize',
str(file_size)
]
properties = {'test.build.data': source_dir}
if FLAGS.dfsio_fs != BaseDpbService.HDFS_FS:
properties['fs.default.name'] = source_dir
result = service.SubmitJob(
classname='org.apache.hadoop.fs.TestDFSIO',
properties=properties,
job_arguments=args,
job_type=dpb_service.BaseDpbService.HADOOP_JOB_TYPE)
results.append(
sample.Sample(command + '_run_time', result.run_time, 'seconds',
metadata))
return results
def Cleanup(benchmark_spec):
"""Cleans up the testdfsio benchmark."""
if FLAGS.dfsio_fs != BaseDpbService.HDFS_FS:
benchmark_spec.dpb_service.DeleteBucket(benchmark_spec.uuid.split('-')[0])
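# Hedged helper sketch (not part of PerfKit Benchmarker): it rebuilds, for one
# (num_files, file_size) pair, the three TestDFSIO argument lists that Run()
# above submits in order (write, then read, then clean).
def _example_testdfsio_args(num_files=4, file_size=1):
  return [
      ['-' + command, '-nrFiles', str(num_files), '-fileSize', str(file_size)]
      for command in ('write', 'read', 'clean')
  ]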
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import BleBoxEntity, create_blebox_entities
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a BleBox climate entity."""
create_blebox_entities(
hass, config_entry, async_add_entities, BleBoxClimateEntity, "climates"
)
class BleBoxClimateEntity(BleBoxEntity, ClimateEntity):
"""Representation of a BleBox climate feature (saunaBox)."""
@property
def supported_features(self):
"""Return the supported climate features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def hvac_mode(self):
"""Return the desired HVAC mode."""
if self._feature.is_on is None:
return None
return HVAC_MODE_HEAT if self._feature.is_on else HVAC_MODE_OFF
@property
def hvac_action(self):
"""Return the actual current HVAC action."""
is_on = self._feature.is_on
if not is_on:
return None if is_on is None else CURRENT_HVAC_OFF
# NOTE: In practice, there's no need to handle case when is_heating is None
return CURRENT_HVAC_HEAT if self._feature.is_heating else CURRENT_HVAC_IDLE
@property
def hvac_modes(self):
"""Return a list of possible HVAC modes."""
return [HVAC_MODE_OFF, HVAC_MODE_HEAT]
@property
def temperature_unit(self):
"""Return the temperature unit."""
return TEMP_CELSIUS
@property
def max_temp(self):
"""Return the maximum temperature supported."""
return self._feature.max_temp
@property
def min_temp(self):
"""Return the maximum temperature supported."""
return self._feature.min_temp
@property
def current_temperature(self):
"""Return the current temperature."""
return self._feature.current
@property
def target_temperature(self):
"""Return the desired thermostat temperature."""
return self._feature.desired
async def async_set_hvac_mode(self, hvac_mode):
"""Set the climate entity mode."""
if hvac_mode == HVAC_MODE_HEAT:
await self._feature.async_on()
return
await self._feature.async_off()
async def async_set_temperature(self, **kwargs):
"""Set the thermostat temperature."""
value = kwargs[ATTR_TEMPERATURE]
await self._feature.async_set_temperature(value)
|
import unittest
from mock import MagicMock, call
from uiautomator import AutomatorDeviceObject, Selector, AutomatorDeviceNamedUiObject
class TestDeviceObjInit(unittest.TestCase):
def setUp(self):
self.device = MagicMock()
self.device.server.jsonrpc = MagicMock()
def test_init(self):
kwargs = {"text": "text", "className": "android"}
self.device_obj = AutomatorDeviceObject(self.device,
Selector(**kwargs))
self.assertEqual(self.device_obj.selector,
Selector(**kwargs))
self.assertEqual(self.device_obj.jsonrpc,
self.device.server.jsonrpc)
class TestDeviceObj(unittest.TestCase):
def setUp(self):
self.device = MagicMock()
self.jsonrpc = self.device.server.jsonrpc = MagicMock()
self.jsonrpc_wrap = self.device.server.jsonrpc_wrap = MagicMock()
self.kwargs = {"text": "text", "className": "android"}
self.obj = AutomatorDeviceObject(self.device,
Selector(**self.kwargs))
def test_child_selector(self):
kwargs = {"text": "child text", "className": "android"}
obj = self.obj.child_selector(**kwargs)
self.assertEqual(len(obj.selector['childOrSibling']), 1)
self.assertEqual(obj.selector['childOrSibling'][0], 'child')
self.assertEqual(len(obj.selector['childOrSiblingSelector']), 1)
self.assertEqual(obj.selector['childOrSiblingSelector'][0], Selector(**kwargs))
def test_from_parent(self):
kwargs = {"text": "parent text", "className": "android"}
obj = self.obj.from_parent(**kwargs)
self.assertEqual(len(obj.selector['childOrSibling']), 1)
self.assertEqual(obj.selector['childOrSibling'][0], 'sibling')
self.assertEqual(len(obj.selector['childOrSiblingSelector']), 1)
self.assertEqual(obj.selector['childOrSiblingSelector'][0], Selector(**kwargs))
def test_exists(self):
self.jsonrpc.exist = MagicMock()
self.jsonrpc.exist.return_value = True
self.assertTrue(self.obj.exists)
self.jsonrpc.exist.return_value = False
self.assertFalse(self.obj.exists)
self.assertEqual(self.jsonrpc.exist.call_args_list,
[call(self.obj.selector),
call(self.obj.selector)])
def test_info(self):
info = {"text": "item text"}
self.jsonrpc.objInfo.return_value = info
self.assertEqual(self.obj.info,
info)
self.jsonrpc.objInfo.assert_called_once_with(self.obj.selector)
def test_info_attr(self):
info = {'contentDescription': '',
'checked': False,
'scrollable': False,
'text': '',
'packageName': 'android',
'selected': False,
'enabled': True,
'bounds': {'top': 0,
'left': 0,
'right': 720,
'bottom': 1184},
'className':
'android.widget.FrameLayout',
'focusable': False,
'focused': False,
'clickable': False,
'checkable': False,
'chileCount': 2,
'longClickable': False,
'visibleBounds': {'top': 0,
'left': 0,
'right': 720,
'bottom': 1184}}
self.jsonrpc.objInfo.return_value = info
self.assertEqual(self.obj.info, info)
self.jsonrpc.objInfo.assert_called_once_with(self.obj.selector)
self.assertEqual(self.obj.description, info["contentDescription"])
for k in info:
self.assertEqual(getattr(self.obj, k), info[k])
with self.assertRaises(AttributeError):
self.obj.not_exists
def test_text(self):
self.jsonrpc.clearTextField = MagicMock()
self.obj.set_text(None)
self.obj.set_text("")
self.obj.clear_text()
self.assertEqual(self.jsonrpc.clearTextField.call_args_list,
[call(self.obj.selector), call(self.obj.selector), call(self.obj.selector)])
self.jsonrpc.setText.return_value = False
texts = ["abc", "123", "()#*$&"]
for text in texts:
self.assertFalse(self.obj.set_text(text))
self.assertEqual(self.jsonrpc.setText.call_args_list,
[call(self.obj.selector, t) for t in texts])
def test_click(self):
self.jsonrpc.click.return_value = False
corners = ["tl", "topleft", "br", "bottomright"]
for c in corners:
self.assertFalse(self.obj.click(c))
self.assertEqual(self.jsonrpc.click.call_args_list,
[call(self.obj.selector, c) for c in corners])
self.jsonrpc.click = MagicMock()
self.jsonrpc.click.return_value = True
corners = ["tl", "topleft", "br", "bottomright"]
for c in corners:
self.assertTrue(getattr(self.obj.click, c)())
self.assertEqual(self.jsonrpc.click.call_args_list,
[call(self.obj.selector, c) for c in corners])
self.jsonrpc.click = MagicMock()
self.jsonrpc.click.return_value = True
self.assertTrue(self.obj.click())
self.jsonrpc.click.assert_called_once_with(self.obj.selector)
def test_click_wait(self):
self.jsonrpc.clickAndWaitForNewWindow.return_value = True
self.assertTrue(self.obj.click.wait(timeout=321))
self.jsonrpc.clickAndWaitForNewWindow.assert_called_once_with(self.obj.selector, 321)
def test_long_click(self):
self.jsonrpc.longClick.return_value = False
corners = ["tl", "topleft", "br", "bottomright"]
for c in corners:
self.assertFalse(self.obj.long_click(c))
self.assertEqual(self.jsonrpc.longClick.call_args_list,
[call(self.obj.selector, c) for c in corners])
self.jsonrpc.longClick = MagicMock()
self.jsonrpc.longClick.return_value = True
corners = ["tl", "topleft", "br", "bottomright"]
for c in corners:
self.assertTrue(getattr(self.obj.long_click, c)())
self.assertEqual(self.jsonrpc.longClick.call_args_list,
[call(self.obj.selector, c) for c in corners])
self.jsonrpc.longClick = MagicMock()
self.jsonrpc.longClick.return_value = True
self.assertTrue(self.obj.long_click())
self.jsonrpc.longClick.assert_called_once_with(self.obj.selector)
def test_long_click_using_swipe(self):
self.device.long_click.return_value = False
self.jsonrpc.objInfo.return_value = {
'longClickable': False,
'visibleBounds': {
'top': 0,
'bottom': 60,
'left': 0,
'right': 60
}
}
corners = ["tl", "topleft", "br", "bottomright"]
for c in corners:
self.assertFalse(self.obj.long_click(c))
self.assertEqual(self.device.long_click.call_args_list,
[call(10, 10), call(10, 10), call(50, 50), call(50, 50)])
self.device.long_click = MagicMock()
self.device.long_click.return_value = True
corners = ["tl", "topleft", "br", "bottomright"]
for c in corners:
self.assertTrue(getattr(self.obj.long_click, c)())
self.assertEqual(self.device.long_click.call_args_list,
[call(10, 10), call(10, 10), call(50, 50), call(50, 50)])
self.device.long_click = MagicMock()
self.device.long_click.return_value = True
self.assertTrue(self.obj.long_click())
self.device.long_click.assert_called_once_with(30, 30)
def test_drag_to(self):
self.jsonrpc.dragTo.return_value = False
self.assertFalse(self.obj.drag.to(10, 20, steps=10))
self.jsonrpc.dragTo.return_value = True
self.assertTrue(self.obj.drag.to(x=10, y=20, steps=20))
sel = {"text": "text..."}
self.assertTrue(self.obj.drag.to(steps=30, **sel))
self.assertEqual(self.jsonrpc.dragTo.call_args_list,
[call(self.obj.selector, 10, 20, 10),
call(self.obj.selector, 10, 20, 20),
call(self.obj.selector, Selector(**sel), 30)])
def test_gesture(self):
self.jsonrpc.gesture.return_value = True
self.assertTrue(self.obj.gesture((1, 1), (2, 2), (3, 3), (4, 4), 100))
self.assertTrue(self.obj.gesture(4, 3).to(2, 1, 20))
self.assertEqual(self.jsonrpc.gesture.call_args_list,
[call(self.obj.selector, {'x':1, 'y': 1}, {'x':2, 'y':2}, {'x':3, 'y':3}, {'x':4, 'y':4}, 100), call(self.obj.selector, 4, 3, 2, 1, 20)])
def test_pinch(self):
self.jsonrpc.pinchIn.return_value = True
self.assertTrue(self.obj.pinch.In(percent=90, steps=30))
self.assertTrue(self.obj.pinch("in", 80, 40))
self.assertTrue(self.obj.pinch("In", 70, 50))
self.assertEqual(self.jsonrpc.pinchIn.call_args_list,
[call(self.obj.selector, 90, 30), call(self.obj.selector, 80, 40), call(self.obj.selector, 70, 50)])
self.jsonrpc.pinchOut.return_value = True
self.assertTrue(self.obj.pinch.Out(percent=90, steps=30))
self.assertTrue(self.obj.pinch("out", 80, 40))
self.assertTrue(self.obj.pinch("Out", 70, 50))
        self.assertEqual(self.jsonrpc.pinchOut.call_args_list,
[call(self.obj.selector, 90, 30), call(self.obj.selector, 80, 40), call(self.obj.selector, 70, 50)])
def test_swipe(self):
self.jsonrpc.swipe.return_value = True
dirs = ["up", "down", "right", "left"]
for d in dirs:
self.assertTrue(self.obj.swipe(d, 30))
self.assertEqual(self.jsonrpc.swipe.call_args_list,
[call(self.obj.selector, d, 30) for d in dirs])
self.jsonrpc.swipe = MagicMock()
self.jsonrpc.swipe.return_value = True
dirs = ["up", "down", "right", "left"]
for d in dirs:
self.assertTrue(getattr(self.obj.swipe, d)(steps=30))
self.assertEqual(self.jsonrpc.swipe.call_args_list,
[call(self.obj.selector, d, 30) for d in dirs])
def test_fling(self):
self.jsonrpc.flingForward.return_value = True
self.assertTrue(self.obj.fling.horiz.forward())
self.assertTrue(self.obj.fling.horizentally.forward())
self.assertTrue(self.obj.fling.vert.forward())
self.assertTrue(self.obj.fling())
self.assertEqual(self.jsonrpc.flingForward.call_args_list,
[call(self.obj.selector, False), call(self.obj.selector, False), call(self.obj.selector, True), call(self.obj.selector, True)])
self.jsonrpc.flingBackward.return_value = True
self.assertTrue(self.obj.fling.horiz.backward())
self.assertTrue(self.obj.fling.horizentally.backward())
self.assertTrue(self.obj.fling.vert.backward())
self.assertTrue(self.obj.fling.vertically.backward())
self.assertEqual(self.jsonrpc.flingBackward.call_args_list,
[call(self.obj.selector, False), call(self.obj.selector, False), call(self.obj.selector, True), call(self.obj.selector, True)])
max_swipes = 1000
self.jsonrpc.flingToBeginning.return_value = True
self.assertTrue(self.obj.fling.horiz.toBeginning())
self.assertTrue(self.obj.fling.horizentally.toBeginning())
self.assertTrue(self.obj.fling.vert.toBeginning())
self.assertTrue(self.obj.fling.vertically.toBeginning(max_swipes=100))
self.assertEqual(self.jsonrpc.flingToBeginning.call_args_list,
[call(self.obj.selector, False, max_swipes), call(self.obj.selector, False, max_swipes), call(self.obj.selector, True, max_swipes), call(self.obj.selector, True, 100)])
self.jsonrpc.flingToEnd.return_value = True
self.assertTrue(self.obj.fling.horiz.toEnd())
self.assertTrue(self.obj.fling.horizentally.toEnd())
self.assertTrue(self.obj.fling.vert.toEnd())
self.assertTrue(self.obj.fling.vertically.toEnd(max_swipes=100))
self.assertEqual(self.jsonrpc.flingToEnd.call_args_list,
[call(self.obj.selector, False, max_swipes), call(self.obj.selector, False, max_swipes), call(self.obj.selector, True, max_swipes), call(self.obj.selector, True, 100)])
def test_scroll(self):
steps = 100
max_swipes = 1000
self.jsonrpc.scrollForward.return_value = True
self.assertTrue(self.obj.scroll.horiz.forward())
self.assertTrue(self.obj.scroll.horizentally.forward())
self.assertTrue(self.obj.scroll.vert.forward())
self.assertTrue(self.obj.scroll(steps=20))
self.assertEqual(self.jsonrpc.scrollForward.call_args_list,
[call(self.obj.selector, False, steps), call(self.obj.selector, False, steps), call(self.obj.selector, True, steps), call(self.obj.selector, True, 20)])
self.jsonrpc.scrollBackward.return_value = True
self.assertTrue(self.obj.scroll.horiz.backward())
self.assertTrue(self.obj.scroll.horizentally.backward())
self.assertTrue(self.obj.scroll.vert.backward())
self.assertTrue(self.obj.scroll.vertically.backward(steps=20))
self.assertEqual(self.jsonrpc.scrollBackward.call_args_list,
[call(self.obj.selector, False, steps), call(self.obj.selector, False, steps), call(self.obj.selector, True, steps), call(self.obj.selector, True, 20)])
self.jsonrpc.scrollToBeginning.return_value = True
self.assertTrue(self.obj.scroll.horiz.toBeginning())
self.assertTrue(self.obj.scroll.horizentally.toBeginning())
self.assertTrue(self.obj.scroll.vert.toBeginning())
self.assertTrue(self.obj.scroll.vertically.toBeginning(steps=20, max_swipes=100))
self.assertEqual(self.jsonrpc.scrollToBeginning.call_args_list,
[call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, True, max_swipes, steps), call(self.obj.selector, True, 100, 20)])
self.jsonrpc.scrollToEnd.return_value = True
self.assertTrue(self.obj.scroll.horiz.toEnd())
self.assertTrue(self.obj.scroll.horizentally.toEnd())
self.assertTrue(self.obj.scroll.vert.toEnd())
self.assertTrue(self.obj.scroll.vertically.toEnd(steps=20, max_swipes=100))
self.assertEqual(self.jsonrpc.scrollToEnd.call_args_list,
[call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, True, max_swipes, steps), call(self.obj.selector, True, 100, 20)])
info = {"text": "..."}
self.jsonrpc.scrollTo.return_value = True
self.assertTrue(self.obj.scroll.horiz.to(**info))
self.assertTrue(self.obj.scroll.horizentally.to(**info))
self.assertTrue(self.obj.scroll.vert.to(**info))
self.assertTrue(self.obj.scroll.vertically.to(**info))
self.assertEqual(self.jsonrpc.scrollTo.call_args_list,
[call(self.obj.selector, Selector(**info), False), call(self.obj.selector, Selector(**info), False), call(self.obj.selector, Selector(**info), True), call(self.obj.selector, Selector(**info), True)])
def test_wait(self):
timeout = 3000
self.jsonrpc_wrap.return_value.waitUntilGone.return_value = True
self.assertTrue(self.obj.wait.gone())
self.jsonrpc_wrap.return_value.waitUntilGone.assert_called_once_with(self.obj.selector, timeout)
self.jsonrpc_wrap.return_value.waitForExists.return_value = True
self.assertTrue(self.obj.wait.exists(timeout=10))
self.jsonrpc_wrap.return_value.waitForExists.assert_called_once_with(self.obj.selector, 10)
def test_child_by_text(self):
self.jsonrpc.childByText.return_value = "myname"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.child_by_text("child text", **kwargs)
self.jsonrpc.childByText.assert_called_once_with(Selector(**self.kwargs), Selector(**kwargs), "child text")
self.assertEqual("myname", generic_obj.selector)
def test_child_by_text_allow_scroll_search(self):
self.jsonrpc.childByText.return_value = "myname"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.child_by_text("child text", allow_scroll_search=False, **kwargs)
self.jsonrpc.childByText.assert_called_once_with(
Selector(**self.kwargs), Selector(**kwargs), "child text", False)
self.assertEqual("myname", generic_obj.selector)
def test_child_by_description(self):
self.jsonrpc.childByDescription.return_value = "myname"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.child_by_description("child text", **kwargs)
self.jsonrpc.childByDescription.assert_called_once_with(
Selector(**self.kwargs), Selector(**kwargs), "child text")
self.assertEqual("myname", generic_obj.selector)
def test_child_by_description_allow_scroll_search(self):
self.jsonrpc.childByDescription.return_value = "myname"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.child_by_description("child text", allow_scroll_search=False, **kwargs)
self.jsonrpc.childByDescription.assert_called_once_with(
Selector(**self.kwargs), Selector(**kwargs), "child text", False)
self.assertEqual("myname", generic_obj.selector)
def test_child_by_instance(self):
self.jsonrpc.childByInstance.return_value = "myname"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.child_by_instance(1234, **kwargs)
self.jsonrpc.childByInstance.assert_called_once_with(Selector(**self.kwargs), Selector(**kwargs), 1234)
self.assertEqual("myname", generic_obj.selector)
def test_count(self):
self.jsonrpc.count.return_value = 10
self.assertEqual(self.obj.count, 10)
self.jsonrpc.count.assert_called_once_with(Selector(**self.kwargs))
def test_len(self):
self.jsonrpc.count.return_value = 10
self.assertEqual(len(self.obj), 10)
def test_instance_list(self):
count = 10
self.jsonrpc.count.return_value = count
for i in range(count):
self.assertEqual(self.obj[i].selector["instance"], i)
with self.assertRaises(IndexError):
self.obj[count]
self.jsonrpc.count.return_value = 1
self.assertEqual(self.obj[0], self.obj)
def test_instance_iter(self):
count = 10
self.jsonrpc.count.return_value = count
for index, inst in enumerate(self.obj):
self.assertEqual(inst.selector["instance"], index)
def test_left(self):
self.jsonrpc.objInfo.side_effect = [
{"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
{"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
{"bounds": {'top': 200, 'bottom': 300, 'left': 150, 'right': 200}},
{"bounds": {'top': 200, 'bottom': 300, 'left': 50, 'right': 100}}
]
self.jsonrpc.count.return_value = 3
self.assertEqual(self.obj.left().selector["instance"], 2)
def test_right(self):
self.jsonrpc.objInfo.side_effect = [
{"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
{"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
{"bounds": {'top': 200, 'bottom': 300, 'left': 50, 'right': 100}},
{"bounds": {'top': 200, 'bottom': 300, 'left': 150, 'right': 200}}
]
self.jsonrpc.count.return_value = 3
self.assertEqual(self.obj.right().selector["instance"], 2)
def test_up(self):
self.jsonrpc.objInfo.side_effect = [
{"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
{"bounds": {'top': 250, 'bottom': 300, 'left': 100, 'right': 150}},
{"bounds": {'top': 150, 'bottom': 200, 'left': 150, 'right': 200}},
{"bounds": {'top': 150, 'bottom': 200, 'left': 100, 'right': 200}}
]
self.jsonrpc.count.return_value = 3
self.assertEqual(self.obj.up().selector["instance"], 2)
def test_down(self):
self.jsonrpc.objInfo.side_effect = [
{"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
{"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
{"bounds": {'top': 150, 'bottom': 200, 'left': 150, 'right': 200}},
{"bounds": {'top': 250, 'bottom': 300, 'left': 100, 'right': 150}}
]
self.jsonrpc.count.return_value = 3
self.assertEqual(self.obj.down().selector["instance"], 2)
def test_multiple_matched_down(self):
self.jsonrpc.objInfo.side_effect = [
{"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
{"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
{"bounds": {'top': 150, 'bottom': 200, 'left': 150, 'right': 200}},
{"bounds": {'top': 275, 'bottom': 300, 'left': 100, 'right': 150}},
{"bounds": {'top': 300, 'bottom': 350, 'left': 100, 'right': 150}},
{"bounds": {'top': 250, 'bottom': 275, 'left': 100, 'right': 150}}
]
self.jsonrpc.count.return_value = 5
self.assertEqual(self.obj.down().selector["instance"], 4)
class TestAutomatorDeviceNamedUiObject(unittest.TestCase):
def setUp(self):
self.device = MagicMock()
self.jsonrpc = self.device.server.jsonrpc = MagicMock()
self.name = "my-name"
self.obj = AutomatorDeviceNamedUiObject(self.device, self.name)
def test_child(self):
self.jsonrpc.getChild.return_value = "another-name"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.child(**kwargs)
self.jsonrpc.getChild.assert_called_once_with(self.name, Selector(**kwargs))
self.assertEqual(generic_obj.selector, self.jsonrpc.getChild.return_value)
def test_sibling(self):
self.jsonrpc.getFromParent.return_value = "another-name"
kwargs = {"className": "android", "text": "patern match text"}
generic_obj = self.obj.sibling(**kwargs)
self.jsonrpc.getFromParent.assert_called_once_with(self.name, Selector(**kwargs))
self.assertEqual(generic_obj.selector, self.jsonrpc.getFromParent.return_value)
|
import asyncio
import logging
import aiohttp
from aiohttp import hdrs
import voluptuous as vol
from homeassistant.const import (
CONF_HEADERS,
CONF_METHOD,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_TIMEOUT,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BAD_REQUEST,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
DOMAIN = "rest_command"
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
DEFAULT_METHOD = "get"
DEFAULT_VERIFY_SSL = True
SUPPORT_REST_METHODS = ["get", "patch", "post", "put", "delete"]
CONF_CONTENT_TYPE = "content_type"
COMMAND_SCHEMA = vol.Schema(
{
vol.Required(CONF_URL): cv.template,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.All(
vol.Lower, vol.In(SUPPORT_REST_METHODS)
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.template}),
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_PAYLOAD): cv.template,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
vol.Optional(CONF_CONTENT_TYPE): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(COMMAND_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass, config):
"""Set up the REST command component."""
@callback
def async_register_rest_command(name, command_config):
"""Create service for rest command."""
websession = async_get_clientsession(hass, command_config.get(CONF_VERIFY_SSL))
timeout = command_config[CONF_TIMEOUT]
method = command_config[CONF_METHOD]
template_url = command_config[CONF_URL]
template_url.hass = hass
auth = None
if CONF_USERNAME in command_config:
username = command_config[CONF_USERNAME]
password = command_config.get(CONF_PASSWORD, "")
auth = aiohttp.BasicAuth(username, password=password)
template_payload = None
if CONF_PAYLOAD in command_config:
template_payload = command_config[CONF_PAYLOAD]
template_payload.hass = hass
template_headers = None
if CONF_HEADERS in command_config:
template_headers = command_config[CONF_HEADERS]
for template_header in template_headers.values():
template_header.hass = hass
content_type = None
if CONF_CONTENT_TYPE in command_config:
content_type = command_config[CONF_CONTENT_TYPE]
async def async_service_handler(service):
"""Execute a shell command service."""
payload = None
if template_payload:
payload = bytes(
template_payload.async_render(
variables=service.data, parse_result=False
),
"utf-8",
)
request_url = template_url.async_render(
variables=service.data, parse_result=False
)
headers = None
if template_headers:
headers = {}
for header_name, template_header in template_headers.items():
headers[header_name] = template_header.async_render(
variables=service.data, parse_result=False
)
if content_type:
if headers is None:
headers = {}
headers[hdrs.CONTENT_TYPE] = content_type
try:
async with getattr(websession, method)(
request_url,
data=payload,
auth=auth,
headers=headers,
timeout=timeout,
) as response:
if response.status < HTTP_BAD_REQUEST:
_LOGGER.debug(
"Success. Url: %s. Status code: %d. Payload: %s",
response.url,
response.status,
payload,
)
else:
_LOGGER.warning(
"Error. Url: %s. Status code %d. Payload: %s",
response.url,
response.status,
payload,
)
except asyncio.TimeoutError:
_LOGGER.warning("Timeout call %s", request_url)
except aiohttp.ClientError:
_LOGGER.error("Client error %s", request_url)
# register services
hass.services.async_register(DOMAIN, name, async_service_handler)
for command, command_config in config[DOMAIN].items():
async_register_rest_command(command, command_config)
return True
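# Hedged example (not part of Home Assistant): a single command entry of the
# shape COMMAND_SCHEMA above accepts; URL, header and payload values are
# placeholders.
EXAMPLE_COMMAND_CONFIG = {
    CONF_URL: "https://example.com/api/endpoint",
    CONF_METHOD: "post",
    CONF_HEADERS: {"authorization": "Bearer TOKEN"},
    CONF_PAYLOAD: '{"value": 1}',
    CONF_TIMEOUT: DEFAULT_TIMEOUT,
    CONF_CONTENT_TYPE: "application/json; charset=utf-8",
    CONF_VERIFY_SSL: DEFAULT_VERIFY_SSL,
}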
|
import asyncio
import datetime
import mimetypes
import os
import re
import stat
import subprocess
import sys
import typing
import webbrowser
import pkg_resources
from nikola.plugin_categories import Command
from nikola.utils import dns_sd, req_missing, get_theme_path, makedirs
try:
import aiohttp
from aiohttp import web
from aiohttp.web_urldispatcher import StaticResource
from aiohttp.web_exceptions import HTTPNotFound, HTTPForbidden, HTTPMovedPermanently
from aiohttp.web_response import Response
from aiohttp.web_fileresponse import FileResponse
except ImportError:
aiohttp = web = None
StaticResource = HTTPNotFound = HTTPForbidden = Response = FileResponse = object
try:
from watchdog.observers import Observer
except ImportError:
Observer = None
LRJS_PATH = os.path.join(os.path.dirname(__file__), 'livereload.js')
REBUILDING_REFRESH_DELAY = 0.35
IDLE_REFRESH_DELAY = 0.05
if sys.platform == 'win32':
asyncio.set_event_loop(asyncio.ProactorEventLoop())
class CommandAuto(Command):
"""Automatic rebuilds for Nikola."""
name = "auto"
has_server = True
doc_purpose = "builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser"
dns_sd = None
delta_last_rebuild = datetime.timedelta(milliseconds=100)
web_runner = None # type: web.AppRunner
cmd_options = [
{
'name': 'port',
'short': 'p',
'long': 'port',
'default': 8000,
'type': int,
'help': 'Port number',
},
{
'name': 'address',
'short': 'a',
'long': 'address',
'type': str,
'default': '127.0.0.1',
'help': 'Address to bind',
},
{
'name': 'browser',
'short': 'b',
'long': 'browser',
'type': bool,
'help': 'Start a web browser',
'default': False,
},
{
'name': 'ipv6',
'short': '6',
'long': 'ipv6',
'default': False,
'type': bool,
'help': 'Use IPv6',
},
{
'name': 'no-server',
'long': 'no-server',
'default': False,
'type': bool,
'help': 'Disable the server, automate rebuilds only'
},
{
'name': 'process',
'short': 'n',
'long': 'process',
'default': 0,
'type': int,
'help': 'Number of subprocesses (nikola build argument)'
},
{
'name': 'parallel-type',
'short': 'P',
'long': 'parallel-type',
'default': 'process',
'type': str,
'help': "Parallelization mode ('process' or 'thread', nikola build argument)"
},
]
def _execute(self, options, args):
"""Start the watcher."""
self.sockets = []
self.rebuild_queue = asyncio.Queue()
self.reload_queue = asyncio.Queue()
self.last_rebuild = datetime.datetime.now()
self.is_rebuilding = False
if aiohttp is None and Observer is None:
req_missing(['aiohttp', 'watchdog'], 'use the "auto" command')
elif aiohttp is None:
req_missing(['aiohttp'], 'use the "auto" command')
elif Observer is None:
req_missing(['watchdog'], 'use the "auto" command')
if sys.argv[0].endswith('__main__.py'):
self.nikola_cmd = [sys.executable, '-m', 'nikola', 'build']
else:
self.nikola_cmd = [sys.argv[0], 'build']
if self.site.configuration_filename != 'conf.py':
self.nikola_cmd.append('--conf=' + self.site.configuration_filename)
if options and options.get('process'):
self.nikola_cmd += ['--process={}'.format(options['process']),
'--parallel-type={}'.format(options['parallel-type'])]
port = options and options.get('port')
self.snippet = '''<script>document.write('<script src="http://'
+ (location.host || 'localhost').split(':')[0]
+ ':{0}/livereload.js?snipver=1"></'
+ 'script>')</script>
</head>'''.format(port)
# Deduplicate entries by using a set -- otherwise, multiple rebuilds are triggered
watched = set([
'templates/'
] + [get_theme_path(name) for name in self.site.THEMES])
for item in self.site.config['post_pages']:
watched.add(os.path.dirname(item[0]))
for item in self.site.config['FILES_FOLDERS']:
watched.add(item)
for item in self.site.config['GALLERY_FOLDERS']:
watched.add(item)
for item in self.site.config['LISTINGS_FOLDERS']:
watched.add(item)
for item in self.site.config['IMAGE_FOLDERS']:
watched.add(item)
for item in self.site._plugin_places:
watched.add(item)
# Nikola itself (useful for developers)
watched.add(pkg_resources.resource_filename('nikola', ''))
out_folder = self.site.config['OUTPUT_FOLDER']
if not os.path.exists(out_folder):
makedirs(out_folder)
if options and options.get('browser'):
browser = True
else:
browser = False
if options['ipv6']:
dhost = '::'
else:
dhost = '0.0.0.0'
host = options['address'].strip('[').strip(']') or dhost
# Prepare asyncio event loop
# Required for subprocessing to work
loop = asyncio.get_event_loop()
# Set debug setting
loop.set_debug(self.site.debug)
# Server can be disabled (Issue #1883)
self.has_server = not options['no-server']
if self.has_server:
loop.run_until_complete(self.set_up_server(host, port, out_folder))
# Run an initial build so we are up-to-date. The server is running, but we are not watching yet.
loop.run_until_complete(self.run_initial_rebuild())
self.wd_observer = Observer()
# Watch output folders and trigger reloads
if self.has_server:
self.wd_observer.schedule(NikolaEventHandler(self.reload_page, loop), out_folder, recursive=True)
# Watch input folders and trigger rebuilds
for p in watched:
if os.path.exists(p):
self.wd_observer.schedule(NikolaEventHandler(self.queue_rebuild, loop), p, recursive=True)
# Watch config file (a bit of a hack, but we need a directory)
_conf_fn = os.path.abspath(self.site.configuration_filename or 'conf.py')
_conf_dn = os.path.dirname(_conf_fn)
self.wd_observer.schedule(ConfigEventHandler(_conf_fn, self.queue_rebuild, loop), _conf_dn, recursive=False)
self.wd_observer.start()
win_sleeper = None
# https://bugs.python.org/issue23057 (fixed in Python 3.8)
if sys.platform == 'win32' and sys.version_info < (3, 8):
win_sleeper = asyncio.ensure_future(windows_ctrlc_workaround())
if not self.has_server:
self.logger.info("Watching for changes...")
# Run the event loop forever (no server mode).
try:
# Run rebuild queue
loop.run_until_complete(self.run_rebuild_queue())
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if win_sleeper:
win_sleeper.cancel()
self.wd_observer.stop()
self.wd_observer.join()
loop.close()
return
if options['ipv6'] or '::' in host:
server_url = "http://[{0}]:{1}/".format(host, port)
else:
server_url = "http://{0}:{1}/".format(host, port)
self.logger.info("Serving on {0} ...".format(server_url))
if browser:
# Some browsers fail to load 0.0.0.0 (Issue #2755)
if host == '0.0.0.0':
server_url = "http://127.0.0.1:{0}/".format(port)
self.logger.info("Opening {0} in the default web browser...".format(server_url))
webbrowser.open(server_url)
# Run the event loop forever and handle shutdowns.
try:
# Run rebuild queue
rebuild_queue_fut = asyncio.ensure_future(self.run_rebuild_queue())
reload_queue_fut = asyncio.ensure_future(self.run_reload_queue())
self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self.logger.info("Server is shutting down.")
if win_sleeper:
win_sleeper.cancel()
if self.dns_sd:
self.dns_sd.Reset()
rebuild_queue_fut.cancel()
reload_queue_fut.cancel()
loop.run_until_complete(self.web_runner.cleanup())
self.wd_observer.stop()
self.wd_observer.join()
loop.close()
async def set_up_server(self, host: str, port: int, out_folder: str) -> None:
"""Set up aiohttp server and start it."""
webapp = web.Application()
webapp.router.add_get('/livereload.js', self.serve_livereload_js)
webapp.router.add_get('/robots.txt', self.serve_robots_txt)
webapp.router.add_route('*', '/livereload', self.websocket_handler)
resource = IndexHtmlStaticResource(True, self.snippet, '', out_folder)
webapp.router.register_resource(resource)
webapp.on_shutdown.append(self.remove_websockets)
self.web_runner = web.AppRunner(webapp)
await self.web_runner.setup()
website = web.TCPSite(self.web_runner, host, port)
await website.start()
async def run_initial_rebuild(self) -> None:
"""Run an initial rebuild."""
await self._rebuild_site()
# If there are any clients, have them reload the root.
await self._send_reload_command(self.site.config['INDEX_FILE'])
async def queue_rebuild(self, event) -> None:
"""Rebuild the site."""
        # Move events have a dest_path; some editors like gedit use a
        # move on larger save operations for write protection.
event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path
if sys.platform == 'win32':
# Windows hidden files support
is_hidden = os.stat(event_path).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN
else:
is_hidden = False
has_hidden_component = any(p.startswith('.') for p in event_path.split(os.sep))
if (is_hidden or has_hidden_component or
'__pycache__' in event_path or
event_path.endswith(('.pyc', '.pyo', '.pyd', '_bak', '~')) or
event.is_directory): # Skip on folders, these are usually duplicates
return
self.logger.debug('Queuing rebuild from {0}'.format(event_path))
await self.rebuild_queue.put((datetime.datetime.now(), event_path))
async def run_rebuild_queue(self) -> None:
"""Run rebuilds from a queue (Nikola can only build in a single instance)."""
while True:
date, event_path = await self.rebuild_queue.get()
if date < (self.last_rebuild + self.delta_last_rebuild):
self.logger.debug("Skipping rebuild from {0} (within delta)".format(event_path))
continue
await self._rebuild_site(event_path)
async def _rebuild_site(self, event_path: typing.Optional[str] = None) -> None:
"""Rebuild the site."""
self.is_rebuilding = True
self.last_rebuild = datetime.datetime.now()
if event_path:
self.logger.info('REBUILDING SITE (from {0})'.format(event_path))
else:
self.logger.info('REBUILDING SITE')
p = await asyncio.create_subprocess_exec(*self.nikola_cmd, stderr=subprocess.PIPE)
exit_code = await p.wait()
out = (await p.stderr.read()).decode('utf-8')
if exit_code != 0:
self.logger.error("Rebuild failed\n" + out)
await self.send_to_websockets({'command': 'alert', 'message': out})
else:
self.logger.info("Rebuild successful\n" + out)
self.is_rebuilding = False
async def run_reload_queue(self) -> None:
"""Send reloads from a queue to limit CPU usage."""
while True:
p = await self.reload_queue.get()
self.logger.info('REFRESHING: {0}'.format(p))
await self._send_reload_command(p)
if self.is_rebuilding:
await asyncio.sleep(REBUILDING_REFRESH_DELAY)
else:
await asyncio.sleep(IDLE_REFRESH_DELAY)
async def _send_reload_command(self, path: str) -> None:
"""Send a reload command."""
await self.send_to_websockets({'command': 'reload', 'path': path, 'liveCSS': True})
async def reload_page(self, event) -> None:
"""Reload the page."""
        # Move events have a dest_path; some editors like gedit use a
        # move on larger save operations for write protection.
if event:
event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path
else:
event_path = self.site.config['OUTPUT_FOLDER']
p = os.path.relpath(event_path, os.path.abspath(self.site.config['OUTPUT_FOLDER'])).replace(os.sep, '/')
await self.reload_queue.put(p)
async def serve_livereload_js(self, request):
"""Handle requests to /livereload.js and serve the JS file."""
return FileResponse(LRJS_PATH)
async def serve_robots_txt(self, request):
"""Handle requests to /robots.txt."""
return Response(body=b'User-Agent: *\nDisallow: /\n', content_type='text/plain', charset='utf-8')
async def websocket_handler(self, request):
"""Handle requests to /livereload and initiate WebSocket communication."""
ws = web.WebSocketResponse()
await ws.prepare(request)
self.sockets.append(ws)
while True:
msg = await ws.receive()
self.logger.debug("Received message: {0}".format(msg))
if msg.type == aiohttp.WSMsgType.TEXT:
message = msg.json()
if message['command'] == 'hello':
response = {
'command': 'hello',
'protocols': [
'http://livereload.com/protocols/official-7',
],
'serverName': 'Nikola Auto (livereload)',
}
await ws.send_json(response)
elif message['command'] != 'info':
self.logger.warning("Unknown command in message: {0}".format(message))
elif msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSING):
break
elif msg.type == aiohttp.WSMsgType.CLOSE:
self.logger.debug("Closing WebSocket")
await ws.close()
break
elif msg.type == aiohttp.WSMsgType.ERROR:
self.logger.error('WebSocket connection closed with exception {0}'.format(ws.exception()))
break
else:
self.logger.warning("Received unknown message: {0}".format(msg))
self.sockets.remove(ws)
self.logger.debug("WebSocket connection closed: {0}".format(ws))
return ws
async def remove_websockets(self, app) -> None:
"""Remove all websockets."""
for ws in self.sockets:
await ws.close()
self.sockets.clear()
async def send_to_websockets(self, message: dict) -> None:
"""Send a message to all open WebSockets."""
to_delete = []
for ws in self.sockets:
if ws.closed:
to_delete.append(ws)
continue
try:
await ws.send_json(message)
if ws._close_code:
await ws.close()
to_delete.append(ws)
except RuntimeError as e:
if 'closed' in e.args[0]:
self.logger.warning("WebSocket {0} closed uncleanly".format(ws))
to_delete.append(ws)
else:
raise
for ws in to_delete:
self.sockets.remove(ws)
async def windows_ctrlc_workaround() -> None:
"""Work around bpo-23057."""
# https://bugs.python.org/issue23057
while True:
await asyncio.sleep(1)
class IndexHtmlStaticResource(StaticResource):
"""A StaticResource implementation that serves /index.html in directory roots."""
modify_html = True
snippet = "</head>"
def __init__(self, modify_html=True, snippet="</head>", *args, **kwargs):
"""Initialize a resource."""
self.modify_html = modify_html
self.snippet = snippet
super().__init__(*args, **kwargs)
async def _handle(self, request: 'web.Request') -> 'web.Response':
"""Handle incoming requests (pass to handle_file)."""
filename = request.match_info['filename']
return await self.handle_file(request, filename)
async def handle_file(self, request: 'web.Request', filename: str, from_index=None) -> 'web.Response':
"""Handle file requests."""
try:
filepath = self._directory.joinpath(filename).resolve()
if not self._follow_symlinks:
filepath.relative_to(self._directory)
except (ValueError, FileNotFoundError) as error:
# relatively safe
raise HTTPNotFound() from error
except Exception as error:
# perm error or other kind!
request.app.logger.exception(error)
raise HTTPNotFound() from error
        # On opening a dir, load its contents if allowed
if filepath.is_dir():
if filename.endswith('/') or not filename:
ret = await self.handle_file(request, filename + 'index.html', from_index=filename)
else:
# Redirect and add trailing slash so relative links work (Issue #3140)
new_url = request.rel_url.path + '/'
if request.rel_url.query_string:
new_url += '?' + request.rel_url.query_string
raise HTTPMovedPermanently(new_url)
elif filepath.is_file():
ct, encoding = mimetypes.guess_type(str(filepath))
encoding = encoding or 'utf-8'
if ct == 'text/html' and self.modify_html:
if sys.version_info[0] == 3 and sys.version_info[1] <= 5:
# Python 3.4 and 3.5 do not accept pathlib.Path objects in calls to open()
filepath = str(filepath)
with open(filepath, 'r', encoding=encoding) as fh:
text = fh.read()
text = self.transform_html(text)
ret = Response(text=text, content_type=ct, charset=encoding)
else:
ret = FileResponse(filepath, chunk_size=self._chunk_size)
elif from_index:
filepath = self._directory.joinpath(from_index).resolve()
try:
return Response(text=self._directory_as_html(filepath),
content_type="text/html")
except PermissionError:
raise HTTPForbidden
else:
raise HTTPNotFound
return ret
def transform_html(self, text: str) -> str:
"""Apply some transforms to HTML content."""
# Inject livereload.js
text = text.replace('</head>', self.snippet, 1)
# Disable <base> tag
text = re.sub(r'<base\s([^>]*)>', r'<!--base \g<1>-->', text, flags=re.IGNORECASE)
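        # For example (illustrative), '<base href="/">' becomes '<!--base href="/"-->'.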
return text
# Based on code from the 'hachiko' library by John Biesnecker — thanks!
# https://github.com/biesnecker/hachiko
class NikolaEventHandler:
"""A Nikola-specific event handler for Watchdog. Based on code from hachiko."""
def __init__(self, function, loop):
"""Initialize the handler."""
self.function = function
self.loop = loop
async def on_any_event(self, event):
"""Handle all file events."""
await self.function(event)
def dispatch(self, event):
"""Dispatch events to handler."""
self.loop.call_soon_threadsafe(asyncio.ensure_future, self.on_any_event(event))
class ConfigEventHandler(NikolaEventHandler):
"""A Nikola-specific handler for Watchdog that handles the config file (as a workaround)."""
def __init__(self, configuration_filename, function, loop):
"""Initialize the handler."""
self.configuration_filename = configuration_filename
self.function = function
self.loop = loop
async def on_any_event(self, event):
"""Handle file events if they concern the configuration file."""
if event._src_path == self.configuration_filename:
await self.function(event)
|
import json
def _from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'frozenset':
return frozenset(json_object['__value__'])
if json_object['__class__'] == 'tuple':
return tuple(json_object['__value__'])
return json_object
def _to_json(python_object):
if isinstance(python_object, frozenset):
python_object = {'__class__': 'frozenset',
'__value__': list(python_object)}
elif isinstance(python_object, tuple):
python_object = {'__class__': 'tuple',
'__value__': list(python_object)}
else:
raise TypeError(repr(python_object) + ' is not JSON serializable')
return python_object
class dedupe_decoder(json.JSONDecoder):
def __init__(self, **kwargs):
json.JSONDecoder.__init__(self, object_hook=_from_json, **kwargs)
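# Minimal round-trip sketch (illustrative; not part of the original module).
# frozensets are not natively JSON serializable, so json.dumps falls back to
# _to_json, and dedupe_decoder's object hook rebuilds them on load. Tuples,
# by contrast, serialize natively as JSON arrays, so _to_json only sees them
# when it is called explicitly.
if __name__ == '__main__':
    encoded = json.dumps(frozenset({1, 2}), default=_to_json)
    decoded = json.loads(encoded, cls=dedupe_decoder)
    assert decoded == frozenset({1, 2})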
|
from datetime import timedelta
from pytautulli import Tautulli
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
CONF_MONITORED_USERS = "monitored_users"
DEFAULT_NAME = "Tautulli"
DEFAULT_PORT = "8181"
DEFAULT_PATH = ""
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
TIME_BETWEEN_UPDATES = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_MONITORED_USERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
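# Example configuration.yaml entry (illustrative sketch; placeholder host and
# API key, keys follow PLATFORM_SCHEMA above):
#
# sensor:
#   - platform: tautulli
#     api_key: YOUR_API_KEY
#     host: 192.168.0.10
#     port: "8181"
#     ssl: false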
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Create the Tautulli sensor."""
name = config.get(CONF_NAME)
host = config[CONF_HOST]
port = config.get(CONF_PORT)
path = config.get(CONF_PATH)
api_key = config[CONF_API_KEY]
monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
user = config.get(CONF_MONITORED_USERS)
use_ssl = config[CONF_SSL]
verify_ssl = config.get(CONF_VERIFY_SSL)
session = async_get_clientsession(hass, verify_ssl)
tautulli = TautulliData(
Tautulli(host, port, api_key, hass.loop, session, use_ssl, path)
)
if not await tautulli.test_connection():
raise PlatformNotReady
sensor = [TautulliSensor(tautulli, name, monitored_conditions, user)]
async_add_entities(sensor, True)
class TautulliSensor(Entity):
"""Representation of a Tautulli sensor."""
def __init__(self, tautulli, name, monitored_conditions, users):
"""Initialize the Tautulli sensor."""
self.tautulli = tautulli
self.monitored_conditions = monitored_conditions
self.usernames = users
self.sessions = {}
self.home = {}
self._attributes = {}
self._name = name
self._state = None
async def async_update(self):
"""Get the latest data from the Tautulli API."""
await self.tautulli.async_update()
self.home = self.tautulli.api.home_data
self.sessions = self.tautulli.api.session_data
self._attributes["Top Movie"] = self.home.get("movie")
self._attributes["Top TV Show"] = self.home.get("tv")
self._attributes["Top User"] = self.home.get("user")
for key in self.sessions:
if "sessions" not in key:
self._attributes[key] = self.sessions[key]
for user in self.tautulli.api.users:
if self.usernames is None or user in self.usernames:
userdata = self.tautulli.api.user_data
self._attributes[user] = {}
self._attributes[user]["Activity"] = userdata[user]["Activity"]
if self.monitored_conditions:
for key in self.monitored_conditions:
try:
self._attributes[user][key] = userdata[user][key]
except (KeyError, TypeError):
self._attributes[user][key] = ""
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self.sessions.get("stream_count")
@property
def icon(self):
"""Return the icon of the sensor."""
return "mdi:plex"
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return "Watching"
@property
def device_state_attributes(self):
"""Return attributes for the sensor."""
return self._attributes
class TautulliData:
"""Get the latest data and update the states."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
@Throttle(TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Tautulli."""
await self.api.get_data()
async def test_connection(self):
"""Test connection to Tautulli."""
await self.api.test_connection()
connection_status = self.api.connection
return connection_status
|
import discord
import random
def randomize_colour(embed: discord.Embed) -> discord.Embed:
"""
Gives the provided embed a random color.
    There is an alias for this called randomize_color.
Parameters
----------
embed : discord.Embed
The embed to add a color to
Returns
-------
discord.Embed
The embed with the color set to a random color
"""
embed.colour = discord.Color(value=random.randint(0x000000, 0xFFFFFF))
return embed
randomize_color = randomize_colour
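# Minimal usage sketch (illustrative; assumes an embed created with discord.py):
#     embed = randomize_colour(discord.Embed(title="Example"))
#     # embed.colour is now a random discord.Color between 0x000000 and 0xFFFFFF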
|
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import DEFAULT_ENTITY_NAMESPACE, DOMAIN as SKYBELL_DOMAIN, SkybellDevice
# Switch types: Name
SWITCH_TYPES = {
"do_not_disturb": ["Do Not Disturb"],
"motion_sensor": ["Motion Sensor"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE
): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SWITCH_TYPES)]
),
}
)
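# Example configuration.yaml entry (illustrative; condition names come from
# SWITCH_TYPES above):
#
# switch:
#   - platform: skybell
#     monitored_conditions:
#       - do_not_disturb
#       - motion_sensor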
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform for a Skybell device."""
skybell = hass.data.get(SKYBELL_DOMAIN)
sensors = []
for switch_type in config.get(CONF_MONITORED_CONDITIONS):
for device in skybell.get_devices():
sensors.append(SkybellSwitch(device, switch_type))
add_entities(sensors, True)
class SkybellSwitch(SkybellDevice, SwitchEntity):
"""A switch implementation for Skybell devices."""
def __init__(self, device, switch_type):
"""Initialize a light for a Skybell device."""
super().__init__(device)
self._switch_type = switch_type
self._name = "{} {}".format(
self._device.name, SWITCH_TYPES[self._switch_type][0]
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
def turn_on(self, **kwargs):
"""Turn on the switch."""
setattr(self._device, self._switch_type, True)
def turn_off(self, **kwargs):
"""Turn off the switch."""
setattr(self._device, self._switch_type, False)
@property
def is_on(self):
"""Return true if device is on."""
return getattr(self._device, self._switch_type)
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from supervisord import SupervisordCollector
##########################################################################
class TestSupervisordCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SupervisordCollector', {})
self.collector = SupervisordCollector(config, None)
self.assertTrue(self.collector)
def test_import(self):
self.assertTrue(SupervisordCollector)
@patch.object(Collector, 'publish')
def test_success(self, publish_mock):
self.collector.getAllProcessInfo = Mock(
return_value=eval(self.getFixture('valid_fixture').getvalue()))
self.collector.collect()
metrics = {
'test_group.test_name_1.state': 20,
'test_group.test_name_1.uptime': 5,
'test_group.test_name_2.state': 200,
'test_group.test_name_2.uptime': 500
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from typing import Optional
from homeassistant.components.sensor import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.typing import HomeAssistantType
from . import AqualinkEntity
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered sensors."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkSensor(dev))
async_add_entities(devs, True)
class HassAqualinkSensor(AqualinkEntity):
"""Representation of a sensor."""
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self.dev.label
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the measurement unit for the sensor."""
if self.dev.name.endswith("_temp"):
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
return None
@property
def state(self) -> Optional[str]:
"""Return the state of the sensor."""
if self.dev.state == "":
return None
try:
state = int(self.dev.state)
except ValueError:
state = float(self.dev.state)
return state
@property
def device_class(self) -> Optional[str]:
"""Return the class of the sensor."""
if self.dev.name.endswith("_temp"):
return DEVICE_CLASS_TEMPERATURE
return None
|
from __future__ import print_function
import pandas as pd
from scattertext import CorpusFromParsedDocuments, produce_scattertext_explorer
from scattertext import chinese_nlp
# Compare Chinese translations of A Tale of Two Cities and Ulysses, from http://www.pku.edu.cn/study/novel/ulysses/cindex.htm
def main():
df = pd.read_csv('https://cdn.rawgit.com/JasonKessler/scattertext/e508bf32/scattertext/data/chinese.csv')
df['text'] = df['text'].apply(chinese_nlp)
corpus = CorpusFromParsedDocuments(df,
category_col='novel',
parsed_col='text').build()
html = produce_scattertext_explorer(corpus,
category='Tale of Two Cities',
category_name='Tale of Two Cities',
not_category_name='Ulysses',
width_in_pixels=1000,
metadata=df['novel'],
asian_mode=True)
open('./demo_chinese.html', 'w').write(html)
print('Open ./demo_chinese.html in Chrome or Firefox.')
if __name__ == '__main__':
main()
|
from collections import defaultdict
from itertools import tee, chain, combinations
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors import factor_product
from pgmpy.inference import Inference, BeliefPropagation
class DBNInference(Inference):
def __init__(self, model):
"""
        Class for performing inference using the Belief Propagation method
        for the input Dynamic Bayesian Network.
        For the exact inference implementation, the interface algorithm
        is used, which is adapted from [1].
Parameters
----------
model: Dynamic Bayesian Network
            Model for which inference is to be performed
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.start_junction_tree.nodes()
[(('X', 0), ('Z', 0)), (('X', 0), ('Y', 0))]
>>> dbn_inf.one_and_half_junction_tree.nodes()
[(('Z', 1), ('Z', 0)),
(('Y', 1), ('X', 1)),
(('Z', 1), ('X', 1))]
References
----------
[1] Dynamic Bayesian Networks: Representation, Inference and Learning
by Kevin Patrick Murphy
http://www.cs.ubc.ca/~murphyk/Thesis/thesis.pdf
"""
super(DBNInference, self).__init__(model)
self.interface_nodes_0 = model.get_interface_nodes(time_slice=0)
self.interface_nodes_1 = model.get_interface_nodes(time_slice=1)
start_markov_model = self.start_bayesian_model.to_markov_model()
one_and_half_markov_model = self.one_and_half_model.to_markov_model()
combinations_slice_0 = tee(combinations(self.interface_nodes_0, 2), 2)
combinations_slice_1 = combinations(self.interface_nodes_1, 2)
start_markov_model.add_edges_from(combinations_slice_0[0])
one_and_half_markov_model.add_edges_from(
chain(combinations_slice_0[1], combinations_slice_1)
)
self.one_and_half_junction_tree = one_and_half_markov_model.to_junction_tree()
self.start_junction_tree = start_markov_model.to_junction_tree()
self.start_interface_clique = self._get_clique(
self.start_junction_tree, self.interface_nodes_0
)
self.in_clique = self._get_clique(
self.one_and_half_junction_tree, self.interface_nodes_0
)
self.out_clique = self._get_clique(
self.one_and_half_junction_tree, self.interface_nodes_1
)
def _shift_nodes(self, nodes, time_slice):
"""
Shifting the nodes to a certain required timeslice.
Parameters
----------
nodes: list, array-like
List of node names.
nodes that are to be shifted to some other time slice.
        time_slice: int
            the time slice to which the nodes should be shifted.
"""
return [(node[0], time_slice) for node in nodes]
def _get_clique(self, junction_tree, nodes):
"""
        Extracting the clique from the junction tree that contains
        all of the given nodes.
Parameters
----------
junction_tree: Junction tree
from which the nodes are to be extracted.
nodes: iterable container
A container of nodes (list, dict, set, etc.).
"""
return [
clique for clique in junction_tree.nodes() if set(nodes).issubset(clique)
][0]
def _get_evidence(self, evidence_dict, time_slice, shift):
"""
Getting the evidence belonging to a particular timeslice.
Parameters
----------
        evidence_dict: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        time_slice: int
            the time slice the evidence corresponds to
        shift: int
            the time slice to which the matching evidence is shifted.
"""
if evidence_dict:
return {
(node[0], shift): evidence_dict[node]
for node in evidence_dict
if node[1] == time_slice
}
def _marginalize_factor(self, nodes, factor):
"""
        Marginalizing the factor onto the given nodes; all variables not in nodes are marginalized out.
Parameters
----------
nodes: list, array-like
A container of nodes (list, dict, set, etc.).
factor: factor
factor which is to be marginalized.
"""
marginalizing_nodes = list(set(factor.scope()).difference(nodes))
return factor.marginalize(marginalizing_nodes, inplace=False)
def _update_belief(self, belief_prop, clique, clique_potential, message=None):
"""
Method for updating the belief.
Parameters
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
        clique: clique
            The clique whose factor needs to be updated.
        clique_potential: factor
            Factor by which the clique's factor is multiplied (or, when a
            message is given, divided after multiplying by the message).
        message: factor, optional
            Message factor used together with clique_potential to correct the belief.
"""
old_factor = belief_prop.junction_tree.get_factors(clique)
belief_prop.junction_tree.remove_factors(old_factor)
if message:
if message.scope() and clique_potential.scope():
new_factor = old_factor * message
new_factor = new_factor / clique_potential
else:
new_factor = old_factor
else:
new_factor = old_factor * clique_potential
belief_prop.junction_tree.add_factors(new_factor)
belief_prop.calibrate()
def _get_factor(self, belief_prop, evidence):
"""
Extracts the required factor from the junction tree.
Parameters
----------
belief_prop: Belief Propagation
            Belief Propagation instance from which the factor is extracted.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
"""
final_factor = factor_product(*belief_prop.junction_tree.get_factors())
if evidence:
for var in evidence:
if var in final_factor.scope():
final_factor.reduce([(var, evidence[var])])
return final_factor
def _shift_factor(self, factor, shift):
"""
Shifting the factor to a certain required time slice.
Parameters
----------
factor: DiscreteFactor
The factor which needs to be shifted.
shift: int
            The new time slice to which the factor should belong.
"""
new_scope = self._shift_nodes(factor.scope(), shift)
return DiscreteFactor(new_scope, factor.cardinality, factor.values)
def forward_inference(self, variables, evidence=None, args=None):
"""
Forward inference method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
        Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.forward_inference([('X', 2)], {('Y', 0):1, ('Y', 1):0, ('Y', 2):1})[('X', 2)].values
array([ 0.76738736, 0.23261264])
"""
variable_dict = defaultdict(list)
for var in variables:
variable_dict[var[1]].append(var)
time_range = max(variable_dict)
if evidence:
evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
time_range = max(time_range, evid_time_range)
start_bp = BeliefPropagation(self.start_junction_tree)
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
evidence_0 = self._get_evidence(evidence, 0, 0)
interface_nodes_dict = {}
potential_dict = {}
if evidence:
interface_nodes_dict = {
k: v for k, v in evidence_0.items() if k in self.interface_nodes_0
}
initial_factor = self._get_factor(start_bp, evidence_0)
marginalized_factor = self._marginalize_factor(
self.interface_nodes_0, initial_factor
)
potential_dict[0] = marginalized_factor
self._update_belief(mid_bp, self.in_clique, marginalized_factor)
if variable_dict[0]:
factor_values = start_bp.query(
variable_dict[0], evidence=evidence_0, joint=False
)
else:
factor_values = {}
for time_slice in range(1, time_range + 1):
evidence_time = self._get_evidence(evidence, time_slice, 1)
if interface_nodes_dict:
evidence_time.update(interface_nodes_dict)
if variable_dict[time_slice]:
variable_time = self._shift_nodes(variable_dict[time_slice], 1)
new_values = mid_bp.query(
variable_time, evidence=evidence_time, joint=False
)
changed_values = {}
for key in new_values.keys():
new_key = (key[0], time_slice)
new_factor = DiscreteFactor(
[new_key], new_values[key].cardinality, new_values[key].values
)
changed_values[new_key] = new_factor
factor_values.update(changed_values)
clique_phi = self._get_factor(mid_bp, evidence_time)
out_clique_phi = self._marginalize_factor(
self.interface_nodes_1, clique_phi
)
new_factor = self._shift_factor(out_clique_phi, 0)
potential_dict[time_slice] = new_factor
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
self._update_belief(mid_bp, self.in_clique, new_factor)
if evidence_time:
interface_nodes_dict = {
(k[0], 0): v
for k, v in evidence_time.items()
if k in self.interface_nodes_1
}
else:
interface_nodes_dict = {}
if args == "potential":
return potential_dict
return factor_values
def backward_inference(self, variables, evidence=None):
"""
Backward inference method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
array([ 0.66594382, 0.33405618])
"""
variable_dict = defaultdict(list)
for var in variables:
variable_dict[var[1]].append(var)
time_range = max(variable_dict)
interface_nodes_dict = {}
if evidence:
evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
time_range = max(time_range, evid_time_range)
end_bp = BeliefPropagation(self.start_junction_tree)
potential_dict = self.forward_inference(variables, evidence, "potential")
update_factor = self._shift_factor(potential_dict[time_range], 1)
factor_values = {}
for time_slice in range(time_range, 0, -1):
evidence_time = self._get_evidence(evidence, time_slice, 1)
evidence_prev_time = self._get_evidence(evidence, time_slice - 1, 0)
if evidence_prev_time:
interface_nodes_dict = {
k: v
for k, v in evidence_prev_time.items()
if k in self.interface_nodes_0
}
if evidence_time:
evidence_time.update(interface_nodes_dict)
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
self._update_belief(mid_bp, self.in_clique, potential_dict[time_slice - 1])
forward_factor = self._shift_factor(potential_dict[time_slice], 1)
self._update_belief(mid_bp, self.out_clique, forward_factor, update_factor)
if variable_dict[time_slice]:
variable_time = self._shift_nodes(variable_dict[time_slice], 1)
new_values = mid_bp.query(
variable_time, evidence=evidence_time, joint=False
)
changed_values = {}
for key in new_values.keys():
new_key = (key[0], time_slice)
new_factor = DiscreteFactor(
[new_key], new_values[key].cardinality, new_values[key].values
)
changed_values[new_key] = new_factor
factor_values.update(changed_values)
clique_phi = self._get_factor(mid_bp, evidence_time)
in_clique_phi = self._marginalize_factor(self.interface_nodes_0, clique_phi)
update_factor = self._shift_factor(in_clique_phi, 1)
out_clique_phi = self._shift_factor(update_factor, 0)
self._update_belief(
end_bp, self.start_interface_clique, potential_dict[0], out_clique_phi
)
evidence_0 = self._get_evidence(evidence, 0, 0)
if variable_dict[0]:
factor_values.update(
end_bp.query(variable_dict[0], evidence_0, joint=False)
)
return factor_values
def query(self, variables, evidence=None, args="exact"):
"""
Query method for Dynamic Bayesian Network using Interface Algorithm.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.query([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
array([ 0.66594382, 0.33405618])
"""
if args == "exact":
return self.backward_inference(variables, evidence)
|
import io
from locale import getpreferredencoding
import os
import subprocess
import sys
import shlex
from . import pprint
ENCODING = getpreferredencoding() or 'utf-8'
_COMMIT_FILE = 'GL_COMMIT_EDIT_MSG'
_MERGE_MSG_FILE = 'MERGE_MSG'
def show(files, repo):
"""Show the commit dialog.
Args:
files: files for pre-populating the dialog.
repo: the repository.
Returns:
The commit msg.
"""
cf = io.open(_commit_file(repo), mode='w', encoding=ENCODING)
curr_b = repo.current_branch
if curr_b.merge_in_progress or curr_b.fuse_in_progress:
merge_msg = io.open(
_merge_msg_file(repo), mode='r', encoding=ENCODING).read()
cf.write(merge_msg)
cf.write('\n')
pprint.sep(stream=cf.write)
pprint.msg(
'Please enter the commit message for your changes above, an empty '
'message aborts', stream=cf.write)
pprint.msg('the commit.', stream=cf.write)
pprint.blank(stream=cf.write)
pprint.msg(
'These are the files whose changes will be committed:', stream=cf.write)
for f in files:
pprint.item(f, stream=cf.write)
pprint.sep(stream=cf.write)
cf.close()
_launch_editor(cf.name, repo)
return _extract_msg(repo)
def _launch_editor(fp, repo):
try:
editor = repo.config['core.editor']
except KeyError:
editor = os.environ['EDITOR'] if 'EDITOR' in os.environ else 'vim'
cmd = shlex.split(editor)
cmd.append(fp)
try:
ret = subprocess.call(cmd)
if ret != 0:
pprint.err('Call to editor {0} failed'.format(editor))
except OSError:
pprint.err('Couldn\'t launch editor {0}'.format(editor))
pprint.err_exp('change the value of git\'s core.editor setting')
def _extract_msg(repo):
cf = io.open(_commit_file(repo), mode='r', encoding=ENCODING)
sep = pprint.SEP + '\n'
msg = ''
l = cf.readline()
while l != sep and len(l) > 0:
msg += l
l = cf.readline()
# We reached the separator, this marks the end of the commit msg
return msg
def _commit_file(repo):
return os.path.join(repo.path, _COMMIT_FILE)
def _merge_msg_file(repo):
return os.path.join(repo.path, _MERGE_MSG_FILE)
|
import io
import os
from nikola.packages.pygments_better_html import BetterHtmlFormatter
from nikola.plugin_categories import Task
from nikola import utils
class CopyAssets(Task):
"""Copy theme assets into output."""
name = "copy_assets"
def gen_tasks(self):
"""Create tasks to copy the assets of the whole theme chain.
        If a file is present in two themes, use the version
from the "youngest" theme.
"""
kw = {
"themes": self.site.THEMES,
"translations": self.site.translations,
"files_folders": self.site.config['FILES_FOLDERS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
"code.css_selectors": ['pre.code', '.code .codetable', '.highlight pre'],
"code.css_wrappers": ['.highlight', '.code'],
"code.css_head": '/* code.css file generated by Nikola */\n',
"code.css_close": (
"\ntable.codetable, table.highlighttable { width: 100%;}\n"
".codetable td.linenos, td.linenos { text-align: right; width: 3.5em; "
"padding-right: 0.5em; background: rgba(127, 127, 127, 0.2) }\n"
".codetable td.code, td.code { padding-left: 0.5em; }\n"),
}
tasks = {}
code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
code_css_input = utils.get_asset_path('assets/css/code.css',
themes=kw['themes'],
files_folders=kw['files_folders'], output_dir=None)
yield self.group_task()
main_theme = utils.get_theme_path(kw['themes'][0])
theme_ini = utils.parse_theme_meta(main_theme)
if theme_ini:
ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',')
ignored_assets = [os.path.normpath(asset_name.strip()) for asset_name in ignored_assets]
else:
ignored_assets = []
for theme_name in kw['themes']:
src = os.path.join(utils.get_theme_path(theme_name), 'assets')
dst = os.path.join(kw['output_folder'], 'assets')
for task in utils.copy_tree(src, dst):
asset_name = os.path.relpath(task['name'], dst)
if task['name'] in tasks or asset_name in ignored_assets:
continue
tasks[task['name']] = task
task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]
task['basename'] = self.name
if code_css_input:
if 'file_dep' not in task:
task['file_dep'] = []
task['file_dep'].append(code_css_input)
yield utils.apply_filters(task, kw['filters'])
# Check whether or not there is a code.css file around.
if not code_css_input and kw['code_color_scheme']:
def create_code_css():
formatter = BetterHtmlFormatter(style=kw["code_color_scheme"])
utils.makedirs(os.path.dirname(code_css_path))
with io.open(code_css_path, 'w+', encoding='utf-8') as outf:
outf.write(kw["code.css_head"])
outf.write(formatter.get_style_defs(
kw["code.css_selectors"], kw["code.css_wrappers"]))
outf.write(kw["code.css_close"])
if os.path.exists(code_css_path):
with io.open(code_css_path, 'r', encoding='utf-8-sig') as fh:
testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
else:
testcontents = False
task = {
'basename': self.name,
'name': code_css_path,
'targets': [code_css_path],
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],
'actions': [(create_code_css, [])],
'clean': True,
}
yield utils.apply_filters(task, kw['filters'])
|
import os
import unittest
from perfkitbenchmarker import benchmark_status
class MockSpec(object):
"""A mock BenchmarkSpec class.
We need to use this rather than a mock.MagicMock object because
the "name" attribute of MagicMocks is difficult to set.
"""
def __init__(self, name, uid, status, failed_substatus=None):
self.name = name
self.uid = uid
self.status = status
self.failed_substatus = failed_substatus
_BENCHMARK_SPECS = [
MockSpec('iperf', 'iperf0', benchmark_status.SUCCEEDED),
MockSpec('iperf', 'iperf1', benchmark_status.FAILED),
MockSpec('iperf', 'iperf2', benchmark_status.FAILED,
benchmark_status.FailedSubstatus.QUOTA),
MockSpec('cluster_boot', 'cluster_boot0', benchmark_status.SKIPPED)
]
_STATUS_TABLE = os.linesep.join((
'--------------------------------------------------------',
'Name UID Status Failed Substatus',
'--------------------------------------------------------',
'iperf iperf0 SUCCEEDED ',
'iperf iperf1 FAILED ',
'iperf iperf2 FAILED QUOTA_EXCEEDED ',
'cluster_boot cluster_boot0 SKIPPED ',
'--------------------------------------------------------'))
_STATUS_SUMMARY = os.linesep.join((
'Benchmark run statuses:',
'--------------------------------------------------------',
'Name UID Status Failed Substatus',
'--------------------------------------------------------',
'iperf iperf0 SUCCEEDED ',
'iperf iperf1 FAILED ',
'iperf iperf2 FAILED QUOTA_EXCEEDED ',
'cluster_boot cluster_boot0 SKIPPED ',
'--------------------------------------------------------',
'Success rate: 25.00% (1/4)'))
class CreateSummaryTableTestCase(unittest.TestCase):
def testCreateSummaryTable(self):
result = benchmark_status._CreateSummaryTable(_BENCHMARK_SPECS)
self.assertEqual(result, _STATUS_TABLE)
class CreateSummaryTestCase(unittest.TestCase):
def testCreateSummary(self):
result = benchmark_status.CreateSummary(_BENCHMARK_SPECS)
self.assertEqual(result, _STATUS_SUMMARY)
if __name__ == '__main__':
unittest.main()
|
import unittest
import numpy as np
from chainer.testing import attr
from chainer import Variable
from chainercv.links import SEResNeXt101
from chainercv.links import SEResNeXt50
from chainercv.utils import testing
@testing.parameterize(*(
testing.product_dict(
[
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'res5',
'shapes': (1, 2048, 7, 7), 'n_class': None},
{'pick': ['res2', 'conv1'],
'shapes': ((1, 256, 56, 56), (1, 64, 112, 112)), 'n_class': None},
],
[
{'model_class': SEResNeXt50},
{'model_class': SEResNeXt101},
],
)
))
class TestSEResNeXtCall(unittest.TestCase):
def setUp(self):
self.link = self.model_class(
n_class=self.n_class, pretrained_model=None)
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
features = self.link(x)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
@testing.parameterize(*testing.product({
'model': [SEResNeXt50, SEResNeXt101],
'n_class': [None, 500, 1000],
'pretrained_model': ['imagenet'],
    'mean': [None, np.random.uniform(size=(3, 1, 1)).astype(np.float32)],
}))
class TestSEResNeXtPretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_class': self.n_class,
'pretrained_model': self.pretrained_model,
'mean': self.mean,
}
if self.pretrained_model == 'imagenet':
valid = self.n_class in {None, 1000}
if valid:
self.model(**kwargs)
else:
with self.assertRaises(ValueError):
self.model(**kwargs)
testing.run_module(__name__, __file__)
|
import os
from django.core.management.color import no_style
from django.db import connection
from django.test import LiveServerTestCase, TestCase
from django.test.utils import override_settings
from weblate.auth.models import Group, User
from weblate.checks.models import Check
from weblate.lang.models import Language, Plural
from weblate.trans.models import (
Announcement,
AutoComponentList,
Comment,
Component,
ComponentList,
Project,
Suggestion,
Unit,
Vote,
)
from weblate.trans.tests.utils import RepoTestMixin, create_test_user
from weblate.utils.django_hacks import immediate_on_commit, immediate_on_commit_leave
from weblate.utils.files import remove_tree
from weblate.utils.state import STATE_TRANSLATED
def fixup_languages_seq():
# Reset sequence for Language and Plural objects as
    # we're manipulating them in FixtureTestCase.setUpTestData
# and that seems to affect sequence for other tests as well
# on some PostgreSQL versions (probably sequence is not rolled back
# in a transaction).
commands = connection.ops.sequence_reset_sql(no_style(), [Language, Plural])
if commands:
with connection.cursor() as cursor:
for sql in commands:
cursor.execute(sql)
# Invalidate object cache for languages
Language.objects.flush_object_cache()
class BaseTestCase(TestCase):
@classmethod
def setUpTestData(cls):
fixup_languages_seq()
@classmethod
def setUpClass(cls):
super().setUpClass()
immediate_on_commit(cls)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
immediate_on_commit_leave(cls)
class BaseLiveServerTestCase(LiveServerTestCase):
@classmethod
def setUpTestData(cls):
fixup_languages_seq()
@classmethod
def setUpClass(cls):
super().setUpClass()
immediate_on_commit(cls)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
immediate_on_commit_leave(cls)
class RepoTestCase(BaseTestCase, RepoTestMixin):
"""Generic class for tests working with repositories."""
def setUp(self):
self.clone_test_repos()
class ProjectTest(RepoTestCase):
"""Project object testing."""
def test_create(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.full_path))
self.assertTrue(project.slug in project.full_path)
def test_rename(self):
component = self.create_link()
self.assertTrue(Component.objects.filter(repo="weblate://test/test").exists())
project = component.project
old_path = project.full_path
self.assertTrue(os.path.exists(old_path))
self.assertTrue(
os.path.exists(
component.translation_set.get(language_code="cs").get_filename()
)
)
project.name = "Changed"
project.slug = "changed"
project.save()
new_path = project.full_path
self.addCleanup(remove_tree, new_path, True)
self.assertFalse(os.path.exists(old_path))
self.assertTrue(os.path.exists(new_path))
self.assertTrue(
Component.objects.filter(repo="weblate://changed/test").exists()
)
self.assertFalse(Component.objects.filter(repo="weblate://test/test").exists())
component = Component.objects.get(pk=component.pk)
self.assertTrue(
os.path.exists(
component.translation_set.get(language_code="cs").get_filename()
)
)
# Check that glossaries were renamed
self.assertEqual(
["Changed"], list(project.glossary_set.values_list("name", flat=True))
)
def test_delete(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.full_path))
project.delete()
self.assertFalse(os.path.exists(project.full_path))
def test_delete_votes(self):
component = self.create_po(suggestion_voting=True, suggestion_autoaccept=True)
user = create_test_user()
translation = component.translation_set.get(language_code="cs")
unit = translation.unit_set.first()
suggestion = Suggestion.objects.add(unit, "Test", None)
Vote.objects.create(suggestion=suggestion, value=Vote.POSITIVE, user=user)
component.project.delete()
def test_delete_all(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.full_path))
Project.objects.all().delete()
self.assertFalse(os.path.exists(project.full_path))
def test_acl(self):
"""Test for ACL handling."""
# Create user to verify ACL
user = create_test_user()
# Create project
project = self.create_project()
# Enable ACL
project.access_control = Project.ACCESS_PRIVATE
project.save()
# Check user does not have access
self.assertFalse(user.can_access_project(project))
# Add to ACL group
user.groups.add(Group.objects.get(name="Test@Translate"))
# Need to fetch user again to clear permission cache
user = User.objects.get(username="testuser")
# We now should have access
self.assertTrue(user.can_access_project(project))
class TranslationTest(RepoTestCase):
"""Translation testing."""
def test_basic(self):
component = self.create_component()
# Verify source translation
translation = component.source_translation
self.assertFalse(translation.unit_set.filter(num_words=0).exists())
self.assertEqual(translation.stats.translated, 4)
self.assertEqual(translation.stats.all, 4)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all_words, 15)
# Verify target translation
translation = component.translation_set.get(language_code="cs")
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.all, 4)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all_words, 15)
def test_validation(self):
"""Translation validation."""
component = self.create_component()
translation = component.translation_set.get(language_code="cs")
translation.full_clean()
def test_update_stats(self):
"""Check update stats with no units."""
component = self.create_component()
translation = component.translation_set.get(language_code="cs")
self.assertEqual(translation.stats.all, 4)
self.assertEqual(translation.stats.all_words, 15)
translation.unit_set.all().delete()
translation.invalidate_cache()
self.assertEqual(translation.stats.all, 0)
self.assertEqual(translation.stats.all_words, 0)
def test_commit_groupping(self):
component = self.create_component()
translation = component.translation_set.get(language_code="cs")
user = create_test_user()
start_rev = component.repository.last_revision
# Initial translation
for unit in translation.unit_set.iterator():
unit.translate(user, "test2", STATE_TRANSLATED)
# Translation completed, no commit forced
self.assertEqual(start_rev, component.repository.last_revision)
# Translation from same author should not trigger commit
for unit in translation.unit_set.iterator():
unit.translate(user, "test3", STATE_TRANSLATED)
for unit in translation.unit_set.iterator():
unit.translate(user, "test4", STATE_TRANSLATED)
self.assertEqual(start_rev, component.repository.last_revision)
        # Translation from other author should trigger commit
for i, unit in enumerate(translation.unit_set.iterator()):
user = User.objects.create(
full_name=f"User {unit.pk}",
username=f"user-{unit.pk}",
email=f"{unit.pk}@example.com",
)
# Fetch current pending state, it might have been
# updated by background commit
unit.pending = Unit.objects.get(pk=unit.pk).pending
unit.translate(user, "test", STATE_TRANSLATED)
if i == 0:
# First edit should trigger commit
self.assertNotEqual(start_rev, component.repository.last_revision)
start_rev = component.repository.last_revision
# No further commit now
self.assertEqual(start_rev, component.repository.last_revision)
# Commit pending changes
translation.commit_pending("test", None)
self.assertNotEqual(start_rev, component.repository.last_revision)
class ComponentListTest(RepoTestCase):
"""Test(s) for ComponentList model."""
def test_slug(self):
"""Test ComponentList slug."""
clist = ComponentList()
clist.slug = "slug"
self.assertEqual(clist.tab_slug(), "list-slug")
def test_auto(self):
self.create_component()
clist = ComponentList.objects.create(name="Name", slug="slug")
AutoComponentList.objects.create(
project_match="^.*$", component_match="^.*$", componentlist=clist
)
self.assertEqual(clist.components.count(), 1)
def test_auto_create(self):
clist = ComponentList.objects.create(name="Name", slug="slug")
AutoComponentList.objects.create(
project_match="^.*$", component_match="^.*$", componentlist=clist
)
self.assertEqual(clist.components.count(), 0)
self.create_component()
self.assertEqual(clist.components.count(), 1)
def test_auto_nomatch(self):
self.create_component()
clist = ComponentList.objects.create(name="Name", slug="slug")
AutoComponentList.objects.create(
project_match="^none$", component_match="^.*$", componentlist=clist
)
self.assertEqual(clist.components.count(), 0)
class ModelTestCase(RepoTestCase):
def setUp(self):
super().setUp()
self.component = self.create_component()
class SourceUnitTest(ModelTestCase):
"""Source Unit objects testing."""
def test_source_unit(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
self.assertIsNotNone(unit.source_unit)
unit = Unit.objects.filter(translation__language_code="en")[0]
self.assertEqual(unit.source_unit, unit)
def test_priority(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
self.assertEqual(unit.priority, 100)
source = unit.source_unit
source.extra_flags = "priority:200"
source.save()
unit2 = Unit.objects.get(pk=unit.pk)
self.assertEqual(unit2.priority, 200)
def test_check_flags(self):
"""Setting of Source check_flags changes checks for related units."""
self.assertEqual(Check.objects.count(), 3)
check = Check.objects.all()[0]
unit = check.unit
self.assertEqual(self.component.stats.allchecks, 3)
source = unit.source_unit
source.extra_flags = f"ignore-{check.check}"
source.save()
self.assertEqual(Check.objects.count(), 0)
self.assertEqual(Component.objects.get(pk=self.component.pk).stats.allchecks, 0)
class UnitTest(ModelTestCase):
def test_newlines(self):
user = create_test_user()
unit = Unit.objects.filter(
translation__language_code="cs", source="Hello, world!\n"
)[0]
unit.translate(user, "new\nstring\n", STATE_TRANSLATED)
self.assertEqual(unit.target, "new\nstring\n")
# New object to clear all_flags cache
unit = Unit.objects.get(pk=unit.pk)
unit.flags = "dos-eol"
unit.translate(user, "new\nstring", STATE_TRANSLATED)
self.assertEqual(unit.target, "new\r\nstring\r\n")
unit.translate(user, "other\r\nstring", STATE_TRANSLATED)
self.assertEqual(unit.target, "other\r\nstring\r\n")
def test_flags(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
unit.flags = "no-wrap, ignore-same"
self.assertEqual(unit.all_flags.items(), {"no-wrap", "ignore-same"})
def test_order_by_request(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
source = unit.source_unit
source.extra_flags = "priority:200"
source.save()
        # test that both ascending and descending order work
unit1 = Unit.objects.filter(translation__language_code="cs")
unit1 = unit1.order_by_request({"sort_by": "-priority"})
self.assertEqual(unit1[0].priority, 200)
unit1 = Unit.objects.filter(translation__language_code="cs")
unit1 = unit1.order_by_request({"sort_by": "priority"})
self.assertEqual(unit1[0].priority, 100)
        # test that invalid sorting falls back to the default order
unit2 = Unit.objects.filter(translation__language_code="cs")
unit2 = unit2.order()
unit3 = Unit.objects.filter(translation__language_code="cs")
unit3 = unit3.order_by_request({"sort_by": "invalid"})
self.assertEqual(unit3[0], unit2[0])
# test sorting by count
unit4 = Unit.objects.filter(translation__language_code="cs")[2]
Comment.objects.create(unit=unit4, comment="Foo")
unit5 = Unit.objects.filter(translation__language_code="cs")
unit5 = unit5.order_by_request({"sort_by": "-num_comments"})
self.assertEqual(unit5[0].comment_set.count(), 1)
unit5 = Unit.objects.filter(translation__language_code="cs")
unit5 = unit5.order_by_request({"sort_by": "num_comments"})
self.assertEqual(unit5[0].comment_set.count(), 0)
# check all order options produce valid queryset
order_options = [
"priority",
"position",
"context",
"num_words",
"labels",
"timestamp",
"num_failing_checks",
]
for order_option in order_options:
ordered_unit = Unit.objects.filter(
translation__language_code="cs"
).order_by_request({"sort_by": order_option})
ordered_desc_unit = Unit.objects.filter(
translation__language_code="cs"
).order_by_request({"sort_by": f"-{order_option}"})
self.assertEqual(len(ordered_unit), 4)
self.assertEqual(len(ordered_desc_unit), 4)
        # check that sorting with multiple options works
multiple_ordered_unit = Unit.objects.filter(
translation__language_code="cs"
).order_by_request({"sort_by": "position,timestamp"})
self.assertEqual(multiple_ordered_unit.count(), 4)
def test_get_max_length_no_pk(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
unit.pk = False
self.assertEqual(unit.get_max_length(), 10000)
def test_get_max_length_empty_source_default_fallback(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
unit.pk = True
unit.source = ""
self.assertEqual(unit.get_max_length(), 100)
def test_get_max_length_default_fallback(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
unit.pk = True
unit.source = "My test source"
self.assertEqual(unit.get_max_length(), 140)
@override_settings(LIMIT_TRANSLATION_LENGTH_BY_SOURCE_LENGTH=False)
def test_get_max_length_empty_source_disabled_default_fallback(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
unit.pk = True
unit.source = ""
self.assertEqual(unit.get_max_length(), 10000)
@override_settings(LIMIT_TRANSLATION_LENGTH_BY_SOURCE_LENGTH=False)
def test_get_max_length_disabled_default_fallback(self):
unit = Unit.objects.filter(translation__language_code="cs")[0]
unit.pk = True
unit.source = "My test source"
self.assertEqual(unit.get_max_length(), 10000)
class AnnouncementTest(ModelTestCase):
"""Test(s) for Announcement model."""
def setUp(self):
super().setUp()
Announcement.objects.create(
language=Language.objects.get(code="cs"), message="test cs"
)
Announcement.objects.create(
language=Language.objects.get(code="de"), message="test de"
)
Announcement.objects.create(
project=self.component.project, message="test project"
)
Announcement.objects.create(
component=self.component,
project=self.component.project,
message="test component",
)
Announcement.objects.create(message="test global")
def verify_filter(self, messages, count, message=None):
"""Verify whether messages have given count and first contains string."""
self.assertEqual(len(messages), count)
if message is not None:
self.assertEqual(messages[0].message, message)
def test_contextfilter_global(self):
self.verify_filter(Announcement.objects.context_filter(), 1, "test global")
def test_contextfilter_project(self):
self.verify_filter(
Announcement.objects.context_filter(project=self.component.project),
1,
"test project",
)
def test_contextfilter_component(self):
self.verify_filter(
Announcement.objects.context_filter(component=self.component), 2
)
def test_contextfilter_translation(self):
self.verify_filter(
Announcement.objects.context_filter(
component=self.component, language=Language.objects.get(code="cs")
),
3,
)
def test_contextfilter_language(self):
self.verify_filter(
Announcement.objects.context_filter(
language=Language.objects.get(code="cs")
),
1,
"test cs",
)
self.verify_filter(
Announcement.objects.context_filter(
language=Language.objects.get(code="de")
),
1,
"test de",
)
|
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import providers
class KubernetesProviderInfo(provider_info.BaseProviderInfo):
SUPPORTED_BENCHMARKS = ['block_storage_workload', 'cassandra_ycsb',
'cassandra_stress', 'cluster_boot', 'fio',
'iperf', 'mesh_network', 'mongodb_ycsb',
'netperf', 'redis']
UNSUPPORTED_BENCHMARKS = ['bonnieplusplus', 'sysbench']
CLOUD = providers.KUBERNETES
@classmethod
def IsBenchmarkSupported(cls, benchmark):
if benchmark in cls.SUPPORTED_BENCHMARKS:
return True
elif benchmark in cls.UNSUPPORTED_BENCHMARKS:
return False
else:
return None
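# Illustrative usage (not part of the original module): the tri-state return lets
# callers distinguish explicitly unsupported benchmarks from unknown ones:
#   KubernetesProviderInfo.IsBenchmarkSupported('fio')       -> True
#   KubernetesProviderInfo.IsBenchmarkSupported('sysbench')  -> False
#   KubernetesProviderInfo.IsBenchmarkSupported('spec_cpu')  -> None (hypothetical name, not listed)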
|
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.components.twilio import DATA_TWILIO
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_FROM_NUMBER = "from_number"
ATTR_MEDIAURL = "media_url"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_FROM_NUMBER): vol.All(
cv.string,
vol.Match(
r"^\+?[1-9]\d{1,14}$|"
r"^(?=.{1,11}$)[a-zA-Z0-9\s]*"
r"[a-zA-Z][a-zA-Z0-9\s]*$"
r"^(?:[a-zA-Z]+)\:?\+?[1-9]\d{1,14}$|"
),
)
}
)
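# The pattern above is meant to accept three shapes of sender (illustrative
# examples, assuming the intended Twilio semantics): an E.164 number such as
# "+15551234567", an alphanumeric sender ID of at most 11 characters such as
# "HomeAssist", or a letter-prefixed number such as "MG:+15551234567".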
def get_service(hass, config, discovery_info=None):
"""Get the Twilio SMS notification service."""
return TwilioSMSNotificationService(
hass.data[DATA_TWILIO], config[CONF_FROM_NUMBER]
)
class TwilioSMSNotificationService(BaseNotificationService):
"""Implement the notification service for the Twilio SMS service."""
def __init__(self, twilio_client, from_number):
"""Initialize the service."""
self.client = twilio_client
self.from_number = from_number
def send_message(self, message="", **kwargs):
"""Send SMS to specified target user cell."""
targets = kwargs.get(ATTR_TARGET)
data = kwargs.get(ATTR_DATA) or {}
twilio_args = {"body": message, "from_": self.from_number}
if ATTR_MEDIAURL in data:
twilio_args[ATTR_MEDIAURL] = data[ATTR_MEDIAURL]
if not targets:
_LOGGER.info("At least 1 target is required")
return
for target in targets:
self.client.messages.create(to=target, **twilio_args)
|
from homeassistant import config_entries
from homeassistant.components.yeelight import (
CONF_DEVICE,
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DEFAULT_MODE_MUSIC,
DEFAULT_NAME,
DEFAULT_NIGHTLIGHT_SWITCH,
DEFAULT_SAVE_ON_CHANGE,
DEFAULT_TRANSITION,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
)
from homeassistant.const import CONF_HOST, CONF_ID, CONF_NAME
from homeassistant.core import HomeAssistant
from . import (
ID,
IP_ADDRESS,
MODULE,
MODULE_CONFIG_FLOW,
NAME,
UNIQUE_NAME,
_mocked_bulb,
_patch_discovery,
)
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
DEFAULT_CONFIG = {
CONF_MODEL: "",
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
async def test_discovery(hass: HomeAssistant):
"""Test setting up discovery."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(f"{MODULE_CONFIG_FLOW}.yeelight"):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
with patch(f"{MODULE}.async_setup", return_value=True) as mock_setup, patch(
f"{MODULE}.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_DEVICE: ID}
)
assert result3["type"] == "create_entry"
assert result3["title"] == UNIQUE_NAME
assert result3["data"] == {CONF_ID: ID}
await hass.async_block_till_done()
mock_setup.assert_called_once()
mock_setup_entry.assert_called_once()
# ignore configured devices
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(f"{MODULE_CONFIG_FLOW}.yeelight"):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_discovery_no_device(hass: HomeAssistant):
"""Test discovery without device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with _patch_discovery(f"{MODULE_CONFIG_FLOW}.yeelight", no_device=True):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_import(hass: HomeAssistant):
"""Test import from yaml."""
config = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: IP_ADDRESS,
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH_TYPE: NIGHTLIGHT_SWITCH_TYPE_LIGHT,
}
# Cannot connect
mocked_bulb = _mocked_bulb(cannot_connect=True)
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
type(mocked_bulb).get_capabilities.assert_called_once()
type(mocked_bulb).get_properties.assert_called_once()
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
# Success
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_setup, patch(
f"{MODULE}.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
type(mocked_bulb).get_capabilities.assert_called_once()
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_NAME
assert result["data"] == {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: IP_ADDRESS,
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: True,
}
await hass.async_block_till_done()
mock_setup.assert_called_once()
mock_setup_entry.assert_called_once()
# Duplicate
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_manual(hass: HomeAssistant):
"""Test manually setup."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
# Cannot connect (timeout)
mocked_bulb = _mocked_bulb(cannot_connect=True)
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
# Cannot connect (error)
type(mocked_bulb).get_capabilities = MagicMock(side_effect=OSError)
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result3["errors"] == {"base": "cannot_connect"}
# Success
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb), patch(
f"{MODULE}.async_setup", return_value=True
), patch(
f"{MODULE}.async_setup_entry",
return_value=True,
):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
await hass.async_block_till_done()
assert result4["type"] == "create_entry"
assert result4["title"] == IP_ADDRESS
assert result4["data"] == {CONF_HOST: IP_ADDRESS}
# Duplicate
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_options(hass: HomeAssistant):
"""Test options flow."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS, CONF_NAME: NAME}
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
config = {
CONF_NAME: NAME,
CONF_MODEL: "",
CONF_TRANSITION: DEFAULT_TRANSITION,
CONF_MODE_MUSIC: DEFAULT_MODE_MUSIC,
CONF_SAVE_ON_CHANGE: DEFAULT_SAVE_ON_CHANGE,
CONF_NIGHTLIGHT_SWITCH: DEFAULT_NIGHTLIGHT_SWITCH,
}
assert config_entry.options == config
assert hass.states.get(f"light.{NAME}_nightlight") is None
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
config[CONF_NIGHTLIGHT_SWITCH] = True
user_input = {**config}
user_input.pop(CONF_NAME)
with patch(f"{MODULE}.Bulb", return_value=mocked_bulb):
result2 = await hass.config_entries.options.async_configure(
result["flow_id"], user_input
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == config
assert result2["data"] == config_entry.options
assert hass.states.get(f"light.{NAME}_nightlight") is not None
async def test_manual_no_capabilities(hass: HomeAssistant):
"""Test manually setup without successful get_capabilities."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
mocked_bulb = _mocked_bulb()
type(mocked_bulb).get_capabilities = MagicMock(return_value=None)
with patch(f"{MODULE_CONFIG_FLOW}.yeelight.Bulb", return_value=mocked_bulb), patch(
f"{MODULE}.async_setup", return_value=True
), patch(
f"{MODULE}.async_setup_entry",
return_value=True,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
type(mocked_bulb).get_capabilities.assert_called_once()
type(mocked_bulb).get_properties.assert_called_once()
assert result["type"] == "create_entry"
assert result["data"] == {CONF_HOST: IP_ADDRESS}
|
import os.path
import cherrypy
class HomePage:
@cherrypy.expose
def index(self):
return '''
<p>Hi, this is the home page! Check out the other
fun stuff on this site:</p>
<ul>
<li><a href="/joke/">A silly joke</a></li>
<li><a href="/links/">Useful links</a></li>
</ul>'''
class JokePage:
@cherrypy.expose
def index(self):
return '''
<p>"In Python, how do you create a string of random
characters?" -- "Read a Perl file!"</p>
<p>[<a href="../">Return</a>]</p>'''
class LinksPage:
def __init__(self):
# Request handler objects can create their own nested request
# handler objects. Simply create them inside their __init__
# methods!
self.extra = ExtraLinksPage()
@cherrypy.expose
def index(self):
# Note the way we link to the extra links page (and back).
# As you can see, this object doesn't really care about its
# absolute position in the site tree, since we use relative
# links exclusively.
return '''
<p>Here are some useful links:</p>
<ul>
<li>
<a href="http://www.cherrypy.org">The CherryPy Homepage</a>
</li>
<li>
<a href="http://www.python.org">The Python Homepage</a>
</li>
</ul>
<p>You can check out some extra useful
links <a href="./extra/">here</a>.</p>
<p>[<a href="../">Return</a>]</p>
'''
class ExtraLinksPage:
@cherrypy.expose
def index(self):
# Note the relative link back to the Links page!
return '''
<p>Here are some extra useful links:</p>
<ul>
<li><a href="http://del.icio.us">del.icio.us</a></li>
<li><a href="http://www.cherrypy.org">CherryPy</a></li>
</ul>
<p>[<a href="../">Return to links page</a>]</p>'''
# Of course we can also mount request handler objects right here!
root = HomePage()
root.joke = JokePage()
root.links = LinksPage()
# Remember, we don't need to mount ExtraLinksPage here, because
# LinksPage does that itself on initialization. In fact, there is
# no reason why you shouldn't let your root object take care of
# creating all contained request handler objects.
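# Resulting URL map for the objects mounted above (illustrative):
#   /             -> HomePage.index
#   /joke/        -> JokePage.index
#   /links/       -> LinksPage.index
#   /links/extra/ -> ExtraLinksPage.index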
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to HomePage().index().
cherrypy.quickstart(root, config=tutconf)
|
from homeassistant import config_entries, setup
from homeassistant.components.roon.const import DOMAIN
from homeassistant.const import CONF_HOST
from tests.async_mock import patch
from tests.common import MockConfigEntry
class RoonApiMock:
"""Mock to handle returning tokens for testing the RoonApi."""
def __init__(self, token):
"""Initialize."""
self._token = token
@property
def token(self):
"""Return the auth token from the api."""
return self._token
def stop(self): # pylint: disable=no-self-use
"""Close down the api."""
return
async def test_form_and_auth(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock("good_token"),
), patch(
"homeassistant.components.roon.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.roon.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Roon Labs Music Player"
assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_no_token(hass):
"""Test we handle no token being returned (timeout or not authorized)."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock(None),
):
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_unknown_exception(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.roon.config_flow.RoonApi",
side_effect=Exception,
):
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_host_already_exists(hass):
"""Test we add the host if the config exists and it isn't a duplicate."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock("good_token"),
), patch(
"homeassistant.components.roon.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.roon.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Roon Labs Music Player"
assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 2
async def test_form_duplicate_host(hass):
"""Test we don't add the host if it's a duplicate."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "existing_host"}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "duplicate_entry"}
|
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
from chainer import variable
class Normalize(chainer.Link):
"""Learnable L2 normalization [#]_.
This link normalizes input along the channel axis and scales it.
The scale factors are trained channel-wise.
.. [#] Wei Liu, Andrew Rabinovich, Alexander C. Berg.
ParseNet: Looking Wider to See Better. ICLR 2016.
Args:
n_channel (int): The number of channels.
        initial: A value to initialize the scale factors. It is passed to
:meth:`chainer.initializers._get_initializer`. The default value
is 0.
eps (float): A small value to avoid zero-division. The default value
is :math:`1e-5`.
"""
def __init__(self, n_channel, initial=0, eps=1e-5):
super(Normalize, self).__init__()
self.eps = eps
with self.init_scope():
initializer = initializers._get_initializer(initial)
self.scale = variable.Parameter(initializer)
            self.scale.initialize((n_channel,))
def forward(self, x):
"""Normalize input and scale it.
Args:
x (chainer.Variable): A variable holding 4-dimensional array.
Its :obj:`dtype` is :obj:`numpy.float32`.
Returns:
chainer.Variable:
The shape and :obj:`dtype` are same as those of input.
"""
x = F.normalize(x, eps=self.eps, axis=1)
scale = F.broadcast_to(self.scale[:, np.newaxis, np.newaxis], x.shape)
return x * scale
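# Minimal usage sketch (illustrative, not part of the original module):
#   scale_layer = Normalize(n_channel=512, initial=20)
#   y = scale_layer(np.zeros((1, 512, 38, 38), dtype=np.float32))
# Each channel vector along axis 1 is L2-normalized and then multiplied by the
# learned per-channel scale factor.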
|
import os
import subprocess
from textwrap import dedent
from nikola.plugin_categories import Command
from nikola.plugins.command.check import real_scan_files
from nikola.utils import req_missing, clean_before_deployment
from nikola.__main__ import main
from nikola import __version__
def uni_check_output(*args, **kwargs):
"""Run command and return output as Unicode (UTf-8)."""
o = subprocess.check_output(*args, **kwargs)
return o.decode('utf-8')
def check_ghp_import_installed():
"""Check if ghp-import is installed."""
try:
subprocess.check_output(['ghp-import', '-h'])
except OSError:
        # req_missing defaults to `python=True`, and that is intentional:
        # `ghp-import` is installed via pip, but the only way to use it is to run the script it installs.
req_missing(['ghp-import2'], 'deploy the site to GitHub Pages')
class DeployFailedException(Exception):
"""An internal exception for deployment errors."""
pass
class CommandGitHubDeploy(Command):
"""Deploy site to GitHub Pages."""
name = 'github_deploy'
doc_usage = '[-m COMMIT_MESSAGE]'
doc_purpose = 'deploy the site to GitHub Pages'
doc_description = dedent(
"""\
This command can be used to deploy your site to GitHub Pages. It uses ghp-import to do this task. It also optionally commits to the source branch.
Configuration help: https://getnikola.com/handbook.html#deploying-to-github"""
)
cmd_options = [
{
'name': 'commit_message',
'short': 'm',
'long': 'message',
'default': 'Nikola auto commit.',
'type': str,
'help': 'Commit message',
},
]
def _execute(self, options, args):
"""Run the deployment."""
# Check if ghp-import is installed
check_ghp_import_installed()
# Build before deploying
build = main(['build'])
if build != 0:
self.logger.error('Build failed, not deploying to GitHub')
return build
# Clean non-target files
only_on_output, _ = real_scan_files(self.site)
for f in only_on_output:
os.unlink(f)
# Remove drafts and future posts if requested (Issue #2406)
undeployed_posts = clean_before_deployment(self.site)
if undeployed_posts:
self.logger.warning("Deleted {0} posts due to DEPLOY_* settings".format(len(undeployed_posts)))
# Commit and push
return self._commit_and_push(options['commit_message'])
def _run_command(self, command, xfail=False):
"""Run a command that may or may not fail."""
self.logger.info("==> {0}".format(command))
try:
subprocess.check_call(command)
return 0
except subprocess.CalledProcessError as e:
if xfail:
return e.returncode
self.logger.error(
'Failed GitHub deployment -- command {0} '
'returned {1}'.format(e.cmd, e.returncode)
)
raise DeployFailedException(e.returncode)
def _commit_and_push(self, commit_first_line):
"""Commit all the files and push."""
source = self.site.config['GITHUB_SOURCE_BRANCH']
deploy = self.site.config['GITHUB_DEPLOY_BRANCH']
remote = self.site.config['GITHUB_REMOTE_NAME']
autocommit = self.site.config['GITHUB_COMMIT_SOURCE']
try:
if autocommit:
commit_message = (
'{0}\n\n'
'Nikola version: {1}'.format(commit_first_line, __version__)
)
e = self._run_command(['git', 'checkout', source], True)
if e != 0:
self._run_command(['git', 'checkout', '-b', source])
self._run_command(['git', 'add', '.'])
# Figure out if there is anything to commit
e = self._run_command(['git', 'diff-index', '--quiet', 'HEAD'], True)
if e != 0:
self._run_command(['git', 'commit', '-am', commit_message])
else:
self.logger.info('Nothing to commit to source branch.')
try:
source_commit = uni_check_output(['git', 'rev-parse', source])
except subprocess.CalledProcessError:
try:
source_commit = uni_check_output(['git', 'rev-parse', 'HEAD'])
except subprocess.CalledProcessError:
source_commit = '?'
commit_message = (
'{0}\n\n'
'Source commit: {1}'
'Nikola version: {2}'.format(commit_first_line, source_commit, __version__)
)
output_folder = self.site.config['OUTPUT_FOLDER']
command = ['ghp-import', '-n', '-m', commit_message, '-p', '-r', remote, '-b', deploy, output_folder]
self._run_command(command)
if autocommit:
self._run_command(['git', 'push', '-u', remote, source])
except DeployFailedException as e:
return e.args[0]
self.logger.info("Successful deployment")
|
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import typing
import six
import tqdm
if typing.TYPE_CHECKING:
from threading import Lock, RLock
from typing import Union
_T = typing.TypeVar('_T', covariant=True)
_L = typing.TypeVar('_L')
@six.add_metaclass(abc.ABCMeta)
class ProgressBar(typing.Iterator[_T]):
"""An abstract progess bar used to report interal progress.
"""
def __init__(self, it, *args, **kwargs):
self.it = it
self.__lock = None # type: Union[Lock, RLock, None]
def __iter__(self):
# type: () -> ProgressBar[_T]
return self
def __next__(self):
# type: () -> _T
item = next(self.it)
self.update()
return item
if six.PY2:
next = __next__
@abc.abstractmethod
def update(self):
# type: () -> None
"""Update the progress bar by one step.
"""
return NotImplemented
@abc.abstractmethod
def set_maximum(self, maximum):
# type: (int) -> None
"""Set the maximum number of steps of the operation.
"""
return NotImplemented
def finish(self):
# type: () -> None
"""Notify the progress bar the operation is finished.
"""
pass
def set_lock(self, lock):
# type: (Union[Lock, RLock]) -> None
"""Set a lock to be used by parallel workers.
"""
self.__lock = lock
def get_lock(self):
# type: () -> Union[Lock, RLock]
"""Obtain the progress bar lock.
"""
if self.__lock is None:
raise RuntimeError("lock was not initialised")
return self.__lock
class TqdmProgressBar(tqdm.tqdm, ProgressBar):
"""A progress bar using the `tqdm` library.
"""
def __init__(self, it, *args, **kwargs): # noqa: D102, D107
kwargs["leave"] = False
super(TqdmProgressBar, self).__init__(it, *args, **kwargs)
ProgressBar.__init__(self, it)
def set_maximum(self, maximum): # noqa: D102
self.total = maximum
def finish(self): # noqa: D102
self.close()
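# Illustrative sketch of another concrete subclass (not part of this module):
#
#   class NullProgressBar(ProgressBar):
#       """Progress bar that silently discards all updates."""
#       def update(self):              # advance by one step
#           pass
#       def set_maximum(self, maximum):
#           pass
#
#   bar = NullProgressBar(iter(items))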
|
from dynalite_devices_lib.light import DynaliteChannelLightDevice
import pytest
from homeassistant.components.light import SUPPORT_BRIGHTNESS
from homeassistant.const import ATTR_FRIENDLY_NAME, ATTR_SUPPORTED_FEATURES
from .common import (
ATTR_METHOD,
ATTR_SERVICE,
create_entity_from_device,
create_mock_device,
get_entry_id_from_hass,
run_service_tests,
)
@pytest.fixture
def mock_device():
"""Mock a Dynalite device."""
return create_mock_device("light", DynaliteChannelLightDevice)
async def test_light_setup(hass, mock_device):
"""Test a successful setup."""
await create_entity_from_device(hass, mock_device)
entity_state = hass.states.get("light.name")
assert entity_state.attributes[ATTR_FRIENDLY_NAME] == mock_device.name
assert entity_state.attributes["brightness"] == mock_device.brightness
assert entity_state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_BRIGHTNESS
await run_service_tests(
hass,
mock_device,
"light",
[
{ATTR_SERVICE: "turn_on", ATTR_METHOD: "async_turn_on"},
{ATTR_SERVICE: "turn_off", ATTR_METHOD: "async_turn_off"},
],
)
async def test_remove_entity(hass, mock_device):
"""Test when an entity is removed from HA."""
await create_entity_from_device(hass, mock_device)
assert hass.states.get("light.name")
entry_id = await get_entry_id_from_hass(hass)
assert await hass.config_entries.async_unload(entry_id)
await hass.async_block_till_done()
assert not hass.states.get("light.name")
|
from docstructure import SITE_STRUCTURE, BASENAME_MAP
import os, shutil, re, sys, datetime
TARGET_FILE = "lxmldoc.tex"
RST2LATEX_OPTIONS = " ".join([
# "--no-toc-backlinks",
"--strip-comments",
"--language en",
# "--date",
# "--use-latex-footnotes",
"--use-latex-citations",
"--use-latex-toc",
"--font-encoding=T1",
"--output-encoding=utf-8",
"--input-encoding=utf-8",
"--graphicx-option=pdftex",
])
htmlnsmap = {"h" : "http://www.w3.org/1999/xhtml"}
replace_invalid = re.compile(r'[-_/.\s\\]').sub
replace_content = re.compile(r"\{[^\}]*\}").sub
replace_epydoc_macros = re.compile(r'(,\s*amssymb|dvips\s*,\s*)').sub
replace_rst_macros = re.compile(r'(\\usepackage\{color}|\\usepackage\[[^]]*]\{hyperref})').sub
BASENAME_MAP = BASENAME_MAP.copy()
BASENAME_MAP.update({'api' : 'lxmlapi'})
# LaTeX snippets
DOCUMENT_CLASS = r"""
\documentclass[10pt,english]{report}
\usepackage[a4paper]{geometry}
\usepackage{tabularx}
\usepackage{ifthen}
\usepackage[pdftex]{graphicx}
\parindent0pt
\parskip1ex
%%% Fallback definitions for Docutils-specific commands
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}
"""
PYGMENTS_IMPORT = r"""
\usepackage{fancyvrb}
\input{_part_pygments.tex}
"""
EPYDOC_IMPORT = r"""
\input{_part_epydoc.tex}
"""
def write_chapter(master, title, filename):
filename = os.path.join(os.path.dirname(filename),
"_part_%s" % os.path.basename(filename))
master.write(r"""
\chapter{%s}
\label{%s}
\input{%s}
""" % (title, filename, filename))
# the program ----
def rest2latex(script, source_path, dest_path):
command = ('%s %s %s %s > %s' %
(sys.executable, script, RST2LATEX_OPTIONS,
source_path, dest_path))
os.system(command)
def build_pygments_macros(filename):
from pygments.formatters import LatexFormatter
text = LatexFormatter().get_style_defs()
with open(filename, "w") as f:
f.write(text)
f.write('\n')
def copy_epydoc_macros(src, dest, existing_header_lines):
doc = open(src, 'r')
out = open(dest, "w")
for line in doc:
if line.startswith('%% generator') \
or line.startswith('% generated by ') \
or '\\begin{document}' in line \
or '\\makeindex' in line:
break
if line.startswith('%') or \
r'\documentclass' in line or \
r'\makeindex' in line or \
r'{inputenc}' in line:
continue
if line.startswith(r'\usepackage'):
if line in existing_header_lines:
continue
if '{hyperref}' in line:
line = line.replace('black', 'blue')
out.write( replace_epydoc_macros('', line) )
out.close()
doc.close()
def noop(input):
return input
counter_no = 0
def tex_postprocess(src_path, dest_path, want_header=False, process_line=noop):
"""
    Postprocess the LaTeX file generated from ReST.
    Read the file at src_path and save to dest_path only the actual content
    (without the document header and footer), so it is suitable
    to be used as part of a longer document.
    Return the title of the document.
    If want_header is set, also return the document header (as
    a list of lines).
"""
header = []
add_header_line = header.append
global counter_no
counter_no = counter_no + 1
counter_text = "listcnt%d" % counter_no
search_title = re.compile(r'\\title{([^{}]*(?:{[^}]*})*)}').search
skipping = re.compile(r'(\\end{document}|\\tableofcontents|^%)').search
with open(src_path) as src:
src_text = src.read()
dest = open(dest_path, "w")
title = search_title(src_text)
if title:
# remove any commands from the title
title = re.sub(r'\\\w+({[^}]*})?', '', title.group(1))
iter_lines = iter(src_text.splitlines())
for l in iter_lines:
l = process_line(l)
if not l:
continue
if want_header:
add_header_line(replace_rst_macros('', l))
if l.startswith("\\maketitle"):
break
for l in iter_lines:
l = process_line(l)
if skipping(l):
            # TODO: use minitoc instead of tableofcontents
continue
elif "\hypertarget{old-versions}" in l:
break
elif "listcnt0" in l:
l = l.replace("listcnt0", counter_text)
dest.write(l + '\n')
dest.close()
if not title:
raise Exception("Bueee, no title in %s" % src_path)
return title, header
def publish(dirname, lxml_path, release):
if not os.path.exists(dirname):
os.mkdir(dirname)
book_title = "lxml %s" % release
doc_dir = os.path.join(lxml_path, 'doc')
script = os.path.join(doc_dir, 'rest2latex.py')
pubkey = os.path.join(doc_dir, 'pubkey.asc')
shutil.copy(pubkey, dirname)
# build pygments macros
build_pygments_macros(os.path.join(dirname, '_part_pygments.tex'))
# Used in postprocessing of generated LaTeX files
header = []
titles = {}
replace_interdoc_hyperrefs = re.compile(
r'\\href\{([^/}]+)[.]([^./}]+)\}').sub
replace_docinternal_hyperrefs = re.compile(
r'\\href\{\\#([^}]+)\}').sub
replace_image_paths = re.compile(
r'^(\\includegraphics{)').sub
def build_hyperref(match):
basename, extension = match.groups()
outname = BASENAME_MAP.get(basename, basename)
if '#' in extension:
anchor = extension.split('#')[-1]
return r"\hyperref[%s]" % anchor
elif extension != 'html':
return r'\href{http://lxml.de/%s.%s}' % (
outname, extension)
else:
return r"\hyperref[_part_%s.tex]" % outname
def fix_relative_hyperrefs(line):
line = replace_image_paths(r'\1../html/', line)
if r'\href' not in line:
return line
line = replace_interdoc_hyperrefs(build_hyperref, line)
return replace_docinternal_hyperrefs(r'\\hyperref[\1]', line)
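    # Illustrative rewrites performed by fix_relative_hyperrefs (hypothetical inputs):
    #   \href{tutorial.html}   -> \hyperref[_part_tutorial.tex]
    #   \href{FAQ.pdf}         -> \href{http://lxml.de/FAQ.pdf}
    #   \href{\#section-name}  -> \hyperref[section-name]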
# Building pages
for section, text_files in SITE_STRUCTURE:
for filename in text_files:
if filename.startswith('@'):
continue
#page_title = filename[1:]
#url = href_map[page_title]
#build_menu_entry(page_title, url, section_head)
basename = os.path.splitext(os.path.basename(filename))[0]
basename = BASENAME_MAP.get(basename, basename)
outname = basename + '.tex'
outpath = os.path.join(dirname, outname)
path = os.path.join(doc_dir, filename)
print("Creating %s" % outname)
rest2latex(script, path, outpath)
final_name = os.path.join(dirname, os.path.dirname(outname),
"_part_%s" % os.path.basename(outname))
title, hd = tex_postprocess(outpath, final_name,
want_header = not header,
process_line=fix_relative_hyperrefs)
if not header:
header = hd
titles[outname] = title
# integrate generated API docs
print("Integrating API docs")
apidocsname = 'api.tex'
apipath = os.path.join(dirname, apidocsname)
tex_postprocess(apipath, os.path.join(dirname, "_part_%s" % apidocsname),
process_line=fix_relative_hyperrefs)
copy_epydoc_macros(apipath, os.path.join(dirname, '_part_epydoc.tex'),
set(header))
# convert CHANGES.txt
print("Integrating ChangeLog")
find_version_title = re.compile(
r'(.*\\section\{)([0-9][^\} ]*)\s+\(([^)]+)\)(\}.*)').search
def fix_changelog(line):
m = find_version_title(line)
if m:
line = "%sChanges in version %s, released %s%s" % m.groups()
else:
line = line.replace(r'\subsection{', r'\subsection*{')
return line
chgname = 'changes-%s.tex' % release
chgpath = os.path.join(dirname, chgname)
rest2latex(script,
os.path.join(lxml_path, 'CHANGES.txt'),
chgpath)
tex_postprocess(chgpath, os.path.join(dirname, "_part_%s" % chgname),
process_line=fix_changelog)
# Writing a master file
print("Building %s\n" % TARGET_FILE)
master = open( os.path.join(dirname, TARGET_FILE), "w")
for hln in header:
if hln.startswith(r"\documentclass"):
#hln = hln.replace('article', 'book')
hln = DOCUMENT_CLASS + EPYDOC_IMPORT
elif hln.startswith(r"\begin{document}"):
# pygments and epydoc support
master.write(PYGMENTS_IMPORT)
elif hln.startswith(r"\title{"):
hln = replace_content(
r'{%s\\\\\\vspace{1cm}\\includegraphics[width=2.5cm]{../html/tagpython-big.png}}' % book_title, hln)
elif hln.startswith(r"\date{"):
hln = replace_content(
r'{%s}' % datetime.date.today().isoformat(), hln)
elif hln.startswith("pdftitle"):
hln = replace_content(
r'{%s}' % book_title, hln)
master.write(hln + '\n')
master.write("\\setcounter{page}{2}\n")
master.write("\\tableofcontents\n")
for section, text_files in SITE_STRUCTURE:
master.write("\n\n\\part{%s}\n" % section)
for filename in text_files:
if filename.startswith('@'):
continue
#print "Not yet implemented: %s" % filename[1:]
#page_title = filename[1:]
#url = href_map[page_title]
#build_menu_entry(page_title, url, section_head)
else:
basename = os.path.splitext(os.path.basename(filename))[0]
basename = BASENAME_MAP.get(basename, basename)
outname = basename + '.tex'
write_chapter(master, titles[outname], outname)
master.write("\\appendix\n")
master.write("\\begin{appendix}\n")
write_chapter(master, "Changes", chgname)
write_chapter(master, "Generated API documentation", apidocsname)
master.write("\\end{appendix}\n")
master.write("\\end{document}\n")
if __name__ == '__main__':
publish(sys.argv[1], sys.argv[2], sys.argv[3])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
from compare_gan.architectures.arch_ops import linear
from compare_gan.gans import loss_lib
from compare_gan.gans import modular_gan
from compare_gan.gans import penalty_lib
from compare_gan.gans import utils
import gin
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
NUM_ROTATIONS = 4
# pylint: disable=not-callable
@gin.configurable(blacklist=["kwargs"])
class SSGAN(modular_gan.ModularGAN):
"""Self-Supervised GAN.
http://arxiv.org/abs/1811.11212
"""
def __init__(self,
self_supervision="rotation_gan",
rotated_batch_size=gin.REQUIRED,
weight_rotation_loss_d=1.0,
weight_rotation_loss_g=0.2,
**kwargs):
"""Creates a new Self-Supervised GAN.
Args:
      self_supervision: One of [rotation_gan, rotation_only, None]. When it is
        rotation_only, no GAN loss is used and the model degenerates to a pure
        rotation model.
      rotated_batch_size: The total number of images per batch for the rotation
        loss. This must be a multiple of (4 * #CORES) since we consider 4
        rotations of each image on each TPU core. For GPU training #CORES is 1.
weight_rotation_loss_d: Weight for the rotation loss for the discriminator
on real images.
weight_rotation_loss_g: Weight for the rotation loss for the generator
on fake images.
**kwargs: Additional arguments passed to `ModularGAN` constructor.
"""
super(SSGAN, self).__init__(**kwargs)
self._self_supervision = self_supervision
self._rotated_batch_size = rotated_batch_size
self._weight_rotation_loss_d = weight_rotation_loss_d
self._weight_rotation_loss_g = weight_rotation_loss_g
    # To save memory, ModularGAN supports feeding real and fake samples
    # separately through the discriminator. SSGAN does not support this, to
    # avoid additional complexity in create_loss().
assert not self._deprecated_split_disc_calls, \
"Splitting discriminator calls is not supported in SSGAN."
def discriminator_with_rotation_head(self, x, y, is_training):
"""Discriminator network with augmented auxiliary predictions.
Args:
x: an input image tensor.
y: Tensor with label indices.
is_training: boolean, whether or not it is a training call.
Returns:
real_probs: the [0, 1] probability tensor of x being real images.
real_scores: the unbounded score tensor of x being real images.
      rotation_scores: the categorical probability of x being rotated in one of
the four directions.
"""
real_probs, real_scores, final = self.discriminator(
x=x, y=y, is_training=is_training)
use_sn = self._discriminator._spectral_norm # pylint: disable=protected-access
with tf.variable_scope("discriminator_rotation", reuse=tf.AUTO_REUSE):
rotation_scores = linear(tf.reshape(final, (tf.shape(x)[0], -1)),
NUM_ROTATIONS,
scope="score_classify",
use_sn=use_sn)
return real_probs, real_scores, rotation_scores
def create_loss(self, features, labels, params, is_training=True):
"""Build the loss tensors for discriminator and generator.
This method will set self.d_loss and self.g_loss.
Args:
features: Optional dictionary with inputs to the model ("images" should
contain the real images and "z" the noise for the generator).
      labels: Tensor with labels. These are class indices. Use
self._get_one_hot_labels(labels) to get a one hot encoded tensor.
params: Dictionary with hyperparameters passed to TPUEstimator.
        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
`tpu_context`. `batch_size` is the batch size for this core.
is_training: If True build the model in training mode. If False build the
model for inference mode (e.g. use trained averages for batch norm).
Raises:
ValueError: If set of meta/hyper parameters is not supported.
"""
images = features["images"] # Input images.
generated = features["generated"] # Fake images.
if self.conditional:
y = self._get_one_hot_labels(labels)
sampled_y = self._get_one_hot_labels(features["sampled_labels"])
else:
y = None
sampled_y = None
all_y = None
# Batch size per core.
bs = images.shape[0].value
num_replicas = params["context"].num_replicas if "context" in params else 1
assert self._rotated_batch_size % num_replicas == 0
# Rotated batch size per core.
rotated_bs = self._rotated_batch_size // num_replicas
assert rotated_bs % 4 == 0
    # Number of images to rotate. Each image gets rotated 3 times.
num_rotated_examples = rotated_bs // 4
logging.info("num_replicas=%s, bs=%s, rotated_bs=%s, "
"num_rotated_examples=%s, params=%s",
num_replicas, bs, rotated_bs, num_rotated_examples, params)
# Augment the images with rotation.
if "rotation" in self._self_supervision:
      # Put all rotation angles in a single batch: the first batch_size entries
      # are the original up-right images, followed by num_rotated_examples * 3
      # images rotated by 3 different angles.
assert num_rotated_examples <= bs, (num_rotated_examples, bs)
images_rotated = utils.rotate_images(
images[-num_rotated_examples:], rot90_scalars=(1, 2, 3))
generated_rotated = utils.rotate_images(
generated[-num_rotated_examples:], rot90_scalars=(1, 2, 3))
# Labels for rotation loss (unrotated and 3 rotated versions). For
# NUM_ROTATIONS=4 and num_rotated_examples=2 this is:
# [0, 0, 1, 1, 2, 2, 3, 3]
rotate_labels = tf.constant(
np.repeat(np.arange(NUM_ROTATIONS, dtype=np.int32),
num_rotated_examples))
rotate_labels_onehot = tf.one_hot(rotate_labels, NUM_ROTATIONS)
all_images = tf.concat([images, images_rotated,
generated, generated_rotated], 0)
if self.conditional:
y_rotated = tf.tile(y[-num_rotated_examples:], [3, 1])
sampled_y_rotated = tf.tile(y[-num_rotated_examples:], [3, 1])
all_y = tf.concat([y, y_rotated, sampled_y, sampled_y_rotated], 0)
else:
all_images = tf.concat([images, generated], 0)
if self.conditional:
all_y = tf.concat([y, sampled_y], axis=0)
# Compute discriminator output for real and fake images in one batch.
d_all, d_all_logits, c_all_logits = self.discriminator_with_rotation_head(
all_images, y=all_y, is_training=is_training)
d_real, d_fake = tf.split(d_all, 2)
d_real_logits, d_fake_logits = tf.split(d_all_logits, 2)
c_real_logits, c_fake_logits = tf.split(c_all_logits, 2)
    # Separate the true/fake scores from the whole rotation batch.
d_real_logits = d_real_logits[:bs]
d_fake_logits = d_fake_logits[:bs]
d_real = d_real[:bs]
d_fake = d_fake[:bs]
self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
d_fake_logits=d_fake_logits)
penalty_loss = penalty_lib.get_penalty_loss(
x=images, x_fake=generated, y=y, is_training=is_training,
discriminator=self.discriminator, architecture=self._architecture)
self.d_loss += self._lambda * penalty_loss
# Add rotation augmented loss.
if "rotation" in self._self_supervision:
      # We take an equal number of examples for each rotation angle.
assert len(c_real_logits.shape.as_list()) == 2, c_real_logits.shape
assert len(c_fake_logits.shape.as_list()) == 2, c_fake_logits.shape
c_real_logits = c_real_logits[- rotated_bs:]
c_fake_logits = c_fake_logits[- rotated_bs:]
preds_onreal = tf.cast(tf.argmax(c_real_logits, -1), rotate_labels.dtype)
accuracy = tf.reduce_mean(
tf.cast(tf.equal(rotate_labels, preds_onreal), tf.float32))
c_real_probs = tf.nn.softmax(c_real_logits)
c_fake_probs = tf.nn.softmax(c_fake_logits)
c_real_loss = - tf.reduce_mean(
tf.reduce_sum(rotate_labels_onehot * tf.log(c_real_probs + 1e-10), 1))
c_fake_loss = - tf.reduce_mean(
tf.reduce_sum(rotate_labels_onehot * tf.log(c_fake_probs + 1e-10), 1))
if self._self_supervision == "rotation_only":
self.d_loss *= 0.0
self.g_loss *= 0.0
self.d_loss += c_real_loss * self._weight_rotation_loss_d
self.g_loss += c_fake_loss * self._weight_rotation_loss_g
else:
c_real_loss = 0.0
c_fake_loss = 0.0
accuracy = tf.zeros([])
self._tpu_summary.scalar("loss/c_real_loss", c_real_loss)
self._tpu_summary.scalar("loss/c_fake_loss", c_fake_loss)
self._tpu_summary.scalar("accuracy/d_rotation", accuracy)
self._tpu_summary.scalar("loss/penalty", penalty_loss)
|
import argparse
import sqlite3
import os
import urllib.parse
import json
import string
browser_default_input_format = {
'chromium': 'chrome',
'chrome': 'chrome',
'ie': 'netscape',
'firefox': 'mozilla',
'seamonkey': 'mozilla',
'palemoon': 'mozilla',
}
def main():
args = get_args()
bookmark_types = []
output_format = None
input_format = args.input_format
if args.search_output:
bookmark_types = ['search']
if args.oldconfig:
output_format = 'oldsearch'
else:
output_format = 'search'
else:
if args.bookmark_output:
output_format = 'bookmark'
elif args.quickmark_output:
output_format = 'quickmark'
if args.import_bookmarks:
bookmark_types.append('bookmark')
if args.import_keywords:
bookmark_types.append('keyword')
if not bookmark_types:
bookmark_types = ['bookmark', 'keyword']
if not output_format:
output_format = 'quickmark'
if not input_format:
if args.browser:
input_format = browser_default_input_format[args.browser]
else:
            # default to netscape
input_format = 'netscape'
import_function = {
'netscape': import_netscape_bookmarks,
'mozilla': import_moz_places,
'chrome': import_chrome,
}
import_function[input_format](args.bookmarks, bookmark_types,
output_format)
def get_args():
"""Get the argparse parser."""
parser = argparse.ArgumentParser(
epilog="To import bookmarks from Chromium, Firefox or IE, "
"export them to HTML in your browsers bookmark manager. ")
parser.add_argument(
'browser',
help="Which browser? {%(choices)s}",
choices=browser_default_input_format.keys(),
nargs='?',
metavar='browser')
parser.add_argument(
'-i',
'--input-format',
help='Which input format? (overrides browser default; "netscape" if '
'neither given)',
choices=set(browser_default_input_format.values()),
required=False)
parser.add_argument(
'-b',
'--bookmark-output',
help="Output in bookmark format.",
action='store_true',
default=False,
required=False)
parser.add_argument(
'-q',
'--quickmark-output',
help="Output in quickmark format (default).",
action='store_true',
default=False,
required=False)
parser.add_argument(
'-s',
'--search-output',
help="Output config.py search engine format (negates -B and -K)",
action='store_true',
default=False,
required=False)
parser.add_argument(
'--oldconfig',
help="Output search engine format for old qutebrowser.conf format",
default=False,
action='store_true',
required=False)
parser.add_argument(
'-B',
'--import-bookmarks',
help="Import plain bookmarks (can be combiend with -K)",
action='store_true',
default=False,
required=False)
parser.add_argument(
'-K',
'--import-keywords',
help="Import keywords (can be combined with -B)",
action='store_true',
default=False,
required=False)
parser.add_argument(
'bookmarks',
help="Bookmarks file (html format) or "
"profile folder (Mozilla format)")
args = parser.parse_args()
return args
def search_escape(url):
"""Escape URLs such that preexisting { and } are handled properly.
Will obviously trash a properly-formatted qutebrowser URL.
"""
return url.replace('{', '{{').replace('}', '}}')
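# Example (illustrative): search_escape('https://example.com/{q}') returns
# 'https://example.com/{{q}}', so literal braces survive a later str.format() call.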
def opensearch_convert(url):
"""Convert a basic OpenSearch URL into something qutebrowser can use.
Exceptions:
KeyError:
An unknown and required parameter is present in the URL. This
usually means there's browser/addon specific functionality needed
to build the URL (I'm looking at you and your browser, Google) that
obviously won't be present here.
"""
subst = {
'searchTerms': '%s', # for proper escaping later
'language': '*',
'inputEncoding': 'UTF-8',
'outputEncoding': 'UTF-8'
}
# remove optional parameters (even those we don't support)
for param in string.Formatter().parse(url):
if param[1]:
if param[1].endswith('?'):
url = url.replace('{' + param[1] + '}', '')
elif param[2] and param[2].endswith('?'):
url = url.replace('{' + param[1] + ':' + param[2] + '}', '')
return search_escape(url.format(**subst)).replace('%s', '{}')
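# Example (illustrative): an OpenSearch template such as
#   'https://example.com/search?q={searchTerms}&l={language?}'
# becomes 'https://example.com/search?q={}&l=' after conversion.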
def import_netscape_bookmarks(bookmarks_file, bookmark_types, output_format):
"""Import bookmarks from a NETSCAPE-Bookmark-file v1.
    Generated by Chromium, Firefox, IE and possibly more browsers. Not all
    browsers export all possible bookmark types:
- Firefox mostly works with everything
- Chrome doesn't support keywords at all; searches are a separate
database
"""
import bs4
with open(bookmarks_file, encoding='utf-8') as f:
soup = bs4.BeautifulSoup(f, 'html.parser')
bookmark_query = {
'search': lambda tag: (
(tag.name == 'a') and
('shortcuturl' in tag.attrs) and
('%s' in tag['href'])),
'keyword': lambda tag: (
(tag.name == 'a') and
('shortcuturl' in tag.attrs) and
('%s' not in tag['href'])),
'bookmark': lambda tag: (
(tag.name == 'a') and
('shortcuturl' not in tag.attrs) and
(tag.string)),
}
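    # Illustrative classification (hypothetical tags, not from the original file):
    # an <a shortcuturl="g" href="https://example.com/?q=%s"> tag is a 'search',
    # the same tag without '%s' in its href is a 'keyword', and a plain
    # <a href="...">title</a> anchor is a 'bookmark'.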
output_template = {
'search': {
'search':
"c.url.searchengines['{tag[shortcuturl]}'] = "
"'{tag[href]}' #{tag.string}"
},
'oldsearch': {
'search': '{tag[shortcuturl]} = {tag[href]} #{tag.string}',
},
'bookmark': {
'bookmark': '{tag[href]} {tag.string}',
'keyword': '{tag[href]} {tag.string}'
},
'quickmark': {
'bookmark': '{tag.string} {tag[href]}',
'keyword': '{tag[shortcuturl]} {tag[href]}'
}
}
bookmarks = []
for typ in bookmark_types:
tags = soup.findAll(bookmark_query[typ])
for tag in tags:
if typ == 'search':
tag['href'] = search_escape(tag['href']).replace('%s', '{}')
if tag['href'] not in bookmarks:
bookmarks.append(
output_template[output_format][typ].format(tag=tag))
for bookmark in bookmarks:
print(bookmark)
def import_moz_places(profile, bookmark_types, output_format):
"""Import bookmarks from a Mozilla profile's places.sqlite database."""
place_query = {
'bookmark': (
"SELECT DISTINCT moz_bookmarks.title,moz_places.url "
"FROM moz_bookmarks,moz_places "
"WHERE moz_places.id=moz_bookmarks.fk "
"AND moz_places.id NOT IN (SELECT place_id FROM moz_keywords) "
"AND moz_places.url NOT LIKE 'place:%';"
), # Bookmarks with no keywords assigned
'keyword': (
"SELECT moz_keywords.keyword,moz_places.url "
"FROM moz_keywords,moz_places,moz_bookmarks "
"WHERE moz_places.id=moz_bookmarks.fk "
"AND moz_places.id=moz_keywords.place_id "
"AND moz_places.url NOT LIKE '%!%s%' ESCAPE '!';"
), # Bookmarks with keywords assigned but no %s substitution
'search': (
"SELECT moz_keywords.keyword, "
" moz_bookmarks.title, "
" search_conv(moz_places.url) AS url "
"FROM moz_keywords,moz_places,moz_bookmarks "
"WHERE moz_places.id=moz_bookmarks.fk "
"AND moz_places.id=moz_keywords.place_id "
"AND moz_places.url LIKE '%!%s%' ESCAPE '!';"
) # bookmarks with keyword and %s substitution
}
out_template = {
'bookmark': {
'bookmark': '{url} {title}',
'keyword': '{url} {keyword}'
},
'quickmark': {
'bookmark': '{title} {url}',
'keyword': '{keyword} {url}'
},
'oldsearch': {
'search': '{keyword} {url} #{title}'
},
'search': {
'search': "c.url.searchengines['{keyword}'] = '{url}' #{title}"
}
}
def search_conv(url):
return search_escape(url).replace('%s', '{}')
places = sqlite3.connect(os.path.join(profile, "places.sqlite"))
places.create_function('search_conv', 1, search_conv)
places.row_factory = sqlite3.Row
c = places.cursor()
for typ in bookmark_types:
c.execute(place_query[typ])
for row in c:
print(out_template[output_format][typ].format(**row))
def import_chrome(profile, bookmark_types, output_format):
"""Import bookmarks and search keywords from Chrome-type profiles.
On Chrome, keywords and search engines are the same thing and handled in
their own database table; bookmarks cannot have associated keywords. This
is why the dictionary lookups here are much simpler.
"""
out_template = {
'bookmark': '{url} {name}',
'quickmark': '{name} {url}',
'search': "c.url.searchengines['{keyword}'] = '{url}'",
'oldsearch': '{keyword} {url}'
}
if 'search' in bookmark_types:
webdata = sqlite3.connect(os.path.join(profile, 'Web Data'))
c = webdata.cursor()
c.execute('SELECT keyword,url FROM keywords;')
for keyword, url in c:
try:
url = opensearch_convert(url)
print(out_template[output_format].format(
keyword=keyword, url=url))
except KeyError:
print('# Unsupported parameter in url for {}; skipping....'.
format(keyword))
else:
with open(os.path.join(profile, 'Bookmarks'), encoding='utf-8') as f:
bookmarks = json.load(f)
def bm_tree_walk(bm, template):
"""Recursive function to walk through bookmarks."""
if not isinstance(bm, dict):
return
assert 'type' in bm, bm
if bm['type'] == 'url':
if urllib.parse.urlparse(bm['url']).scheme != 'chrome':
print(template.format(**bm))
elif bm['type'] == 'folder':
for child in bm['children']:
bm_tree_walk(child, template)
for root in bookmarks['roots'].values():
bm_tree_walk(root, out_template[output_format])
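# Hedged sketch of the recursive walk above, applied to a tiny made-up tree
# shaped like Chrome's Bookmarks JSON (the 'roots' wrapper is omitted here).
def _example_bm_tree_walk():
    tree = {'type': 'folder', 'children': [
        {'type': 'url', 'url': 'http://example.com/', 'name': 'Example'},
        {'type': 'folder', 'children': [
            {'type': 'url', 'url': 'http://example.org/', 'name': 'Org'},
        ]},
    ]}
    def walk(bm):
        if bm['type'] == 'url':
            print('{url} {name}'.format(**bm))
        elif bm['type'] == 'folder':
            for child in bm['children']:
                walk(child)
    walk(tree)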
if __name__ == '__main__':
main()
|
import collections
import unittest
import mock
from kalliope.core import SignalModule
from kalliope.core.NotificationManager import NotificationManager
class FakeSignal(SignalModule):
def __init__(self, name=None, **kwargs):
super(FakeSignal, self).__init__(**kwargs)
self.name = name
def on_notification_received(self, notification=None, payload=None):
pass
@staticmethod
def check_parameters(parameters):
pass
class TestNotificationManager(unittest.TestCase):
def setUp(self):
if __name__ == '__main__':
self.test_path = "__main__.FakeSignal.on_notification_received"
else:
self.test_path = "Tests.test_notification_manager.FakeSignal.on_notification_received"
NotificationManager._instances.clear()
def test_get_instances(self):
# create a signal
signal1 = FakeSignal()
signal2 = FakeSignal()
expected_list = [
signal1, signal2
]
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
# convert received generator into list
lst_from_notification = list(NotificationManager.get_instances())
self.assertTrue(compare(expected_list, lst_from_notification))
def test_send_notification(self):
# create a signal
signal1 = FakeSignal()
# with mock.patch("__main__.FakeSignal.on_notification_received") \
with mock.patch(self.test_path) as mock_on_notification_received:
test_notification = "test"
NotificationManager.send_notification(test_notification)
mock_on_notification_received.assert_called_once_with(notification=test_notification, payload=None)
mock_on_notification_received.reset_mock()
if __name__ == '__main__':
unittest.main()
|
from functools import wraps
from httpobs.scanner.grader import get_score_modifier
def scored_test(func):
@wraps(func)
def wrapper(*args, **kwargs):
test_result = func(*args, **kwargs)
test_result['name'] = func.__name__.replace('_', '-') # add the test name
test_result['score_modifier'] = get_score_modifier(test_result['result']) # and its score modifier
return test_result
return wrapper
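# Hedged usage sketch (the test name and result value below are made up):
# a scanner test decorated with @scored_test returns a plain dict, and the
# wrapper adds its dashed name plus the grader's score modifier, e.g.
#
#     @scored_test
#     def example_header_check(reqs):
#         return {'result': 'example-result-key', 'pass': False}
#
#     # example_header_check(...)['name'] == 'example-header-check'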
|
import io
import os
import sys
import uuid
import pytest
import pygal
from pygal._compat import u
from pygal.graph.map import BaseMap
from pygal.test import make_data
from pygal.util import cut
try:
import cairosvg
except ImportError:
cairosvg = None
def test_multi_render(Chart, datas):
"""Check that a chart always render the same"""
chart = Chart()
chart = make_data(chart, datas)
svg = chart.render()
for i in range(2):
assert svg == chart.render()
def test_render_to_file(Chart, datas):
"""Test in file rendering"""
file_name = '/tmp/test_graph-%s.svg' % uuid.uuid4()
if os.path.exists(file_name):
os.remove(file_name)
chart = Chart()
chart = make_data(chart, datas)
chart.render_to_file(file_name)
with io.open(file_name, encoding="utf-8") as f:
assert 'pygal' in f.read()
os.remove(file_name)
@pytest.mark.skipif(not cairosvg, reason="CairoSVG not installed")
def test_render_to_png(Chart, datas):
"""Test in file png rendering"""
file_name = '/tmp/test_graph-%s.png' % uuid.uuid4()
if os.path.exists(file_name):
os.remove(file_name)
chart = Chart()
chart = make_data(chart, datas)
chart.render_to_png(file_name)
png = chart._repr_png_()
with open(file_name, 'rb') as f:
assert png == f.read()
os.remove(file_name)
def test_metadata(Chart):
"""Test metadata values"""
chart = Chart()
v = range(7)
if Chart in (pygal.Box, ):
return # summary charts cannot display per-value metadata
elif Chart == pygal.XY:
v = list(map(lambda x: (x, x + 1), v))
elif issubclass(Chart, BaseMap):
v = [(k, i) for i, k in enumerate(Chart.x_labels)
if k not in ['oecd', 'nafta', 'eur']]
chart.add(
'Serie with metadata', [
v[0], {
'value': v[1]
}, {
'value': v[2],
'label': 'Three'
}, {
'value': v[3],
'xlink': 'http://4.example.com/'
}, {
'value': v[4],
'xlink': 'http://5.example.com/',
'label': 'Five'
}, {
'value': v[5],
'xlink': {
'href': 'http://6.example.com/'
},
'label': 'Six'
}, {
'value': v[6],
'xlink': {
'href': 'http://7.example.com/',
'target': '_blank'
},
'label': 'Seven'
}
]
)
q = chart.render_pyquery()
for md in ('Three', 'Five', 'Seven'):
assert md in cut(q('desc'), 'text')
for md in ('http://7.example.com/', 'http://4.example.com/'):
assert md in [e.attrib.get('xlink:href') for e in q('a')]
if Chart in (pygal.Pie, pygal.Treemap, pygal.SolidGauge):
# Slices with value 0 are not rendered
assert len(v) - 1 == len(q('.tooltip-trigger').siblings('.value'))
elif not issubclass(Chart, BaseMap):
# Tooltip are not working on maps
assert len(v) == len(q('.tooltip-trigger').siblings('.value'))
def test_empty_lists(Chart):
"""Test chart rendering with an empty serie"""
chart = Chart()
chart.add('A', [1, 2])
chart.add('B', [])
if not chart._dual:
chart.x_labels = ('red', 'green', 'blue')
q = chart.render_pyquery()
assert len(q(".legend")) == 2
def test_empty_lists_with_nones(Chart):
"""Test chart rendering with a None filled serie"""
chart = Chart()
chart.add('A', [None, None])
chart.add('B', [None, 4, 4])
q = chart.render_pyquery()
assert len(q(".legend")) == 2
def test_only_one_value(Chart):
"""Test chart rendering with only one value"""
chart = Chart()
chart.add('S', [1])
q = chart.render_pyquery()
assert len(q(".legend")) == 1
def test_only_one_value_log(Chart):
"""Test logarithmic chart rendering with only one value"""
chart = Chart(logarithmic=True)
chart.add('S', [1])
if not chart._dual:
        chart.x_labels = ('single',)
q = chart.render_pyquery()
assert len(q(".legend")) == 1
def test_only_one_value_intrp(Chart):
"""Test interpolated chart rendering with only one value"""
chart = Chart(interpolate='cubic')
chart.add('S', [1])
q = chart.render_pyquery()
assert len(q(".legend")) == 1
def test_non_iterable_value(Chart):
"""Test serie as non iterable"""
chart = Chart(no_prefix=True)
chart.add('A', 1)
chart.add('B', 2)
if not chart._dual:
chart.x_labels = ('red', 'green', 'blue')
chart1 = chart.render()
chart = Chart(no_prefix=True)
chart.add('A', [1])
chart.add('B', [2])
if not chart._dual:
chart.x_labels = ('red', 'green', 'blue')
chart2 = chart.render()
assert chart1 == chart2
def test_iterable_types(Chart):
"""Test serie as various iterable"""
chart = Chart(no_prefix=True)
chart.add('A', [1, 2])
chart.add('B', [])
if not chart._dual:
chart.x_labels = ('red', 'green', 'blue')
chart1 = chart.render()
chart = Chart(no_prefix=True)
chart.add('A', (1, 2))
chart.add('B', tuple())
if not chart._dual:
chart.x_labels = ('red', 'green', 'blue')
chart2 = chart.render()
assert chart1 == chart2
def test_values_by_dict(Chart):
"""Test serie as dict"""
chart1 = Chart(no_prefix=True)
chart2 = Chart(no_prefix=True)
if not issubclass(Chart, BaseMap) and not Chart._dual:
chart1.add('A', {'red': 10, 'green': 12, 'blue': 14})
chart1.add('B', {'green': 11, 'red': 7})
chart1.add('C', {'blue': 7})
chart1.add('D', {})
chart1.add('E', {'blue': 2, 'red': 13})
chart1.x_labels = ('red', 'green', 'blue')
chart2.add('A', [10, 12, 14])
chart2.add('B', [7, 11])
chart2.add('C', [None, None, 7])
chart2.add('D', [])
chart2.add('E', [13, None, 2])
chart2.x_labels = ('red', 'green', 'blue')
elif not Chart._dual:
chart1.add('A', {'fr': 10, 'us': 12, 'jp': 14})
chart1.add('B', {'cn': 99})
chart1.add('C', {})
chart2.add('A', [('fr', 10), ('us', 12), ('jp', 14)])
chart2.add('B', [('cn', 99)])
chart2.add('C', [None, (None, None)])
assert chart1.render() == chart2.render()
def test_no_data_with_no_values(Chart):
"""Test no data"""
chart = Chart()
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_no_data_with_no_values_with_include_x_axis(Chart):
"""Test no data and include_x_axis"""
chart = Chart(include_x_axis=True)
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_no_data_with_empty_serie(Chart):
"""Test no data for empty serie"""
chart = Chart()
chart.add('Serie', [])
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_no_data_with_empty_series(Chart):
"""Test no data for 2 empty series"""
chart = Chart()
chart.add('Serie1', [])
chart.add('Serie2', [])
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_no_data_with_none(Chart):
"""Test no data for a None containing serie"""
chart = Chart()
chart.add('Serie', None)
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_no_data_with_list_of_none(Chart):
"""Test no data for a None containing serie"""
chart = Chart()
chart.add('Serie', [None])
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_no_data_with_lists_of_nones(Chart):
"""Test no data for several None containing series"""
chart = Chart()
chart.add('Serie1', [None, None, None, None])
chart.add('Serie2', [None, None, None])
q = chart.render_pyquery()
assert q(".text-overlay text").text() == "No data"
def test_unicode_labels_decode(Chart):
"""Test unicode labels"""
chart = Chart()
chart.add(
u('Série1'), [{
'value': 1,
'xlink': 'http://1/',
'label': u('°ijæð©&×&<—×€¿_…')
}, {
'value': 2,
'xlink': {
'href': 'http://6.example.com/'
},
'label': u('æÂ°€≠|€æÂ°€əæ')
}, {
'value': 3,
'label': 'unicode <3'
}]
)
if not chart._dual:
chart.x_labels = [u('&œ'), u('¿?'), u('††††††††'), 'unicode <3']
chart.render_pyquery()
def test_unicode_labels_python2(Chart):
"""Test unicode labels in python 2"""
if sys.version_info[0] == 3:
return
chart = Chart()
chart.add(
u('Série1'), [{
'value': 1,
'xlink': 'http://1/',
'label': eval("u'°ijæð©&×&<—×€¿_…'")
}, {
'value': 2,
'xlink': {
'href': 'http://6.example.com/'
},
'label': eval("u'æÂ°€≠|€æÂ°€əæ'")
}, {
'value': 3,
'label': eval("'unicode <3'")
}]
)
if not chart._dual:
chart.x_labels = eval("[u'&œ', u'¿?', u'††††††††', 'unicode <3']")
chart.render_pyquery()
def test_unicode_labels_python3(Chart):
"""Test unicode labels in python 3"""
if sys.version_info[0] == 2:
return
chart = Chart()
chart.add(
u('Série1'), [{
'value': 1,
'xlink': 'http://1/',
'label': eval("'°ijæð©&×&<—×€¿_…'")
}, {
'value': 2,
'xlink': {
'href': 'http://6.example.com/'
},
'label': eval("'æÂ°€≠|€æÂ°€əæ'")
}, {
'value': 3,
'label': eval("b'unicode <3'")
}]
)
if not chart._dual:
chart.x_labels = eval("['&œ', '¿?', '††††††††', 'unicode <3']")
chart.render_pyquery()
def test_labels_with_links(Chart):
"""Test values with links"""
chart = Chart()
# link on chart and label
chart.add({
'title': 'Red',
'xlink': {
'href': 'http://en.wikipedia.org/wiki/Red'
}
}, [{
'value': 2,
'label': 'This is red',
'xlink': {
'href': 'http://en.wikipedia.org/wiki/Red'
}
}])
# link on chart only
chart.add(
'Green', [{
'value': 4,
'label': 'This is green',
'xlink': {
'href': 'http://en.wikipedia.org/wiki/Green',
'target': '_top'
}
}]
)
# link on label only opens in new tab
chart.add({
'title': 'Yellow',
'xlink': {
'href': 'http://en.wikipedia.org/wiki/Yellow',
'target': '_blank'
}
}, 7)
# link on chart only
chart.add(
'Blue', [{
'value': 5,
'xlink': {
'href': 'http://en.wikipedia.org/wiki/Blue',
'target': '_blank'
}
}]
)
    # link on label and chart with different behaviours
chart.add({
'title': 'Violet',
'xlink': 'http://en.wikipedia.org/wiki/Violet_(color)'
}, [{
'value': 3,
'label': 'This is violet',
'xlink': {
'href': 'http://en.wikipedia.org/wiki/Violet_(color)',
'target': '_self'
}
}])
q = chart.render_pyquery()
links = q('a')
assert len(links) == 7 or isinstance(chart, BaseMap) and len(links) == 3
def test_sparkline(Chart, datas):
"""Test sparkline"""
chart = Chart()
chart = make_data(chart, datas)
assert chart.render_sparkline()
def test_secondary(Chart):
"""Test secondary chart"""
chart = Chart()
rng = [83, .12, -34, 59]
chart.add('First serie', rng)
chart.add('Secondary serie', map(lambda x: x * 2, rng), secondary=True)
assert chart.render_pyquery()
def test_ipython_notebook(Chart, datas):
"""Test ipython notebook"""
chart = Chart()
chart = make_data(chart, datas)
assert chart._repr_svg_()
def test_long_title(Chart, datas):
"""Test chart rendering with a long title"""
chart = Chart(
title="A chart is a graphical representation of data, in which "
"'the data is represented by symbols, such as bars in a bar chart, "
"lines in a line chart, or slices in a pie chart'. A chart can "
"represent tabular numeric data, functions or some kinds of "
"qualitative structure and provides different info."
)
chart = make_data(chart, datas)
q = chart.render_pyquery()
assert len(q('.titles text')) == 5
|
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import docker
flags.DEFINE_string('cloudsuite_in_memory_analytics_dataset',
'/data/ml-latest-small',
'Dataset to use for training.')
flags.DEFINE_string('cloudsuite_in_memory_analytics_ratings_file',
'/data/myratings.csv',
'Ratings file to give the recommendation for.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cloudsuite_in_memory_analytics'
BENCHMARK_CONFIG = """
cloudsuite_in_memory_analytics:
description: >
Run Cloudsuite in-memory analytics benchmark. Specify the number of worker
VMs with --num_vms.
vm_groups:
master:
vm_spec: *default_single_core
vm_count: 1
workers:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['num_vms'].present:
config['vm_groups']['workers']['vm_count'] = FLAGS.num_vms
return config
def Prepare(benchmark_spec):
"""Install docker. Pull images. Create datasets. Start master and workers.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
master = benchmark_spec.vm_groups['master'][0]
workers = benchmark_spec.vm_groups['workers']
def PrepareCommon(vm):
if not docker.IsInstalled(vm):
vm.Install('docker')
vm.Install('cloudsuite/spark')
vm.Install('cloudsuite/movielens-dataset')
vm.RemoteCommand('sudo docker create --name data '
'cloudsuite/movielens-dataset')
def PrepareMaster(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/in-memory-analytics')
start_master_cmd = ('sudo docker run -d --net host -e SPARK_MASTER_IP=%s '
'--name spark-master cloudsuite/spark master' %
master.internal_ip)
vm.RemoteCommand(start_master_cmd)
def PrepareWorker(vm):
PrepareCommon(vm)
start_worker_cmd = ('sudo docker run -d --net host --volumes-from data '
'--name spark-worker cloudsuite/spark worker '
'spark://%s:7077' % master.internal_ip)
vm.RemoteCommand(start_worker_cmd)
target_arg_tuples = ([(PrepareWorker, [vm], {}) for vm in workers] +
[(PrepareMaster, [master], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
"""Run the in-memory analytics benchmark.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
master = benchmark_spec.vm_groups['master'][0]
results = []
benchmark_cmd = ('sudo docker run --rm --net host --volumes-from data '
'cloudsuite/in-memory-analytics %s %s '
'--master spark://%s:7077' %
(FLAGS.cloudsuite_in_memory_analytics_dataset,
FLAGS.cloudsuite_in_memory_analytics_ratings_file,
master.internal_ip))
stdout, _ = master.RemoteCommand(benchmark_cmd, should_log=True)
matches = re.findall(r'Benchmark execution time: (\d+)ms', stdout)
if len(matches) != 1:
raise errors.Benchmarks.RunError('Expected to find benchmark execution '
'time')
execution_time = matches[0]
results.append(sample.Sample('Benchmark execution time',
float(execution_time) / 1000,
'seconds'))
return results
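# Hedged illustration of the parsing above (the sample output line is made
# up): re.findall pulls out the integer milliseconds, which Run() then
# reports in seconds.
#
#   re.findall(r'Benchmark execution time: (\d+)ms',
#              'Benchmark execution time: 123456ms')
#   -> ['123456'], reported as 123.456 seconds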
def Cleanup(benchmark_spec):
"""Stop and remove docker containers. Remove images.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
master = benchmark_spec.vm_groups['master'][0]
workers = benchmark_spec.vm_groups['workers']
def CleanupCommon(vm):
vm.RemoteCommand('sudo docker rm -v data')
def CleanupMaster(vm):
vm.RemoteCommand('sudo docker stop spark-master')
vm.RemoteCommand('sudo docker rm spark-master')
CleanupCommon(vm)
def CleanupWorker(vm):
vm.RemoteCommand('sudo docker stop spark-worker')
vm.RemoteCommand('sudo docker rm spark-worker')
CleanupCommon(vm)
target_arg_tuples = ([(CleanupWorker, [vm], {}) for vm in workers] +
[(CleanupMaster, [master], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
|
from __future__ import absolute_import, unicode_literals
import os
import pytest
import kaptan
from tmuxp import config
from .fixtures import config_tmuxinator as fixtures
TMUXP_DIR = os.path.join(os.path.dirname(__file__), '.tmuxp')
@pytest.mark.parametrize(
"tmuxinator_yaml,tmuxinator_dict,tmuxp_dict",
[
(
fixtures.test1.tmuxinator_yaml,
fixtures.test1.tmuxinator_dict,
fixtures.test1.expected,
),
(
fixtures.test2.tmuxinator_yaml,
fixtures.test2.tmuxinator_dict,
fixtures.test2.expected,
), # older vers use `tabs` instead of `windows`
(
fixtures.test3.tmuxinator_yaml,
fixtures.test3.tmuxinator_dict,
fixtures.test3.expected,
), # Test importing <spec/fixtures/sample.yml>
],
)
def test_config_to_dict(tmuxinator_yaml, tmuxinator_dict, tmuxp_dict):
configparser = kaptan.Kaptan(handler='yaml')
test_config = configparser.import_config(tmuxinator_yaml)
yaml_to_dict = test_config.get()
assert yaml_to_dict == tmuxinator_dict
assert config.import_tmuxinator(tmuxinator_dict) == tmuxp_dict
config.validate_schema(config.import_tmuxinator(tmuxinator_dict))
|
import pytest
from homeassistant.components.websocket_api.auth import TYPE_AUTH_REQUIRED
from homeassistant.components.websocket_api.http import URL
from homeassistant.setup import async_setup_component
@pytest.fixture
async def websocket_client(hass, hass_ws_client):
"""Create a websocket client."""
return await hass_ws_client(hass)
@pytest.fixture
async def no_auth_websocket_client(hass, aiohttp_client):
"""Websocket connection that requires authentication."""
assert await async_setup_component(hass, "websocket_api", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
ws = await client.ws_connect(URL)
auth_ok = await ws.receive_json()
assert auth_ok["type"] == TYPE_AUTH_REQUIRED
ws.client = client
yield ws
if not ws.closed:
await ws.close()
|
import diamond.collector
import socket
import re
try:
import simplejson as json
except ImportError:
import json
class TwemproxyCollector(diamond.collector.Collector):
GAUGES = [
'uptime',
'curr_connections',
'client_connections',
'server_connections',
'server_ejected_at',
'in_queue',
'in_queue_bytes',
'out_queue',
'out_queue_bytes'
]
IGNORED = [
'service',
'source',
'timestamp',
'version'
]
def get_default_config_help(self):
config_help = super(TwemproxyCollector, self).get_default_config_help()
config_help.update({
'hosts': "List of hosts, and ports to collect. Set an alias by " +
" prefixing the host:port with alias@",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(TwemproxyCollector, self).get_default_config()
config.update({
'path': 'twemproxy',
'hosts': ['localhost:22222']
})
return config
def get_raw_stats(self, host, port):
        stats_data = ''
# connect
try:
if port is None:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(host)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, int(port)))
stats_data = ''
while True:
data = sock.recv(1024)
if not data:
break
stats_data += data
sock.close()
except socket.error:
self.log.exception('Failed to get stats from %s:%s',
host, port)
try:
return json.loads(stats_data)
except (TypeError, ValueError):
self.log.error("Unable to parse response from Twemproxy as a"
" json object")
return False
    def get_stats(self, host, port):
        data = self.get_raw_stats(host, port)
        if not data:
            self.log.error('Unable to parse stats from Twemproxy')
            return {}, {}
        stats = {}
        pools = {}
for stat, value in data.iteritems():
# Test if this is a pool
if isinstance(value, dict):
pool_name = stat.replace('.', '_')
pools[pool_name] = {}
for pool_stat, pool_value in value.iteritems():
# Test if this is a pool server
if isinstance(pool_value, dict):
server_name = pool_stat.replace('.', '_')
pools[pool_name][server_name] = {}
for server_stat, server_value in pool_value.iteritems():
pools[pool_name][server_name][server_stat] = \
int(server_value)
else:
pools[pool_name][pool_stat] = int(pool_value)
else:
if stat in self.IGNORED:
continue
else:
stats[stat] = int(value)
return stats, pools
def collect(self):
hosts = self.config.get('hosts')
# Convert a string config value to be an array
if isinstance(hosts, basestring):
hosts = [hosts]
for host in hosts:
            matches = re.search(r'((.+)@)?([^:]+)(:(\d+))?', host)
alias = matches.group(2)
hostname = matches.group(3)
port = matches.group(5)
if alias is None:
alias = hostname
stats, pools = self.get_stats(hostname, port)
for stat in stats:
if stat in self.GAUGES:
self.publish_gauge(alias + "." + stat, stats[stat])
else:
self.publish_counter(alias + "." + stat, stats[stat])
# Pool stats
for pool, pool_stats in pools.iteritems():
for stat, stat_value in pool_stats.iteritems():
# Test if this is a pool server
if isinstance(stat_value, dict):
for server_stat, server_value in stat_value.iteritems():
if server_stat in self.GAUGES:
self.publish_gauge(
alias + ".pools." + pool + ".servers." +
stat + "." + server_stat, server_value)
else:
self.publish_counter(
alias + ".pools." + pool + ".servers." +
stat + "." + server_stat, server_value)
else:
if stat in self.GAUGES:
self.publish_gauge(
alias + ".pools." + pool + "." + stat,
stat_value)
else:
self.publish_counter(
alias + ".pools." + pool + "." + stat,
stat_value)
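# Hedged illustration of the host parsing in collect() above (values are
# made up): with an entry like 'cache01@10.0.0.5:22222',
#   group(2) -> 'cache01' (alias), group(3) -> '10.0.0.5' (hostname),
#   group(5) -> '22222' (port); without the alias@ prefix, group(2) is None
#   and the hostname is used as the alias.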
|
import unittest
import numpy as np
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.utils import assert_is_bbox
from chainercv.utils import assert_is_instance_segmentation_dataset
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
def _create_parameters():
split_years = testing.product({
'split': ['train', 'val'],
'year': ['2014', '2017']})
split_years += [{'split': 'minival', 'year': '2014'},
{'split': 'valminusminival', 'year': '2014'}]
use_and_return_args = testing.product({
'use_crowded': [False, True],
'return_crowded': [False, True],
'return_area': [False, True],
'return_bbox': [False, True]})
params = testing.product_dict(
split_years,
use_and_return_args)
return params
@testing.parameterize(*_create_parameters())
class TestCOCOInstanceSegmentationDataset(unittest.TestCase):
def setUp(self):
self.dataset = COCOInstanceSegmentationDataset(
split=self.split, year=self.year,
use_crowded=self.use_crowded, return_crowded=self.return_crowded,
return_area=self.return_area, return_bbox=self.return_bbox)
@attr.slow
@unittest.skipUnless(_available, 'pycocotools is not installed')
def test_coco_instance_segmentation_dataset(self):
assert_is_instance_segmentation_dataset(
self.dataset,
len(coco_instance_segmentation_label_names),
n_example=10)
if self.return_area:
for _ in range(10):
i = np.random.randint(0, len(self.dataset))
_, mask, _, area = self.dataset[i][:4]
self.assertIsInstance(area, np.ndarray)
self.assertEqual(area.dtype, np.float32)
self.assertEqual(area.shape, (mask.shape[0],))
if self.return_crowded:
for _ in range(10):
i = np.random.randint(0, len(self.dataset))
example = self.dataset[i]
if self.return_area:
crowded = example[4]
else:
crowded = example[3]
mask = example[1]
self.assertIsInstance(crowded, np.ndarray)
self.assertEqual(crowded.dtype, np.bool)
self.assertEqual(crowded.shape, (mask.shape[0],))
if not self.use_crowded:
np.testing.assert_equal(crowded, 0)
if self.return_bbox:
for _ in range(10):
i = np.random.randint(0, len(self.dataset))
example = self.dataset[i]
bbox = example[-1]
img, mask = example[:2]
assert_is_bbox(bbox, img.shape[1:])
self.assertEqual(len(bbox), len(mask))
testing.run_module(__name__, __file__)
|
import logging
from netort.resource import manager as resource
from . import info
from . import instance_plan as ip
from . import load_plan as lp
from . import missile
from .mark import get_marker
from .module_exceptions import StepperConfigurationError, AmmoFileError
class ComponentFactory():
def __init__(
self,
rps_schedule=None,
http_ver='1.1',
ammo_file=None,
instances_schedule=None,
instances=1000,
loop_limit=-1,
ammo_limit=-1,
uris=None,
headers=None,
autocases=0,
enum_ammo=False,
ammo_type='phantom',
chosen_cases=None,
use_cache=True):
self.log = logging.getLogger(__name__)
self.ammo_file = ammo_file
self.ammo_type = ammo_type
self.rps_schedule = rps_schedule
self.http_ver = http_ver
self.instances_schedule = instances_schedule
loop_limit = int(loop_limit)
if loop_limit == -1: # -1 means infinite
loop_limit = None
ammo_limit = int(ammo_limit)
if ammo_limit == -1: # -1 means infinite
ammo_limit = None
if loop_limit is None and ammo_limit is None and not rps_schedule:
# we should have only one loop if we have instance_schedule
loop_limit = 1
info.status.loop_limit = loop_limit
info.status.ammo_limit = ammo_limit
info.status.publish("instances", instances)
self.uris = uris
if self.uris and loop_limit:
info.status.ammo_limit = len(self.uris) * loop_limit
self.headers = headers if headers is not None else []
self.marker = get_marker(autocases, enum_ammo)
self.chosen_cases = chosen_cases or []
self.use_cache = use_cache
def get_load_plan(self):
"""
return load plan (timestamps generator)
"""
if self.rps_schedule and self.instances_schedule:
raise StepperConfigurationError(
'Both rps and instances schedules specified. You must specify only one of them'
)
elif self.rps_schedule:
info.status.publish('loadscheme', self.rps_schedule)
return lp.create(self.rps_schedule)
elif self.instances_schedule:
info.status.publish('loadscheme', self.instances_schedule)
return ip.create(self.instances_schedule)
else:
self.instances_schedule = []
info.status.publish('loadscheme', self.instances_schedule)
return ip.create(self.instances_schedule)
def get_ammo_generator(self):
"""
return ammo generator
"""
af_readers = {
'phantom': missile.AmmoFileReader,
'slowlog': missile.SlowLogReader,
'line': missile.LineReader,
'uri': missile.UriReader,
'uripost': missile.UriPostReader,
'access': missile.AccessLogReader,
'caseline': missile.CaseLineReader,
}
if self.uris and self.ammo_file:
raise StepperConfigurationError(
'Both uris and ammo file specified. You must specify only one of them'
)
elif self.uris:
ammo_gen = missile.UriStyleGenerator(
self.uris, self.headers, http_ver=self.http_ver)
elif self.ammo_file:
if self.ammo_type in af_readers:
if self.ammo_type == 'phantom':
opener = resource.get_opener(self.ammo_file)
with opener(self.use_cache) as ammo:
try:
ammo_str = next(ammo).decode('utf-8')
if not ammo_str[0].isdigit():
self.ammo_type = 'uri'
                                self.log.info(
                                    "Setting ammo_type to 'uri' because the "
                                    "ammo does not start with a digit and no "
                                    "ammo format was specified"
                                )
else:
self.log.info(
"Default ammo type ('phantom') used, use 'phantom.ammo_type' option to override it"
)
except StopIteration:
self.log.exception(
"Couldn't read first line of ammo file")
raise AmmoFileError(
"Couldn't read first line of ammo file")
else:
raise NotImplementedError(
'No such ammo type implemented: "%s"' % self.ammo_type)
ammo_gen = af_readers[self.ammo_type](
self.ammo_file, headers=self.headers, http_ver=self.http_ver, use_cache=self.use_cache)
else:
raise StepperConfigurationError(
'Ammo not found. Specify uris or ammo file')
self.log.info("Using %s ammo reader" % type(ammo_gen).__name__)
return ammo_gen
def get_marker(self):
return self.marker
def get_filter(self):
if len(self.chosen_cases):
def is_chosen_case(ammo_tuple):
return ammo_tuple[1] in self.chosen_cases
return is_chosen_case
else:
return lambda ammo_tuple: True
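# Hedged sketch of the filter returned by get_filter() above (ammo tuples
# are illustrative): with chosen_cases=['search'], only ammo whose case
# field matches one of the chosen cases is kept.
#
#   is_chosen = lambda ammo_tuple: ammo_tuple[1] in ['search']
#   is_chosen(('GET / HTTP/1.1', 'search'))  -> True
#   is_chosen(('GET / HTTP/1.1', 'index'))   -> False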
|
from datetime import timedelta
import logging
from lyft_rides.auth import ClientCredentialGrant
from lyft_rides.client import LyftRidesClient
from lyft_rides.errors import APIError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_END_LATITUDE = "end_latitude"
CONF_END_LONGITUDE = "end_longitude"
CONF_PRODUCT_IDS = "product_ids"
CONF_START_LATITUDE = "start_latitude"
CONF_START_LONGITUDE = "start_longitude"
ICON = "mdi:taxi"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_START_LATITUDE): cv.latitude,
vol.Optional(CONF_START_LONGITUDE): cv.longitude,
vol.Optional(CONF_END_LATITUDE): cv.latitude,
vol.Optional(CONF_END_LONGITUDE): cv.longitude,
vol.Optional(CONF_PRODUCT_IDS): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lyft sensor."""
auth_flow = ClientCredentialGrant(
client_id=config.get(CONF_CLIENT_ID),
client_secret=config.get(CONF_CLIENT_SECRET),
scopes="public",
is_sandbox_mode=False,
)
try:
session = auth_flow.get_session()
timeandpriceest = LyftEstimate(
session,
config.get(CONF_START_LATITUDE, hass.config.latitude),
config.get(CONF_START_LONGITUDE, hass.config.longitude),
config.get(CONF_END_LATITUDE),
config.get(CONF_END_LONGITUDE),
)
timeandpriceest.fetch_data()
except APIError as exc:
_LOGGER.error("Error setting up Lyft platform: %s", exc)
return False
wanted_product_ids = config.get(CONF_PRODUCT_IDS)
dev = []
for product_id, product in timeandpriceest.products.items():
if (wanted_product_ids is not None) and (product_id not in wanted_product_ids):
continue
dev.append(LyftSensor("time", timeandpriceest, product_id, product))
if product.get("estimate") is not None:
dev.append(LyftSensor("price", timeandpriceest, product_id, product))
add_entities(dev, True)
class LyftSensor(Entity):
"""Implementation of an Lyft sensor."""
def __init__(self, sensorType, products, product_id, product):
"""Initialize the Lyft sensor."""
self.data = products
self._product_id = product_id
self._product = product
self._sensortype = sensorType
self._name = f"{self._product['display_name']} {self._sensortype}"
if "lyft" not in self._name.lower():
self._name = f"Lyft{self._name}"
if self._sensortype == "time":
self._unit_of_measurement = TIME_MINUTES
elif self._sensortype == "price":
estimate = self._product["estimate"]
if estimate is not None:
self._unit_of_measurement = estimate.get("currency")
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
params = {
"Product ID": self._product["ride_type"],
"Product display name": self._product["display_name"],
"Vehicle Capacity": self._product["seats"],
}
if self._product.get("pricing_details") is not None:
pricing_details = self._product["pricing_details"]
params["Base price"] = pricing_details.get("base_charge")
params["Cancellation fee"] = pricing_details.get("cancel_penalty_amount")
params["Minimum price"] = pricing_details.get("cost_minimum")
params["Cost per mile"] = pricing_details.get("cost_per_mile")
params["Cost per minute"] = pricing_details.get("cost_per_minute")
params["Price currency code"] = pricing_details.get("currency")
params["Service fee"] = pricing_details.get("trust_and_service")
if self._product.get("estimate") is not None:
estimate = self._product["estimate"]
params["Trip distance (in miles)"] = estimate.get(
"estimated_distance_miles"
)
params["High price estimate (in cents)"] = estimate.get(
"estimated_cost_cents_max"
)
params["Low price estimate (in cents)"] = estimate.get(
"estimated_cost_cents_min"
)
params["Trip duration (in seconds)"] = estimate.get(
"estimated_duration_seconds"
)
params["Prime Time percentage"] = estimate.get("primetime_percentage")
if self._product.get("eta") is not None:
eta = self._product["eta"]
params["Pickup time estimate (in seconds)"] = eta.get("eta_seconds")
return {k: v for k, v in params.items() if v is not None}
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data from the Lyft API and update the states."""
self.data.update()
try:
self._product = self.data.products[self._product_id]
except KeyError:
return
self._state = None
if self._sensortype == "time":
eta = self._product["eta"]
if (eta is not None) and (eta.get("is_valid_estimate")):
time_estimate = eta.get("eta_seconds")
if time_estimate is None:
return
self._state = int(time_estimate / 60)
elif self._sensortype == "price":
estimate = self._product["estimate"]
if (estimate is not None) and estimate.get("is_valid_estimate"):
self._state = (
int(
(
estimate.get("estimated_cost_cents_min", 0)
+ estimate.get("estimated_cost_cents_max", 0)
)
/ 2
)
/ 100
)
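# Hedged worked example of the "price" state above (numbers are made up):
# with estimated_cost_cents_min=1000 and estimated_cost_cents_max=1500, the
# state is int((1000 + 1500) / 2) / 100 == 12.5, i.e. the midpoint of the
# estimate expressed in the ride's currency units.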
class LyftEstimate:
"""The class for handling the time and price estimate."""
def __init__(
self,
session,
start_latitude,
start_longitude,
end_latitude=None,
end_longitude=None,
):
"""Initialize the LyftEstimate object."""
self._session = session
self.start_latitude = start_latitude
self.start_longitude = start_longitude
self.end_latitude = end_latitude
self.end_longitude = end_longitude
self.products = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest product info and estimates from the Lyft API."""
try:
self.fetch_data()
except APIError as exc:
_LOGGER.error("Error fetching Lyft data: %s", exc)
def fetch_data(self):
"""Get the latest product info and estimates from the Lyft API."""
client = LyftRidesClient(self._session)
self.products = {}
products_response = client.get_ride_types(
self.start_latitude, self.start_longitude
)
products = products_response.json.get("ride_types")
for product in products:
self.products[product["ride_type"]] = product
if self.end_latitude is not None and self.end_longitude is not None:
price_response = client.get_cost_estimates(
self.start_latitude,
self.start_longitude,
self.end_latitude,
self.end_longitude,
)
prices = price_response.json.get("cost_estimates", [])
for price in prices:
product = self.products[price["ride_type"]]
if price.get("is_valid_estimate"):
product["estimate"] = price
eta_response = client.get_pickup_time_estimates(
self.start_latitude, self.start_longitude
)
etas = eta_response.json.get("eta_estimates")
for eta in etas:
if eta.get("is_valid_estimate"):
self.products[eta["ride_type"]]["eta"] = eta
|
import functools
import logging
from pyheos import CommandFailedError, Heos, HeosError, const
import voluptuous as vol
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_PASSWORD,
ATTR_USERNAME,
DOMAIN,
SERVICE_SIGN_IN,
SERVICE_SIGN_OUT,
)
_LOGGER = logging.getLogger(__name__)
HEOS_SIGN_IN_SCHEMA = vol.Schema(
{vol.Required(ATTR_USERNAME): cv.string, vol.Required(ATTR_PASSWORD): cv.string}
)
HEOS_SIGN_OUT_SCHEMA = vol.Schema({})
def register(hass: HomeAssistantType, controller: Heos):
"""Register HEOS services."""
hass.services.async_register(
DOMAIN,
SERVICE_SIGN_IN,
functools.partial(_sign_in_handler, controller),
schema=HEOS_SIGN_IN_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_SIGN_OUT,
functools.partial(_sign_out_handler, controller),
schema=HEOS_SIGN_OUT_SCHEMA,
)
def remove(hass: HomeAssistantType):
"""Unregister HEOS services."""
hass.services.async_remove(DOMAIN, SERVICE_SIGN_IN)
hass.services.async_remove(DOMAIN, SERVICE_SIGN_OUT)
async def _sign_in_handler(controller, service):
"""Sign in to the HEOS account."""
if controller.connection_state != const.STATE_CONNECTED:
_LOGGER.error("Unable to sign in because HEOS is not connected")
return
username = service.data[ATTR_USERNAME]
password = service.data[ATTR_PASSWORD]
try:
await controller.sign_in(username, password)
except CommandFailedError as err:
_LOGGER.error("Sign in failed: %s", err)
except HeosError as err:
_LOGGER.error("Unable to sign in: %s", err)
async def _sign_out_handler(controller, service):
"""Sign out of the HEOS account."""
if controller.connection_state != const.STATE_CONNECTED:
_LOGGER.error("Unable to sign out because HEOS is not connected")
return
try:
await controller.sign_out()
except HeosError as err:
_LOGGER.error("Unable to sign out: %s", err)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.metrics import fractal_dimension as fractal_dimension_lib
import numpy as np
import tensorflow as tf
class FractalDimensionTest(tf.test.TestCase):
def test_straight_line(self):
"""The fractal dimension of a 1D line must lie near 1.0."""
self.assertAllClose(
fractal_dimension_lib.compute_fractal_dimension(
np.random.uniform(size=(10000, 1))), 1.0, atol=0.05)
def test_square(self):
"""The fractal dimension of a 2D square must lie near 2.0."""
self.assertAllClose(
fractal_dimension_lib.compute_fractal_dimension(
np.random.uniform(size=(10000, 2))), 2.0, atol=0.1)
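# Hedged sketch of a box-counting dimension estimate, the idea these tests
# exercise (an illustrative estimator, not the library's implementation):
# count occupied boxes at two scales and take the log ratio.
def _example_box_counting_dimension(points, eps1=0.1, eps2=0.05):
    def count(eps):
        return len({tuple(idx) for idx in np.floor(points / eps).astype(int)})
    n1, n2 = count(eps1), count(eps2)
    return np.log(n2 / n1) / np.log(eps1 / eps2)
# For np.random.uniform(size=(10000, 1)) this returns a value near 1.0, and
# near 2.0 for np.random.uniform(size=(10000, 2)).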
|
import unittest
import numpy as np
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer import utils
from chainercv.links.model.faster_rcnn import AnchorTargetCreator
from chainercv.utils import generate_random_bbox
class TestAnchorTargetCreator(unittest.TestCase):
img_size = (320, 240)
n_sample = 256
n_anchor_base = 9
pos_ratio = 0.5
def setUp(self):
n_bbox = 8
feat_size = (self.img_size[0] // 16, self.img_size[1] // 16)
self.n_anchor = self.n_anchor_base * np.prod(feat_size)
self.anchor = generate_random_bbox(
self.n_anchor, self.img_size, 16, 200)
self.bbox = generate_random_bbox(n_bbox, self.img_size, 16, 200)
self.anchor_target_layer = AnchorTargetCreator(
self.n_sample, pos_ratio=self.pos_ratio,
)
def check_anchor_target_creator(
self, anchor_target_layer,
bbox, anchor, img_size):
xp = cuda.get_array_module(bbox)
loc, label = self.anchor_target_layer(
bbox, anchor, img_size)
# Test types
self.assertIsInstance(loc, xp.ndarray)
self.assertIsInstance(label, xp.ndarray)
# Test shapes
self.assertEqual(loc.shape, (self.n_anchor, 4))
self.assertEqual(label.shape, (self.n_anchor,))
# Test dtype
self.assertEqual(loc.dtype, np.float32)
self.assertEqual(label.dtype, np.int32)
# Test ratio of foreground and background labels
np.testing.assert_equal(
cuda.to_cpu(utils.force_array(xp.sum(label >= 0))),
self.n_sample)
n_pos = cuda.to_cpu(utils.force_array(xp.sum(label == 1)))
n_neg = cuda.to_cpu(utils.force_array(xp.sum(label == 0)))
self.assertLessEqual(
n_pos, self.n_sample * self.pos_ratio)
self.assertLessEqual(n_neg, self.n_sample - n_pos)
def test_anchor_target_creator_cpu(self):
self.check_anchor_target_creator(
self.anchor_target_layer,
self.bbox,
self.anchor,
self.img_size)
@attr.gpu
def test_anchor_target_creator_gpu(self):
self.check_anchor_target_creator(
self.anchor_target_layer,
cuda.to_gpu(self.bbox),
cuda.to_gpu(self.anchor),
self.img_size)
testing.run_module(__name__, __file__)
|
import numpy as np
import quantities as pq
import matplotlib
from pylatex import Document, Section, Math, Tabular, Figure, SubFigure, \
Package, TikZ, Axis, Plot, Itemize, Enumerate, Description, MultiColumn, \
MultiRow, Command, Matrix, VectorName, Quantity, TableRowSizeError, \
LongTable, FlushLeft, FlushRight, Center, MiniPage, TextBlock, \
PageStyle, Head, Foot, StandAloneGraphic, Tabularx, ColumnType, NewLine, \
LineBreak, NewPage, HFill, HugeText, LargeText, MediumText, \
SmallText, FootnoteText, TextColor, FBox, MdFramed, Tabu, \
HorizontalSpace, VerticalSpace, TikZCoordinate, TikZNode, \
TikZNodeAnchor, TikZUserPath, TikZPathList, TikZPath, TikZDraw, \
TikZScope, TikZOptions, Hyperref, Marker
from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, \
italic, verbatim, NoEscape
matplotlib.use('Agg') # Not to use X server. For TravisCI.
import matplotlib.pyplot as pyplot # noqa
def test_document():
geometry_options = {
"includeheadfoot": True,
"headheight": "12pt",
"headsep": "10pt",
"landscape": True
}
doc = Document(
default_filepath='default_filepath',
documentclass='article',
fontenc='T1',
inputenc='utf8',
lmodern=True,
data=None,
page_numbers=True,
indent=False,
document_options=["a4paper", "12pt"],
geometry_options=geometry_options
)
repr(doc)
doc.append('Some text.')
doc.change_page_style(style="empty")
doc.change_document_style(style="plain")
doc.add_color(name="lightgray", model="gray", description="0.6")
doc.add_color(name="abitless", model="gray", description="0.8")
doc.set_variable(name="myVar", value="1234")
doc.set_variable(name="myVar", value="1234")
doc.change_length(parameter=r"\headheight", value="0.5in")
doc.generate_tex(filepath='')
doc.generate_pdf(filepath='', clean=True)
def test_section():
sec = Section(title='', numbering=True, data=None)
repr(sec)
def test_hyperref():
hr = Hyperref(Marker("marker", "prefix"), "text")
repr(hr)
def test_math():
math = Math(data=None, inline=False)
repr(math)
vec = VectorName(name='')
repr(vec)
# Numpy
m = np.matrix([[2, 3, 4],
[0, 0, 1],
[0, 0, 2]])
matrix = Matrix(matrix=m, mtype='p', alignment=None)
repr(matrix)
def test_table():
# Tabular
t = Tabular(table_spec='|c|c|', data=None, pos=None, width=2)
t.add_hline(start=None, end=None)
t.add_row((1, 2), escape=False, strict=True, mapper=[bold])
t.add_row(1, 2, escape=False, strict=True, mapper=[bold])
# MultiColumn/MultiRow.
t.add_row((MultiColumn(size=2, align='|c|', data='MultiColumn'),),
strict=True)
# One multiRow-cell in that table would not be proper LaTeX,
# so strict is set to False
t.add_row((MultiRow(size=2, width='*', data='MultiRow'),), strict=False)
repr(t)
# TabularX
tabularx = Tabularx(table_spec='X X X',
width_argument=NoEscape(r"\textwidth"))
tabularx.add_row(["test1", "test2", "test3"])
# Long Table
longtable = LongTable(table_spec='c c c')
longtable.add_row(["test", "test2", "test3"])
longtable.end_table_header()
# Colored Tabu
coloredtable = Tabu(table_spec='X[c] X[c]')
coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)
# Colored Tabu with 'spread'
coloredtable = Tabu(table_spec='X[c] X[c]', spread="1in")
coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)
# Colored Tabu with 'to'
coloredtable = Tabu(table_spec='X[c] X[c]', to="5in")
coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)
# Colored Tabularx
coloredtable = Tabularx(table_spec='X[c] X[c]')
coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)
# Column
column = ColumnType("R", "X", r"\raggedleft", parameters=2)
repr(column)
def test_command():
c = Command(command='documentclass', arguments=None, options=None,
packages=None)
repr(c)
def test_graphics():
f = Figure(data=None, position=None)
f.add_image(filename='', width=r'0.8\textwidth', placement=r'\centering')
f.add_caption(caption='')
repr(f)
# Subfigure
s = SubFigure(data=None, position=None, width=r'0.45\linewidth')
    s.add_image(filename='', width=r'\linewidth',
                placement=None)
s.add_caption(caption='')
repr(s)
# Matplotlib
plot = Figure(data=None, position=None)
x = [0, 1, 2, 3, 4, 5, 6]
y = [15, 2, 7, 1, 5, 6, 9]
pyplot.plot(x, y)
plot.add_plot(width=r'0.8\textwidth', placement=r'\centering')
plot.add_caption(caption='I am a caption.')
repr(plot)
# StandAloneGraphic
stand_alone_graphic = StandAloneGraphic(
filename='', image_options=r"width=0.8\textwidth")
repr(stand_alone_graphic)
def test_quantities():
# Quantities
Quantity(quantity=1*pq.kg)
q = Quantity(quantity=1*pq.kg, format_cb=lambda x: str(int(x)))
repr(q)
def test_package():
# Package
p = Package(name='', options=None)
repr(p)
def test_tikz():
# PGFPlots
t = TikZ(data=None)
repr(t)
a = Axis(data=None, options=None)
repr(a)
p = Plot(name=None, func=None, coordinates=None, error_bar=None,
options=None)
repr(p)
opt = TikZOptions(None)
repr(opt)
scope = TikZScope(data=None)
repr(scope)
c = TikZCoordinate.from_str("(0,0)")
c = TikZCoordinate(x=0, y=0, relative=False)
d = c + (0, 1)
e = c - (0, 1)
f = (0, 1) + c
c.distance_to(d)
repr(c)
repr(d)
repr(e)
repr(f)
bool(c == (1, 1))
bool(c == TikZCoordinate(1, 1))
bool(TikZCoordinate(1, 1, relative=True) == (1, 1))
bool(TikZCoordinate(1, 1, relative=False) == (1, 1))
bool(TikZCoordinate(1, 1, relative=True) == TikZCoordinate(1,
1,
relative=False))
# test expected to fail
try:
g = TikZCoordinate(0, 1, relative=True) +\
TikZCoordinate(1, 0, relative=False)
repr(g)
raise Exception
except ValueError:
pass
a = TikZNodeAnchor(node_handle=None, anchor_name=None)
repr(a)
n = TikZNode(handle=None, options=None, at=None, text=None)
repr(n)
p = n.get_anchor_point("north")
repr(p)
p = n.get_anchor_point('_180')
repr(p)
p = n.west
repr(p)
up = TikZUserPath(path_type="edge", options=TikZOptions('bend right'))
repr(up)
pl = TikZPathList('(0, 1)', '--', '(2, 0)')
pl.append((0.5, 0))
repr(pl)
# generate a failure, illegal start
try:
pl = TikZPathList('--', '(0, 1)')
raise Exception
except TypeError:
pass
# fail with illegal path type
try:
pl = TikZPathList('(0, 1)', 'illegal', '(0, 2)')
raise Exception
except ValueError:
pass
# fail with path after path
try:
pl = TikZPathList('(0, 1)', '--', '--')
raise Exception
except ValueError:
pass
# other type of failure: illegal identifier after path
try:
pl = TikZPathList('(0, 1)', '--', 'illegal')
raise Exception
except (ValueError, TypeError):
pass
pt = TikZPath(path=None, options=TikZOptions("->"))
pt.append(TikZCoordinate(0, 1, relative=True))
repr(pt)
pt = TikZPath(path=[n.west, 'edge', TikZCoordinate(0, 1, relative=True)])
repr(pt)
pt = TikZPath(path=pl, options=None)
repr(pt)
dr = TikZDraw(path=None, options=None)
repr(dr)
def test_lists():
# Lists
itemize = Itemize()
itemize.add_item(s="item")
itemize.append("append")
repr(itemize)
enum = Enumerate(enumeration_symbol=r"\alph*)", options={'start': 172})
enum.add_item(s="item")
enum.add_item(s="item2")
enum.append("append")
repr(enum)
desc = Description()
desc.add_item(label="label", s="item")
desc.append("append")
repr(desc)
def test_headfoot():
# Page styles, headers and footers
page_style = PageStyle("NewStyle")
page_style.change_thickness("header", "1pt")
page_style.change_thickness("footer", "1pt")
header = Head("C")
header.append("append")
footer = Foot("C")
footer.append("append")
page_style.append(header)
page_style.append(footer)
repr(header)
repr(footer)
repr(page_style)
def test_position():
repr(HorizontalSpace(size='20pt', star=False))
repr(VerticalSpace(size="20pt", star=True))
# Test alignment environments
center = Center()
center.append("append")
repr(center)
right = FlushRight()
right.append("append")
repr(right)
left = FlushLeft()
left.append("append")
repr(left)
minipage = MiniPage(width=r"\textwidth", height="10pt", pos='t',
align='r', content_pos='t', fontsize="Large")
minipage.append("append")
repr(minipage)
textblock = TextBlock(width="200", horizontal_pos="200",
vertical_pos="200", indent=True)
textblock.append("append")
textblock.dumps()
repr(textblock)
def test_frames():
# Tests the framed environments
md_framed = MdFramed()
md_framed.append("Framed text")
repr(md_framed)
f_box = FBox()
f_box.append("Fboxed text")
repr(f_box)
def test_basic():
# Tests the basic commands and environments
# Basic commands
new_page = NewPage()
repr(new_page)
new_line = NewLine()
repr(new_line)
line_break = LineBreak()
repr(line_break)
h_fill = HFill()
repr(h_fill)
# Basic environments
huge = HugeText("Huge")
huge.append("Huge 2")
repr(huge)
large = LargeText("Large")
large.append("Large 2")
repr(large)
medium = MediumText("Medium")
medium.append("Medium 2")
repr(medium)
small = SmallText("Small")
small.append("Small 2")
repr(small)
footnote = FootnoteText("Footnote")
footnote.append("Footnote 2")
repr(footnote)
text_color = TextColor("green", "GreenText")
text_color.append("More Green Text")
repr(text_color)
def test_utils():
# Utils
escape_latex(s='')
fix_filename(path='')
dumps_list(l=[], escape=False, token='\n')
bold(s='')
italic(s='')
verbatim(s='', delimiter='|')
def test_errors():
# Errors
# TableRowSizeError
# General test
try:
raise TableRowSizeError
except TableRowSizeError:
pass
    # Positive test, expected to raise Error
    t = Tabular(table_spec='|c|c|', data=None, pos=None)
    try:
        # Wrong number of cells in table should raise an exception
        t.add_row((1, 2, 3), escape=False, strict=True)
    except TableRowSizeError:
        pass
    else:
        raise AssertionError('expected TableRowSizeError to be raised')
# Negative test, should not raise
try:
# Wrong number with strict=False should not raise an exception
t.add_row((1, 2, 3), escape=False, strict=False)
except TableRowSizeError:
raise
|