import asyncio
from collections import defaultdict
import logging
from typing import Any, Dict, Generic, List, Optional, Type, TypeVar
import pyvera as veraApi
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ARMED,
ATTR_BATTERY_LEVEL,
ATTR_LAST_TRIP_TIME,
ATTR_TRIPPED,
CONF_EXCLUDE,
CONF_LIGHTS,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
from homeassistant.util.dt import utc_from_timestamp
from .common import (
ControllerData,
SubscriptionRegistry,
get_configured_platforms,
get_controller_data,
set_controller_data,
)
from .config_flow import fix_device_id_list, new_options
from .const import (
ATTR_CURRENT_ENERGY_KWH,
ATTR_CURRENT_POWER_W,
CONF_CONTROLLER,
CONF_LEGACY_UNIQUE_ID,
DOMAIN,
VERA_ID_FORMAT,
)
_LOGGER = logging.getLogger(__name__)
VERA_ID_LIST_SCHEMA = vol.Schema([int])
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTROLLER): cv.url,
vol.Optional(CONF_EXCLUDE, default=[]): VERA_ID_LIST_SCHEMA,
vol.Optional(CONF_LIGHTS, default=[]): VERA_ID_LIST_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, base_config: dict) -> bool:
"""Set up for Vera controllers."""
hass.data[DOMAIN] = {}
config = base_config.get(DOMAIN)
if not config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=config,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Do setup of vera."""
# Use options entered during initial config flow or provided from configuration.yml
if config_entry.data.get(CONF_LIGHTS) or config_entry.data.get(CONF_EXCLUDE):
hass.config_entries.async_update_entry(
entry=config_entry,
data=config_entry.data,
options=new_options(
config_entry.data.get(CONF_LIGHTS, []),
config_entry.data.get(CONF_EXCLUDE, []),
),
)
saved_light_ids = config_entry.options.get(CONF_LIGHTS, [])
saved_exclude_ids = config_entry.options.get(CONF_EXCLUDE, [])
base_url = config_entry.data[CONF_CONTROLLER]
light_ids = fix_device_id_list(saved_light_ids)
exclude_ids = fix_device_id_list(saved_exclude_ids)
    # If the IDs were corrected, update the config entry.
if light_ids != saved_light_ids or exclude_ids != saved_exclude_ids:
hass.config_entries.async_update_entry(
entry=config_entry, options=new_options(light_ids, exclude_ids)
)
# Initialize the Vera controller.
subscription_registry = SubscriptionRegistry(hass)
controller = veraApi.VeraController(base_url, subscription_registry)
await hass.async_add_executor_job(controller.start)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, controller.stop)
try:
all_devices = await hass.async_add_executor_job(controller.get_devices)
all_scenes = await hass.async_add_executor_job(controller.get_scenes)
except RequestException as exception:
# There was a network related error connecting to the Vera controller.
_LOGGER.exception("Error communicating with Vera API")
raise ConfigEntryNotReady from exception
# Exclude devices unwanted by user.
devices = [device for device in all_devices if device.device_id not in exclude_ids]
vera_devices = defaultdict(list)
for device in devices:
device_type = map_vera_device(device, light_ids)
if device_type is not None:
vera_devices[device_type].append(device)
    vera_scenes = list(all_scenes)
controller_data = ControllerData(
controller=controller,
devices=vera_devices,
scenes=vera_scenes,
config_entry=config_entry,
)
set_controller_data(hass, config_entry, controller_data)
# Forward the config data to the necessary platforms.
for platform in get_configured_platforms(controller_data):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload Withings config entry."""
controller_data: ControllerData = get_controller_data(hass, config_entry)
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in get_configured_platforms(controller_data)
]
tasks.append(hass.async_add_executor_job(controller_data.controller.stop))
await asyncio.gather(*tasks)
return True
def map_vera_device(vera_device: veraApi.VeraDevice, remap: List[int]) -> Optional[str]:
    """Map a Vera device class to a Home Assistant platform type."""
type_map = {
veraApi.VeraDimmer: "light",
veraApi.VeraBinarySensor: "binary_sensor",
veraApi.VeraSensor: "sensor",
veraApi.VeraArmableDevice: "switch",
veraApi.VeraLock: "lock",
veraApi.VeraThermostat: "climate",
veraApi.VeraCurtain: "cover",
veraApi.VeraSceneController: "sensor",
veraApi.VeraSwitch: "switch",
}
def map_special_case(instance_class: Type, entity_type: str) -> str:
if instance_class is veraApi.VeraSwitch and vera_device.device_id in remap:
return "light"
return entity_type
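    # Pick the entity type of the first Vera class in type_map that matches,
    # applying the switch-to-light remap; None means the device is unsupported.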
return next(
iter(
map_special_case(instance_class, entity_type)
for instance_class, entity_type in type_map.items()
if isinstance(vera_device, instance_class)
),
None,
)
DeviceType = TypeVar("DeviceType", bound=veraApi.VeraDevice)
class VeraDevice(Generic[DeviceType], Entity):
"""Representation of a Vera device entity."""
def __init__(self, vera_device: DeviceType, controller_data: ControllerData):
"""Initialize the device."""
self.vera_device = vera_device
self.controller = controller_data.controller
self._name = self.vera_device.name
# Append device id to prevent name clashes in HA.
self.vera_id = VERA_ID_FORMAT.format(
slugify(vera_device.name), vera_device.vera_device_id
)
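        # Prefer the newer controller-scoped unique ID; entries created before
        # controller IDs existed keep the bare Vera device ID for compatibility.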
if controller_data.config_entry.data.get(CONF_LEGACY_UNIQUE_ID):
self._unique_id = str(self.vera_device.vera_device_id)
else:
self._unique_id = f"vera_{controller_data.config_entry.unique_id}_{self.vera_device.vera_device_id}"
async def async_added_to_hass(self) -> None:
"""Subscribe to updates."""
self.controller.register(self.vera_device, self._update_callback)
def _update_callback(self, _device: DeviceType) -> None:
"""Update the state."""
self.schedule_update_ha_state(True)
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def should_poll(self) -> bool:
"""Get polling requirement from vera device."""
return self.vera_device.should_poll
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the device."""
attr = {}
if self.vera_device.has_battery:
attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level
if self.vera_device.is_armable:
armed = self.vera_device.is_armed
attr[ATTR_ARMED] = "True" if armed else "False"
if self.vera_device.is_trippable:
last_tripped = self.vera_device.last_trip
if last_tripped is not None:
utc_time = utc_from_timestamp(int(last_tripped))
attr[ATTR_LAST_TRIP_TIME] = utc_time.isoformat()
else:
attr[ATTR_LAST_TRIP_TIME] = None
tripped = self.vera_device.is_tripped
attr[ATTR_TRIPPED] = "True" if tripped else "False"
power = self.vera_device.power
if power:
attr[ATTR_CURRENT_POWER_W] = convert(power, float, 0.0)
energy = self.vera_device.energy
if energy:
attr[ATTR_CURRENT_ENERGY_KWH] = convert(energy, float, 0.0)
attr["Vera Device Id"] = self.vera_device.vera_device_id
return attr
@property
def unique_id(self) -> str:
"""Return a unique ID.
The Vera assigns a unique and immutable ID number to each device.
"""
return self._unique_id
|
from pychannels import Channels
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.helpers import config_validation as cv, entity_platform
from .const import SERVICE_SEEK_BACKWARD, SERVICE_SEEK_BY, SERVICE_SEEK_FORWARD
DATA_CHANNELS = "channels"
DEFAULT_NAME = "Channels"
DEFAULT_PORT = 57000
FEATURE_SUPPORT = (
SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_VOLUME_MUTE
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
# Service call validation schemas
ATTR_SECONDS = "seconds"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Channels platform."""
device = ChannelsPlayer(config[CONF_NAME], config[CONF_HOST], config[CONF_PORT])
async_add_entities([device], True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SEEK_FORWARD,
{},
"seek_forward",
)
platform.async_register_entity_service(
SERVICE_SEEK_BACKWARD,
{},
"seek_backward",
)
platform.async_register_entity_service(
SERVICE_SEEK_BY,
{vol.Required(ATTR_SECONDS): vol.Coerce(int)},
"seek_by",
)
class ChannelsPlayer(MediaPlayerEntity):
"""Representation of a Channels instance."""
def __init__(self, name, host, port):
"""Initialize the Channels app."""
self._name = name
self._host = host
self._port = port
self.client = Channels(self._host, self._port)
self.status = None
self.muted = None
self.channel_number = None
self.channel_name = None
self.channel_image_url = None
self.now_playing_title = None
self.now_playing_episode_title = None
self.now_playing_season_number = None
self.now_playing_episode_number = None
self.now_playing_summary = None
self.now_playing_image_url = None
self.favorite_channels = []
def update_favorite_channels(self):
"""Update the favorite channels from the client."""
self.favorite_channels = self.client.favorite_channels()
def update_state(self, state_hash):
"""Update all the state properties with the passed in dictionary."""
self.status = state_hash.get("status", "stopped")
self.muted = state_hash.get("muted", False)
channel_hash = state_hash.get("channel")
np_hash = state_hash.get("now_playing")
if channel_hash:
self.channel_number = channel_hash.get("channel_number")
self.channel_name = channel_hash.get("channel_name")
self.channel_image_url = channel_hash.get("channel_image_url")
else:
self.channel_number = None
self.channel_name = None
self.channel_image_url = None
if np_hash:
self.now_playing_title = np_hash.get("title")
self.now_playing_episode_title = np_hash.get("episode_title")
self.now_playing_season_number = np_hash.get("season_number")
self.now_playing_episode_number = np_hash.get("episode_number")
self.now_playing_summary = np_hash.get("summary")
self.now_playing_image_url = np_hash.get("image_url")
else:
self.now_playing_title = None
self.now_playing_episode_title = None
self.now_playing_season_number = None
self.now_playing_episode_number = None
self.now_playing_summary = None
self.now_playing_image_url = None
@property
def name(self):
"""Return the name of the player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
if self.status == "stopped":
return STATE_IDLE
if self.status == "paused":
return STATE_PAUSED
if self.status == "playing":
return STATE_PLAYING
return None
def update(self):
"""Retrieve latest state."""
self.update_favorite_channels()
self.update_state(self.client.status())
@property
def source_list(self):
"""List of favorite channels."""
sources = [channel["name"] for channel in self.favorite_channels]
return sources
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.muted
@property
def media_content_id(self):
"""Content ID of current playing channel."""
return self.channel_number
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_CHANNEL
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.now_playing_image_url:
return self.now_playing_image_url
if self.channel_image_url:
return self.channel_image_url
return "https://getchannels.com/assets/img/icon-1024.png"
@property
def media_title(self):
"""Title of current playing media."""
if self.state:
return self.now_playing_title
return None
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return FEATURE_SUPPORT
def mute_volume(self, mute):
"""Mute (true) or unmute (false) player."""
if mute != self.muted:
response = self.client.toggle_muted()
self.update_state(response)
def media_stop(self):
"""Send media_stop command to player."""
self.status = "stopped"
response = self.client.stop()
self.update_state(response)
def media_play(self):
"""Send media_play command to player."""
response = self.client.resume()
self.update_state(response)
def media_pause(self):
"""Send media_pause command to player."""
response = self.client.pause()
self.update_state(response)
def media_next_track(self):
"""Seek ahead."""
response = self.client.skip_forward()
self.update_state(response)
def media_previous_track(self):
"""Seek back."""
response = self.client.skip_backward()
self.update_state(response)
def select_source(self, source):
"""Select a channel to tune to."""
for channel in self.favorite_channels:
if channel["name"] == source:
response = self.client.play_channel(channel["number"])
self.update_state(response)
break
def play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the player."""
if media_type == MEDIA_TYPE_CHANNEL:
response = self.client.play_channel(media_id)
self.update_state(response)
elif media_type in [MEDIA_TYPE_MOVIE, MEDIA_TYPE_EPISODE, MEDIA_TYPE_TVSHOW]:
response = self.client.play_recording(media_id)
self.update_state(response)
def seek_forward(self):
"""Seek forward in the timeline."""
response = self.client.seek_forward()
self.update_state(response)
def seek_backward(self):
"""Seek backward in the timeline."""
response = self.client.seek_backward()
self.update_state(response)
def seek_by(self, seconds):
"""Seek backward in the timeline."""
response = self.client.seek(seconds)
self.update_state(response)
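# Illustrative sketch (not part of the platform): feed a sample status hash into
# update_state() to show how it maps onto the media_player properties above.
# Assumes the homeassistant and pychannels packages are importable; the hash keys
# simply mirror the ones read in update_state().
if __name__ == "__main__":
    player = ChannelsPlayer(DEFAULT_NAME, "127.0.0.1", DEFAULT_PORT)
    player.update_state(
        {
            "status": "playing",
            "muted": False,
            "channel": {"channel_number": "6.1", "channel_name": "News"},
            "now_playing": {"title": "Evening News"},
        }
    )
    print(player.state, player.media_content_id, player.media_title)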
|
from test import CollectorTestCase
from test import get_collector_config
from openldap import OpenLDAPCollector
class TestOpenLDAPCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('OpenLDAPCollector', {
})
self.collector = OpenLDAPCollector(config, None)
def test_import(self):
self.assertTrue(OpenLDAPCollector)
|
from distutils.core import setup
import os
import pkg_resources
from setuptools.command import sdist
from setuptools import find_packages
from distutils.extension import Extension
description = """
Collection of Deep Learning Computer Vision Algorithms implemented in Chainer
"""
setup_requires = ['numpy']
install_requires = [
'chainer>=6.0',
# https://github.com/matplotlib/matplotlib/issues/16083
'Pillow<7',
]
ext_data = {
'utils.bbox._nms_gpu_post': {'pyxfile': 'utils/bbox/_nms_gpu_post'}
}
try:
from Cython.Distutils import build_ext as _build_ext
use_cython = True
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext
use_cython = False
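# Fall back to the pre-generated C sources when Cython is unavailable or when the
# .pyx files are not shipped (e.g. in a release tarball).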
for name, data in ext_data.items():
src = os.path.join('chainercv', data['pyxfile'] + '.pyx')
if not os.path.exists(src):
use_cython = False
break
suffix = '.pyx' if use_cython else '.c'
extensions = []
for name, data in ext_data.items():
sources = [os.path.join('chainercv', data['pyxfile'] + suffix)]
extensions.append(
Extension('chainercv.{}'.format(name),
sources=sources)
)
class CheckingBuildExt(_build_ext):
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
print("{}: -> [{}]".format(ext.name, ext.sources))
raise Exception("""Cython-generated file '{}' not found.
Cython is required to compile chainercv from a development
branch. Please install Cython or
download a release package of chainercv.
""".format(src))
def build_extensions(self):
self.check_cython_extensions(self.extensions)
# Include NumPy
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
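# Building an sdist requires Cython so the generated C sources can be produced and
# bundled alongside the .pyx files.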
class Sdist(sdist.sdist):
def __init__(self, *args, **kwargs):
assert use_cython
from Cython.Build import cythonize
for e in extensions:
for src in e.sources:
cythonize(src)
        super().__init__(*args, **kwargs)
cmdclass = {
'build_ext': CheckingBuildExt,
'sdist': Sdist,
}
setup(
name='chainercv',
version='0.13.1',
packages=find_packages(),
author='Yusuke Niitani, Toru Ogawa',
author_email='[email protected], [email protected]',
license='MIT',
description=description,
setup_requires=setup_requires,
install_requires=install_requires,
include_package_data=True,
ext_modules=extensions,
cmdclass=cmdclass,
)
|
from typing import Any
from homeassistant.components.scene import DOMAIN as SENSOR_DOMAIN, Scene
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import TuyaDevice
from .const import DOMAIN, TUYA_DATA, TUYA_DISCOVERY_NEW
ENTITY_ID_FORMAT = SENSOR_DOMAIN + ".{}"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tuya sensors dynamically through tuya discovery."""
platform = config_entry.data[CONF_PLATFORM]
async def async_discover_sensor(dev_ids):
"""Discover and add a discovered tuya sensor."""
if not dev_ids:
return
entities = await hass.async_add_executor_job(
_setup_entities,
hass,
dev_ids,
platform,
)
async_add_entities(entities)
async_dispatcher_connect(
hass, TUYA_DISCOVERY_NEW.format(SENSOR_DOMAIN), async_discover_sensor
)
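    # Process any scenes that were queued for discovery before this platform loaded.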
devices_ids = hass.data[DOMAIN]["pending"].pop(SENSOR_DOMAIN)
await async_discover_sensor(devices_ids)
def _setup_entities(hass, dev_ids, platform):
"""Set up Tuya Scene."""
tuya = hass.data[DOMAIN][TUYA_DATA]
entities = []
for dev_id in dev_ids:
device = tuya.get_device_by_id(dev_id)
if device is None:
continue
entities.append(TuyaScene(device, platform))
return entities
class TuyaScene(TuyaDevice, Scene):
"""Tuya Scene."""
def __init__(self, tuya, platform):
"""Init Tuya scene."""
super().__init__(tuya, platform)
self.entity_id = ENTITY_ID_FORMAT.format(tuya.object_id())
def activate(self, **kwargs: Any) -> None:
"""Activate the scene."""
self._tuya.activate()
|
from test import CollectorTestCase
from test import get_collector_config
from mock import patch
from diamond.collector import Collector
from websitemonitor import WebsiteMonitorCollector
########################################################################
class MockResponse(object):
def __init__(self, resp_data, code=200):
self.resp_data = resp_data
self.code = code
def read(self):
return self.resp_data
def getcode(self):
return self.code
class TestWebsiteCollector(CollectorTestCase):
def setUp(self, config=None):
if config is None:
config = get_collector_config('WebsiteCollector', {
'url': ''
})
else:
config = get_collector_config('WebsiteCollector', config)
self.collector = WebsiteMonitorCollector(config, None)
self.patcher = patch('urllib2.urlopen')
self.urlopen_mock = self.patcher.start()
def test_import(self):
self.assertTrue(WebsiteMonitorCollector)
@patch.object(Collector, 'publish')
def test_websitemonitorcollector_with_data(self, publish_mock):
        self.urlopen_mock.return_value = MockResponse(200)
        self.collector.collect()
metrics = {}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany([publish_mock], metrics)
@patch.object(Collector, 'publish')
def test_websitemonitorcollector(self, publish_mock):
self.setUp()
self.collector.collect()
self.assertPublishedMany(publish_mock, {
})
def tearDown(self):
self.patcher.stop()
|
import sqlite3
import unittest
import collections
import sys
import gzip
from platform import system
import lzma
import bz2
from functional import seq, pseq
from functional.streams import Stream, ParallelStream
class TestStreams(unittest.TestCase):
def setUp(self):
self.seq = seq
self.seq_c_disabled = Stream(disable_compression=True)
def test_open(self):
with open("LICENSE.txt") as f:
data = f.readlines()
self.assertListEqual(data, self.seq.open("LICENSE.txt").to_list())
text = "".join(data).split(",")
self.assertListEqual(
text, self.seq.open("LICENSE.txt", delimiter=",").to_list()
)
with self.assertRaises(ValueError):
self.seq.open("LICENSE.txt", mode="w").to_list()
def test_open_gzip(self):
expect = ["line0\n", "line1\n", "line2"]
self.assertListEqual(
expect,
self.seq.open("functional/test/data/test.txt.gz", mode="rt").to_list(),
)
def test_open_bz2(self):
expect = ["line0\n", "line1\n", "line2"]
self.assertListEqual(
expect,
self.seq.open("functional/test/data/test.txt.bz2", mode="rt").to_list(),
)
def test_open_xz(self):
expect = ["line0\n", "line1\n", "line2"]
self.assertListEqual(
expect,
self.seq.open("functional/test/data/test.txt.xz", mode="rt").to_list(),
)
def test_disable_compression(self):
file_name = "functional/test/data/test.txt.gz"
with open(file_name, "rb") as f:
expect = f.readlines()
self.assertListEqual(
expect, self.seq_c_disabled.open(file_name, mode="rb").to_list()
)
def test_range(self):
self.assertListEqual([0, 1, 2, 3], self.seq.range(4).to_list())
data = [-5, -3, -1, 1, 3, 5, 7]
self.assertListEqual(data, self.seq.range(-5, 8, 2).to_list())
def test_csv(self):
result = self.seq.csv("functional/test/data/test.csv").to_list()
expect = [["1", "2", "3", "4"], ["a", "b", "c", "d"]]
self.assertEqual(expect, result)
with open("functional/test/data/test.csv", "r") as csv_file:
self.assertEqual(expect, self.seq.csv(csv_file).to_list())
with self.assertRaises(ValueError):
self.seq.csv(1)
def test_csv_dict_reader(self):
result = self.seq.csv_dict_reader(
"functional/test/data/test_header.csv"
).to_list()
self.assertEqual(result[0]["a"], "1")
self.assertEqual(result[0]["b"], "2")
self.assertEqual(result[0]["c"], "3")
self.assertEqual(result[1]["a"], "4")
self.assertEqual(result[1]["b"], "5")
self.assertEqual(result[1]["c"], "6")
with open("functional/test/data/test_header.csv", "r") as f:
result = self.seq.csv_dict_reader(f).to_list()
self.assertEqual(result[0]["a"], "1")
self.assertEqual(result[0]["b"], "2")
self.assertEqual(result[0]["c"], "3")
self.assertEqual(result[1]["a"], "4")
self.assertEqual(result[1]["b"], "5")
self.assertEqual(result[1]["c"], "6")
with self.assertRaises(ValueError):
self.seq.csv_dict_reader(1)
def test_gzip_csv(self):
result = self.seq.csv("functional/test/data/test.csv.gz").to_list()
expect = [["1", "2", "3", "4"], ["a", "b", "c", "d"]]
self.assertEqual(expect, result)
with self.assertRaises(ValueError):
self.seq.csv(1)
def test_bz2_csv(self):
result = self.seq.csv("functional/test/data/test.csv.bz2").to_list()
expect = [["1", "2", "3", "4"], ["a", "b", "c", "d"]]
self.assertEqual(expect, result)
with self.assertRaises(ValueError):
self.seq.csv(1)
def test_xz_csv(self):
result = self.seq.csv("functional/test/data/test.csv.xz").to_list()
expect = [["1", "2", "3", "4"], ["a", "b", "c", "d"]]
self.assertEqual(expect, result)
with self.assertRaises(ValueError):
self.seq.csv(1)
def test_jsonl(self):
result_0 = self.seq.jsonl("functional/test/data/test.jsonl").to_list()
expect_0 = [[1, 2, 3], {"a": 1, "b": 2, "c": 3}]
self.assertEqual(expect_0, result_0)
result_1 = self.seq.jsonl(["[1, 2, 3]", "[4, 5, 6]"])
expect_1 = [[1, 2, 3], [4, 5, 6]]
self.assertEqual(expect_1, result_1)
def test_gzip_jsonl(self):
result_0 = self.seq.jsonl("functional/test/data/test.jsonl.gz").to_list()
expect_0 = [[1, 2, 3], {"a": 1, "b": 2, "c": 3}]
self.assertEqual(expect_0, result_0)
def test_bz2_jsonl(self):
result_0 = self.seq.jsonl("functional/test/data/test.jsonl.bz2").to_list()
expect_0 = [[1, 2, 3], {"a": 1, "b": 2, "c": 3}]
self.assertEqual(expect_0, result_0)
def test_xz_jsonl(self):
result_0 = self.seq.jsonl("functional/test/data/test.jsonl.xz").to_list()
expect_0 = [[1, 2, 3], {"a": 1, "b": 2, "c": 3}]
self.assertEqual(expect_0, result_0)
def test_json(self):
list_test_path = "functional/test/data/test_list.json"
dict_test_path = "functional/test/data/test_dict.json"
list_expect = [1, 2, 3, 4, 5]
dict_expect = list({u"a": 1, u"b": 2, u"c": 3}.items())
result = self.seq.json(list_test_path).to_list()
self.assertEqual(list_expect, result)
result = self.seq.json(dict_test_path).to_list()
self.assertEqual(dict_expect, result)
with open(list_test_path) as file_handle:
result = self.seq.json(file_handle).to_list()
self.assertEqual(list_expect, result)
with open(dict_test_path) as file_handle:
result = self.seq.json(file_handle).to_list()
self.assertEqual(dict_expect, result)
with self.assertRaises(ValueError):
self.seq.json(1)
def test_gzip_json(self):
list_test_path = "functional/test/data/test_list.json.gz"
dict_test_path = "functional/test/data/test_dict.json.gz"
list_expect = [1, 2, 3, 4, 5]
dict_expect = list({u"a": 1, u"b": 2, u"c": 3}.items())
result = self.seq.json(list_test_path).to_list()
self.assertEqual(list_expect, result)
result = self.seq.json(dict_test_path).to_list()
self.assertEqual(dict_expect, result)
with self.assertRaises(ValueError):
self.seq.json(1)
def test_bz2_json(self):
list_test_path = "functional/test/data/test_list.json.bz2"
dict_test_path = "functional/test/data/test_dict.json.bz2"
list_expect = [1, 2, 3, 4, 5]
dict_expect = list({u"a": 1, u"b": 2, u"c": 3}.items())
result = self.seq.json(list_test_path).to_list()
self.assertEqual(list_expect, result)
result = self.seq.json(dict_test_path).to_list()
self.assertEqual(dict_expect, result)
with self.assertRaises(ValueError):
self.seq.json(1)
def test_xz_json(self):
list_test_path = "functional/test/data/test_list.json.xz"
dict_test_path = "functional/test/data/test_dict.json.xz"
list_expect = [1, 2, 3, 4, 5]
dict_expect = list({u"a": 1, u"b": 2, u"c": 3}.items())
result = self.seq.json(list_test_path).to_list()
self.assertEqual(list_expect, result)
result = self.seq.json(dict_test_path).to_list()
self.assertEqual(dict_expect, result)
with self.assertRaises(ValueError):
self.seq.json(1)
def test_sqlite3(self):
db_file = "functional/test/data/test_sqlite3.db"
# test failure case
with self.assertRaises(ValueError):
self.seq.sqlite3(1, "SELECT * from user").to_list()
# test select from file path
query_0 = "SELECT id, name FROM user;"
result_0 = self.seq.sqlite3(db_file, query_0).to_list()
expected_0 = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
self.assertListEqual(expected_0, result_0)
# test select from connection
with sqlite3.connect(db_file) as conn:
result_0_1 = self.seq.sqlite3(conn, query_0).to_list()
self.assertListEqual(expected_0, result_0_1)
# test select from cursor
with sqlite3.connect(db_file) as conn:
cursor = conn.cursor()
result_0_2 = self.seq.sqlite3(cursor, query_0).to_list()
self.assertListEqual(expected_0, result_0_2)
# test connection with kwds
result_0_3 = self.seq.sqlite3(db_file, query_0, timeout=30).to_list()
self.assertListEqual(expected_0, result_0_3)
# test order by
result_1 = self.seq.sqlite3(
db_file, "SELECT id, name FROM user ORDER BY name;"
).to_list()
expected_1 = [(2, "Jack"), (3, "Jane"), (4, "Stephan"), (1, "Tom")]
self.assertListEqual(expected_1, result_1)
# test query with params
result_2 = self.seq.sqlite3(
db_file, "SELECT id, name FROM user WHERE id = ?;", parameters=(1,)
).to_list()
expected_2 = [(1, "Tom")]
self.assertListEqual(expected_2, result_2)
def test_pandas(self):
try:
import pandas
data = pandas.DataFrame([[1, 3], [4, 5]])
result = seq(data).list()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[0][1], 3)
self.assertEqual(result[1][0], 4)
self.assertEqual(result[1][1], 5)
except ImportError:
pass
def test_to_file(self):
tmp_path = "functional/test/data/tmp/output.txt"
sequence = self.seq(1, 2, 3, 4)
sequence.to_file(tmp_path)
with open(tmp_path, "r") as output:
self.assertEqual("[1, 2, 3, 4]", output.readlines()[0])
sequence.to_file(tmp_path, delimiter=":")
with open(tmp_path, "r") as output:
self.assertEqual("1:2:3:4", output.readlines()[0])
def test_to_file_compressed(self):
tmp_path = "functional/test/data/tmp/output.txt"
sequence = self.seq(1, 2, 3, 4)
sequence.to_file(tmp_path, compression="gzip")
with gzip.open(tmp_path, "rt") as output:
self.assertEqual("[1, 2, 3, 4]", output.readlines()[0])
sequence.to_file(tmp_path, compression="lzma")
with lzma.open(tmp_path, "rt") as output:
self.assertEqual("[1, 2, 3, 4]", output.readlines()[0])
sequence.to_file(tmp_path, compression="bz2")
with bz2.open(tmp_path, "rt") as output:
self.assertEqual("[1, 2, 3, 4]", output.readlines()[0])
def test_to_jsonl(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [{"a": 1, "b": 2}, {"c": 3}, {"d": 4}]
sequence = self.seq(elements)
sequence.to_jsonl(tmp_path)
result = self.seq.jsonl(tmp_path).to_list()
self.assertEqual(elements, result)
def test_to_jsonl_compressed(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [{"a": 1, "b": 2}, {"c": 3}, {"d": 4}]
sequence = self.seq(elements)
sequence.to_jsonl(tmp_path, compression="gzip")
result = self.seq.jsonl(tmp_path).to_list()
self.assertEqual(elements, result)
sequence.to_jsonl(tmp_path, compression="lzma")
result = self.seq.jsonl(tmp_path).to_list()
self.assertEqual(elements, result)
sequence.to_jsonl(tmp_path, compression="bz2")
result = self.seq.jsonl(tmp_path).to_list()
self.assertEqual(elements, result)
def test_to_json(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [[u"a", 1], [u"b", 2], [u"c", 3]]
sequence = self.seq(elements)
sequence.to_json(tmp_path)
result = self.seq.json(tmp_path).to_list()
self.assertEqual(elements, result)
dict_expect = {u"a": 1, u"b": 2, u"c": 3}
sequence.to_json(tmp_path, root_array=False)
result = self.seq.json(tmp_path).to_dict()
self.assertEqual(dict_expect, result)
def test_to_json_compressed(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [[u"a", 1], [u"b", 2], [u"c", 3]]
dict_expect = {u"a": 1, u"b": 2, u"c": 3}
sequence = self.seq(elements)
sequence.to_json(tmp_path, compression="gzip")
result = self.seq.json(tmp_path).to_list()
self.assertEqual(elements, result)
sequence.to_json(tmp_path, root_array=False, compression="gzip")
result = self.seq.json(tmp_path).to_dict()
self.assertEqual(dict_expect, result)
sequence.to_json(tmp_path, compression="lzma")
result = self.seq.json(tmp_path).to_list()
self.assertEqual(elements, result)
sequence.to_json(tmp_path, root_array=False, compression="lzma")
result = self.seq.json(tmp_path).to_dict()
self.assertEqual(dict_expect, result)
sequence.to_json(tmp_path, compression="bz2")
result = self.seq.json(tmp_path).to_list()
self.assertEqual(elements, result)
sequence.to_json(tmp_path, root_array=False, compression="bz2")
result = self.seq.json(tmp_path).to_dict()
self.assertEqual(dict_expect, result)
def test_to_csv(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [[1, 2, 3], [4, 5, 6], ["a", "b", "c"]]
expect = [["1", "2", "3"], ["4", "5", "6"], ["a", "b", "c"]]
sequence = self.seq(elements)
sequence.to_csv(tmp_path)
result = self.seq.csv(tmp_path).to_list()
self.assertEqual(expect, result)
@unittest.skipUnless(system().startswith("Win"), "Skip CSV test if not on Windows")
def test_to_csv_win(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [[1, 2, 3], [4, 5, 6], ["a", "b", "c"]]
expect = [["1", "2", "3"], [], ["4", "5", "6"], [], ["a", "b", "c"], []]
sequence = self.seq(elements)
sequence.to_csv(tmp_path)
result = self.seq.csv(tmp_path).to_list()
self.assertNotEqual(expect, result)
def test_to_csv_compressed(self):
tmp_path = "functional/test/data/tmp/output.txt"
elements = [[1, 2, 3], [4, 5, 6], ["a", "b", "c"]]
expect = [["1", "2", "3"], ["4", "5", "6"], ["a", "b", "c"]]
sequence = self.seq(elements)
sequence.to_csv(tmp_path, compression="gzip")
result = self.seq.csv(tmp_path).to_list()
self.assertEqual(expect, result)
sequence.to_csv(tmp_path, compression="lzma")
result = self.seq.csv(tmp_path).to_list()
self.assertEqual(expect, result)
sequence.to_csv(tmp_path, compression="bz2")
result = self.seq.csv(tmp_path).to_list()
self.assertEqual(expect, result)
def test_to_sqlite3_failure(self):
insert_sql = "INSERT INTO user (id, name) VALUES (?, ?)"
elements = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
with self.assertRaises(ValueError):
self.seq(elements).to_sqlite3(1, insert_sql)
def test_to_sqlite3_file(self):
tmp_path = "functional/test/data/tmp/test.db"
with sqlite3.connect(tmp_path) as conn:
conn.execute("DROP TABLE IF EXISTS user;")
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
insert_sql = "INSERT INTO user (id, name) VALUES (?, ?)"
elements = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
self.seq(elements).to_sqlite3(tmp_path, insert_sql)
result = self.seq.sqlite3(tmp_path, "SELECT id, name FROM user;").to_list()
self.assertListEqual(elements, result)
def test_to_sqlite3_query(self):
elements = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
with sqlite3.connect(":memory:") as conn:
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
insert_sql = "INSERT INTO user (id, name) VALUES (?, ?)"
self.seq(elements).to_sqlite3(conn, insert_sql)
result = self.seq.sqlite3(conn, "SELECT id, name FROM user;").to_list()
self.assertListEqual(elements, result)
def test_to_sqlite3_tuple(self):
elements = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
with sqlite3.connect(":memory:") as conn:
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
table_name = "user"
self.seq(elements).to_sqlite3(conn, table_name)
result = self.seq.sqlite3(conn, "SELECT id, name FROM user;").to_list()
self.assertListEqual(elements, result)
def test_to_sqlite3_namedtuple(self):
if self.seq is pseq:
raise self.skipTest("pseq can't serialize all functions")
elements = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
# test namedtuple with the same order as column
with sqlite3.connect(":memory:") as conn:
user = collections.namedtuple("user", ["id", "name"])
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
table_name = "user"
self.seq(elements).map(lambda u: user(u[0], u[1])).to_sqlite3(
conn, table_name
)
result = self.seq.sqlite3(conn, "SELECT id, name FROM user;").to_list()
self.assertListEqual(elements, result)
# test namedtuple with different order
with sqlite3.connect(":memory:") as conn:
user = collections.namedtuple("user", ["name", "id"])
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
table_name = "user"
self.seq(elements).map(lambda u: user(u[1], u[0])).to_sqlite3(
conn, table_name
)
result = self.seq.sqlite3(conn, "SELECT id, name FROM user;").to_list()
self.assertListEqual(elements, result)
def test_to_sqlite3_dict(self):
elements = [(1, "Tom"), (2, "Jack"), (3, "Jane"), (4, "Stephan")]
with sqlite3.connect(":memory:") as conn:
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
table_name = "user"
self.seq(elements).map(lambda x: {"id": x[0], "name": x[1]}).to_sqlite3(
conn, table_name
)
result = self.seq.sqlite3(conn, "SELECT id, name FROM user;").to_list()
self.assertListEqual(elements, result)
def test_to_sqlite3_typerror(self):
elements = [1, 2, 3]
with sqlite3.connect(":memory:") as conn:
conn.execute("CREATE TABLE user (id INT, name TEXT);")
conn.commit()
table_name = "user"
with self.assertRaises(TypeError):
self.seq(elements).to_sqlite3(conn, table_name)
def test_to_pandas(self):
# pylint: disable=superfluous-parens
try:
import pandas as pd
elements = [(1, "a"), (2, "b"), (3, "c")]
df_expect = pd.DataFrame.from_records(elements)
df_seq = self.seq(elements).to_pandas()
self.assertTrue(df_seq.equals(df_expect))
df_expect = pd.DataFrame.from_records(elements, columns=["id", "name"])
df_seq = self.seq(elements).to_pandas(columns=["id", "name"])
self.assertTrue(df_seq.equals(df_expect))
elements = [
dict(id=1, name="a"),
dict(id=2, name="b"),
dict(id=3, name="c"),
]
df_expect = pd.DataFrame.from_records(elements)
df_seq = self.seq(elements).to_pandas()
self.assertTrue(df_seq.equals(df_expect))
except ImportError:
print("pandas not installed, skipping unit test")
# Skipping tests on pypy because of https://github.com/uqfoundation/dill/issues/73
@unittest.skipIf("__pypy__" in sys.builtin_module_names, "Skip parallel tests on pypy")
class TestParallelStreams(TestStreams):
def setUp(self):
self.seq = pseq
self.seq_c_disabled = ParallelStream(disable_compression=True)
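# Illustrative sketch (not part of the test suite): a minimal round trip through the
# seq entry point exercised above. Assumes PyFunctional is installed and that the
# functional/test/data/tmp directory exists, as the tests above already assume.
if __name__ == "__main__":
    demo_path = "functional/test/data/tmp/demo.txt"
    seq(1, 2, 3, 4).map(lambda x: x * 2).to_file(demo_path, delimiter=",")
    # Values read back from a delimited file come back as strings: ['2', '4', '6', '8']
    print(seq.open(demo_path, delimiter=",").to_list())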
|
import io
import json
import os
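# The Jupyter stack is optional; `flag` records whether it imported so the compiler
# can raise a helpful "requirements missing" error later instead of failing here.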
try:
import nbconvert
from nbconvert.exporters import HTMLExporter
import nbformat
current_nbformat = nbformat.current_nbformat
from jupyter_client import kernelspec
from traitlets.config import Config
NBCONVERT_VERSION_MAJOR = int(nbconvert.__version__.partition(".")[0])
flag = True
except ImportError:
flag = None
from nikola import shortcodes as sc
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, LocaleBorg
class CompileIPynb(PageCompiler):
"""Compile IPynb into HTML."""
name = "ipynb"
friendly_name = "Jupyter Notebook"
demote_headers = True
default_kernel = 'python3'
supports_metadata = True
def _compile_string(self, nb_json):
"""Export notebooks as HTML strings."""
self._req_missing_ipynb()
c = Config(get_default_jupyter_config())
c.merge(Config(self.site.config['IPYNB_CONFIG']))
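        # Only pick a default template when the user has not configured one;
        # nbconvert 6 uses classic/base.html.j2 where older versions used basic.tpl.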
if 'template_file' not in self.site.config['IPYNB_CONFIG'].get('Exporter', {}):
if NBCONVERT_VERSION_MAJOR >= 6:
c['Exporter']['template_file'] = 'classic/base.html.j2'
else:
c['Exporter']['template_file'] = 'basic.tpl' # not a typo
exportHtml = HTMLExporter(config=c)
body, _ = exportHtml.from_notebook_node(nb_json)
return body
@staticmethod
def _nbformat_read(in_file):
return nbformat.read(in_file, current_nbformat)
def _req_missing_ipynb(self):
if flag is None:
req_missing(['notebook>=4.0.0'], 'build this site (compile ipynb)')
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile notebooks into HTML strings."""
new_data, shortcodes = sc.extract_shortcodes(data)
output = self._compile_string(nbformat.reads(new_data, current_nbformat))
return self.site.apply_shortcodes_uuid(output, shortcodes, filename=source_path, extra_context={'post': post})
def compile(self, source, dest, is_two_file=False, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf-8") as out_file:
with io.open(source, "r", encoding="utf-8-sig") as in_file:
nb_str = in_file.read()
output, shortcode_deps = self.compile_string(nb_str, source,
is_two_file, post,
lang)
out_file.write(output)
if post is None:
if shortcode_deps:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += shortcode_deps
def read_metadata(self, post, lang=None):
"""Read metadata directly from ipynb file.
As ipynb files support arbitrary metadata as json, the metadata used by Nikola
        will be assumed to be in the 'nikola' subfield.
"""
self._req_missing_ipynb()
if lang is None:
lang = LocaleBorg().current_lang
source = post.translated_source_path(lang)
with io.open(source, "r", encoding="utf-8-sig") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
# Metadata might not exist in two-file posts or in hand-crafted
# .ipynb files.
return nb_json.get('metadata', {}).get('nikola', {})
def create_post(self, path, **kw):
"""Create a new post."""
self._req_missing_ipynb()
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
kernel = kw.pop('jupyter_kernel', None)
# is_page is not needed to create the file
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if content.startswith("{"):
# imported .ipynb file, guaranteed to start with "{" because it’s JSON.
nb = nbformat.reads(content, current_nbformat)
else:
nb = nbformat.v4.new_notebook()
nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
if kernel is None:
kernel = self.default_kernel
self.logger.warning('No kernel specified, assuming "{0}".'.format(kernel))
IPYNB_KERNELS = {}
ksm = kernelspec.KernelSpecManager()
for k in ksm.find_kernel_specs():
IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
IPYNB_KERNELS[k]['name'] = k
del IPYNB_KERNELS[k]['argv']
if kernel not in IPYNB_KERNELS:
            self.logger.error('Unknown kernel "{0}". Maybe you misspelled it?'.format(kernel))
self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
raise Exception('Unknown kernel "{0}"'.format(kernel))
nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
if onefile:
nb["metadata"]["nikola"] = metadata
with io.open(path, "w+", encoding="utf-8") as fd:
nbformat.write(nb, fd, 4)
def get_default_jupyter_config():
"""Search default jupyter configuration location paths.
Return dictionary from configuration json files.
"""
config = {}
from jupyter_core.paths import jupyter_config_path
for parent in jupyter_config_path():
try:
for file in os.listdir(parent):
if 'nbconvert' in file and file.endswith('.json'):
abs_path = os.path.join(parent, file)
with open(abs_path) as config_file:
config.update(json.load(config_file))
except OSError:
# some paths jupyter uses to find configurations
# may not exist
pass
return config
|
import logging
from typing import Dict
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA as SENSOR_PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_VERSION,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_STOP,
STATE_UNKNOWN,
)
from homeassistant.exceptions import PlatformNotReady, TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from . import create_influx_url, get_influx_connection, validate_version_specific_config
from .const import (
API_VERSION_2,
COMPONENT_CONFIG_SCHEMA_CONNECTION,
CONF_BUCKET,
CONF_DB_NAME,
CONF_FIELD,
CONF_GROUP_FUNCTION,
CONF_IMPORTS,
CONF_LANGUAGE,
CONF_MEASUREMENT_NAME,
CONF_QUERIES,
CONF_QUERIES_FLUX,
CONF_QUERY,
CONF_RANGE_START,
CONF_RANGE_STOP,
CONF_WHERE,
DEFAULT_API_VERSION,
DEFAULT_FIELD,
DEFAULT_FUNCTION_FLUX,
DEFAULT_GROUP_FUNCTION,
DEFAULT_RANGE_START,
DEFAULT_RANGE_STOP,
INFLUX_CONF_VALUE,
INFLUX_CONF_VALUE_V2,
LANGUAGE_FLUX,
LANGUAGE_INFLUXQL,
MIN_TIME_BETWEEN_UPDATES,
NO_BUCKET_ERROR,
NO_DATABASE_ERROR,
QUERY_MULTIPLE_RESULTS_MESSAGE,
QUERY_NO_RESULTS_MESSAGE,
RENDERING_QUERY_ERROR_MESSAGE,
RENDERING_QUERY_MESSAGE,
RENDERING_WHERE_ERROR_MESSAGE,
RENDERING_WHERE_MESSAGE,
RUNNING_QUERY_MESSAGE,
)
_LOGGER = logging.getLogger(__name__)
def _merge_connection_config_into_query(conf, query):
"""Merge connection details into each configured query."""
for key in conf:
if key not in query and key not in [CONF_QUERIES, CONF_QUERIES_FLUX]:
query[key] = conf[key]
def validate_query_format_for_version(conf: Dict) -> Dict:
"""Ensure queries are provided in correct format based on API version."""
if conf[CONF_API_VERSION] == API_VERSION_2:
if CONF_QUERIES_FLUX not in conf:
raise vol.Invalid(
f"{CONF_QUERIES_FLUX} is required when {CONF_API_VERSION} is {API_VERSION_2}"
)
for query in conf[CONF_QUERIES_FLUX]:
_merge_connection_config_into_query(conf, query)
query[CONF_LANGUAGE] = LANGUAGE_FLUX
del conf[CONF_BUCKET]
else:
if CONF_QUERIES not in conf:
raise vol.Invalid(
f"{CONF_QUERIES} is required when {CONF_API_VERSION} is {DEFAULT_API_VERSION}"
)
for query in conf[CONF_QUERIES]:
_merge_connection_config_into_query(conf, query)
query[CONF_LANGUAGE] = LANGUAGE_INFLUXQL
del conf[CONF_DB_NAME]
return conf
_QUERY_SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
)
_QUERY_SCHEMA = {
LANGUAGE_INFLUXQL: _QUERY_SENSOR_SCHEMA.extend(
{
vol.Optional(CONF_DB_NAME): cv.string,
vol.Required(CONF_MEASUREMENT_NAME): cv.string,
vol.Optional(
CONF_GROUP_FUNCTION, default=DEFAULT_GROUP_FUNCTION
): cv.string,
vol.Optional(CONF_FIELD, default=DEFAULT_FIELD): cv.string,
vol.Required(CONF_WHERE): cv.template,
}
),
LANGUAGE_FLUX: _QUERY_SENSOR_SCHEMA.extend(
{
vol.Optional(CONF_BUCKET): cv.string,
vol.Optional(CONF_RANGE_START, default=DEFAULT_RANGE_START): cv.string,
vol.Optional(CONF_RANGE_STOP, default=DEFAULT_RANGE_STOP): cv.string,
vol.Required(CONF_QUERY): cv.template,
vol.Optional(CONF_IMPORTS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_GROUP_FUNCTION): cv.string,
}
),
}
PLATFORM_SCHEMA = vol.All(
SENSOR_PLATFORM_SCHEMA.extend(COMPONENT_CONFIG_SCHEMA_CONNECTION).extend(
{
vol.Exclusive(CONF_QUERIES, "queries"): [_QUERY_SCHEMA[LANGUAGE_INFLUXQL]],
vol.Exclusive(CONF_QUERIES_FLUX, "queries"): [_QUERY_SCHEMA[LANGUAGE_FLUX]],
}
),
validate_version_specific_config,
validate_query_format_for_version,
create_influx_url,
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the InfluxDB component."""
try:
influx = get_influx_connection(config, test_read=True)
except ConnectionError as exc:
_LOGGER.error(exc)
raise PlatformNotReady() from exc
entities = []
if CONF_QUERIES_FLUX in config:
for query in config[CONF_QUERIES_FLUX]:
if query[CONF_BUCKET] in influx.data_repositories:
entities.append(InfluxSensor(hass, influx, query))
else:
_LOGGER.error(NO_BUCKET_ERROR, query[CONF_BUCKET])
else:
for query in config[CONF_QUERIES]:
if query[CONF_DB_NAME] in influx.data_repositories:
entities.append(InfluxSensor(hass, influx, query))
else:
_LOGGER.error(NO_DATABASE_ERROR, query[CONF_DB_NAME])
add_entities(entities, update_before_add=True)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda _: influx.close())
class InfluxSensor(Entity):
"""Implementation of a Influxdb sensor."""
def __init__(self, hass, influx, query):
"""Initialize the sensor."""
self._name = query.get(CONF_NAME)
self._unit_of_measurement = query.get(CONF_UNIT_OF_MEASUREMENT)
value_template = query.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
self._value_template = value_template
self._value_template.hass = hass
else:
self._value_template = None
self._state = None
self._hass = hass
if query[CONF_LANGUAGE] == LANGUAGE_FLUX:
query_clause = query.get(CONF_QUERY)
query_clause.hass = hass
self.data = InfluxFluxSensorData(
influx,
query.get(CONF_BUCKET),
query.get(CONF_RANGE_START),
query.get(CONF_RANGE_STOP),
query_clause,
query.get(CONF_IMPORTS),
query.get(CONF_GROUP_FUNCTION),
)
else:
where_clause = query.get(CONF_WHERE)
where_clause.hass = hass
self.data = InfluxQLSensorData(
influx,
query.get(CONF_DB_NAME),
query.get(CONF_GROUP_FUNCTION),
query.get(CONF_FIELD),
query.get(CONF_MEASUREMENT_NAME),
where_clause,
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from Influxdb and updates the states."""
self.data.update()
value = self.data.value
if value is None:
value = STATE_UNKNOWN
if self._value_template is not None:
value = self._value_template.render_with_possible_json_value(
str(value), STATE_UNKNOWN
)
self._state = value
class InfluxFluxSensorData:
"""Class for handling the data retrieval from Influx with Flux query."""
def __init__(self, influx, bucket, range_start, range_stop, query, imports, group):
"""Initialize the data object."""
self.influx = influx
self.bucket = bucket
self.range_start = range_start
self.range_stop = range_stop
self.query = query
self.imports = imports
self.group = group
self.value = None
self.full_query = None
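        # The final Flux query is assembled as: optional imports, the from()/range()
        # prefix below, the user-rendered query template, then a group-function postfix.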
self.query_prefix = f'from(bucket:"{bucket}") |> range(start: {range_start}, stop: {range_stop}) |>'
if imports is not None:
for i in imports:
self.query_prefix = f'import "{i}" {self.query_prefix}'
if group is None:
self.query_postfix = DEFAULT_FUNCTION_FLUX
else:
self.query_postfix = f'|> {group}(column: "{INFLUX_CONF_VALUE_V2}")'
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data by querying influx."""
_LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)
try:
rendered_query = self.query.render(parse_result=False)
except TemplateError as ex:
_LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)
return
self.full_query = f"{self.query_prefix} {rendered_query} {self.query_postfix}"
_LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)
try:
tables = self.influx.query(self.full_query)
except (ConnectionError, ValueError) as exc:
_LOGGER.error(exc)
self.value = None
return
if not tables:
_LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)
self.value = None
else:
if len(tables) > 1 or len(tables[0].records) > 1:
_LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)
self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]
class InfluxQLSensorData:
"""Class for handling the data retrieval with v1 API."""
def __init__(self, influx, db_name, group, field, measurement, where):
"""Initialize the data object."""
self.influx = influx
self.db_name = db_name
self.group = group
self.field = field
self.measurement = measurement
self.where = where
self.value = None
self.query = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data with a shell command."""
_LOGGER.debug(RENDERING_WHERE_MESSAGE, self.where)
try:
where_clause = self.where.render(parse_result=False)
except TemplateError as ex:
_LOGGER.error(RENDERING_WHERE_ERROR_MESSAGE, ex)
return
self.query = f"select {self.group}({self.field}) as {INFLUX_CONF_VALUE} from {self.measurement} where {where_clause}"
_LOGGER.debug(RUNNING_QUERY_MESSAGE, self.query)
try:
points = self.influx.query(self.query, self.db_name)
except (ConnectionError, ValueError) as exc:
_LOGGER.error(exc)
self.value = None
return
if not points:
_LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.query)
self.value = None
else:
if len(points) > 1:
_LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.query)
self.value = points[0].get(INFLUX_CONF_VALUE)
|
import asyncio
import logging
from typing import List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ALIAS,
CONF_ICON,
CONF_MODE,
CONF_SEQUENCE,
CONF_VARIABLES,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.script import (
ATTR_CUR,
ATTR_MAX,
ATTR_MODE,
CONF_MAX,
CONF_MAX_EXCEEDED,
SCRIPT_MODE_SINGLE,
Script,
make_script_schema,
)
from homeassistant.helpers.service import async_set_service_schema
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
DOMAIN = "script"
ATTR_LAST_ACTION = "last_action"
ATTR_LAST_TRIGGERED = "last_triggered"
ATTR_VARIABLES = "variables"
CONF_DESCRIPTION = "description"
CONF_EXAMPLE = "example"
CONF_FIELDS = "fields"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
EVENT_SCRIPT_STARTED = "script_started"
SCRIPT_ENTRY_SCHEMA = make_script_schema(
{
vol.Optional(CONF_ALIAS): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Required(CONF_SEQUENCE): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_DESCRIPTION, default=""): cv.string,
vol.Optional(CONF_VARIABLES): cv.SCRIPT_VARIABLES_SCHEMA,
vol.Optional(CONF_FIELDS, default={}): {
cv.string: {
vol.Optional(CONF_DESCRIPTION): cv.string,
vol.Optional(CONF_EXAMPLE): cv.string,
}
},
},
SCRIPT_MODE_SINGLE,
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(SCRIPT_ENTRY_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
SCRIPT_SERVICE_SCHEMA = vol.Schema(dict)
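# Each script is also registered as its own script.<object_id> service; the schema
# accepts an arbitrary dict so callers can pass variables straight through to the run.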
SCRIPT_TURN_ONOFF_SCHEMA = make_entity_service_schema(
{vol.Optional(ATTR_VARIABLES): {str: cv.match_all}}
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
"""Return if the script is on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
@callback
def scripts_with_entity(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all scripts that reference the entity."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
script_entity.entity_id
for script_entity in component.entities
if entity_id in script_entity.script.referenced_entities
]
@callback
def entities_in_script(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all entities in script."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
script_entity = component.get_entity(entity_id)
if script_entity is None:
return []
return list(script_entity.script.referenced_entities)
@callback
def scripts_with_device(hass: HomeAssistant, device_id: str) -> List[str]:
"""Return all scripts that reference the device."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
script_entity.entity_id
for script_entity in component.entities
if device_id in script_entity.script.referenced_devices
]
@callback
def devices_in_script(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all devices in script."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
script_entity = component.get_entity(entity_id)
if script_entity is None:
return []
return list(script_entity.script.referenced_devices)
async def async_setup(hass, config):
"""Load the scripts from the configuration."""
hass.data[DOMAIN] = component = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def reload_service(service):
"""Call a service to reload scripts."""
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
async def turn_on_service(service):
"""Call a service to turn script on."""
variables = service.data.get(ATTR_VARIABLES)
for script_entity in await component.async_extract_from_service(service):
await script_entity.async_turn_on(
variables=variables, context=service.context, wait=False
)
async def turn_off_service(service):
"""Cancel a script."""
# Stopping a script is ok to be done in parallel
script_entities = await component.async_extract_from_service(service)
if not script_entities:
return
await asyncio.wait(
[script_entity.async_turn_off() for script_entity in script_entities]
)
async def toggle_service(service):
"""Toggle a script."""
for script_entity in await component.async_extract_from_service(service):
await script_entity.async_toggle(context=service.context, wait=False)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service, schema=RELOAD_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, turn_on_service, schema=SCRIPT_TURN_ONOFF_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TURN_OFF, turn_off_service, schema=SCRIPT_TURN_ONOFF_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, toggle_service, schema=SCRIPT_TURN_ONOFF_SCHEMA
)
return True
async def _async_process_config(hass, config, component):
"""Process script configuration."""
async def service_handler(service):
"""Execute a service call to script.<script name>."""
entity_id = ENTITY_ID_FORMAT.format(service.service)
script_entity = component.get_entity(entity_id)
await script_entity.async_turn_on(
variables=service.data, context=service.context
)
script_entities = [
ScriptEntity(hass, object_id, cfg)
for object_id, cfg in config.get(DOMAIN, {}).items()
]
await component.async_add_entities(script_entities)
# Register services for all entities that were created successfully.
for script_entity in script_entities:
object_id = script_entity.object_id
if component.get_entity(script_entity.entity_id) is None:
_LOGGER.error("Couldn't load script %s", object_id)
continue
cfg = config[DOMAIN][object_id]
hass.services.async_register(
DOMAIN, object_id, service_handler, schema=SCRIPT_SERVICE_SCHEMA
)
# Register the service description
service_desc = {
CONF_DESCRIPTION: cfg[CONF_DESCRIPTION],
CONF_FIELDS: cfg[CONF_FIELDS],
}
async_set_service_schema(hass, DOMAIN, object_id, service_desc)
class ScriptEntity(ToggleEntity):
"""Representation of a script entity."""
icon = None
def __init__(self, hass, object_id, cfg):
"""Initialize the script."""
self.object_id = object_id
self.icon = cfg.get(CONF_ICON)
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self.script = Script(
hass,
cfg[CONF_SEQUENCE],
cfg.get(CONF_ALIAS, object_id),
DOMAIN,
running_description="script sequence",
change_listener=self.async_change_listener,
script_mode=cfg[CONF_MODE],
max_runs=cfg[CONF_MAX],
max_exceeded=cfg[CONF_MAX_EXCEEDED],
logger=logging.getLogger(f"{__name__}.{object_id}"),
variables=cfg.get(CONF_VARIABLES),
)
self._changed = asyncio.Event()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the entity."""
return self.script.name
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = {
ATTR_LAST_TRIGGERED: self.script.last_triggered,
ATTR_MODE: self.script.script_mode,
ATTR_CUR: self.script.runs,
}
if self.script.supports_max:
attrs[ATTR_MAX] = self.script.max_runs
if self.script.last_action:
attrs[ATTR_LAST_ACTION] = self.script.last_action
return attrs
@property
def is_on(self):
"""Return true if script is on."""
return self.script.is_running
@callback
def async_change_listener(self):
"""Update state."""
self.async_write_ha_state()
self._changed.set()
async def async_turn_on(self, **kwargs):
"""Turn the script on."""
variables = kwargs.get("variables")
context = kwargs.get("context")
wait = kwargs.get("wait", True)
self.async_set_context(context)
self.hass.bus.async_fire(
EVENT_SCRIPT_STARTED,
{ATTR_NAME: self.script.name, ATTR_ENTITY_ID: self.entity_id},
context=context,
)
coro = self.script.async_run(variables, context)
if wait:
await coro
return
# Caller does not want to wait for called script to finish so let script run in
# separate Task. However, wait for first state change so we can guarantee that
# it is written to the State Machine before we return.
self._changed.clear()
self.hass.async_create_task(coro)
await self._changed.wait()
async def async_turn_off(self, **kwargs):
"""Turn script off."""
await self.script.async_stop()
async def async_will_remove_from_hass(self):
"""Stop script and remove service when it will be removed from Home Assistant."""
await self.script.async_stop()
# remove service
self.hass.services.async_remove(DOMAIN, self.object_id)
|
import random
from .const import DOMAIN, SERVICE_RANDOMIZE_DEVICE_TRACKER_DATA
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up the demo tracker."""
def offset():
"""Return random offset."""
return (random.randrange(500, 2000)) / 2e5 * random.choice((-1, 1))
def random_see(dev_id, name):
"""Randomize a sighting."""
see(
dev_id=dev_id,
host_name=name,
gps=(hass.config.latitude + offset(), hass.config.longitude + offset()),
gps_accuracy=random.randrange(50, 150),
battery=random.randrange(10, 90),
)
def observe(call=None):
"""Observe three entities."""
random_see("demo_paulus", "Paulus")
random_see("demo_anne_therese", "Anne Therese")
observe()
see(
dev_id="demo_home_boy",
host_name="Home Boy",
gps=[hass.config.latitude - 0.00002, hass.config.longitude + 0.00002],
gps_accuracy=20,
battery=53,
)
hass.services.register(DOMAIN, SERVICE_RANDOMIZE_DEVICE_TRACKER_DATA, observe)
return True
|
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS
from . import DATA_MELISSA
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
OP_MODES = [
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_OFF,
]
FAN_MODES = [FAN_AUTO, FAN_HIGH, FAN_MEDIUM, FAN_LOW]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Iterate through and add all Melissa devices."""
api = hass.data[DATA_MELISSA]
devices = (await api.async_fetch_devices()).values()
all_devices = []
for device in devices:
if device["type"] == "melissa":
all_devices.append(MelissaClimate(api, device["serial_number"], device))
async_add_entities(all_devices)
class MelissaClimate(ClimateEntity):
"""Representation of a Melissa Climate device."""
def __init__(self, api, serial_number, init_data):
"""Initialize the climate device."""
self._name = init_data["name"]
self._api = api
self._serial_number = serial_number
self._data = init_data["controller_log"]
self._state = None
self._cur_settings = None
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._name
@property
def fan_mode(self):
"""Return the current fan mode."""
if self._cur_settings is not None:
return self.melissa_fan_to_hass(self._cur_settings[self._api.FAN])
@property
def current_temperature(self):
"""Return the current temperature."""
if self._data:
return self._data[self._api.TEMP]
@property
def current_humidity(self):
"""Return the current humidity value."""
if self._data:
return self._data[self._api.HUMIDITY]
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return PRECISION_WHOLE
@property
def hvac_mode(self):
"""Return the current operation mode."""
if self._cur_settings is None:
return None
is_on = self._cur_settings[self._api.STATE] in (
self._api.STATE_ON,
self._api.STATE_IDLE,
)
if not is_on:
return HVAC_MODE_OFF
return self.melissa_op_to_hass(self._cur_settings[self._api.MODE])
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return OP_MODES
@property
def fan_modes(self):
"""List of available fan modes."""
return FAN_MODES
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._cur_settings is None:
return None
return self._cur_settings[self._api.TEMP]
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def min_temp(self):
"""Return the minimum supported temperature for the thermostat."""
return 16
@property
def max_temp(self):
"""Return the maximum supported temperature for the thermostat."""
return 30
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
await self.async_send({self._api.TEMP: temp})
async def async_set_fan_mode(self, fan_mode):
"""Set fan mode."""
melissa_fan_mode = self.hass_fan_to_melissa(fan_mode)
await self.async_send({self._api.FAN: melissa_fan_mode})
async def async_set_hvac_mode(self, hvac_mode):
"""Set operation mode."""
if hvac_mode == HVAC_MODE_OFF:
await self.async_send({self._api.STATE: self._api.STATE_OFF})
return
mode = self.hass_mode_to_melissa(hvac_mode)
await self.async_send(
{self._api.MODE: mode, self._api.STATE: self._api.STATE_ON}
)
async def async_send(self, value):
"""Send action to service."""
try:
old_value = self._cur_settings.copy()
self._cur_settings.update(value)
except AttributeError:
old_value = None
if not await self._api.async_send(
self._serial_number, "melissa", self._cur_settings
):
self._cur_settings = old_value
async def async_update(self):
"""Get latest data from Melissa."""
try:
self._data = (await self._api.async_status(cached=True))[
self._serial_number
]
self._cur_settings = (
await self._api.async_cur_settings(self._serial_number)
)["controller"]["_relation"]["command_log"]
except KeyError:
_LOGGER.warning("Unable to update entity %s", self.entity_id)
def melissa_op_to_hass(self, mode):
"""Translate Melissa modes to hass states."""
if mode == self._api.MODE_HEAT:
return HVAC_MODE_HEAT
if mode == self._api.MODE_COOL:
return HVAC_MODE_COOL
if mode == self._api.MODE_DRY:
return HVAC_MODE_DRY
if mode == self._api.MODE_FAN:
return HVAC_MODE_FAN_ONLY
_LOGGER.warning("Operation mode %s could not be mapped to hass", mode)
return None
def melissa_fan_to_hass(self, fan):
"""Translate Melissa fan modes to hass modes."""
if fan == self._api.FAN_AUTO:
return HVAC_MODE_AUTO
if fan == self._api.FAN_LOW:
return FAN_LOW
if fan == self._api.FAN_MEDIUM:
return FAN_MEDIUM
if fan == self._api.FAN_HIGH:
return FAN_HIGH
_LOGGER.warning("Fan mode %s could not be mapped to hass", fan)
return None
def hass_mode_to_melissa(self, mode):
"""Translate hass states to melissa modes."""
if mode == HVAC_MODE_HEAT:
return self._api.MODE_HEAT
if mode == HVAC_MODE_COOL:
return self._api.MODE_COOL
if mode == HVAC_MODE_DRY:
return self._api.MODE_DRY
if mode == HVAC_MODE_FAN_ONLY:
return self._api.MODE_FAN
_LOGGER.warning("Melissa have no setting for %s mode", mode)
def hass_fan_to_melissa(self, fan):
"""Translate hass fan modes to melissa modes."""
if fan == HVAC_MODE_AUTO:
return self._api.FAN_AUTO
if fan == FAN_LOW:
return self._api.FAN_LOW
if fan == FAN_MEDIUM:
return self._api.FAN_MEDIUM
if fan == FAN_HIGH:
return self._api.FAN_HIGH
_LOGGER.warning("Melissa have no setting for %s fan mode", fan)
|
from pylatex import Document, NoEscape
import pylatex.config as cf
lorem = '''
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere
cubilia Curae; Phasellus facilisis tortor vel imperdiet vestibulum. Vivamus et
mollis risus. Proin ut enim eu leo volutpat tristique. Vivamus quam enim,
efficitur quis turpis ac, condimentum tincidunt tellus. Praesent non tellus in
quam tempor dignissim. Sed feugiat ante id mauris vehicula, quis elementum nunc
molestie. Pellentesque a vulputate nisi, ut vulputate ex. Morbi erat eros,
aliquam in justo sed, placerat tempor mauris. In vitae velit eu lorem dapibus
consequat. Integer posuere ornare laoreet.
Donec pellentesque libero id tempor aliquam. Maecenas a diam at metus varius
rutrum vel in nisl. Maecenas a est lorem. Vivamus tristique nec eros ac
hendrerit. Vivamus imperdiet justo id lobortis luctus. Sed facilisis ipsum ut
tellus pellentesque tincidunt. Mauris libero lectus, maximus at mattis ut,
venenatis eget diam. Fusce in leo at erat varius laoreet. Mauris non ipsum
pretium, convallis purus vel, pulvinar leo. Aliquam lacinia lorem dapibus
tortor imperdiet, quis consequat diam mollis.
Praesent accumsan ultrices diam a eleifend. Vestibulum ante ipsum primis in
faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse accumsan
orci ut sodales ullamcorper. Integer bibendum elementum convallis. Praesent
accumsan at leo eget ullamcorper. Maecenas eget tempor enim. Quisque et nisl
eros.
'''
def main():
cf.active = cf.Version1()
doc = Document(data=NoEscape(lorem))
doc.generate_pdf('config1_with_indent', clean_tex=False)
cf.active = cf.Version1(indent=False)
doc = Document(data=NoEscape(lorem))
doc.generate_pdf('config2_without_indent', clean_tex=False)
with cf.Version1().use():
doc = Document(data=NoEscape(lorem))
doc.generate_pdf('config3_with_indent_again', clean_tex=False)
doc = Document(data=NoEscape(lorem))
doc.generate_pdf('config4_without_indent_again', clean_tex=False)
if __name__ == '__main__':
main()
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from postfix import PostfixCollector
##########################################################################
class TestPostfixCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PostfixCollector', {
'host': 'localhost',
'port': 7777,
'interval': '1',
})
self.collector = PostfixCollector(config, None)
def test_import(self):
self.assertTrue(PostfixCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock):
first_resp = self.getFixture('postfix-stats.1.json').getvalue()
patch_collector = patch.object(
PostfixCollector,
'get_json',
Mock(return_value=first_resp))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
self.assertPublishedMany(publish_mock, {})
second_resp = self.getFixture('postfix-stats.2.json').getvalue()
patch_collector = patch.object(PostfixCollector,
'get_json',
Mock(return_value=second_resp))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'send.status.sent': 4,
'send.resp_codes.2_0_0': 5,
'clients.127_0_0_1': 1,
}
self.assertPublishedMany(publish_mock, metrics)
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import diamond.collector
class KafkaConsumerLagCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
collector = super(KafkaConsumerLagCollector, self)
config_help = collector.get_default_config_help()
config_help.update({
'bin': 'The path to kafka-run-class.sh binary',
'topic': 'Comma-separated list of consumer topics.',
'zookeeper': 'ZooKeeper connect string.',
'consumer_groups': 'Consumer groups'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(KafkaConsumerLagCollector, self).get_default_config()
config.update({
'path': 'kafka.ConsumerLag',
'bin': '/opt/kafka/bin/kafka-run-class.sh',
'zookeeper': 'localhost:2181'
})
return config
def collect(self):
zookeeper = ','.join(self.config.get('zookeeper'))
consumer_groups = self.config.get('consumer_groups')
topic = self.config.get('topic')
cluster_name = '-'.join(zookeeper.split('/')[1:]).replace('-', '_')
for consumer_group in consumer_groups:
try:
cmd = [
'kafka.tools.ConsumerOffsetChecker',
'--group',
consumer_group,
'--zookeeper',
zookeeper
]
                if topic:
                    # add as separate argv elements; += with a string appends each character
                    cmd += ['--topic', '%s' % topic]
raw_output = self.run_command(cmd)
if raw_output is None:
return
for i, output in enumerate(raw_output[0].split('\n')):
if i == 0:
continue
items = output.strip().split(' ')
metrics = [item for item in items if item]
if not metrics:
continue
prefix_keys = metrics[:3]
value = float(metrics[5])
if cluster_name:
prefix_keys.insert(0, cluster_name)
self.publish('.'.join(prefix_keys), value)
except Exception as e:
self.log.error(e)
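# Illustrative KafkaConsumerLagCollector.conf (a sketch; the group and topic
# names are placeholders and the keys mirror get_default_config_help above;
# list-style values use configobj's trailing-comma syntax):
#
#   enabled = True
#   zookeeper = zk1:2181, zk2:2181
#   consumer_groups = my_consumer_group,
#   topic = my_topic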
|
from unittest import TestCase
from scattertext.WhitespaceNLP import whitespace_nlp_with_sentences, whitespace_nlp, Tok, Doc
class TestWhitespaceNLP(TestCase):
def test_whitespace_nlp(self):
raw = '''Hi! My name
is Jason. You can call me
Mr. J. Is that your name too?
Ha. Ha ha.
'''
doc = whitespace_nlp(raw)
self.assertEqual(len(list(doc)), 55)
self.assertEqual(len(doc.sents), 1)
tok = Tok('WORD', 'Jason', 'jason', 'Name', 'NNP')
self.assertEqual(len(tok), 5)
self.assertEqual(str(tok), 'jason')
self.assertEqual(str(Doc([[Tok('WORD', 'Jason', 'jason', 'Name', 'NNP'),
Tok('WORD', 'a', 'a', 'Name', 'NNP')]],
raw='asdfbasdfasd')),
'asdfbasdfasd')
self.assertEqual(str(Doc([[Tok('WORD', 'Blah', 'blah', 'Name', 'NNP'),
Tok('Space', ' ', ' ', ' ', ' '),
Tok('WORD', 'a', 'a', 'Name', 'NNP')]])),
'blah a')
def test_whitespace_nlp_with_sentences(self):
raw = '''Hi! My name
is Jason. You can call me
Mr. J. Is that your name too?
Ha. Ha ha.
'''
doc = whitespace_nlp_with_sentences(raw)
self.assertEqual(doc.text, raw)
self.assertEqual(len(doc.sents), 7)
self.assertEqual(doc[3].orth_, 'name')
self.assertEqual(doc[25].orth_, '.')
self.assertEqual(len(doc), 26)
self.assertEqual(doc[3].idx, 7)
self.assertEqual(raw[doc[3].idx:(doc[3].idx+len(doc[3].orth_))], 'name')
def test_whitespace_nlp_with_sentences_singleton(self):
raw = 'Blah'
self.assertEqual(whitespace_nlp_with_sentences(raw).text, raw)
self.assertEqual(len(whitespace_nlp_with_sentences(raw).sents), 1)
self.assertEqual(len(whitespace_nlp_with_sentences(raw).sents[0]), 1)
raw = 'Blah.'
self.assertEqual(whitespace_nlp_with_sentences(raw).text, raw)
self.assertEqual(len(whitespace_nlp_with_sentences(raw).sents), 1)
self.assertEqual(len(whitespace_nlp_with_sentences(raw).sents[0]), 2)
|
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.insteon.config_flow import (
HUB1,
HUB2,
MODEM_TYPE,
PLM,
STEP_ADD_OVERRIDE,
STEP_ADD_X10,
STEP_CHANGE_HUB_CONFIG,
STEP_HUB_V2,
STEP_REMOVE_OVERRIDE,
STEP_REMOVE_X10,
)
from homeassistant.components.insteon.const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_HUB_VERSION,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
MOCK_HOSTNAME,
MOCK_IMPORT_CONFIG_PLM,
MOCK_IMPORT_MINIMUM_HUB_V1,
MOCK_IMPORT_MINIMUM_HUB_V2,
MOCK_PASSWORD,
MOCK_USER_INPUT_HUB_V1,
MOCK_USER_INPUT_HUB_V2,
MOCK_USER_INPUT_PLM,
MOCK_USERNAME,
PATCH_ASYNC_SETUP,
PATCH_ASYNC_SETUP_ENTRY,
PATCH_CONNECTION,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def mock_successful_connection(*args, **kwargs):
"""Return a successful connection."""
return True
async def mock_failed_connection(*args, **kwargs):
"""Return a failed connection."""
raise ConnectionError("Connection failed")
async def _init_form(hass, modem_type):
"""Run the user form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{MODEM_TYPE: modem_type},
)
return result2
async def _device_form(hass, flow_id, connection, user_input):
"""Test the PLM, Hub v1 or Hub v2 form."""
with patch(PATCH_CONNECTION, new=connection,), patch(
PATCH_ASYNC_SETUP, return_value=True
) as mock_setup, patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(flow_id, user_input)
await hass.async_block_till_done()
return result, mock_setup, mock_setup_entry
async def test_form_select_modem(hass: HomeAssistantType):
"""Test we get a modem form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB2)
assert result["step_id"] == STEP_HUB_V2
assert result["type"] == "form"
async def test_fail_on_existing(hass: HomeAssistantType):
"""Test we fail if the integration is already configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
assert config_entry.state == config_entries.ENTRY_STATE_NOT_LOADED
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
context={"source": config_entries.SOURCE_USER},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_form_select_plm(hass: HomeAssistantType):
"""Test we set up the PLM correctly."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, PLM)
result2, mock_setup, mock_setup_entry = await _device_form(
hass, result["flow_id"], mock_successful_connection, MOCK_USER_INPUT_PLM
)
assert result2["type"] == "create_entry"
assert result2["data"] == MOCK_USER_INPUT_PLM
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_select_hub_v1(hass: HomeAssistantType):
"""Test we set up the Hub v1 correctly."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB1)
result2, mock_setup, mock_setup_entry = await _device_form(
hass, result["flow_id"], mock_successful_connection, MOCK_USER_INPUT_HUB_V1
)
assert result2["type"] == "create_entry"
assert result2["data"] == {
**MOCK_USER_INPUT_HUB_V1,
CONF_HUB_VERSION: 1,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_select_hub_v2(hass: HomeAssistantType):
"""Test we set up the Hub v2 correctly."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB2)
result2, mock_setup, mock_setup_entry = await _device_form(
hass, result["flow_id"], mock_successful_connection, MOCK_USER_INPUT_HUB_V2
)
assert result2["type"] == "create_entry"
assert result2["data"] == {
**MOCK_USER_INPUT_HUB_V2,
CONF_HUB_VERSION: 2,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_failed_connection_plm(hass: HomeAssistantType):
"""Test a failed connection with the PLM."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, PLM)
result2, _, _ = await _device_form(
hass, result["flow_id"], mock_failed_connection, MOCK_USER_INPUT_PLM
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_failed_connection_hub(hass: HomeAssistantType):
"""Test a failed connection with a Hub."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB2)
result2, _, _ = await _device_form(
hass, result["flow_id"], mock_failed_connection, MOCK_USER_INPUT_HUB_V2
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def _import_config(hass, config):
"""Run the import step."""
with patch(PATCH_CONNECTION, new=mock_successful_connection,), patch(
PATCH_ASYNC_SETUP, return_value=True
), patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True):
return await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
async def test_import_plm(hass: HomeAssistantType):
"""Test importing a minimum PLM config from yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _import_config(hass, MOCK_IMPORT_CONFIG_PLM)
assert result["type"] == "create_entry"
assert hass.config_entries.async_entries(DOMAIN)
for entry in hass.config_entries.async_entries(DOMAIN):
assert entry.data == MOCK_IMPORT_CONFIG_PLM
async def _options_init_form(hass, entry_id, step):
"""Run the init options form."""
with patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True):
result = await hass.config_entries.options.async_init(entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
{step: True},
)
return result2
async def test_import_min_hub_v2(hass: HomeAssistantType):
"""Test importing a minimum Hub v2 config from yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _import_config(
hass, {**MOCK_IMPORT_MINIMUM_HUB_V2, CONF_PORT: 25105, CONF_HUB_VERSION: 2}
)
assert result["type"] == "create_entry"
assert hass.config_entries.async_entries(DOMAIN)
for entry in hass.config_entries.async_entries(DOMAIN):
assert entry.data[CONF_HOST] == MOCK_HOSTNAME
assert entry.data[CONF_PORT] == 25105
assert entry.data[CONF_USERNAME] == MOCK_USERNAME
assert entry.data[CONF_PASSWORD] == MOCK_PASSWORD
assert entry.data[CONF_HUB_VERSION] == 2
async def test_import_min_hub_v1(hass: HomeAssistantType):
"""Test importing a minimum Hub v1 config from yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _import_config(
hass, {**MOCK_IMPORT_MINIMUM_HUB_V1, CONF_PORT: 9761, CONF_HUB_VERSION: 1}
)
assert result["type"] == "create_entry"
assert hass.config_entries.async_entries(DOMAIN)
for entry in hass.config_entries.async_entries(DOMAIN):
assert entry.data[CONF_HOST] == MOCK_HOSTNAME
assert entry.data[CONF_PORT] == 9761
assert entry.data[CONF_HUB_VERSION] == 1
async def test_import_existing(hass: HomeAssistantType):
"""Test we fail on an existing config imported."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
assert config_entry.state == config_entries.ENTRY_STATE_NOT_LOADED
result = await _import_config(
hass, {**MOCK_IMPORT_MINIMUM_HUB_V2, CONF_PORT: 25105, CONF_HUB_VERSION: 2}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_import_failed_connection(hass: HomeAssistantType):
"""Test a failed connection on import."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(PATCH_CONNECTION, new=mock_failed_connection,), patch(
PATCH_ASYNC_SETUP, return_value=True
), patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={**MOCK_IMPORT_MINIMUM_HUB_V2, CONF_PORT: 25105, CONF_HUB_VERSION: 2},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def _options_form(hass, flow_id, user_input):
"""Test an options form."""
with patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True) as mock_setup_entry:
result = await hass.config_entries.options.async_configure(flow_id, user_input)
return result, mock_setup_entry
async def test_options_change_hub_config(hass: HomeAssistantType):
"""Test changing Hub v2 config."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(
hass, config_entry.entry_id, STEP_CHANGE_HUB_CONFIG
)
user_input = {
CONF_HOST: "2.3.4.5",
CONF_PORT: 9999,
CONF_USERNAME: "new username",
CONF_PASSWORD: "new password",
}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {}
assert config_entry.data == {**user_input, CONF_HUB_VERSION: 2}
async def test_options_add_device_override(hass: HomeAssistantType):
"""Test adding a device override."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_OVERRIDE)
user_input = {
CONF_ADDRESS: "1a2b3c",
CONF_CAT: "0x04",
CONF_SUBCAT: "0xaa",
}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_OVERRIDE]) == 1
assert config_entry.options[CONF_OVERRIDE][0][CONF_ADDRESS] == "1A.2B.3C"
assert config_entry.options[CONF_OVERRIDE][0][CONF_CAT] == 4
assert config_entry.options[CONF_OVERRIDE][0][CONF_SUBCAT] == 170
result2 = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_OVERRIDE)
user_input = {
CONF_ADDRESS: "4d5e6f",
CONF_CAT: "05",
CONF_SUBCAT: "bb",
}
await _options_form(hass, result2["flow_id"], user_input)
assert len(config_entry.options[CONF_OVERRIDE]) == 2
assert config_entry.options[CONF_OVERRIDE][1][CONF_ADDRESS] == "4D.5E.6F"
assert config_entry.options[CONF_OVERRIDE][1][CONF_CAT] == 5
assert config_entry.options[CONF_OVERRIDE][1][CONF_SUBCAT] == 187
async def test_options_remove_device_override(hass: HomeAssistantType):
"""Test removing a device override."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_OVERRIDE: [
{CONF_ADDRESS: "1A.2B.3C", CONF_CAT: 6, CONF_SUBCAT: 100},
{CONF_ADDRESS: "4D.5E.6F", CONF_CAT: 7, CONF_SUBCAT: 200},
]
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_OVERRIDE)
user_input = {CONF_ADDRESS: "1A.2B.3C"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_OVERRIDE]) == 1
async def test_options_remove_device_override_with_x10(hass: HomeAssistantType):
"""Test removing a device override when an X10 device is configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_OVERRIDE: [
{CONF_ADDRESS: "1A.2B.3C", CONF_CAT: 6, CONF_SUBCAT: 100},
{CONF_ADDRESS: "4D.5E.6F", CONF_CAT: 7, CONF_SUBCAT: 200},
],
CONF_X10: [
{
CONF_HOUSECODE: "d",
CONF_UNITCODE: 5,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 22,
}
],
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_OVERRIDE)
user_input = {CONF_ADDRESS: "1A.2B.3C"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_OVERRIDE]) == 1
assert len(config_entry.options[CONF_X10]) == 1
async def test_options_add_x10_device(hass: HomeAssistantType):
"""Test adding an X10 device."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_X10)
user_input = {
CONF_HOUSECODE: "c",
CONF_UNITCODE: 12,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 18,
}
result2, _ = await _options_form(hass, result["flow_id"], user_input)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 1
assert config_entry.options[CONF_X10][0][CONF_HOUSECODE] == "c"
assert config_entry.options[CONF_X10][0][CONF_UNITCODE] == 12
assert config_entry.options[CONF_X10][0][CONF_PLATFORM] == "light"
assert config_entry.options[CONF_X10][0][CONF_DIM_STEPS] == 18
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_X10)
user_input = {
CONF_HOUSECODE: "d",
CONF_UNITCODE: 10,
CONF_PLATFORM: "binary_sensor",
CONF_DIM_STEPS: 15,
}
result3, _ = await _options_form(hass, result["flow_id"], user_input)
assert result3["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 2
assert config_entry.options[CONF_X10][1][CONF_HOUSECODE] == "d"
assert config_entry.options[CONF_X10][1][CONF_UNITCODE] == 10
assert config_entry.options[CONF_X10][1][CONF_PLATFORM] == "binary_sensor"
assert config_entry.options[CONF_X10][1][CONF_DIM_STEPS] == 15
async def test_options_remove_x10_device(hass: HomeAssistantType):
"""Test removing an X10 device."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_X10: [
{
CONF_HOUSECODE: "C",
CONF_UNITCODE: 4,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 18,
},
{
CONF_HOUSECODE: "D",
CONF_UNITCODE: 10,
CONF_PLATFORM: "binary_sensor",
CONF_DIM_STEPS: 15,
},
]
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_X10)
for device in config_entry.options[CONF_X10]:
housecode = device[CONF_HOUSECODE].upper()
unitcode = device[CONF_UNITCODE]
print(f"Housecode: {housecode}, Unitcode: {unitcode}")
user_input = {CONF_DEVICE: "Housecode: C, Unitcode: 4"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 1
async def test_options_remove_x10_device_with_override(hass: HomeAssistantType):
"""Test removing an X10 device when a device override is configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_X10: [
{
CONF_HOUSECODE: "C",
CONF_UNITCODE: 4,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 18,
},
{
CONF_HOUSECODE: "D",
CONF_UNITCODE: 10,
CONF_PLATFORM: "binary_sensor",
CONF_DIM_STEPS: 15,
},
],
CONF_OVERRIDE: [{CONF_ADDRESS: "1A.2B.3C", CONF_CAT: 1, CONF_SUBCAT: 18}],
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_X10)
for device in config_entry.options[CONF_X10]:
housecode = device[CONF_HOUSECODE].upper()
unitcode = device[CONF_UNITCODE]
print(f"Housecode: {housecode}, Unitcode: {unitcode}")
user_input = {CONF_DEVICE: "Housecode: C, Unitcode: 4"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 1
assert len(config_entry.options[CONF_OVERRIDE]) == 1
async def test_options_dup_selection(hass: HomeAssistantType):
"""Test if a duplicate selection was made in options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
{STEP_ADD_OVERRIDE: True, STEP_ADD_X10: True},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "select_single"}
async def test_options_override_bad_data(hass: HomeAssistantType):
"""Test for bad data in a device override."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_OVERRIDE)
user_input = {
CONF_ADDRESS: "zzzzzz",
CONF_CAT: "bad",
CONF_SUBCAT: "data",
}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "input_error"}
|
import ast
from bisect import bisect_left
from distutils import spawn
import errno
import functools
import inspect
import itertools
import logging
import os
import re
import subprocess
import time
import traceback
import warnings
from decorator import decorator
from google.protobuf.descriptor import FieldDescriptor
from gmusicapi import __version__
from gmusicapi.appdirs import my_appdirs
from gmusicapi.exceptions import CallFailure, GmusicapiWarning, NotSubscribed
# this controls the crazy logging setup that checks the callstack;
# it should be monkey-patched to False after importing to disable it.
# when False, static code will simply log in the standard way under the root.
per_client_logging = True
# Map descriptor.CPPTYPE -> python type.
_python_to_cpp_types = {
int: ('int32', 'int64', 'uint32', 'uint64'),
float: ('double', 'float'),
bool: ('bool',),
str: ('string',),
}
cpp_type_to_python = {
getattr(FieldDescriptor, 'CPPTYPE_' + cpp.upper()): python
for (python, cpplist) in _python_to_cpp_types.items()
for cpp in cpplist
}
log_filepath = os.path.join(my_appdirs.user_log_dir, 'gmusicapi.log')
printed_log_start_message = False # global, set in configure_debug_log_handlers
# matches a mac address in GM form, eg
# 00:11:22:33:AA:BB
_mac_pattern = re.compile("^({pair}:){{5}}{pair}$".format(pair='[0-9A-F]' * 2))
class DynamicClientLogger:
"""Dynamically proxies to the logger of a Client higher in the call stack.
This is a ridiculous hack needed because
logging is, in the eyes of a user, per-client.
So, logging from static code (eg protocol, utils) needs to log using the
config of the calling client's logger.
There can be multiple clients, so we can't just use a globally-available
logger.
    Instead of refactoring every function to receive a logger, we introspect
the callstack at runtime to figure out who's calling us, then use their
logger.
This probably won't work on non-CPython implementations.
"""
def __init__(self, caller_name):
self.caller_name = caller_name
def __getattr__(self, name):
# this isn't a totally foolproof way to proxy, but it's fine for
# the usual logger.debug, etc methods.
logger = logging.getLogger(self.caller_name)
if per_client_logging:
# search upwards for a client instance
for frame_rec in inspect.getouterframes(inspect.currentframe()):
frame = frame_rec[0]
try:
if 'self' in frame.f_locals:
f_self = frame.f_locals['self']
# can't import and check against classes; that causes an import cycle
if (f_self is not None and
f_self.__module__.startswith('gmusicapi.clients') and
f_self.__class__.__name__ in ('Musicmanager', 'Webclient',
'Mobileclient')):
logger = f_self.logger
break
finally:
del frame # avoid circular references
else:
# log to root logger.
# should this be stronger? There's no default root logger set up.
stack = traceback.extract_stack()
logger.info('could not locate client caller in stack:\n%s',
'\n'.join(traceback.format_list(stack)))
return getattr(logger, name)
log = DynamicClientLogger(__name__)
def deprecated(instructions):
"""Flags a method as deprecated.
:param instructions: human-readable note to assist migration.
"""
@decorator
def wrapper(func, *args, **kwargs):
message = "{0} is deprecated and may break unexpectedly; {1}".format(
func.__name__,
instructions)
warnings.warn(message,
GmusicapiWarning,
stacklevel=2)
return func(*args, **kwargs)
return wrapper
def longest_increasing_subseq(seq):
"""Returns the longest (non-contiguous) subsequence
of seq that is strictly increasing.
"""
# adapted from http://goo.gl/lddm3c
if not seq:
return []
# head[j] = index in 'seq' of the final member of the best subsequence
# of length 'j + 1' yet found
head = [0]
# predecessor[j] = linked list of indices of best subsequence ending
# at seq[j], in reverse order
predecessor = [-1]
for i in range(1, len(seq)):
# Find j such that: seq[head[j - 1]] < seq[i] <= seq[head[j]]
# seq[head[j]] is increasing, so use binary search.
j = bisect_left([seq[head[idx]] for idx in range(len(head))], seq[i])
if j == len(head):
head.append(i)
if seq[i] < seq[head[j]]:
head[j] = i
predecessor.append(head[j - 1] if j > 0 else -1)
# trace subsequence back to output
result = []
trace_idx = head[-1]
while (trace_idx >= 0):
result.append(seq[trace_idx])
trace_idx = predecessor[trace_idx]
return result[::-1]
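# Worked example for the function above (values chosen so the longest strictly
# increasing subsequence is unique):
#
#   >>> longest_increasing_subseq([5, 1, 6, 2, 3, 8])
#   [1, 2, 3, 8]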
def id_or_nid(song_dict):
"""Equivalent to ``d.get('id') or d['nid']``.
Uploaded songs have an id key, while AA tracks
    have a nid key, which can often be used interchangeably.
"""
return song_dict.get('id') or song_dict['nid']
def datetime_to_microseconds(dt):
"""Return microseconds since epoch, as an int.
:param dt: a datetime.datetime
"""
return int(time.mktime(dt.timetuple()) * 1000000) + dt.microsecond
def is_valid_mac(mac_string):
"""Return True if mac_string is of form
eg '00:11:22:33:AA:BB'.
"""
if not _mac_pattern.match(mac_string):
return False
return True
def create_mac_string(num, splitter=':'):
"""Return the mac address interpretation of num,
in the form eg '00:11:22:33:AA:BB'.
:param num: a 48-bit integer (eg from uuid.getnode)
    :param splitter: a string to join the hex pairs with
"""
mac = hex(num)[2:]
# trim trailing L for long consts
if mac[-1] == 'L':
mac = mac[:-1]
pad = max(12 - len(mac), 0)
mac = '0' * pad + mac
mac = splitter.join([mac[x:x + 2] for x in range(0, 12, 2)])
mac = mac.upper()
return mac
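# Quick illustration of the two MAC helpers above (note that is_valid_mac only
# accepts uppercase hex pairs, matching the GM form in _mac_pattern):
#
#   >>> create_mac_string(0x001122334455)
#   '00:11:22:33:44:55'
#   >>> is_valid_mac('00:11:22:33:44:55')
#   True
#   >>> is_valid_mac('00:11:22:33:aa:bb')
#   False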
# from http://stackoverflow.com/a/5032238/1231454
def make_sure_path_exists(path, mode=None):
try:
if mode is not None:
os.makedirs(path, mode)
else:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# from http://stackoverflow.com/a/8101118/1231454
class DocstringInheritMeta(type):
"""A variation on
http://groups.google.com/group/comp.lang.python/msg/26f7b4fcb4d66c95
by Paul McGuire
"""
def __new__(meta, name, bases, clsdict):
if not('__doc__' in clsdict and clsdict['__doc__']):
for mro_cls in (mro_cls for base in bases for mro_cls in base.mro()):
doc = mro_cls.__doc__
if doc:
clsdict['__doc__'] = doc
break
for attr, attribute in clsdict.items():
if not attribute.__doc__:
for mro_cls in (mro_cls for base in bases for mro_cls in base.mro()
if hasattr(mro_cls, attr)):
doc = getattr(getattr(mro_cls, attr), '__doc__')
if doc:
attribute.__doc__ = doc
break
return type.__new__(meta, name, bases, clsdict)
def dual_decorator(func):
"""This is a decorator that converts a paramaterized decorator for no-param use.
source: http://stackoverflow.com/questions/3888158.
"""
@functools.wraps(func)
def inner(*args, **kw):
if (len(args) == 1 and not kw and callable(args[0]) and
not (type(args[0]) == type and issubclass(args[0], BaseException))):
return func()(args[0])
else:
return func(*args, **kw)
return inner
@dual_decorator
def enforce_id_param(position=1):
"""Verifies that the caller is passing a single song id, and not
a song dictionary.
:param position: (optional) the position of the expected id - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if not isinstance(args[position], str):
raise ValueError("Invalid param type in position %s;"
" expected an id (did you pass a dictionary?)" % position)
return function(*args, **kw)
return wrapper
@dual_decorator
def enforce_ids_param(position=1):
"""Verifies that the caller is passing a list of song ids, and not a
list of song dictionaries.
:param position: (optional) the position of the expected list - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if (not isinstance(args[position], (list, tuple)) or
not all([isinstance(e, str) for e in args[position]])):
raise ValueError("Invalid param type in position %s;"
" expected ids (did you pass dictionaries?)" % position)
return function(*args, **kw)
return wrapper
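# Illustrative usage of the two guards above (SomeClient and its methods are
# hypothetical, not part of this module). dual_decorator lets both the bare and
# the parameterized forms work:
#
#   class SomeClient:
#       @enforce_id_param
#       def get_song(self, song_id):
#           ...
#
#       @enforce_ids_param(position=1)
#       def delete_songs(self, song_ids):
#           ...
#
# client.get_song(123) raises ValueError; client.get_song('songid') passes.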
def configure_debug_log_handlers(logger):
"""Warnings and above to stderr, below to gmusicapi.log when possible.
Output includes line number."""
global printed_log_start_message
logger.setLevel(logging.DEBUG)
logging_to_file = True
try:
make_sure_path_exists(os.path.dirname(log_filepath), 0o700)
debug_handler = logging.FileHandler(log_filepath, encoding='utf-8')
except OSError:
logging_to_file = False
debug_handler = logging.StreamHandler()
debug_handler.setLevel(logging.DEBUG)
important_handler = logging.StreamHandler()
important_handler.setLevel(logging.WARNING)
logger.addHandler(debug_handler)
logger.addHandler(important_handler)
if not printed_log_start_message:
# print out startup message without verbose formatting
logger.info("!-- begin debug log --!")
logger.info("version: " + __version__)
if logging_to_file:
logger.info("logging to: " + log_filepath)
printed_log_start_message = True
formatter = logging.Formatter(
'%(asctime)s - %(name)s (%(module)s:%(lineno)s) [%(levelname)s]: %(message)s'
)
debug_handler.setFormatter(formatter)
important_handler.setFormatter(formatter)
@dual_decorator
def retry(retry_exception=None, tries=5, delay=2, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
    An exception from a final attempt will propagate.
:param retry_exception: exception (or tuple of exceptions) to check for and retry on.
If None, use (AssertionError, CallFailure).
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds
:param backoff: backoff multiplier
:param logger: logger to use. If None, use 'gmusicapi.utils' logger
Modified from
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python.
"""
if logger is None:
logger = logging.getLogger('gmusicapi.utils')
if retry_exception is None:
retry_exception = (AssertionError, CallFailure)
@decorator
def retry_wrapper(f, *args, **kwargs):
mtries, mdelay = tries, delay # make our own mutable copies
while mtries > 1:
try:
return f(*args, **kwargs)
except retry_exception as e:
logger.info("%s, retrying in %s seconds...", e, mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return retry_wrapper
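# Illustrative usage (flaky_request and flaky_io are hypothetical). With the
# defaults this makes up to 5 attempts, sleeping 2, 4, 8 and 16 seconds between
# them, and the exception from the final attempt propagates:
#
#   @retry
#   def flaky_request():
#       ...
#
#   @retry(retry_exception=(OSError,), tries=3, delay=1)
#   def flaky_io():
#       ...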
def pb_set(msg, field_name, val):
"""Return True and set val to field_name in msg if the assignment
is type-compatible, else return False.
val will be coerced to a proper type if needed.
:param msg: an instance of a protobuf.message
:param field_name:
    :param val: the value to assign
"""
# Find the proper type.
field_desc = msg.DESCRIPTOR.fields_by_name[field_name]
proper_type = cpp_type_to_python[field_desc.cpp_type]
# Try with the given type first.
# Their set hooks will automatically coerce.
try_types = (type(val), proper_type)
for t in try_types:
log.debug("attempt %s.%s = %s(%r)", msg.__class__.__name__, field_name, t, val)
try:
setattr(msg, field_name, t(val))
log.debug("! success")
break
except (TypeError, ValueError):
log.debug("X failure")
else:
return False # no assignments stuck
return True
def locate_mp3_transcoder():
"""Return the path to a transcoder (ffmpeg or avconv) with mp3 support.
Raise ValueError if none are suitable."""
transcoders = ['ffmpeg', 'avconv']
transcoder_details = {}
for transcoder in transcoders:
cmd_path = spawn.find_executable(transcoder)
if cmd_path is None:
transcoder_details[transcoder] = 'not installed'
continue
with open(os.devnull, "w") as null:
stdout = subprocess.check_output([cmd_path, '-codecs'], stderr=null).decode("ascii")
mp3_encoding_support = ('libmp3lame' in stdout and 'disable-libmp3lame' not in stdout)
if mp3_encoding_support:
transcoder_details[transcoder] = "mp3 encoding support"
break # mp3 decoding/encoding supported
else:
transcoder_details[transcoder] = 'no mp3 encoding support'
else:
raise ValueError('ffmpeg or avconv must be in the path and support mp3 encoding'
"\ndetails: %r" % transcoder_details)
return cmd_path
def transcode_to_mp3(filepath, quality='320k', slice_start=None, slice_duration=None):
"""Return the bytestring result of transcoding the file at *filepath* to mp3.
An ID3 header is not included in the result.
:param filepath: location of file
:param quality: if int, pass to -q:a. if string, pass to -b:a
-q:a roughly corresponds to libmp3lame -V0, -V1...
:param slice_start: (optional) transcode a slice, starting at this many seconds
:param slice_duration: (optional) when used with slice_start, the number of seconds in the slice
Raise:
* OSError: problems during transcoding
* ValueError: invalid params, transcoder not found
"""
err_output = None
cmd_path = locate_mp3_transcoder()
cmd = [cmd_path, '-i', filepath]
if slice_duration is not None:
cmd.extend(['-t', str(slice_duration)])
if slice_start is not None:
cmd.extend(['-ss', str(slice_start)])
if isinstance(quality, int):
cmd.extend(['-q:a', str(quality)])
elif isinstance(quality, str):
cmd.extend(['-b:a', quality])
else:
raise ValueError("quality must be int or string, but received %r" % quality)
cmd.extend(['-f', 's16le', # don't output id3 headers
'-c', 'libmp3lame',
'pipe:1'])
log.debug('running transcode command %r', cmd)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
audio_out, err_output = proc.communicate()
if proc.returncode != 0:
err_output = ("(return code: %r)\n" % proc.returncode) + err_output.decode("ascii")
raise OSError # handle errors in except
except OSError as e:
err_msg = "transcoding command (%r) failed: %s. " % (' '.join(cmd), e)
if 'No such file or directory' in str(e):
err_msg += '\nffmpeg or avconv must be installed and in the system path.'
if err_output is not None:
err_msg += "\nstderr: '%s'" % err_output
log.exception('transcoding failure:\n%s', err_msg)
raise OSError(err_msg)
else:
return audio_out
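# Illustrative calls (the path is a placeholder):
#
#   audio = transcode_to_mp3('/tmp/input.flac', quality='320k')
#   clip = transcode_to_mp3('/tmp/input.flac', quality=0,
#                           slice_start=30, slice_duration=10)
#
# An int quality maps to ffmpeg's -q:a (VBR), while a string maps to -b:a (CBR).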
def truncate(x, max_els=100, recurse_levels=0):
"""Return a 'shorter' truncated x of the same type, useful for logging.
recurse_levels is only valid for homogeneous lists/tuples.
max_els ignored for song dictionaries."""
# Coerce tuple to list to ease truncation.
is_tuple = False
if isinstance(x, tuple):
is_tuple = True
x = list(x)
try:
if len(x) > max_els:
if isinstance(x, str):
return x[:max_els] + '...'
elif isinstance(x, bytes):
return x[:max_els] + b'...'
if isinstance(x, dict):
if 'id' in x and 'titleNorm' in x:
# assume to be a song dict
trunc = {k: x.get(k) for k in ['title', 'artist', 'album']}
trunc['...'] = '...'
return trunc
else:
return dict(
itertools.chain(
itertools.islice(x.items(), 0, max_els),
[('...', '...')]))
if isinstance(x, list):
trunc = x[:max_els] + ['...']
if recurse_levels > 0:
trunc = [truncate(e, recurse_levels - 1) for e in trunc[:-1]]
if is_tuple:
trunc = tuple(trunc)
return trunc
except TypeError:
# does not have len
pass
return x
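# Quick illustration of truncate above:
#
#   >>> truncate('abcdefghij', max_els=3)
#   'abc...'
#   >>> truncate(list(range(10)), max_els=3)
#   [0, 1, 2, '...']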
@dual_decorator
def empty_arg_shortcircuit(return_code='[]', position=1):
"""Decorate a function to shortcircuit and return something immediately if
the length of a positional arg is 0.
:param return_code: (optional) simple expression to eval as the return value - default is a list
:param position: (optional) the position of the expected list - default is 1.
"""
# The normal pattern when making a collection an optional arg is to use
# a sentinel (like None). Otherwise, you run the risk of the collection
# being mutated - there's only one, not a new one on each call.
# Here we've got multiple things we'd like to
# return, so we can't do that. Rather than make some kind of enum for
# 'accepted return values' I'm just allowing freedom to return basic values.
    # ast.literal_eval can only evaluate simple literal expressions (e.g. [] and {}).
@decorator
def wrapper(function, *args, **kw):
if len(args[position]) == 0:
return ast.literal_eval(return_code)
else:
return function(*args, **kw)
return wrapper
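# Illustrative usage (SomeClient/get_track_info are hypothetical): calling the
# method with an empty list returns {} immediately instead of hitting the API:
#
#   class SomeClient:
#       @empty_arg_shortcircuit(return_code='{}')
#       def get_track_info(self, track_ids):
#           ...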
def accept_singleton(expected_type, position=1):
"""Allows a function expecting a list to accept a single item as well.
The item will be wrapped in a list.
Will not work for nested lists.
:param expected_type: the type of the items in the list
:param position: (optional) the position of the expected list - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if isinstance(args[position], expected_type):
# args are a tuple, can't assign into them
args = list(args)
args[position] = [args[position]]
args = tuple(args)
return function(*args, **kw)
return wrapper
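# Illustrative usage (rate_songs is hypothetical): callers may pass either a
# single id string or a list of id strings; a bare string is wrapped in a
# one-element list before the call:
#
#   class SomeClient:
#       @accept_singleton(str)
#       def rate_songs(self, song_ids, rating):
#           ...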
@decorator
def require_subscription(function, *args, **kwargs):
    """Raise NotSubscribed if the calling client has no subscription."""
    self = args[0]
if not self.is_subscribed:
raise NotSubscribed("%s requires a subscription." % function.__name__)
return function(*args, **kwargs)
# Modification of recipe found at
# https://wiki.python.org/moin/PythonDecoratorLibrary#Cached_Properties.
class cached_property:
"""Version of @property decorator that caches the result with a TTL.
Tracks the property's value and last refresh time in a dict attribute
of a class instance (``self._cache``) using the property name as the key.
"""
def __init__(self, ttl=0):
self.ttl = ttl
def __call__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
now = time.time()
try:
value, last_update = inst._cache[self.__name__]
if (self.ttl > 0) and (now - last_update > self.ttl):
raise AttributeError
except (KeyError, AttributeError):
value = self.fget(inst)
try:
cache = inst._cache
except AttributeError:
cache = inst._cache = {}
cache[self.__name__] = (value, now)
return value
def __set__(self, inst, value):
raise AttributeError("Can't set cached properties")
def __delete__(self, inst):
try:
del inst._cache[self.__name__]
        except (KeyError, AttributeError):
            # nothing cached for this property (or no _cache dict at all yet);
            # make sure the cache dict exists for later accesses
            if not hasattr(inst, '_cache'):
                inst._cache = {}
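# Illustrative usage (Library/track_count/expensive_count are hypothetical):
# the first access calls the getter and stores the value in self._cache; later
# accesses within the TTL return the cached value, and ttl=0 caches forever:
#
#   class Library:
#       @cached_property(ttl=60)
#       def track_count(self):
#           return expensive_count()  # placeholder for a slow computation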
# Used to mark a field as unimplemented.
@property
def NotImplementedField(self):
raise NotImplementedError
|
from homeassistant.components.lock import LockEntity
from . import DATA_KEY, VolvoEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volvo On Call lock."""
if discovery_info is None:
return
async_add_entities([VolvoLock(hass.data[DATA_KEY], *discovery_info)])
class VolvoLock(VolvoEntity, LockEntity):
"""Represents a car lock."""
@property
def is_locked(self):
"""Return true if lock is locked."""
return self.instrument.is_locked
async def async_lock(self, **kwargs):
"""Lock the car."""
await self.instrument.lock()
async def async_unlock(self, **kwargs):
"""Unlock the car."""
await self.instrument.unlock()
|
import os
import sh
from molecule import logger
from molecule import util
from molecule.verifier.lint import base
LOG = logger.get_logger(__name__)
class RuboCop(base.Base):
"""
`RuboCop`_ is not the default verifier linter.
`RuboCop`_ is a linter for ruby files.
Additional options can be passed to `rubocop` through the options
dict. Any option set in this section will override the defaults.
.. code-block:: yaml
verifier:
name: inspec
lint:
name: rubocop
options:
auto-correct: True
Test file linting can be disabled by setting `enabled` to False.
.. code-block:: yaml
verifier:
name: inspec
lint:
name: rubocop
enabled: False
Environment variables can be passed to lint.
.. code-block:: yaml
verifier:
name: inspec
lint:
name: rubocop
env:
FOO: bar
.. _`RuboCop`: https://rubocop.readthedocs.io/en/latest/
"""
def __init__(self, config):
"""
Sets up the requirements to execute `rubocop` and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(RuboCop, self).__init__(config)
self._rubocop_command = None
if config:
self._tests = self._get_tests()
@property
def default_options(self):
d = {}
if self._config.debug:
d['d'] = True
return d
@property
def default_env(self):
return util.merge_dicts(os.environ.copy(), self._config.env)
def bake(self):
"""
Bake a `rubocop` command so it's ready to execute and returns None.
:return: None
"""
self._rubocop_command = sh.rubocop.bake(
self.options,
self._tests,
_env=self.env,
_out=LOG.out,
_err=LOG.error)
def execute(self):
if not self.enabled:
msg = 'Skipping, verifier_lint is disabled.'
LOG.warn(msg)
return
        if not self._tests:
msg = 'Skipping, no tests found.'
LOG.warn(msg)
return
if self._rubocop_command is None:
self.bake()
msg = 'Executing RuboCop on files found in {}/...'.format(
self._config.verifier.directory)
LOG.info(msg)
try:
util.run_command(self._rubocop_command, debug=self._config.debug)
msg = 'Lint completed successfully.'
LOG.success(msg)
except sh.ErrorReturnCode as e:
util.sysexit(e.exit_code)
def _get_tests(self):
"""
Walk the verifier's directory for tests and returns a list.
:return: list
"""
return [
filename for filename in util.os_walk(
self._config.verifier.directory, 'test_*.rb')
]
|
import configparser
import io
import itertools
import os
import shutil
from nikola.plugin_categories import LateTask
from nikola import utils
class BuildBundles(LateTask):
"""Bundle assets."""
name = "create_bundles"
def gen_tasks(self):
"""Bundle assets."""
kw = {
'filters': self.site.config['FILTERS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'cache_folder': self.site.config['CACHE_FOLDER'],
'theme_bundles': get_theme_bundles(self.site.THEMES),
'themes': self.site.THEMES,
'files_folders': self.site.config['FILES_FOLDERS'],
'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'],
}
def build_bundle(output, inputs):
out_dir = os.path.join(kw['output_folder'],
os.path.dirname(output))
inputs = [
os.path.join(
out_dir,
os.path.relpath(i, out_dir))
for i in inputs if os.path.isfile(i)
]
with open(os.path.join(out_dir, os.path.basename(output)), 'wb+') as out_fh:
for i in inputs:
with open(i, 'rb') as in_fh:
shutil.copyfileobj(in_fh, out_fh)
out_fh.write(b'\n')
yield self.group_task()
if self.site.config['USE_BUNDLES']:
for name, _files in kw['theme_bundles'].items():
output_path = os.path.join(kw['output_folder'], name)
dname = os.path.dirname(name)
files = []
for fname in _files:
# paths are relative to dirname
files.append(os.path.join(dname, fname))
file_dep = [os.path.join(kw['output_folder'], fname)
for fname in files if
utils.get_asset_path(
fname,
self.site.THEMES,
self.site.config['FILES_FOLDERS'],
output_dir=kw['output_folder']) or fname == os.path.join('assets', 'css', 'code.css')]
# code.css will be generated by us if it does not exist in
# FILES_FOLDERS or theme assets. It is guaranteed that the
# generation will happen before this task.
task = {
'file_dep': list(file_dep),
'task_dep': ['copy_assets', 'copy_files'],
'basename': str(self.name),
'name': str(output_path),
'actions': [(build_bundle, (name, file_dep))],
'targets': [output_path],
'uptodate': [
utils.config_changed({
1: kw,
2: file_dep
}, 'nikola.plugins.task.bundles')],
'clean': True,
}
yield utils.apply_filters(task, kw['filters'])
def get_theme_bundles(themes):
"""Given a theme chain, return the bundle definitions."""
for theme_name in themes:
bundles_path = os.path.join(
utils.get_theme_path(theme_name), 'bundles')
if os.path.isfile(bundles_path):
config = configparser.ConfigParser()
header = io.StringIO('[bundles]\n')
with open(bundles_path, 'rt') as fd:
config.read_file(itertools.chain(header, fd))
bundles = {}
for name, files in config['bundles'].items():
name = name.strip().replace('/', os.sep)
files = [f.strip() for f in files.split(',') if f.strip()]
bundles[name] = files
return bundles
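# Illustrative ``bundles`` file shipped inside a theme directory (a sketch; the
# file names are placeholders). configparser reads it under the injected
# [bundles] header above, and each key becomes an output bundle built from the
# comma-separated input files:
#
#   assets/css/all.css = bootstrap.min.css, rst.css, code.css, theme.css
#   assets/js/all.js = jquery.min.js, bootstrap.min.js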
|
from datetime import timedelta
import logging
from pyebox import EboxClient
from pyebox.client import PyEboxError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
DATA_GIGABITS,
PERCENTAGE,
TIME_DAYS,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
PRICE = "CAD"
DEFAULT_NAME = "EBox"
REQUESTS_TIMEOUT = 15
SCAN_INTERVAL = timedelta(minutes=15)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
SENSOR_TYPES = {
"usage": ["Usage", PERCENTAGE, "mdi:percent"],
"balance": ["Balance", PRICE, "mdi:cash-usd"],
"limit": ["Data limit", DATA_GIGABITS, "mdi:download"],
"days_left": ["Days left", TIME_DAYS, "mdi:calendar-today"],
"before_offpeak_download": [
"Download before offpeak",
DATA_GIGABITS,
"mdi:download",
],
"before_offpeak_upload": ["Upload before offpeak", DATA_GIGABITS, "mdi:upload"],
"before_offpeak_total": ["Total before offpeak", DATA_GIGABITS, "mdi:download"],
"offpeak_download": ["Offpeak download", DATA_GIGABITS, "mdi:download"],
"offpeak_upload": ["Offpeak Upload", DATA_GIGABITS, "mdi:upload"],
"offpeak_total": ["Offpeak Total", DATA_GIGABITS, "mdi:download"],
"download": ["Download", DATA_GIGABITS, "mdi:download"],
"upload": ["Upload", DATA_GIGABITS, "mdi:upload"],
"total": ["Total", DATA_GIGABITS, "mdi:download"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_VARIABLES): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the EBox sensor."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
httpsession = hass.helpers.aiohttp_client.async_get_clientsession()
ebox_data = EBoxData(username, password, httpsession)
name = config.get(CONF_NAME)
try:
await ebox_data.async_update()
except PyEboxError as exp:
_LOGGER.error("Failed login: %s", exp)
raise PlatformNotReady from exp
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(EBoxSensor(ebox_data, variable, name))
async_add_entities(sensors, True)
class EBoxSensor(Entity):
"""Implementation of a EBox sensor."""
def __init__(self, ebox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.ebox_data = ebox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
async def async_update(self):
"""Get the latest data from EBox and update the state."""
await self.ebox_data.async_update()
if self.type in self.ebox_data.data:
self._state = round(self.ebox_data.data[self.type], 2)
class EBoxData:
"""Get data from Ebox."""
def __init__(self, username, password, httpsession):
"""Initialize the data object."""
self.client = EboxClient(username, password, REQUESTS_TIMEOUT, httpsession)
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Ebox."""
try:
await self.client.fetch_data()
except PyEboxError as exp:
_LOGGER.error("Error on receive last EBox data: %s", exp)
return
# Update data
self.data = self.client.get_data()
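# Illustrative sketch (not part of the platform): validating a hypothetical
# YAML-derived config dict against PLATFORM_SCHEMA above. The credentials are
# placeholders, not real values.
if __name__ == '__main__':
    example_config = {
        "platform": "ebox",
        "username": "user@example.com",
        "password": "secret",
        "monitored_variables": ["usage", "days_left"],
    }
    print(PLATFORM_SCHEMA(example_config))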
|
import os
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
flags.DEFINE_boolean('openstack_swift_insecure', False,
                     'Allow swiftclient to access Swift service without '
'having to verify the SSL certificate')
FLAGS = flags.FLAGS
SWIFTCLIENT_LIB_VERSION = 'python-swiftclient_lib_version'
class SwiftStorageService(object_storage_service.ObjectStorageService):
"""Interface to OpenStack Swift."""
STORAGE_NAME = providers.OPENSTACK
def __init__(self):
self.swift_command_prefix = ''
def PrepareService(self, location):
openstack_creds_set = ('OS_AUTH_URL' in os.environ,
'OS_TENANT_NAME' in os.environ,
'OS_USERNAME' in os.environ,
'OS_PASSWORD' in os.environ,)
if not all(openstack_creds_set):
raise errors.Benchmarks.MissingObjectCredentialException(
'OpenStack credentials not found in environment variables')
self.swift_command_parts = [
'--os-auth-url', os.environ['OS_AUTH_URL'],
'--os-tenant-name', os.environ['OS_TENANT_NAME'],
'--os-username', os.environ['OS_USERNAME'],
'--os-password', os.environ['OS_PASSWORD']]
if FLAGS.openstack_swift_insecure:
self.swift_command_parts.append('--insecure')
self.swift_command_prefix = ' '.join(self.swift_command_parts)
def MakeBucket(self, bucket, raise_on_failure=True):
_, stderr, ret_code = vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['post', bucket],
raise_on_failure=False)
if ret_code and raise_on_failure:
raise errors.Benchmarks.BucketCreationError(stderr)
def DeleteBucket(self, bucket):
self.EmptyBucket(bucket)
vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['delete', bucket],
raise_on_failure=False)
def Copy(self, src_url, dst_url):
"""See base class."""
raise NotImplementedError()
def CopyToBucket(self, src_path, bucket, object_path):
"""See base class."""
raise NotImplementedError()
def MakeRemoteCliDownloadUrl(self, bucket, object_path):
"""See base class."""
raise NotImplementedError()
def GenerateCliDownloadFileCommand(self, src_url, local_path):
"""See base class."""
raise NotImplementedError()
def List(self, buckets):
"""See base class."""
raise NotImplementedError()
def EmptyBucket(self, bucket):
vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['delete', bucket],
raise_on_failure=False)
def PrepareVM(self, vm):
vm.Install('swift_client')
def CleanupVM(self, vm):
vm.Uninstall('swift_client')
vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall absl-py')
def CLIUploadDirectory(self, vm, directory, file_names, bucket):
return vm.RemoteCommand(
'time swift %s upload %s %s'
% (self.swift_command_prefix, bucket, directory))
def CLIDownloadBucket(self, vm, bucket, objects, dest):
return vm.RemoteCommand(
'time swift %s download %s -D %s'
% (self.swift_command_prefix, bucket, dest))
def Metadata(self, vm):
return {SWIFTCLIENT_LIB_VERSION:
linux_packages.GetPipPackageVersion(vm, 'python-swiftclient')}
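# Illustrative usage sketch (not part of the module). Assumes the OS_* OpenStack
# environment variables are set and the `swift` CLI is installed locally; the
# bucket name is hypothetical.
if __name__ == '__main__':
    service = SwiftStorageService()
    service.PrepareService(location=None)  # reads OS_AUTH_URL, OS_USERNAME, ...
    service.MakeBucket('pkb-example-bucket', raise_on_failure=False)
    service.DeleteBucket('pkb-example-bucket')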
|
import asyncio
from datetime import timedelta
import json
import logging
import aiohttp
import async_timeout
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, HTTP_OK
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.util import dt
_LOGGER = logging.getLogger(__name__)
LWA_TOKEN_URI = "https://api.amazon.com/auth/o2/token"
LWA_HEADERS = {"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
PREEMPTIVE_REFRESH_TTL_IN_SECONDS = 300
STORAGE_KEY = "alexa_auth"
STORAGE_VERSION = 1
STORAGE_EXPIRE_TIME = "expire_time"
STORAGE_ACCESS_TOKEN = "access_token"
STORAGE_REFRESH_TOKEN = "refresh_token"
class Auth:
"""Handle authentication to send events to Alexa."""
def __init__(self, hass, client_id, client_secret):
"""Initialize the Auth class."""
self.hass = hass
self.client_id = client_id
self.client_secret = client_secret
self._prefs = None
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._get_token_lock = asyncio.Lock()
async def async_do_auth(self, accept_grant_code):
"""Do authentication with an AcceptGrant code."""
        # The access token has not been retrieved yet, so this is the initial
        # access token request.
lwa_params = {
"grant_type": "authorization_code",
"code": accept_grant_code,
CONF_CLIENT_ID: self.client_id,
CONF_CLIENT_SECRET: self.client_secret,
}
_LOGGER.debug(
"Calling LWA to get the access token (first time), with: %s",
json.dumps(lwa_params),
)
return await self._async_request_new_token(lwa_params)
@callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
self._prefs[STORAGE_ACCESS_TOKEN] = None
async def async_get_access_token(self):
"""Perform access token or token refresh request."""
async with self._get_token_lock:
if self._prefs is None:
await self.async_load_preferences()
if self.is_token_valid():
_LOGGER.debug("Token still valid, using it")
return self._prefs[STORAGE_ACCESS_TOKEN]
if self._prefs[STORAGE_REFRESH_TOKEN] is None:
_LOGGER.debug("Token invalid and no refresh token available")
return None
lwa_params = {
"grant_type": "refresh_token",
"refresh_token": self._prefs[STORAGE_REFRESH_TOKEN],
CONF_CLIENT_ID: self.client_id,
CONF_CLIENT_SECRET: self.client_secret,
}
_LOGGER.debug("Calling LWA to refresh the access token")
return await self._async_request_new_token(lwa_params)
@callback
def is_token_valid(self):
"""Check if a token is already loaded and if it is still valid."""
if not self._prefs[STORAGE_ACCESS_TOKEN]:
return False
expire_time = dt.parse_datetime(self._prefs[STORAGE_EXPIRE_TIME])
preemptive_expire_time = expire_time - timedelta(
seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS
)
return dt.utcnow() < preemptive_expire_time
async def _async_request_new_token(self, lwa_params):
try:
session = aiohttp_client.async_get_clientsession(self.hass)
with async_timeout.timeout(10):
response = await session.post(
LWA_TOKEN_URI,
headers=LWA_HEADERS,
data=lwa_params,
allow_redirects=True,
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout calling LWA to get auth token")
return None
_LOGGER.debug("LWA response header: %s", response.headers)
_LOGGER.debug("LWA response status: %s", response.status)
if response.status != HTTP_OK:
_LOGGER.error("Error calling LWA to get auth token")
return None
response_json = await response.json()
_LOGGER.debug("LWA response body : %s", response_json)
access_token = response_json["access_token"]
refresh_token = response_json["refresh_token"]
expires_in = response_json["expires_in"]
expire_time = dt.utcnow() + timedelta(seconds=expires_in)
await self._async_update_preferences(
access_token, refresh_token, expire_time.isoformat()
)
return access_token
async def async_load_preferences(self):
"""Load preferences with stored tokens."""
self._prefs = await self._store.async_load()
if self._prefs is None:
self._prefs = {
STORAGE_ACCESS_TOKEN: None,
STORAGE_REFRESH_TOKEN: None,
STORAGE_EXPIRE_TIME: None,
}
async def _async_update_preferences(self, access_token, refresh_token, expire_time):
"""Update user preferences."""
if self._prefs is None:
await self.async_load_preferences()
if access_token is not None:
self._prefs[STORAGE_ACCESS_TOKEN] = access_token
if refresh_token is not None:
self._prefs[STORAGE_REFRESH_TOKEN] = refresh_token
if expire_time is not None:
self._prefs[STORAGE_EXPIRE_TIME] = expire_time
await self._store.async_save(self._prefs)
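# Illustrative sketch (not part of the module): the pre-emptive expiry check used
# by is_token_valid(), shown standalone with a hypothetical one-hour token.
if __name__ == '__main__':
    expire_time = dt.utcnow() + timedelta(seconds=3600)
    preemptive_expire_time = expire_time - timedelta(
        seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS
    )
    print("token still usable:", dt.utcnow() < preemptive_expire_time)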
|
import io
import datetime
import operator
import os
import shutil
import subprocess
import sys
import dateutil.tz
from blinker import signal
from nikola.plugin_categories import Command
from nikola import utils
COMPILERS_DOC_LINK = 'https://getnikola.com/handbook.html#configuring-other-input-formats'
POSTLOGGER = utils.get_logger('new_post')
PAGELOGGER = utils.get_logger('new_page')
LOGGER = POSTLOGGER
def get_default_compiler(is_post, compilers, post_pages):
"""Given compilers and post_pages, return a reasonable default compiler for this kind of post/page."""
# First throw away all the post_pages with the wrong is_post
filtered = [entry for entry in post_pages if entry[3] == is_post]
# Get extensions in filtered post_pages until one matches a compiler
for entry in filtered:
extension = os.path.splitext(entry[0])[-1]
for compiler, extensions in compilers.items():
if extension in extensions:
return compiler
# No idea, back to default behaviour
return 'rest'
def get_date(schedule=False, rule=None, last_date=None, tz=None, iso8601=False):
"""Return a date stamp, given a recurrence rule.
schedule - bool:
whether to use the recurrence rule or not
rule - str:
an iCal RRULE string that specifies the rule for scheduling posts
last_date - datetime:
timestamp of the last post
tz - tzinfo:
the timezone used for getting the current time.
iso8601 - bool:
whether to force ISO 8601 dates (instead of locale-specific ones)
"""
if tz is None:
tz = dateutil.tz.tzlocal()
date = now = datetime.datetime.now(tz)
if schedule:
try:
from dateutil import rrule
except ImportError:
LOGGER.error('To use the --schedule switch of new_post, '
'you have to install the "dateutil" package.')
rrule = None
if schedule and rrule and rule:
try:
rule_ = rrule.rrulestr(rule, dtstart=last_date or date)
except Exception:
LOGGER.error('Unable to parse rule string, using current time.')
else:
date = rule_.after(max(now, last_date or now), last_date is None)
offset = tz.utcoffset(now)
offset_sec = (offset.days * 24 * 3600 + offset.seconds)
offset_hrs = offset_sec // 3600
offset_min = offset_sec % 3600
if iso8601:
tz_str = '{0:+03d}:{1:02d}'.format(offset_hrs, offset_min // 60)
else:
if offset:
tz_str = ' UTC{0:+03d}:{1:02d}'.format(offset_hrs, offset_min // 60)
else:
tz_str = ' UTC'
return (date.strftime('%Y-%m-%d %H:%M:%S') + tz_str, date)
class CommandNewPost(Command):
"""Create a new post."""
name = "new_post"
doc_usage = "[options] [path]"
doc_purpose = "create a new blog post or site page"
cmd_options = [
{
'name': 'is_page',
'short': 'p',
'long': 'page',
'type': bool,
'default': False,
'help': 'Create a page instead of a blog post. (see also: `nikola new_page`)'
},
{
'name': 'title',
'short': 't',
'long': 'title',
'type': str,
'default': '',
'help': 'Title for the post.'
},
{
'name': 'author',
'short': 'a',
'long': 'author',
'type': str,
'default': '',
'help': 'Author of the post.'
},
{
'name': 'tags',
'long': 'tags',
'type': str,
'default': '',
'help': 'Comma-separated tags for the post.'
},
{
'name': 'onefile',
'short': '1',
'type': bool,
'default': False,
'help': 'Create the post with embedded metadata (single file format)'
},
{
'name': 'twofile',
'short': '2',
'type': bool,
'default': False,
'help': 'Create the post with separate metadata (two file format)'
},
{
'name': 'edit',
'short': 'e',
'type': bool,
'default': False,
'help': 'Open the post (and meta file, if any) in $EDITOR after creation.'
},
{
'name': 'content_format',
'short': 'f',
'long': 'format',
'type': str,
'default': '',
'help': 'Markup format for the post (use --available-formats for list)',
},
{
'name': 'available-formats',
'short': 'F',
'long': 'available-formats',
'type': bool,
'default': False,
'help': 'List all available input formats'
},
{
'name': 'schedule',
'short': 's',
'type': bool,
'default': False,
'help': 'Schedule the post based on recurrence rule'
},
{
'name': 'import',
'short': 'i',
'long': 'import',
'type': str,
'default': '',
'help': 'Import an existing file instead of creating a placeholder'
},
{
'name': 'date-path',
'short': 'd',
'long': 'date-path',
'type': bool,
'default': False,
            'help': 'Create post with date path (e.g. year/month/day, see NEW_POST_DATE_PATH_FORMAT in config)'
},
]
def _execute(self, options, args):
"""Create a new post or page."""
global LOGGER
compiler_names = [p.name for p in
self.site.plugin_manager.getPluginsOfCategory(
"PageCompiler")]
if len(args) > 1:
print(self.help())
return False
elif args:
path = args[0]
else:
path = None
# Even though stuff was split into `new_page`, it’s easier to do it
# here not to duplicate the code.
is_page = options.get('is_page', False)
is_post = not is_page
content_type = 'page' if is_page else 'post'
title = options['title'] or None
author = options['author'] or ''
tags = options['tags']
onefile = options['onefile']
twofile = options['twofile']
import_file = options['import']
wants_available = options['available-formats']
date_path_opt = options['date-path']
date_path_auto = self.site.config['NEW_POST_DATE_PATH'] and content_type == 'post'
date_path_format = self.site.config['NEW_POST_DATE_PATH_FORMAT'].strip('/')
post_type = options.get('type', 'text')
if wants_available:
self.print_compilers()
return
if is_page:
LOGGER = PAGELOGGER
else:
LOGGER = POSTLOGGER
if twofile:
onefile = False
if not onefile and not twofile:
onefile = self.site.config.get('ONE_FILE_POSTS', True)
content_format = options['content_format']
content_subformat = None
if "@" in content_format:
content_format, content_subformat = content_format.split("@")
if not content_format and path and not os.path.isdir(path):
# content_format not specified. If path was given, use
# it to guess (Issue #2798)
extension = os.path.splitext(path)[-1]
for compiler, extensions in self.site.config['COMPILERS'].items():
if extension in extensions:
content_format = compiler
if not content_format:
LOGGER.error("Unknown {0} extension {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, extension))
return
elif not content_format and import_file:
# content_format not specified. If import_file was given, use
# it to guess (Issue #2798)
extension = os.path.splitext(import_file)[-1]
for compiler, extensions in self.site.config['COMPILERS'].items():
if extension in extensions:
content_format = compiler
if not content_format:
LOGGER.error("Unknown {0} extension {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, extension))
return
elif not content_format: # Issue #400
content_format = get_default_compiler(
is_post,
self.site.config['COMPILERS'],
self.site.config['post_pages'])
elif content_format not in compiler_names:
LOGGER.error("Unknown {0} format {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, content_format))
self.print_compilers()
return
compiler_plugin = self.site.plugin_manager.getPluginByName(
content_format, "PageCompiler").plugin_object
# Guess where we should put this
entry = self.filter_post_pages(content_format, is_post)
if entry is False:
return 1
if import_file:
print("Importing Existing {xx}".format(xx=content_type.title()))
print("-----------------------\n")
else:
print("Creating New {xx}".format(xx=content_type.title()))
print("-----------------\n")
if title is not None:
print("Title:", title)
else:
while not title:
title = utils.ask('Title')
if isinstance(title, bytes):
try:
title = title.decode(sys.stdin.encoding)
except (AttributeError, TypeError): # for tests
title = title.decode('utf-8')
title = title.strip()
if not path:
slug = utils.slugify(title, lang=self.site.default_lang)
else:
if isinstance(path, bytes):
try:
path = path.decode(sys.stdin.encoding)
except (AttributeError, TypeError): # for tests
path = path.decode('utf-8')
if os.path.isdir(path):
# If the user provides a directory, add the file name generated from title (Issue #2651)
slug = utils.slugify(title, lang=self.site.default_lang)
pattern = os.path.basename(entry[0])
suffix = pattern[1:]
path = os.path.join(path, slug + suffix)
else:
slug = utils.slugify(os.path.splitext(os.path.basename(path))[0], lang=self.site.default_lang)
if isinstance(author, bytes):
try:
author = author.decode(sys.stdin.encoding)
except (AttributeError, TypeError): # for tests
author = author.decode('utf-8')
# Calculate the date to use for the content
# SCHEDULE_ALL is post-only (Issue #2921)
schedule = options['schedule'] or (self.site.config['SCHEDULE_ALL'] and is_post)
rule = self.site.config['SCHEDULE_RULE']
self.site.scan_posts()
timeline = self.site.timeline
last_date = None if not timeline else timeline[0].date
date, dateobj = get_date(schedule, rule, last_date, self.site.tzinfo, self.site.config['FORCE_ISO8601'])
data = {
'title': title,
'slug': slug,
'date': date,
'tags': tags,
'link': '',
'description': '',
'type': post_type,
}
if not path:
pattern = os.path.basename(entry[0])
suffix = pattern[1:]
output_path = os.path.dirname(entry[0])
if date_path_auto or date_path_opt:
output_path += os.sep + dateobj.strftime(date_path_format)
txt_path = os.path.join(output_path, slug + suffix)
meta_path = os.path.join(output_path, slug + ".meta")
else:
if date_path_opt:
LOGGER.warning("A path has been specified, ignoring -d")
txt_path = os.path.join(self.site.original_cwd, path)
meta_path = os.path.splitext(txt_path)[0] + ".meta"
if (not onefile and os.path.isfile(meta_path)) or \
os.path.isfile(txt_path):
# Emit an event when a post exists
event = dict(path=txt_path)
if not onefile: # write metadata file
event['meta_path'] = meta_path
signal('existing_' + content_type).send(self, **event)
LOGGER.error("The title already exists!")
LOGGER.info("Existing {0}'s text is at: {1}".format(content_type, txt_path))
if not onefile:
LOGGER.info("Existing {0}'s metadata is at: {1}".format(content_type, meta_path))
return 8
d_name = os.path.dirname(txt_path)
utils.makedirs(d_name)
metadata = {}
if author:
metadata['author'] = author
metadata.update(self.site.config['ADDITIONAL_METADATA'])
data.update(metadata)
# ipynb plugin needs the Jupyter kernel info. We get the kernel name
# from the content_subformat and pass it to the compiler in the metadata
if content_format == "ipynb" and content_subformat is not None:
metadata["jupyter_kernel"] = content_subformat
# Override onefile if not really supported.
if not compiler_plugin.supports_onefile and onefile:
onefile = False
LOGGER.warning('This compiler does not support one-file posts.')
if onefile and import_file:
with io.open(import_file, 'r', encoding='utf-8-sig') as fh:
content = fh.read()
elif not import_file:
if is_page:
content = self.site.MESSAGES[self.site.default_lang]["Write your page here."]
else:
content = self.site.MESSAGES[self.site.default_lang]["Write your post here."]
if (not onefile) and import_file:
# Two-file posts are copied on import (Issue #2380)
shutil.copy(import_file, txt_path)
else:
compiler_plugin.create_post(
txt_path, content=content, onefile=onefile, title=title,
slug=slug, date=date, tags=tags, is_page=is_page, type=post_type, **metadata)
event = dict(path=txt_path)
if not onefile: # write metadata file
with io.open(meta_path, "w+", encoding="utf8") as fd:
fd.write(utils.write_metadata(data, comment_wrap=False, site=self.site))
LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path))
event['meta_path'] = meta_path
LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path))
signal('new_' + content_type).send(self, **event)
if options['edit']:
editor = os.getenv('EDITOR', '').split()
to_run = editor + [txt_path]
if not onefile:
to_run.append(meta_path)
if editor:
subprocess.call(to_run)
else:
LOGGER.error('The $EDITOR environment variable is not set, cannot edit the post with \'-e\'. Please edit the post manually.')
def filter_post_pages(self, compiler, is_post):
"""Return the correct entry from post_pages.
Information based on:
* selected compilers
* available compilers
* post/page status
"""
compilers = self.site.config['COMPILERS']
post_pages = self.site.config['post_pages']
compiler_objs = self.site.compilers
# First throw away all the post_pages with the wrong is_post
filtered = [entry for entry in post_pages if entry[3] == is_post]
# These are the extensions supported by the required format
extensions = compilers.get(compiler)
if extensions is None:
if compiler in compiler_objs:
LOGGER.error("There is a {0} compiler available, but it's not set in your COMPILERS option.".format(compiler))
LOGGER.info("Read more: {0}".format(COMPILERS_DOC_LINK))
else:
LOGGER.error('Unknown format {0}'.format(compiler))
self.print_compilers()
return False
# Throw away the post_pages with the wrong extensions
filtered = [entry for entry in filtered if any([ext in entry[0] for ext in
extensions])]
if not filtered:
type_name = "post" if is_post else "page"
LOGGER.error("Can't find a way, using your configuration, to create "
"a {0} in format {1}. You may want to tweak "
"COMPILERS or {2}S in conf.py".format(
type_name, compiler, type_name.upper()))
LOGGER.info("Read more: {0}".format(COMPILERS_DOC_LINK))
return False
return filtered[0]
def print_compilers(self):
"""List all available compilers in a human-friendly format."""
# We use compilers_raw, because the normal dict can contain
# garbage coming from the translation candidate implementation.
# Entries are in format: (name, extensions, used_in_post_pages)
compilers_raw = self.site.config['_COMPILERS_RAW']
used_compilers = []
unused_compilers = []
disabled_compilers = []
for name, plugin in self.site.compilers.items():
if name in compilers_raw:
used_compilers.append([
name,
plugin.friendly_name or name,
compilers_raw[name],
True
])
else:
disabled_compilers.append([
name,
plugin.friendly_name or name,
(),
False
])
for name, (_, _, pi) in self.site.disabled_compilers.items():
if pi.details.has_option('Nikola', 'Friendlyname'):
f_name = pi.details.get('Nikola', 'Friendlyname')
else:
f_name = name
if name in compilers_raw:
unused_compilers.append([
name,
f_name,
compilers_raw[name],
False
])
else:
disabled_compilers.append([
name,
f_name,
(),
False
])
used_compilers.sort(key=operator.itemgetter(0))
unused_compilers.sort(key=operator.itemgetter(0))
disabled_compilers.sort(key=operator.itemgetter(0))
# We also group the compilers by status for readability.
parsed_list = used_compilers + unused_compilers + disabled_compilers
print("Available input formats:\n")
name_width = max([len(i[0]) for i in parsed_list] + [4]) # 4 == len('NAME')
fname_width = max([len(i[1]) for i in parsed_list] + [11]) # 11 == len('DESCRIPTION')
print((' {0:<' + str(name_width) + '} {1:<' + str(fname_width) + '} EXTENSIONS\n').format('NAME', 'DESCRIPTION'))
for name, fname, extensions, used in parsed_list:
flag = ' ' if used else '!'
flag = flag if extensions else '~'
extensions = ', '.join(extensions) if extensions else '(disabled: not in COMPILERS)'
print(('{flag}{name:<' + str(name_width) + '} {fname:<' + str(fname_width) + '} {extensions}').format(flag=flag, name=name, fname=fname, extensions=extensions))
print("""
More compilers are available in the Plugins Index.
Compilers marked with ! and ~ require additional configuration:
! not in the POSTS/PAGES tuples and any post scanners (unused)
~ not in the COMPILERS dict (disabled)
Read more: {0}""".format(COMPILERS_DOC_LINK))
|
import numpy as np
from sklearn.decomposition import PCA
from scattertext.Scalers import stretch_0_to_1
from scattertext.termscoring.RankDifference import RankDifference
from scattertext.categoryprojector.CategoryProjector import CategoryProjector
from scattertext.termcompaction.AssociationCompactor \
import AssociationCompactor, AssociationCompactorByRank, TermCategoryRanker
def morista_index(points):
# Morisita Index of Dispersion
N = points.shape[1]
ims = []
for i in range(1, N):
bins, _, _ = np.histogram2d(points[0], points[1], i)
        # I_M = Q * (\sum_{k=1}^{Q}{n_k * (n_k - 1)})/(N * (N - 1))
Q = len(bins) # num_quadrants
# Eqn 1.
I_M = Q * np.sum(np.ravel(bins) * (np.ravel(bins) - 1)) / (N * (N - 1))
ims.append([i, I_M])
return np.array(ims).T[1].max()
def ripley_poisson_difference(points):
try:
from astropy.stats import RipleysKEstimator
    except ImportError:
        raise Exception("Please install astropy")
r = np.linspace(0, np.sqrt(2), 100)
ripley = RipleysKEstimator(area=1., x_max=1., y_max=1., x_min=0., y_min=0.)
return np.sum(np.abs(ripley(points, r, mode='ripley') - ripley.poisson(r)))
def get_optimal_category_projection(
corpus,
n_dims=3,
n_steps=10,
projector=lambda n_terms, n_dims: CategoryProjector(
selector=AssociationCompactor(n_terms, scorer=RankDifference),
projector=PCA(n_dims)),
        optimizer=morista_index,
verbose=False
):
min_dev = None
best_k = None
best_x = None
best_y = None
best_projector = None
for k in np.power(2, np.linspace(np.log(corpus.get_num_categories()) / np.log(2),
np.log(corpus.get_num_terms()) / np.log(2), n_steps)).astype(int):
category_projector = projector(k, n_dims)
category_projection = category_projector.project(corpus)
for dim_1 in range(0, n_dims):
for dim_2 in range(dim_1 + 1, n_dims):
proj = category_projection.projection[:, [dim_1, dim_2]]
scaled_proj = np.array([stretch_0_to_1(proj.T[0]), stretch_0_to_1(proj.T[1])]).T
dev = optimizer(scaled_proj)
#dev = np.sum(np.abs(ripley(scaled_proj, r, mode='ripley') - ripley.poisson(r)))
if min_dev is None or dev < min_dev:
min_dev = dev
best_k = k
best_projector = category_projector
best_x, best_y = (dim_1, dim_2)
if verbose:
print(k, dim_1, dim_2, dev, best_k, best_x, best_y, min_dev)
if verbose:
print(best_k, best_x, best_y)
return best_projector.project(corpus, best_x, best_y)
def get_optimal_category_projection_by_rank(
corpus,
n_dims=2,
n_steps=20,
projector=lambda rank, n_dims: CategoryProjector(AssociationCompactorByRank(rank),
projector=PCA(n_dims)),
verbose=False
):
try:
from astropy.stats import RipleysKEstimator
    except ImportError:
        raise Exception("Please install astropy")
ripley = RipleysKEstimator(area=1., x_max=1., y_max=1., x_min=0., y_min=0.)
min_dev = None
best_rank = None
best_x = None
best_y = None
best_projector = None
for rank in np.linspace(1, TermCategoryRanker().get_max_rank(corpus), n_steps):
r = np.linspace(0, np.sqrt(2), 100)
category_projector = projector(rank, n_dims)
category_projection = category_projector.project(corpus)
for dim_1 in range(0, n_dims):
for dim_2 in range(dim_1 + 1, n_dims):
proj = category_projection.projection[:, [dim_1, dim_2]]
scaled_proj = np.array([stretch_0_to_1(proj.T[0]), stretch_0_to_1(proj.T[1])]).T
dev = np.sum(np.abs(ripley(scaled_proj, r, mode='ripley') - ripley.poisson(r)))
if min_dev is None or dev < min_dev:
min_dev = dev
best_rank = rank
best_projector = category_projector
best_x, best_y = (dim_1, dim_2)
if verbose:
print('rank', rank, 'dims', dim_1, dim_2, 'K', dev)
print(' best rank', best_rank, 'dims', best_x, best_y, 'K', min_dev)
if verbose:
print(best_rank, best_x, best_y)
return best_projector.project(corpus, best_x, best_y)
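# Illustrative sketch (not part of the module): the Ripley K deviation used as a
# dispersion score, computed on hypothetical uniform random points in the unit
# square. Requires astropy, like ripley_poisson_difference() itself.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    points = rng.uniform(size=(200, 2))
    print('Ripley/Poisson deviation:', ripley_poisson_difference(points))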
|
import logging
import os
import random
import warnings
import tempfile
import xml.etree.ElementTree as et
import zipfile
from itertools import chain
import numpy
from gensim import utils, matutils
from gensim.models import basemodel
from gensim.models.ldamodel import LdaModel
from gensim.utils import check_output, revdict
logger = logging.getLogger(__name__)
class LdaMallet(utils.SaveLoad, basemodel.BaseTopicModel):
"""Python wrapper for LDA using `MALLET <http://mallet.cs.umass.edu/>`_.
Communication between MALLET and Python takes place by passing around data files on disk
and calling Java with subprocess.call().
Warnings
--------
    This is **only** a Python wrapper for `MALLET LDA <http://mallet.cs.umass.edu/>`_;
    you need to install the original implementation first and pass the path to the binary to ``mallet_path``.
"""
def __init__(self, mallet_path, corpus=None, num_topics=100, alpha=50, id2word=None, workers=4, prefix=None,
optimize_interval=0, iterations=1000, topic_threshold=0.0, random_seed=0):
"""
Parameters
----------
mallet_path : str
Path to the mallet binary, e.g. `/home/username/mallet-2.0.7/bin/mallet`.
corpus : iterable of iterable of (int, int), optional
Collection of texts in BoW format.
num_topics : int, optional
Number of topics.
alpha : int, optional
Alpha parameter of LDA.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Mapping between tokens ids and words from corpus, if not specified - will be inferred from `corpus`.
workers : int, optional
Number of threads that will be used for training.
prefix : str, optional
Prefix for produced temporary files.
optimize_interval : int, optional
            Optimize hyperparameters every `optimize_interval` iterations
            (sometimes leads to Java exceptions; set to 0 to switch off hyperparameter optimization).
iterations : int, optional
Number of training iterations.
topic_threshold : float, optional
Threshold of the probability above which we consider a topic.
random_seed: int, optional
Random seed to ensure consistent results, if 0 - use system clock.
"""
self.mallet_path = mallet_path
self.id2word = id2word
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 0 if not self.id2word else 1 + max(self.id2word.keys())
if self.num_terms == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.num_topics = num_topics
self.topic_threshold = topic_threshold
self.alpha = alpha
if prefix is None:
rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '_'
prefix = os.path.join(tempfile.gettempdir(), rand_prefix)
self.prefix = prefix
self.workers = workers
self.optimize_interval = optimize_interval
self.iterations = iterations
self.random_seed = random_seed
if corpus is not None:
self.train(corpus)
def finferencer(self):
"""Get path to inferencer.mallet file.
Returns
-------
str
Path to inferencer.mallet file.
"""
return self.prefix + 'inferencer.mallet'
def ftopickeys(self):
"""Get path to topic keys text file.
Returns
-------
str
Path to topic keys text file.
"""
return self.prefix + 'topickeys.txt'
def fstate(self):
"""Get path to temporary file.
Returns
-------
str
Path to file.
"""
return self.prefix + 'state.mallet.gz'
def fdoctopics(self):
"""Get path to document topic text file.
Returns
-------
str
Path to document topic text file.
"""
return self.prefix + 'doctopics.txt'
def fcorpustxt(self):
"""Get path to corpus text file.
Returns
-------
str
Path to corpus text file.
"""
return self.prefix + 'corpus.txt'
def fcorpusmallet(self):
"""Get path to corpus.mallet file.
Returns
-------
str
Path to corpus.mallet file.
"""
return self.prefix + 'corpus.mallet'
def fwordweights(self):
"""Get path to word weight file.
Returns
-------
str
Path to word weight file.
"""
return self.prefix + 'wordweights.txt'
def corpus2mallet(self, corpus, file_like):
"""Convert `corpus` to Mallet format and write it to `file_like` descriptor.
Format ::
document id[SPACE]label (not used)[SPACE]whitespace delimited utf8-encoded tokens[NEWLINE]
Parameters
----------
corpus : iterable of iterable of (int, int)
Collection of texts in BoW format.
file_like : file-like object
Opened file.
"""
for docno, doc in enumerate(corpus):
if self.id2word:
tokens = chain.from_iterable([self.id2word[tokenid]] * int(cnt) for tokenid, cnt in doc)
else:
tokens = chain.from_iterable([str(tokenid)] * int(cnt) for tokenid, cnt in doc)
file_like.write(utils.to_utf8("%s 0 %s\n" % (docno, ' '.join(tokens))))
def convert_input(self, corpus, infer=False, serialize_corpus=True):
"""Convert corpus to Mallet format and save it to a temporary text file.
Parameters
----------
corpus : iterable of iterable of (int, int)
Collection of texts in BoW format.
infer : bool, optional
...
serialize_corpus : bool, optional
...
"""
if serialize_corpus:
logger.info("serializing temporary corpus to %s", self.fcorpustxt())
with utils.open(self.fcorpustxt(), 'wb') as fout:
self.corpus2mallet(corpus, fout)
# convert the text file above into MALLET's internal format
cmd = \
self.mallet_path + \
" import-file --preserve-case --keep-sequence " \
"--remove-stopwords --token-regex \"\\S+\" --input %s --output %s"
if infer:
cmd += ' --use-pipe-from ' + self.fcorpusmallet()
cmd = cmd % (self.fcorpustxt(), self.fcorpusmallet() + '.infer')
else:
cmd = cmd % (self.fcorpustxt(), self.fcorpusmallet())
logger.info("converting temporary corpus to MALLET format with %s", cmd)
check_output(args=cmd, shell=True)
def train(self, corpus):
"""Train Mallet LDA.
Parameters
----------
corpus : iterable of iterable of (int, int)
Corpus in BoW format
"""
self.convert_input(corpus, infer=False)
cmd = self.mallet_path + ' train-topics --input %s --num-topics %s --alpha %s --optimize-interval %s '\
'--num-threads %s --output-state %s --output-doc-topics %s --output-topic-keys %s '\
'--num-iterations %s --inferencer-filename %s --doc-topics-threshold %s --random-seed %s'
cmd = cmd % (
self.fcorpusmallet(), self.num_topics, self.alpha, self.optimize_interval,
self.workers, self.fstate(), self.fdoctopics(), self.ftopickeys(), self.iterations,
self.finferencer(), self.topic_threshold, str(self.random_seed)
)
# NOTE "--keep-sequence-bigrams" / "--use-ngrams true" poorer results + runs out of memory
logger.info("training MALLET LDA with %s", cmd)
check_output(args=cmd, shell=True)
self.word_topics = self.load_word_topics()
# NOTE - we are still keeping the wordtopics variable to not break backward compatibility.
# word_topics has replaced wordtopics throughout the code;
# wordtopics just stores the values of word_topics when train is called.
self.wordtopics = self.word_topics
def __getitem__(self, bow, iterations=100):
"""Get vector for document(s).
Parameters
----------
bow : {list of (int, int), iterable of list of (int, int)}
Document (or corpus) in BoW format.
iterations : int, optional
Number of iterations that will be used for inferring.
Returns
-------
list of (int, float)
LDA vector for document as sequence of (topic_id, topic_probability) **OR**
list of list of (int, float)
LDA vectors for corpus in same format.
"""
is_corpus, corpus = utils.is_corpus(bow)
if not is_corpus:
# query is a single document => make a corpus out of it
bow = [bow]
self.convert_input(bow, infer=True)
cmd = \
self.mallet_path + ' infer-topics --input %s --inferencer %s ' \
'--output-doc-topics %s --num-iterations %s --doc-topics-threshold %s --random-seed %s'
cmd = cmd % (
self.fcorpusmallet() + '.infer', self.finferencer(),
self.fdoctopics() + '.infer', iterations, self.topic_threshold, str(self.random_seed)
)
logger.info("inferring topics with MALLET LDA '%s'", cmd)
check_output(args=cmd, shell=True)
result = list(self.read_doctopics(self.fdoctopics() + '.infer'))
return result if is_corpus else result[0]
def load_word_topics(self):
"""Load words X topics matrix from :meth:`gensim.models.wrappers.ldamallet.LdaMallet.fstate` file.
Returns
-------
numpy.ndarray
Matrix words X topics.
"""
logger.info("loading assigned topics from %s", self.fstate())
word_topics = numpy.zeros((self.num_topics, self.num_terms), dtype=numpy.float64)
if hasattr(self.id2word, 'token2id'):
word2id = self.id2word.token2id
else:
word2id = revdict(self.id2word)
with utils.open(self.fstate(), 'rb') as fin:
_ = next(fin) # header
self.alpha = numpy.fromiter(next(fin).split()[2:], dtype=float)
assert len(self.alpha) == self.num_topics, "mismatch between MALLET vs. requested topics"
_ = next(fin) # noqa:F841 beta
for lineno, line in enumerate(fin):
line = utils.to_unicode(line)
doc, source, pos, typeindex, token, topic = line.split(" ")
if token not in word2id:
continue
tokenid = word2id[token]
word_topics[int(topic), tokenid] += 1.0
return word_topics
def load_document_topics(self):
"""Load document topics from :meth:`gensim.models.wrappers.ldamallet.LdaMallet.fdoctopics` file.
Shortcut for :meth:`gensim.models.wrappers.ldamallet.LdaMallet.read_doctopics`.
Returns
-------
iterator of list of (int, float)
Sequence of LDA vectors for documents.
"""
return self.read_doctopics(self.fdoctopics())
def get_topics(self):
"""Get topics X words matrix.
Returns
-------
numpy.ndarray
Topics X words matrix, shape `num_topics` x `vocabulary_size`.
"""
topics = self.word_topics
return topics / topics.sum(axis=1)[:, None]
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""Get the `num_words` most probable words for `num_topics` number of topics.
Parameters
----------
num_topics : int, optional
Number of topics to return, set `-1` to get all topics.
num_words : int, optional
Number of words.
log : bool, optional
            If True - also write the topics to the log, used for debugging purposes.
formatted : bool, optional
If `True` - return the topics as a list of strings, otherwise as lists of (weight, word) pairs.
Returns
-------
list of str
Topics as a list of strings (if formatted=True) **OR**
list of (float, str)
Topics as list of (weight, word) pairs (if formatted=False)
"""
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
# add a little random jitter, to randomize results around the same alpha
sort_alpha = self.alpha + 0.0001 * numpy.random.rand(len(self.alpha))
sorted_topics = list(matutils.argsort(sort_alpha))
chosen_topics = sorted_topics[: num_topics // 2] + sorted_topics[-num_topics // 2:]
shown = []
for i in chosen_topics:
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append((i, topic))
if log:
logger.info("topic #%i (%.3f): %s", i, self.alpha[i], topic)
return shown
def show_topic(self, topicid, topn=10, num_words=None):
"""Get `num_words` most probable words for the given `topicid`.
Parameters
----------
topicid : int
Id of topic.
topn : int, optional
            Number of the most significant words to return for the topic.
num_words : int, optional
DEPRECATED PARAMETER, use `topn` instead.
Returns
-------
list of (str, float)
Sequence of probable words, as a list of `(word, word_probability)` for `topicid` topic.
"""
if num_words is not None: # deprecated num_words is used
warnings.warn("The parameter `num_words` is deprecated, will be removed in 4.0.0, use `topn` instead.")
topn = num_words
if self.word_topics is None:
logger.warning("Run train or load_word_topics before showing topics.")
topic = self.word_topics[topicid]
topic = topic / topic.sum() # normalize to probability dist
bestn = matutils.argsort(topic, topn, reverse=True)
beststr = [(self.id2word[idx], topic[idx]) for idx in bestn]
return beststr
def get_version(self, direc_path):
""""Get the version of Mallet.
Parameters
----------
direc_path : str
Path to mallet archive.
Returns
-------
str
Version of mallet.
"""
try:
archive = zipfile.ZipFile(direc_path, 'r')
if u'cc/mallet/regression/' not in archive.namelist():
return '2.0.7'
else:
return '2.0.8RC3'
except Exception:
xml_path = direc_path.split("bin")[0]
try:
doc = et.parse(xml_path + "pom.xml").getroot()
namespace = doc.tag[:doc.tag.index('}') + 1]
return doc.find(namespace + 'version').text.split("-")[0]
except Exception:
return "Can't parse pom.xml version file"
def read_doctopics(self, fname, eps=1e-6, renorm=True):
"""Get document topic vectors from MALLET's "doc-topics" format, as sparse gensim vectors.
Parameters
----------
fname : str
Path to input file with document topics.
eps : float, optional
Threshold for probabilities.
renorm : bool, optional
If True - explicitly re-normalize distribution.
Raises
------
RuntimeError
            If any line is in an invalid format.
Yields
------
list of (int, float)
LDA vectors for document.
"""
mallet_version = self.get_version(self.mallet_path)
with utils.open(fname, 'rb') as fin:
for lineno, line in enumerate(fin):
if lineno == 0 and line.startswith(b"#doc "):
continue # skip the header line if it exists
parts = line.split()[2:] # skip "doc" and "source" columns
# the MALLET doctopic format changed in 2.0.8 to exclude the id,
# this handles the file differently dependent on the pattern
if len(parts) == 2 * self.num_topics:
doc = [
(int(id_), float(weight)) for id_, weight in zip(*[iter(parts)] * 2)
if abs(float(weight)) > eps
]
elif len(parts) == self.num_topics and mallet_version != '2.0.7':
doc = [(id_, float(weight)) for id_, weight in enumerate(parts) if abs(float(weight)) > eps]
else:
if mallet_version == "2.0.7":
"""
1 1 0 1.0780612802674239 30.005575655428533364 2 0.005575655428533364
2 2 0 0.9184413079632608 40.009062076892971008 3 0.009062076892971008
In the above example there is a mix of the above if and elif statement.
There are neither `2*num_topics` nor `num_topics` elements.
It has 2 formats 40.009062076892971008 and 0 1.0780612802674239
which cannot be handled by above if elif.
Also, there are some topics are missing(meaning that the topic is not there)
which is another reason why the above if elif fails even when the `mallet`
produces the right results
"""
count = 0
doc = []
if len(parts) > 0:
while count < len(parts):
"""
if section is to deal with formats of type 2 0.034
so if count reaches index of 2 and since int(2) == float(2) so if block is executed
now there is one extra element afer 2, so count + 1 access should not give an error
else section handles formats of type 20.034
now count is there on index of 20.034 since float(20.034) != int(20.034) so else block
is executed
"""
if float(parts[count]) == int(parts[count]):
if float(parts[count + 1]) > eps:
doc.append((int(parts[count]), float(parts[count + 1])))
count += 2
else:
if float(parts[count]) - int(parts[count]) > eps:
doc.append((int(parts[count]) % 10, float(parts[count]) - int(parts[count])))
count += 1
else:
raise RuntimeError("invalid doc topics format at line %i in %s" % (lineno + 1, fname))
if renorm:
# explicitly normalize weights to sum up to 1.0, just to be sure...
total_weight = float(sum(weight for _, weight in doc))
if total_weight:
doc = [(id_, float(weight) / total_weight) for id_, weight in doc]
yield doc
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved LdaMallet class. Handles backwards compatibility from
        older LdaMallet versions which did not use the random_seed parameter.
"""
model = super(LdaMallet, cls).load(*args, **kwargs)
if not hasattr(model, 'random_seed'):
model.random_seed = 0
return model
def malletmodel2ldamodel(mallet_model, gamma_threshold=0.001, iterations=50):
"""Convert :class:`~gensim.models.wrappers.ldamallet.LdaMallet` to :class:`~gensim.models.ldamodel.LdaModel`.
This works by copying the training model weights (alpha, beta...) from a trained mallet model into the gensim model.
Parameters
----------
mallet_model : :class:`~gensim.models.wrappers.ldamallet.LdaMallet`
Trained Mallet model
gamma_threshold : float, optional
To be used for inference in the new LdaModel.
iterations : int, optional
Number of iterations to be used for inference in the new LdaModel.
Returns
-------
:class:`~gensim.models.ldamodel.LdaModel`
Gensim native LDA.
"""
model_gensim = LdaModel(
id2word=mallet_model.id2word, num_topics=mallet_model.num_topics,
alpha=mallet_model.alpha, eta=0,
iterations=iterations,
gamma_threshold=gamma_threshold,
        dtype=numpy.float64  # don't lose precision when converting from MALLET
)
model_gensim.state.sstats[...] = mallet_model.wordtopics
model_gensim.sync_state()
return model_gensim
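# Illustrative usage sketch (not part of the module). The mallet path is a
# placeholder; a real run needs a local MALLET installation and Java.
if __name__ == '__main__':
    from gensim.corpora import Dictionary
    texts = [['human', 'interface', 'computer'],
             ['survey', 'user', 'computer', 'system'],
             ['graph', 'trees', 'graph', 'minors']]
    dictionary = Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    lda = LdaMallet('/path/to/mallet-2.0.8/bin/mallet', corpus=corpus,
                    num_topics=2, id2word=dictionary, iterations=50)
    print(lda.show_topics(num_topics=2, num_words=5))
    print(malletmodel2ldamodel(lda))  # convert to a native gensim LdaModel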
|
from datetime import timedelta
import logging
from typing import Optional
from georss_ign_sismologia_client import IgnSismologiaFeedManager
import voluptuous as vol
from homeassistant.components.geo_location import PLATFORM_SCHEMA, GeolocationEvent
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_START,
LENGTH_KILOMETERS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
ATTR_EXTERNAL_ID = "external_id"
ATTR_IMAGE_URL = "image_url"
ATTR_MAGNITUDE = "magnitude"
ATTR_PUBLICATION_DATE = "publication_date"
ATTR_REGION = "region"
ATTR_TITLE = "title"
CONF_MINIMUM_MAGNITUDE = "minimum_magnitude"
DEFAULT_MINIMUM_MAGNITUDE = 0.0
DEFAULT_RADIUS_IN_KM = 50.0
SCAN_INTERVAL = timedelta(minutes=5)
SOURCE = "ign_sismologia"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(
CONF_MINIMUM_MAGNITUDE, default=DEFAULT_MINIMUM_MAGNITUDE
): cv.positive_float,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IGN Sismologia Feed platform."""
scan_interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
coordinates = (
config.get(CONF_LATITUDE, hass.config.latitude),
config.get(CONF_LONGITUDE, hass.config.longitude),
)
radius_in_km = config[CONF_RADIUS]
minimum_magnitude = config[CONF_MINIMUM_MAGNITUDE]
# Initialize the entity manager.
feed = IgnSismologiaFeedEntityManager(
hass, add_entities, scan_interval, coordinates, radius_in_km, minimum_magnitude
)
def start_feed_manager(event):
"""Start feed manager."""
feed.startup()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_feed_manager)
class IgnSismologiaFeedEntityManager:
"""Feed Entity Manager for IGN Sismologia GeoRSS feed."""
def __init__(
self,
hass,
add_entities,
scan_interval,
coordinates,
radius_in_km,
minimum_magnitude,
):
"""Initialize the Feed Entity Manager."""
self._hass = hass
self._feed_manager = IgnSismologiaFeedManager(
self._generate_entity,
self._update_entity,
self._remove_entity,
coordinates,
filter_radius=radius_in_km,
filter_minimum_magnitude=minimum_magnitude,
)
self._add_entities = add_entities
self._scan_interval = scan_interval
def startup(self):
"""Start up this manager."""
self._feed_manager.update()
self._init_regular_updates()
def _init_regular_updates(self):
"""Schedule regular updates at the specified interval."""
track_time_interval(
self._hass, lambda now: self._feed_manager.update(), self._scan_interval
)
def get_entry(self, external_id):
"""Get feed entry by external id."""
return self._feed_manager.feed_entries.get(external_id)
def _generate_entity(self, external_id):
"""Generate new entity."""
new_entity = IgnSismologiaLocationEvent(self, external_id)
# Add new entities to HA.
self._add_entities([new_entity], True)
def _update_entity(self, external_id):
"""Update entity."""
dispatcher_send(self._hass, f"ign_sismologia_update_{external_id}")
def _remove_entity(self, external_id):
"""Remove entity."""
dispatcher_send(self._hass, f"ign_sismologia_delete_{external_id}")
class IgnSismologiaLocationEvent(GeolocationEvent):
"""This represents an external event with IGN Sismologia feed data."""
def __init__(self, feed_manager, external_id):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._external_id = external_id
self._title = None
self._distance = None
self._latitude = None
self._longitude = None
self._attribution = None
self._region = None
self._magnitude = None
self._publication_date = None
self._image_url = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass,
f"ign_sismologia_delete_{self._external_id}",
self._delete_callback,
)
self._remove_signal_update = async_dispatcher_connect(
self.hass,
f"ign_sismologia_update_{self._external_id}",
self._update_callback,
)
@callback
def _delete_callback(self):
"""Remove this entity."""
self._remove_signal_delete()
self._remove_signal_update()
self.hass.async_create_task(self.async_remove())
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for IGN Sismologia feed location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
self._title = feed_entry.title
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
self._attribution = feed_entry.attribution
self._region = feed_entry.region
self._magnitude = feed_entry.magnitude
self._publication_date = feed_entry.published
self._image_url = feed_entry.image_url
@property
def icon(self):
"""Return the icon to use in the frontend."""
return "mdi:pulse"
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
if self._magnitude and self._region:
return f"M {self._magnitude:.1f} - {self._region}"
if self._magnitude:
return f"M {self._magnitude:.1f}"
if self._region:
return self._region
return self._title
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return LENGTH_KILOMETERS
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_EXTERNAL_ID, self._external_id),
(ATTR_TITLE, self._title),
(ATTR_REGION, self._region),
(ATTR_MAGNITUDE, self._magnitude),
(ATTR_ATTRIBUTION, self._attribution),
(ATTR_PUBLICATION_DATE, self._publication_date),
(ATTR_IMAGE_URL, self._image_url),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
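# Illustrative sketch (not part of the platform): how the entity name is derived
# from magnitude and region (see IgnSismologiaLocationEvent.name). The values
# below are hypothetical.
if __name__ == '__main__':
    magnitude, region, title = 3.4, "Granada", "Fallback title"
    if magnitude and region:
        name = f"M {magnitude:.1f} - {region}"
    elif magnitude:
        name = f"M {magnitude:.1f}"
    else:
        name = region or title
    print(name)  # -> M 3.4 - Granada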
|
from .base_classes import CommandBase, LatexObject
from .package import Package
def _remove_invalid_char(s):
"""Remove invalid and dangerous characters from a string."""
s = ''.join([i if ord(i) >= 32 and ord(i) < 127 else '' for i in s])
s = s.translate(dict.fromkeys(map(ord, "&%$#_{}~^\\\n\xA0[]\":;' ")))
return s
class Marker(LatexObject):
"""A class that represents a marker (label/ref parameter)."""
_repr_attributes_override = [
'name',
'prefix',
]
def __init__(self, name, prefix="", del_invalid_char=True):
"""
Args
----
name: str
Name of the marker.
prefix: str
Prefix to add before the name (prefix:name).
del_invalid_char: bool
            If True, invalid and dangerous characters will be
            removed from the marker.
"""
if del_invalid_char:
prefix = _remove_invalid_char(prefix)
name = _remove_invalid_char(name)
self.prefix = prefix
self.name = name
def __str__(self):
return ((self.prefix + ":") if self.prefix != "" else "") + self.name
def dumps(self):
"""Represent the Marker as a string in LaTeX syntax.
Returns
-------
str
"""
return str(self)
class RefLabelBase(CommandBase):
"""A class used as base for command that take a marker only."""
_repr_attributes_mapping = {
'marker': 'arguments',
}
def __init__(self, marker):
"""
Args
----
marker: Marker
The marker to use with the label/ref.
"""
self.marker = marker
super().__init__(arguments=(str(marker)))
class Label(RefLabelBase):
"""A class that represents a label."""
class Ref(RefLabelBase):
"""A class that represents a reference."""
class Pageref(RefLabelBase):
"""A class that represents a page reference."""
class Eqref(RefLabelBase):
"""A class that represent a ref to a formulae."""
packages = [Package('amsmath')]
class Cref(RefLabelBase):
"""A class that represent a cref (not a Cref)."""
packages = [Package('cleveref')]
class CrefUp(RefLabelBase):
"""A class that represent a Cref."""
packages = [Package('cleveref')]
latex_name = 'Cref'
class Autoref(RefLabelBase):
"""A class that represent an autoref."""
packages = [Package('hyperref')]
class Hyperref(CommandBase):
"""A class that represents an hyperlink to a label."""
_repr_attributes_mapping = {
'marker': 'options',
'text': 'arguments',
}
packages = [Package('hyperref')]
def __init__(self, marker, text):
"""
Args
----
marker: Marker
The marker to use with the label/ref.
text: str
The text that will be shown as a link
to the label of the same marker.
"""
self.marker = marker
super().__init__(options=(str(marker)), arguments=text)
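# Illustrative usage sketch (not part of the module): generating LaTeX for a
# label/reference pair. The marker name and text are hypothetical; the comments
# show the expected dumps() output.
if __name__ == '__main__':
    marker = Marker('intro', prefix='sec')
    print(Label(marker).dumps())   # \label{sec:intro}
    print(Ref(marker).dumps())     # \ref{sec:intro}
    print(Hyperref(marker, 'the introduction').dumps())
    # \hyperref[sec:intro]{the introduction}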
|
import os
import unittest
from Tests.utils.utils import get_test_path
from kalliope.core.Models import Singleton, Signal
from kalliope.core.ConfigurationManager import BrainLoader
from kalliope.core.Models import Neuron
from kalliope.core.Models import Synapse
from kalliope.core.Models.Brain import Brain
from kalliope.core.Models.settings.Settings import Settings
class TestBrainLoader(unittest.TestCase):
def setUp(self):
        # be sure the brain hasn't been instantiated before
Singleton._instances = dict()
self.brain_to_test = get_test_path("brains/brain_test.yml")
self.expected_result = [
{'signals': [{'order': 'test_order'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test'},
{'signals': [{'order': 'test_order_2'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test2'},
{'signals': [{'order': 'order_for_int'}],
'neurons': [{'sleep': {'seconds': 60}}],
'name': 'testint'},
{'includes': ['included_brain_test.yml']},
{'signals': [{'order': 'test_order_3'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test3'}
]
def tearDown(self):
Singleton._instances = dict()
def test_get_yaml_config(self):
"""
Test we can get a yaml config from the path
"""
brain_loader = BrainLoader(file_path=self.brain_to_test)
self.assertEqual(brain_loader.yaml_config, self.expected_result)
def test_load_brain(self):
"""
Test the class return a valid brain object
"""
neuron = Neuron(name='say', parameters={'message': ['test message']})
neuron2 = Neuron(name='sleep', parameters={'seconds': 60})
signal1 = Signal(name="order", parameters="test_order")
signal2 = Signal(name="order", parameters="test_order_2")
signal3 = Signal(name="order", parameters="test_order_3")
signal4 = Signal(name="order", parameters="order_for_int")
synapse1 = Synapse(name="test", neurons=[neuron], signals=[signal1])
synapse2 = Synapse(name="test2", neurons=[neuron], signals=[signal2])
synapse3 = Synapse(name="test3", neurons=[neuron], signals=[signal3])
synapse4 = Synapse(name="testint", neurons=[neuron2], signals=[signal4])
synapses = [synapse1, synapse2, synapse4, synapse3]
brain = Brain()
brain.synapses = synapses
brain.brain_file = self.brain_to_test
brain.brain_yaml = self.expected_result
brain_loader = BrainLoader(file_path=self.brain_to_test)
self.assertEqual(brain, brain_loader.brain)
def test_get_neurons(self):
"""
Test to get neurons from the brainLoader
        scenarios:
- 1/ get a simple neuron from the brainloader
- 2/ get a neuron with brackets
- 3/ get a neuron with int as parameters
"""
# 1/ get a simple neuron from the brainloader
st = Settings()
neuron_list = [{'say': {'message': ['test message']}}]
neuron = Neuron(name='say', parameters={'message': ['test message']})
bl = BrainLoader(file_path=self.brain_to_test)
neurons_from_brain_loader = bl.get_neurons(neuron_list,
settings=st)
self.assertEqual([neuron], neurons_from_brain_loader)
# 2/ get a neuron with global variables as parameters
neuron_list = [{'say': {'message': ['bonjour {{name}}']}}]
st = Settings()
bl = BrainLoader(file_path=self.brain_to_test)
neurons_from_brain_loader = bl.get_neurons(neuron_list,
settings=st)
neuron = Neuron(name='say', parameters={'message': ['bonjour {{name}}']})
self.assertEqual([neuron], neurons_from_brain_loader)
# 3/ get a neuron with int as parameters
st = Settings()
neuron_list = [{'sleep': {'seconds': 60}}]
neuron = Neuron(name='sleep', parameters={'seconds': 60})
bl = BrainLoader(file_path=self.brain_to_test)
neurons_from_brain_loader = bl.get_neurons(neuron_list,
settings=st)
self.assertEqual([neuron], neurons_from_brain_loader)
def test_get_signals(self):
signals = [{'order': 'test_order'}]
signal = Signal(name="order", parameters="test_order")
bl = BrainLoader(file_path=self.brain_to_test)
signals_from_brain_loader = bl.get_signals(signals)
self.assertEqual([signal], signals_from_brain_loader)
def test_singleton(self):
bl1 = BrainLoader(file_path=self.brain_to_test)
bl2 = BrainLoader(file_path=self.brain_to_test)
self.assertTrue(bl1.brain is bl2.brain)
if __name__ == '__main__':
unittest.main()
|
import os.path
import cherrypy
class WelcomePage:
@cherrypy.expose
def index(self):
# Ask for the user's name.
return '''
<form action="greetUser" method="GET">
What is your name?
<input type="text" name="name" />
<input type="submit" />
</form>'''
@cherrypy.expose
def greetUser(self, name=None):
# CherryPy passes all GET and POST variables as method parameters.
# It doesn't make a difference where the variables come from, how
# large their contents are, and so on.
#
# You can define default parameter values as usual. In this
# example, the "name" parameter defaults to None so we can check
# if a name was actually specified.
if name:
# Greet the user!
return "Hey %s, what's up?" % name
else:
if name is None:
# No name was specified
return 'Please enter your name <a href="./">here</a>.'
else:
return 'No, really, enter your name <a href="./">here</a>.'
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to WelcomePage().index().
cherrypy.quickstart(WelcomePage(), config=tutconf)
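# A hedged sketch of what a minimal ``tutorial.conf`` next to this script might
# contain; the real file is not shown here, so these values are illustrative
# assumptions written in CherryPy's INI-style config syntax (values are Python
# literals):
#
#   [global]
#   server.socket_host = '127.0.0.1'
#   server.socket_port = 8080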
|
from typing import Any, Callable, Optional, Tuple
import pyvera as pv
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, LIGHT_LUX, PERCENTAGE
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
from tests.async_mock import MagicMock
async def run_sensor_test(
hass: HomeAssistant,
vera_component_factory: ComponentFactory,
category: int,
class_property: str,
assert_states: Tuple[Tuple[Any, Any]],
    assert_unit_of_measurement: Optional[str] = None,
    setup_callback: Optional[Callable[[pv.VeraController], None]] = None,
) -> None:
"""Test generic sensor."""
vera_device = MagicMock(spec=pv.VeraSensor) # type: pv.VeraSensor
vera_device.device_id = 1
vera_device.vera_device_id = vera_device.device_id
vera_device.name = "dev1"
vera_device.category = category
setattr(vera_device, class_property, "33")
entity_id = "sensor.dev1_1"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
devices=(vera_device,), setup_callback=setup_callback
),
)
update_callback = component_data.controller_data[0].update_callback
for (initial_value, state_value) in assert_states:
setattr(vera_device, class_property, initial_value)
update_callback(vera_device)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == state_value
if assert_unit_of_measurement:
assert (
state.attributes[ATTR_UNIT_OF_MEASUREMENT] == assert_unit_of_measurement
)
async def test_temperature_sensor_f(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
def setup_callback(controller: pv.VeraController) -> None:
controller.temperature_units = "F"
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=pv.CATEGORY_TEMPERATURE_SENSOR,
class_property="temperature",
assert_states=(("33", "1"), ("44", "7")),
setup_callback=setup_callback,
)
async def test_temperature_sensor_c(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=pv.CATEGORY_TEMPERATURE_SENSOR,
class_property="temperature",
assert_states=(("33", "33"), ("44", "44")),
)
async def test_light_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=pv.CATEGORY_LIGHT_SENSOR,
class_property="light",
assert_states=(("12", "12"), ("13", "13")),
assert_unit_of_measurement=LIGHT_LUX,
)
async def test_uv_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=pv.CATEGORY_UV_SENSOR,
class_property="light",
assert_states=(("12", "12"), ("13", "13")),
assert_unit_of_measurement="level",
)
async def test_humidity_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=pv.CATEGORY_HUMIDITY_SENSOR,
class_property="humidity",
assert_states=(("12", "12"), ("13", "13")),
assert_unit_of_measurement=PERCENTAGE,
)
async def test_power_meter_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=pv.CATEGORY_POWER_METER,
class_property="power",
assert_states=(("12", "12"), ("13", "13")),
assert_unit_of_measurement="watts",
)
async def test_trippable_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
def setup_callback(controller: pv.VeraController) -> None:
controller.get_devices()[0].is_trippable = True
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=999,
class_property="is_tripped",
assert_states=((True, "Tripped"), (False, "Not Tripped"), (True, "Tripped")),
setup_callback=setup_callback,
)
async def test_unknown_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
def setup_callback(controller: pv.VeraController) -> None:
controller.get_devices()[0].is_trippable = False
await run_sensor_test(
hass=hass,
vera_component_factory=vera_component_factory,
category=999,
class_property="is_tripped",
assert_states=((True, "Unknown"), (False, "Unknown"), (True, "Unknown")),
setup_callback=setup_callback,
)
async def test_scene_controller_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device = MagicMock(spec=pv.VeraSensor) # type: pv.VeraSensor
vera_device.device_id = 1
vera_device.vera_device_id = vera_device.device_id
vera_device.name = "dev1"
vera_device.category = pv.CATEGORY_SCENE_CONTROLLER
vera_device.get_last_scene_id = MagicMock(return_value="id0")
vera_device.get_last_scene_time = MagicMock(return_value="0000")
entity_id = "sensor.dev1_1"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(devices=(vera_device,)),
)
update_callback = component_data.controller_data[0].update_callback
vera_device.get_last_scene_time.return_value = "1111"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "id0"
|
import os.path
import types
import zipimport
from coverage import env, files
from coverage.misc import contract, expensive, isolate_module, join_regex
from coverage.misc import CoverageException, NoSource
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
os = isolate_module(os)
@contract(returns='bytes')
def read_python_source(filename):
"""Read the Python source text from `filename`.
Returns bytes.
"""
with open(filename, "rb") as f:
source = f.read()
if env.IRONPYTHON:
# IronPython reads Unicode strings even for "rb" files.
source = bytes(source)
return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
@contract(returns='unicode')
def get_python_source(filename):
"""Return the source code, as unicode."""
base, ext = os.path.splitext(filename)
if ext == ".py" and env.WINDOWS:
exts = [".py", ".pyw"]
else:
exts = [ext]
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
# A regular text file: open it.
source = read_python_source(try_filename)
break
# Maybe it's in a zip file?
source = get_zip_bytes(try_filename)
if source is not None:
break
else:
# Couldn't find source.
exc_msg = "No source for code: '%s'.\n" % (filename,)
exc_msg += "Aborting report output, consider using -i."
raise NoSource(exc_msg)
# Replace \f because of http://bugs.python.org/issue19035
source = source.replace(b'\f', b' ')
source = source.decode(source_encoding(source), "replace")
# Python code should always end with a line with a newline.
if source and source[-1] != '\n':
source += '\n'
return source
@contract(returns='bytes|None')
def get_zip_bytes(filename):
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
    an empty bytestring if the file is empty.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return data
return None
def source_for_file(filename):
"""Return the source filename for `filename`.
Given a file name being traced, return the best guess as to the source
file to attribute it to.
"""
if filename.endswith(".py"):
# .py files are themselves source files.
return filename
elif filename.endswith((".pyc", ".pyo")):
# Bytecode files probably have source files near them.
py_filename = filename[:-1]
if os.path.exists(py_filename):
# Found a .py file, use that.
return py_filename
if env.WINDOWS:
# On Windows, it could be a .pyw file.
pyw_filename = py_filename + "w"
if os.path.exists(pyw_filename):
return pyw_filename
# Didn't find source, but it's probably the .py file we want.
return py_filename
elif filename.endswith("$py.class"):
# Jython is easy to guess.
return filename[:-9] + ".py"
# No idea, just use the file name as-is.
return filename
def source_for_morf(morf):
"""Get the source filename for the module-or-file `morf`."""
if hasattr(morf, '__file__') and morf.__file__:
filename = morf.__file__
elif isinstance(morf, types.ModuleType):
# A module should have had .__file__, otherwise we can't use it.
# This could be a PEP-420 namespace package.
raise CoverageException("Module {} has no file".format(morf))
else:
filename = morf
filename = source_for_file(files.unicode_filename(filename))
return filename
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
def __init__(self, morf, coverage=None):
self.coverage = coverage
filename = source_for_morf(morf)
super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
if hasattr(morf, '__name__'):
name = morf.__name__.replace(".", os.sep)
if os.path.basename(filename).startswith('__init__.'):
name += os.sep + "__init__"
name += ".py"
name = files.unicode_filename(name)
else:
name = files.relative_filename(filename)
self.relname = name
self._source = None
self._parser = None
self._excluded = None
def __repr__(self):
return "<PythonFileReporter {!r}>".format(self.filename)
@contract(returns='unicode')
def relative_filename(self):
return self.relname
@property
def parser(self):
"""Lazily create a :class:`PythonParser`."""
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
self._parser.parse_source()
return self._parser
def lines(self):
"""Return the line numbers of statements in the file."""
return self.parser.statements
def excluded_lines(self):
"""Return the line numbers of statements in the file."""
return self.parser.excluded
def translate_lines(self, lines):
return self.parser.translate_lines(lines)
def translate_arcs(self, arcs):
return self.parser.translate_arcs(arcs)
@expensive
def no_branch_lines(self):
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list)
)
return no_branch
@expensive
def arcs(self):
return self.parser.arcs()
@expensive
def exit_counts(self):
return self.parser.exit_counts()
def missing_arc_description(self, start, end, executed_arcs=None):
return self.parser.missing_arc_description(start, end, executed_arcs)
@contract(returns='unicode')
def source(self):
if self._source is None:
self._source = get_python_source(self.filename)
return self._source
def should_be_python(self):
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
def source_token_lines(self):
return source_token_lines(self.source())
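# A small, hedged illustration of how source_for_file() maps traced file names
# back to source files; the paths below are hypothetical examples.
if __name__ == "__main__":
    print(source_for_file("pkg/mod.py"))        # pkg/mod.py (already a source file)
    print(source_for_file("pkg/mod.pyc"))       # pkg/mod.py (best guess for the bytecode's source)
    print(source_for_file("pkg/mod$py.class"))  # pkg/mod.py (Jython bytecode)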
|
import logging
import errno
import os
import stat
import socket
from plumbum.commands.base import shquote
from plumbum.machines.base import PopenAddons
from plumbum.machines.remote import BaseRemoteMachine
from plumbum.machines.session import ShellSession
from plumbum.lib import _setdoc, six
from plumbum.path.local import LocalPath
from plumbum.path.remote import RemotePath, StatRes
from plumbum.commands.processes import iter_lines, ProcessLineTimedOut
try:
    # Sigh... we need to gracefully import paramiko for Sphinx builds, etc.
import paramiko
except ImportError:
class paramiko(object):
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __getattr__(self, name):
raise ImportError("No module named paramiko")
paramiko = paramiko()
logger = logging.getLogger("plumbum.paramiko")
class ParamikoPopen(PopenAddons):
def __init__(self,
argv,
stdin,
stdout,
stderr,
encoding,
stdin_file=None,
stdout_file=None,
stderr_file=None):
self.argv = argv
self.channel = stdout.channel
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.custom_encoding = encoding
self.returncode = None
self.pid = None
self.stdin_file = stdin_file
self.stdout_file = stdout_file
self.stderr_file = stderr_file
def poll(self):
if self.returncode is None:
if self.channel.exit_status_ready():
return self.wait()
return self.returncode
def wait(self):
if self.returncode is None:
self.channel.recv_exit_status()
self.returncode = self.channel.exit_status
self.close()
return self.returncode
def close(self):
self.channel.shutdown_read()
self.channel.shutdown_write()
self.channel.close()
def kill(self):
# possible way to obtain pid:
# "(cmd ; echo $?) & echo ?!"
# and then client.exec_command("kill -9 %s" % (pid,))
raise EnvironmentError(
"Cannot kill remote processes, we don't have their PIDs")
terminate = kill
def send_signal(self, sig):
raise NotImplementedError()
def communicate(self):
stdout = []
stderr = []
infile = self.stdin_file
sources = [("1", stdout, self.stdout, self.stdout_file),
("2", stderr, self.stderr, self.stderr_file)]
i = 0
while sources:
if infile:
try:
line = infile.readline()
except (ValueError, IOError):
line = None
logger.debug("communicate: %r", line)
if not line:
infile.close()
infile = None
self.stdin.close()
else:
self.stdin.write(line)
self.stdin.flush()
i = (i + 1) % len(sources)
name, coll, pipe, outfile = sources[i]
line = pipe.readline()
# logger.debug("%s> %r", name, line)
if not line:
del sources[i]
elif outfile:
outfile.write(line)
outfile.flush()
else:
coll.append(line)
self.wait()
stdout = "".join(s for s in stdout).encode(self.custom_encoding)
stderr = "".join(s for s in stderr).encode(self.custom_encoding)
return stdout, stderr
def iter_lines(self, timeout=None, **kwargs):
if timeout is not None:
raise NotImplementedError(
"The 'timeout' parameter is not supported with ParamikoMachine"
)
return iter_lines(self, _iter_lines=_iter_lines, **kwargs)
__iter__ = iter_lines
class ParamikoMachine(BaseRemoteMachine):
"""
An implementation of :class:`remote machine <plumbum.machines.remote.BaseRemoteMachine>`
    over Paramiko (a Python implementation of the SSH2 protocol). Invoking a remote command
translates to invoking it over SSH ::
with ParamikoMachine("yourhostname") as rem:
r_ls = rem["ls"]
# r_ls is the remote `ls`
# executing r_ls() is equivalent to `ssh yourhostname ls`, only without
# spawning a new ssh client
:param host: the host name to connect to (SSH server)
:param user: the user to connect as (if ``None``, the default will be used)
:param port: the server's port (if ``None``, the default will be used)
:param password: the user's password (if a password-based authentication is to be performed)
(if ``None``, key-based authentication will be used)
:param keyfile: the path to the identity file (if ``None``, the default will be used)
:param load_system_host_keys: whether or not to load the system's host keys (from ``/etc/ssh``
and ``~/.ssh``). The default is ``True``, which means Paramiko
behaves much like the ``ssh`` command-line client
:param missing_host_policy: the value passed to the underlying ``set_missing_host_key_policy``
of the client. The default is ``None``, which means
``set_missing_host_key_policy`` is not invoked and paramiko's
default behavior (reject) is employed
:param encoding: the remote machine's encoding (defaults to UTF8)
:param look_for_keys: set to False to disable searching for discoverable
private key files in ``~/.ssh``
:param connect_timeout: timeout for TCP connection
    .. note:: If Paramiko 1.15 or above is installed, GSS-API authentication can be used
:param bool gss_auth: ``True`` if you want to use GSS-API authentication
:param bool gss_kex: Perform GSS-API Key Exchange and user authentication
:param bool gss_deleg_creds: Delegate GSS-API client credentials or not
    :param str gss_host: The target's name in the Kerberos database. Default: hostname
:param bool get_pty: Execute remote commands with allocated pseudo-tty. default: False
:param bool load_system_ssh_config: read system SSH config for ProxyCommand configuration. default: False
"""
class RemoteCommand(BaseRemoteMachine.RemoteCommand):
def __or__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __gt__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __rshift__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __ge__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __lt__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __lshift__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __init__(self,
host,
user=None,
port=None,
password=None,
keyfile=None,
load_system_host_keys=True,
missing_host_policy=None,
encoding="utf8",
look_for_keys=None,
connect_timeout=None,
keep_alive=0,
gss_auth=False,
gss_kex=None,
gss_deleg_creds=None,
gss_host=None,
get_pty=False,
load_system_ssh_config=False):
self.host = host
kwargs = {}
if user:
self._fqhost = "%s@%s" % (user, host)
kwargs['username'] = user
else:
self._fqhost = host
self._client = paramiko.SSHClient()
if load_system_host_keys:
self._client.load_system_host_keys()
if port is not None:
kwargs["port"] = port
if keyfile is not None:
kwargs["key_filename"] = keyfile
if password is not None:
kwargs["password"] = password
if missing_host_policy is not None:
self._client.set_missing_host_key_policy(missing_host_policy)
if look_for_keys is not None:
kwargs["look_for_keys"] = look_for_keys
if connect_timeout is not None:
kwargs["timeout"] = connect_timeout
if gss_auth:
kwargs['gss_auth'] = gss_auth
kwargs['gss_kex'] = gss_kex
kwargs['gss_deleg_creds'] = gss_deleg_creds
if not gss_host:
gss_host = host
kwargs['gss_host'] = gss_host
if load_system_ssh_config:
ssh_config = paramiko.SSHConfig()
with open(os.path.expanduser('~/.ssh/config')) as f:
ssh_config.parse(f)
try:
hostConfig = ssh_config.lookup(host)
kwargs['sock'] = paramiko.ProxyCommand(hostConfig['proxycommand'])
except KeyError:
pass
self._client.connect(host, **kwargs)
self._keep_alive = keep_alive
self._sftp = None
self._get_pty = get_pty
BaseRemoteMachine.__init__(self, encoding, connect_timeout)
def __str__(self):
return "paramiko://%s" % (self._fqhost, )
def close(self):
BaseRemoteMachine.close(self)
self._client.close()
@property
def sftp(self):
"""
Returns an SFTP client on top of the current SSH connection; it can be used to manipulate
files directly, much like an interactive FTP/SFTP session
"""
if not self._sftp:
self._sftp = self._client.open_sftp()
return self._sftp
@_setdoc(BaseRemoteMachine)
def session(self,
isatty=False,
term="vt100",
width=80,
height=24,
new_session=False):
# new_session is ignored for ParamikoMachine
trans = self._client.get_transport()
trans.set_keepalive(self._keep_alive)
chan = trans.open_session()
if isatty:
chan.get_pty(term, width, height)
chan.set_combine_stderr(True)
chan.invoke_shell()
stdin = chan.makefile('wb', -1)
stdout = chan.makefile('rb', -1)
stderr = chan.makefile_stderr('rb', -1)
proc = ParamikoPopen(["<shell>"], stdin, stdout, stderr,
self.custom_encoding)
return ShellSession(proc, self.custom_encoding, isatty)
@_setdoc(BaseRemoteMachine)
def popen(self,
args,
stdin=None,
stdout=None,
stderr=None,
new_session=False,
cwd=None):
# new_session is ignored for ParamikoMachine
argv = []
envdelta = self.env.getdelta()
argv.extend(["cd", str(cwd or self.cwd), "&&"])
if envdelta:
argv.append("env")
argv.extend("%s=%s" % (k, shquote(v)) for k, v in envdelta.items())
argv.extend(args.formulate())
cmdline = " ".join(argv)
logger.debug(cmdline)
si, so, se = self._client.exec_command(cmdline, 1, get_pty=self._get_pty)
return ParamikoPopen(
argv,
si,
so,
se,
self.custom_encoding,
stdin_file=stdin,
stdout_file=stdout,
stderr_file=stderr)
@_setdoc(BaseRemoteMachine)
def download(self, src, dst):
if isinstance(src, LocalPath):
raise TypeError("src of download cannot be %r" % (src, ))
if isinstance(src, RemotePath) and src.remote != self:
raise TypeError(
"src %r points to a different remote machine" % (src, ))
if isinstance(dst, RemotePath):
raise TypeError("dst of download cannot be %r" % (dst, ))
return self._download(
src if isinstance(src, RemotePath) else self.path(src), dst
if isinstance(dst, LocalPath) else LocalPath(dst))
def _download(self, src, dst):
if src.is_dir():
if not dst.exists():
self.sftp.mkdir(str(dst))
for fn in src:
self._download(fn, dst / fn.name)
elif dst.is_dir():
self.sftp.get(str(src), str(dst / src.name))
else:
self.sftp.get(str(src), str(dst))
@_setdoc(BaseRemoteMachine)
def upload(self, src, dst):
if isinstance(src, RemotePath):
raise TypeError("src of upload cannot be %r" % (src, ))
if isinstance(dst, LocalPath):
raise TypeError("dst of upload cannot be %r" % (dst, ))
if isinstance(dst, RemotePath) and dst.remote != self:
raise TypeError(
"dst %r points to a different remote machine" % (dst, ))
return self._upload(
src if isinstance(src, LocalPath) else LocalPath(src), dst
if isinstance(dst, RemotePath) else self.path(dst))
def _upload(self, src, dst):
if src.is_dir():
if not dst.exists():
self.sftp.mkdir(str(dst))
for fn in src:
self._upload(fn, dst / fn.name)
elif dst.is_dir():
self.sftp.put(str(src), str(dst / src.name))
else:
self.sftp.put(str(src), str(dst))
def connect_sock(self, dport, dhost="localhost", ipv6=False):
"""Returns a Paramiko ``Channel``, connected to dhost:dport on the remote machine.
The ``Channel`` behaves like a regular socket; you can ``send`` and ``recv`` on it
and the data will pass encrypted over SSH. Usage::
mach = ParamikoMachine("myhost")
sock = mach.connect_sock(12345)
data = sock.recv(100)
sock.send("foobar")
sock.close()
"""
if ipv6 and dhost == "localhost":
dhost = "::1"
srcaddr = ("::1", 0, 0, 0) if ipv6 else ("127.0.0.1", 0)
trans = self._client.get_transport()
trans.set_keepalive(self._keep_alive)
chan = trans.open_channel('direct-tcpip', (dhost, dport), srcaddr)
return SocketCompatibleChannel(chan)
#
# Path implementation
#
def _path_listdir(self, fn):
return self.sftp.listdir(str(fn))
def _path_read(self, fn):
f = self.sftp.open(str(fn), 'rb')
data = f.read()
f.close()
return data
def _path_write(self, fn, data):
if self.custom_encoding and isinstance(data, six.unicode_type):
data = data.encode(self.custom_encoding)
f = self.sftp.open(str(fn), 'wb')
f.write(data)
f.close()
def _path_stat(self, fn):
try:
st = self.sftp.stat(str(fn))
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise OSError(e.errno)
res = StatRes((st.st_mode, 0, 0, 0, st.st_uid, st.st_gid, st.st_size,
st.st_atime, st.st_mtime, 0))
if stat.S_ISDIR(st.st_mode):
res.text_mode = 'directory'
if stat.S_ISREG(st.st_mode):
res.text_mode = 'regular file'
return res
###################################################################################################
# Make paramiko.Channel adhere to the socket protocol, namely, send and recv should fail
# when the socket has been closed
###################################################################################################
class SocketCompatibleChannel(object):
def __init__(self, chan):
self._chan = chan
def __getattr__(self, name):
return getattr(self._chan, name)
def send(self, s):
if self._chan.closed:
raise socket.error(errno.EBADF, 'Bad file descriptor')
return self._chan.send(s)
def recv(self, count):
if self._chan.closed:
raise socket.error(errno.EBADF, 'Bad file descriptor')
return self._chan.recv(count)
###################################################################################################
# Custom iter_lines for paramiko.Channel
###################################################################################################
def _iter_lines(proc, decode, linesize, line_timeout=None):
try:
from selectors import DefaultSelector, EVENT_READ
except ImportError:
# Pre Python 3.4 implementation
from select import select
def selector():
while True:
rlist, _, _ = select([proc.stdout.channel], [], [], line_timeout)
if not rlist and line_timeout:
raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
for _ in rlist:
yield
else:
# Python 3.4 implementation
def selector():
sel = DefaultSelector()
sel.register(proc.stdout.channel, EVENT_READ)
while True:
ready = sel.select(line_timeout)
if not ready and line_timeout:
raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
for key, mask in ready:
yield
for _ in selector():
if proc.stdout.channel.recv_ready():
yield 0, proc.stdout.readline(linesize)
if proc.stdout.channel.recv_stderr_ready():
yield 1, proc.stderr.readline(linesize)
if proc.poll() is not None:
break
for line in proc.stdout:
yield 0, line
for line in proc.stderr:
yield 1, line
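# A hedged usage sketch of ParamikoMachine: "examplehost" and the user are
# placeholders, and it naturally requires a reachable SSH server plus the
# paramiko package to be installed.
if __name__ == "__main__":
    mach = ParamikoMachine("examplehost", user="deploy")
    r_ls = mach["ls"]                       # bind the remote `ls` command
    print(r_ls("-la"))                      # runs over the existing SSH transport
    mach.upload("local.txt", "remote.txt")  # SFTP-based upload (see upload() above)
    mach.close()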
|
import os.path
import pytest_bdd as bdd
bdd.scenarios('javascript.feature')
@bdd.then("the window sizes should be the same")
def check_window_sizes(quteproc):
hidden = quteproc.wait_for_js('hidden window size: *')
quteproc.send_cmd(':jseval --world main updateText("visible")')
visible = quteproc.wait_for_js('visible window size: *')
hidden_size = hidden.message.split()[-1]
visible_size = visible.message.split()[-1]
assert hidden_size == visible_size
test_gm_script = r"""
// ==UserScript==
// @name qutebrowser test userscript
// @namespace invalid.org
// @include http://localhost:*/data/hints/iframe.html
// @include http://localhost:*/data/hints/html/wrapped.html
// @exclude ???
// @run-at {stage}
// {frames}
// ==/UserScript==
console.log("Script is running on " + window.location.pathname);
"""
@bdd.when(bdd.parsers.parse("I have a GreaseMonkey file saved for {stage} "
"with noframes {frameset}"))
def create_greasemonkey_file(quteproc, stage, frameset):
script_path = os.path.join(quteproc.basedir, 'data', 'greasemonkey')
try:
os.mkdir(script_path)
except FileExistsError:
pass
file_path = os.path.join(script_path, 'test.user.js')
if frameset == "set":
frames = "@noframes"
elif frameset == "unset":
frames = ""
else:
raise ValueError("noframes can only be set or unset, "
"not {}".format(frameset))
with open(file_path, 'w', encoding='utf-8') as f:
f.write(test_gm_script.format(stage=stage,
frames=frames))
|
import pytest
import numpy as np
from matchzoo.preprocessors import units
@pytest.fixture
def raw_input():
return "This is an Example sentence to BE ! cleaned with digits 31."
@pytest.fixture
def list_input():
return ['this', 'Is', 'a', 'the', 'test', 'lIst', '36', '!', 'input']
@pytest.fixture
def vec_input():
return np.array([[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]])
def test_tokenize_unit(raw_input):
tu = units.Tokenize()
out = tu.transform(raw_input)
assert len(out) == 13
assert 'an' in out
def test_lowercase_unit(list_input):
lu = units.Lowercase()
out = lu.transform(list_input)
assert 'is' in out
def test_digitremoval_unit(list_input):
du = units.DigitRemoval()
out = du.transform(list_input)
assert 36 not in out
def test_puncremoval_unit(list_input):
pu = units.PuncRemoval()
out = pu.transform(list_input)
assert '!' not in out
def test_stopremoval_unit(list_input):
su = units.StopRemoval()
out = su.transform(list_input)
assert 'the' not in out
def test_stemming_unit(list_input):
su_porter = units.Stemming()
out_porter = su_porter.transform(list_input)
assert 'thi' in out_porter
su_lancaster = units.Stemming(stemmer='lancaster')
out_lancaster = su_lancaster.transform(list_input)
assert 'thi' in out_lancaster
su_not_exist = units.Stemming(stemmer='fake_stemmer')
with pytest.raises(ValueError):
su_not_exist.transform(list_input)
def test_lemma_unit(list_input):
lemma = units.Lemmatization()
out = lemma.transform(list_input)
assert 'this' in out
def test_ngram_unit(list_input):
ngram = units.NgramLetter()
out = ngram.transform(list_input)
assert '#a#' in out
ngram = units.NgramLetter(reduce_dim=False)
out = ngram.transform(list_input)
assert len(out) == 9
def test_fixedlength_unit(list_input):
fixedlength = units.FixedLength(3)
out = fixedlength.transform([])
assert list(out) == [0] * 3
out = fixedlength.transform(list_input)
assert list(out) == ['36', '!', 'input']
fixedlength = units.FixedLength(3, truncate_mode='post')
out = fixedlength.transform(list_input)
assert list(out) == ['this', 'Is', 'a']
fixedlength = units.FixedLength(12, pad_value='0',
truncate_mode='pre', pad_mode='pre')
out = fixedlength.transform(list_input)
assert list(out[3:]) == list_input
assert list(out[:3]) == ['0'] * 3
fixedlength = units.FixedLength(12, pad_value='0',
truncate_mode='pre', pad_mode='post')
out = fixedlength.transform(list_input)
assert list(out[:-3]) == list_input
assert list(out[-3:]) == ['0'] * 3
@pytest.fixture(scope='module', params=['CH', 'NH', 'LCH'])
def hist_mode(request):
return request.param
def test_matchinghistogram_unit(hist_mode):
embedding = np.array([[1.0, -1.0], [1.0, 2.0], [1.0, 3.0]])
text_left = [0, 1]
text_right = [1, 2]
histogram = units.MatchingHistogram(3, embedding, True, hist_mode)
out = histogram.transform([text_left, text_right])
out = [[round(elem, 2) for elem in list_val] for list_val in out]
if hist_mode == 'CH':
assert out == [[3.0, 1.0, 1.0], [1.0, 2.0, 2.0]]
elif hist_mode == 'NH':
assert out == [[0.6, 0.2, 0.2], [0.2, 0.4, 0.4]]
elif hist_mode == 'LCH':
assert out == [[1.1, 0.0, 0.0], [0.0, 0.69, 0.69]]
else:
assert False
import matchzoo as mz
def test_this():
train_data = mz.datasets.toy.load_data()
test_data = mz.datasets.toy.load_data(stage='test')
dssm_preprocessor = mz.preprocessors.DSSMPreprocessor()
train_data_processed = dssm_preprocessor.fit_transform(
train_data, verbose=0)
type(train_data_processed)
test_data_transformed = dssm_preprocessor.transform(test_data)
type(test_data_transformed)
import tempfile
import os
def test_bert_tokenizer_unit():
vocab_tokens = [
"[PAD]", "further", "##more", ",", "under", "the", "micro", "##scope", "neither",
"entity", "contains", "glands", ".", "此", "外", "在", "显", "微", "镜", "下"
]
raw_text = "furthermore, \r under the microscope \t neither entity \n contains sebaceous glands. 此外, 在显微镜下"
golden_tokens = ['further', '##more', ',', 'under', 'the', 'micro', '##scope', 'neither', 'entity', 'contains',
'[UNK]', 'glands', '.', '此', '外', ',', '在', '显', '微', '镜', '下']
vocab_dict = {}
for idx, token in enumerate(vocab_tokens):
vocab_dict[token] = idx
clean_unit = units.BertClean()
cleaned_text = clean_unit.transform(raw_text)
chinese_tokenize_unit = units.ChineseTokenize()
chinese_tokenized_text = chinese_tokenize_unit.transform(cleaned_text)
basic_tokenize_unit = units.BasicTokenize()
basic_tokens = basic_tokenize_unit.transform(chinese_tokenized_text)
wordpiece_unit = units.WordPieceTokenize(vocab_dict)
wordpiece_tokens = wordpiece_unit.transform(basic_tokens)
assert wordpiece_tokens == golden_tokens
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1
# The names of the classes.
_CLASS_NAMES = [
'zero',
'one',
'two',
'three',
'four',
'five',
    'six',
'seven',
'eight',
'nine',
]
def _extract_images(filename, num_images):
"""Extract the images into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
print('Extracting images from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
_IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
return data
def _extract_labels(filename, num_labels):
"""Extract the labels into a vector of int64 label IDs.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A numpy array of shape [number_of_labels]
"""
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _add_to_tfrecord(data_filename, labels_filename, num_images,
tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
images = _extract_images(data_filename, num_images)
labels = _extract_labels(labels_filename, num_images)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(
png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_%s.tfrecord' % (dataset_dir, split_name)
def _download_dataset(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
filepath,
_progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_dataset(dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the MNIST dataset!')
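# Hedged usage sketch: '/tmp/mnist' is an arbitrary example directory. Note that
# this module targets TensorFlow 1.x APIs (tf.gfile, tf.python_io, tf.Session).
if __name__ == '__main__':
    run('/tmp/mnist')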
|
from homeassistant.components.image_processing import DOMAIN, SERVICE_SCAN
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL
from homeassistant.core import callback
from homeassistant.loader import bind_hass
@bind_hass
def scan(hass, entity_id=ENTITY_MATCH_ALL):
"""Force process of all cameras or given entity."""
hass.add_job(async_scan, hass, entity_id)
@callback
@bind_hass
def async_scan(hass, entity_id=ENTITY_MATCH_ALL):
"""Force process of all cameras or given entity."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_SCAN, data))
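# Hedged usage sketch: inside a running Home Assistant instance (with ``hass``
# available) the helpers above could be called as follows; the entity id is a
# hypothetical example.
#
#   scan(hass, entity_id="image_processing.front_door")        # from any thread
#   async_scan(hass, entity_id="image_processing.front_door")  # from the event loop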
|
import re
import ast
import os
import os.path
from scripts import setupcommon as common
import setuptools
try:
BASEDIR = os.path.dirname(os.path.realpath(__file__))
except NameError:
BASEDIR = None
def read_file(name):
"""Get the string contained in the file named name."""
with common.open_file(name, 'r', encoding='utf-8') as f:
return f.read()
def _get_constant(name):
"""Read a __magic__ constant from qutebrowser/__init__.py.
We don't import qutebrowser here because it can go wrong for multiple
reasons. Instead we use re/ast to get the value directly from the source
file.
Args:
name: The name of the argument to get.
Return:
The value of the argument.
"""
field_re = re.compile(r'__{}__\s+=\s+(.*)'.format(re.escape(name)))
path = os.path.join(BASEDIR, 'qutebrowser', '__init__.py')
line = field_re.search(read_file(path)).group(1)
value = ast.literal_eval(line)
return value
try:
common.write_git_file()
setuptools.setup(
packages=setuptools.find_packages(exclude=['scripts', 'scripts.*']),
include_package_data=True,
entry_points={'gui_scripts':
['qutebrowser = qutebrowser.qutebrowser:main']},
zip_safe=True,
install_requires=['pypeg2', 'jinja2', 'pygments', 'PyYAML', 'attrs'],
python_requires='>=3.6',
name='qutebrowser',
version=_get_constant('version'),
description=_get_constant('description'),
long_description=read_file('README.asciidoc'),
long_description_content_type='text/plain',
url='https://www.qutebrowser.org/',
author=_get_constant('author'),
author_email=_get_constant('email'),
license=_get_constant('license'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 or later '
'(GPLv3+)',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Operating System :: POSIX :: BSD',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Browsers',
],
keywords='pyqt browser web qt webkit qtwebkit qtwebengine',
)
finally:
if BASEDIR is not None:
path = os.path.join(BASEDIR, 'qutebrowser', 'git-commit-id')
if os.path.exists(path):
os.remove(path)
|
from __future__ import absolute_import
def signature(func, param_num, params):
    """Format a name as ``func(arg1,...,kw=val,...)`` from the call parameters."""
args = ','.join("{!r}".format(a) for a in params.args)
kwargs = ','.join("{}={!r}".format(k, v) for k,v in params.kwargs.items())
if args and kwargs:
return "{}({},{})".format(func.__name__, args, kwargs)
else:
return "{}({})".format(func.__name__, args or kwargs)
def firstparam(func, param_num, params):
    """Format a name as ``func(first_positional_arg)``."""
return "{}({!r})".format(func.__name__, params.args[0])
def num(func, param_num, params):
    """Format a name as ``func_<param_num>``."""
return "{}_{}".format(func.__name__, param_num)
|
import os
import yaml
__all__ = ['source']
_DEFAULT = {
'REGISTRY_PORT': '5000',
'REGISTRY_HOST': '0.0.0.0',
'SETTINGS_FLAVOR': 'dev',
'GUNICORN_WORKERS': '4',
'GUNICORN_GRACEFUL_TIMEOUT': '3600',
'GUNICORN_SILENT_TIMEOUT': '3600',
'GUNICORN_USER': '',
'GUNICORN_GROUP': '',
'GUNICORN_ACCESS_LOG_FILE': '"-"',
'GUNICORN_ERROR_LOG_FILE': '"-"',
'GUNICORN_OPTS': '[]',
'NEW_RELIC_LICENSE_KEY': '',
'NEW_RELIC_CONFIG_FILE': '',
'NEW_RELIC_ENVIRONMENT': 'dev'
}
def source(key, override=''):
    # Using yaml gives us proper typing (e.g. '5000' is parsed into the int 5000)
    return yaml.safe_load(
        os.environ.get(key, _DEFAULT[key] if key in _DEFAULT else override))
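# A hedged illustration of the YAML-based typing: with none of these variables
# set in the environment, the string defaults above are parsed into native
# Python types.
if __name__ == '__main__':
    print(source('REGISTRY_PORT'))      # 5000 (an int, not the string '5000')
    print(source('GUNICORN_OPTS'))      # []   (a real list)
    print(source('UNKNOWN_KEY', '42'))  # 42   (falls back to the override value)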
|
import json
import time
import uuid
from contextlib import contextmanager
from queue import Empty
from threading import Condition
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from kazoo.client import KazooClient
from kazoo.exceptions import NodeExistsError
from kazoo.exceptions import NoNodeError
from kazoo.protocol.states import WatchedEvent
from kazoo.protocol.states import ZnodeStat
from paasta_tools.deployd.common import DelayDeadlineQueueProtocol
from paasta_tools.deployd.common import ServiceInstance
DEPLOYD_QUEUE_ROOT = "/paasta-deployd-queue"
MAX_SLEEP_TIME = 3600
class ZKDelayDeadlineQueue(DelayDeadlineQueueProtocol):
def __init__(self, client: KazooClient, path: str = DEPLOYD_QUEUE_ROOT) -> None:
self.client = client
self.id = uuid.uuid4().hex.encode()
self.locks_path = path + "/locks"
self.entries_path = path + "/entries"
for path in (self.locks_path, self.entries_path):
self.client.ensure_path(path)
self.local_state_condition = Condition()
self.entry_nodes: List[str] = []
self.locked_entry_nodes: Set[str] = set()
self._update_local_state(None)
def _update_local_state(self, event: WatchedEvent) -> None:
with self.local_state_condition:
entry_nodes = self.client.retry(
self.client.get_children,
self.entries_path,
watch=self._update_local_state,
)
self.entry_nodes = sorted(entry_nodes)
self.locked_entry_nodes = set(
self.client.retry(
self.client.get_children,
self.locks_path,
watch=self._update_local_state,
)
)
self.local_state_condition.notify()
def _format_timestamp(self, timestamp: float) -> str:
if not isinstance(timestamp, (int, float)):
raise TypeError(f"timestamp must be int or float, got {timestamp!r}")
if not (0 < timestamp < 9999999999.9995):
raise ValueError(
f"timestamp must be between 0 and 9999999999.9995, got {timestamp}"
)
formatted = f"{timestamp:014.3f}"
assert len(formatted) == 14
return formatted
def put(self, si: ServiceInstance) -> None:
bounce_by = self._format_timestamp(si.bounce_by)
wait_until = self._format_timestamp(si.wait_until)
self.client.create(
f"{self.entries_path}/entry-{bounce_by}-{wait_until}-",
value=self._serialize_si(si),
sequence=True,
)
def _serialize_si(self, si: ServiceInstance) -> bytes:
si_dict = si._asdict()
return json.dumps(si_dict).encode("utf-8")
@contextmanager
def get(
self, block: bool = True, timeout: float = float("inf")
) -> Generator[ServiceInstance, None, None]:
if not block:
timeout = 0.0
timeout_timestamp = time.time() + timeout
entry = None
with self.local_state_condition:
while True:
first_available_entry_node = self._get_first_available_entry_node()
if first_available_entry_node is not None:
entry = self._lock_and_get_entry(first_available_entry_node)
if entry is not None:
break
next_upcoming_wait_until = self._get_next_upcoming_wait_until()
cond_wait_until = min(
timeout_timestamp,
next_upcoming_wait_until,
time.time() + MAX_SLEEP_TIME,
)
hit_timeout = not self.local_state_condition.wait(
timeout=cond_wait_until - time.time()
)
if hit_timeout and time.time() >= timeout_timestamp:
raise Empty()
entry_data, entry_stat = entry
try:
yield self._parse_data(entry_data)
except Exception:
self._release(first_available_entry_node)
raise
else:
self._consume(first_available_entry_node)
def _parse_data(self, entry_data: bytes) -> ServiceInstance:
now = time.time()
defaults = {
"watcher": "unknown",
"bounce_by": now,
"wait_until": now,
"enqueue_time": now,
"bounce_start_time": now,
"failures": 0,
}
si_dict = json.loads(entry_data.decode("utf-8"))
merged = {**defaults, **si_dict}
return ServiceInstance(**merged) # type: ignore
def _parse_entry_node(self, path: str) -> Tuple[float, float]:
basename = path.split("/")[-1]
_, priority, wait_until, _ = basename.split("-", maxsplit=4)
return float(priority), float(wait_until)
def _get_first_available_entry_node(self) -> Optional[str]:
for entry_node in self.entry_nodes:
if entry_node not in self.locked_entry_nodes:
_, wait_until = self._parse_entry_node(entry_node)
now = time.time()
if wait_until <= now:
return entry_node
return None
def _get_next_upcoming_wait_until(self) -> float:
next_upcoming_wait_until = float("inf")
for entry_node in self.entry_nodes:
if entry_node not in self.locked_entry_nodes:
_, wait_until = self._parse_entry_node(entry_node)
next_upcoming_wait_until = min(next_upcoming_wait_until, wait_until)
return next_upcoming_wait_until
def _lock_and_get_entry(self, entry_node: str) -> Optional[Tuple[bytes, ZnodeStat]]:
try:
lock_path = f"{self.locks_path}/{entry_node}"
self.locked_entry_nodes.add(entry_node)
self.client.create(lock_path, value=self.id, ephemeral=True)
except NodeExistsError:
self.locked_entry_nodes.add(entry_node)
return None
try:
return self.client.get(f"{self.entries_path}/{entry_node}")
except NoNodeError:
self.client.delete(lock_path)
return None
def _consume(self, entry_node: str) -> None:
# necessary in case we lose connection at some point
if not self._holds_lock(entry_node):
return # TODO: log?
with self.client.transaction() as transaction:
transaction.delete(f"{self.locks_path}/{entry_node}")
transaction.delete(f"{self.entries_path}/{entry_node}")
def _holds_lock(self, entry_node: str) -> bool:
lock_path = f"{self.locks_path}/{entry_node}"
self.client.sync(lock_path)
value, stat = self.client.retry(self.client.get, lock_path)
return value == self.id
def _release(self, entry_node: str) -> None:
if not self._holds_lock(entry_node):
return
self.client.delete(f"{self.locks_path}/{entry_node}")
def _get_all_unlocked_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, float, Optional[ServiceInstance]]]:
results = []
with self.local_state_condition:
for entry_node in self.entry_nodes:
if entry_node not in self.locked_entry_nodes:
deadline, wait_until = self._parse_entry_node(entry_node)
if fetch_service_instances:
data, _ = self.client.get(f"{self.entries_path}/{entry_node}")
si = self._parse_data(data)
else:
si = None
results.append((deadline, wait_until, si))
return results
def get_available_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, Optional[ServiceInstance]]]:
now = time.time()
for deadline, wait_until, si in self._get_all_unlocked_service_instances(
fetch_service_instances
):
if wait_until <= now:
yield (deadline, si)
def get_unavailable_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, float, Optional[ServiceInstance]]]:
now = time.time()
for deadline, wait_until, si in self._get_all_unlocked_service_instances(
fetch_service_instances
):
if wait_until > now:
yield (wait_until, deadline, si)
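# A hedged usage sketch: it assumes a ZooKeeper ensemble reachable at
# localhost:2181; ServiceInstance values are normally produced elsewhere by the
# deployd code, so none is constructed here.
if __name__ == "__main__":
    zk = KazooClient(hosts="localhost:2181")
    zk.start()
    queue = ZKDelayDeadlineQueue(zk)
    # queue.put(si)  # enqueue a ServiceInstance built by deployd
    try:
        with queue.get(block=True, timeout=5) as si:
            print(si.bounce_by, si.wait_until)
    except Empty:
        print("no service instance became available within 5 seconds")
    zk.stop()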
|
import warnings
from sklearn.decomposition import PCA, FastICA, IncrementalPCA, KernelPCA, FactorAnalysis, TruncatedSVD, SparsePCA, MiniBatchSparsePCA, DictionaryLearning, MiniBatchDictionaryLearning
from sklearn.manifold import TSNE, MDS, SpectralEmbedding, LocallyLinearEmbedding, Isomap
from umap import UMAP
from .._shared.helpers import *
from .normalize import normalize as normalizer
from .align import align as aligner
from .format_data import format_data as formatter
# dictionary of models
models = {
'PCA': PCA,
'IncrementalPCA': IncrementalPCA,
'SparsePCA': SparsePCA,
'MiniBatchSparsePCA': MiniBatchSparsePCA,
'KernelPCA': KernelPCA,
'FastICA': FastICA,
'FactorAnalysis': FactorAnalysis,
'TruncatedSVD': TruncatedSVD,
'DictionaryLearning': DictionaryLearning,
'MiniBatchDictionaryLearning': MiniBatchDictionaryLearning,
'TSNE': TSNE,
'Isomap': Isomap,
'SpectralEmbedding': SpectralEmbedding,
'LocallyLinearEmbedding': LocallyLinearEmbedding,
'MDS': MDS,
'UMAP': UMAP
}
# main function
@memoize
def reduce(x, reduce='IncrementalPCA', ndims=None, normalize=None, align=None,
model=None, model_params=None, internal=False, format_data=True):
"""
Reduces dimensionality of an array, or list of arrays
Parameters
----------
x : Numpy array or list of arrays
        Dimensionality reduction is performed on this array (or list of arrays).
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, MDS and UMAP.
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA',
'params' : {'whiten' : True}}. See scikit-learn specific model docs
for details on parameters supported for each model.
ndims : int
Number of dimensions to reduce
format_data : bool
Whether or not to first call the format_data function (default: True).
model : None
Deprecated argument. Please use reduce.
model_params : None
Deprecated argument. Please use reduce.
align : None
        Deprecated argument. Please use the new analyze function to perform
        combinations of transformations.
normalize : None
        Deprecated argument. Please use the new analyze function to perform
        combinations of transformations.
Returns
----------
x_reduced : Numpy array or list of arrays
The reduced data with ndims dimensionality is returned. If the input
is a list, a list is returned.
"""
# deprecation warning
if (model is not None) or (model_params is not None):
warnings.warn('Model and model params will be deprecated. Please use the \
reduce keyword. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce')
reduce = {
'model': model,
'params': model_params
}
# if model is None, just return data
if reduce is None:
return x
elif isinstance(reduce, (str, np.string_)):
model_name = reduce
model_params = {
'n_components': ndims
}
elif isinstance(reduce, dict):
try:
model_name = reduce['model']
model_params = reduce['params']
except KeyError:
raise ValueError('If passing a dictionary, pass the model as the value of the "model" key and a \
dictionary of custom params as the value of the "params" key.')
else:
# handle other possibilities below
model_name = reduce
try:
# if the model passed is a string, make sure it's one of the supported options
if isinstance(model_name, (str, np.string_)):
model = models[model_name]
# otherwise check any custom object for necessary methods
else:
model = model_name
getattr(model, 'fit_transform')
getattr(model, 'n_components')
except (KeyError, AttributeError):
raise ValueError('reduce must be one of the supported options or support n_components and fit_transform \
methods. See http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce \
for supported models')
# check for multiple values from n_components & ndims args
if 'n_components' in model_params:
if (ndims is None) or (ndims == model_params['n_components']):
pass
else:
warnings.warn('Unequal values passed to dims and n_components. Using ndims parameter.')
model_params['n_components'] = ndims
else:
model_params['n_components'] = ndims
# convert to common format
if format_data:
x = formatter(x, ppca=True)
# if ndims/n_components is not passed or all data is < ndims-dimensional, just return it
if model_params['n_components'] is None or all([i.shape[1] <= model_params['n_components'] for i in x]):
return x
stacked_x = np.vstack(x)
if stacked_x.shape[0] == 1:
        warnings.warn('Cannot reduce the dimensionality of a single row of'
                      ' data. Returning zeros of length ndims.')
return [np.zeros((1, model_params['n_components']))]
elif stacked_x.shape[0] < model_params['n_components']:
warnings.warn('The number of rows in your data is less than ndims.'
' The data will be reduced to the number of rows.')
# deprecation warnings
if normalize is not None:
warnings.warn('The normalize argument will be deprecated for this function. Please use the \
analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')
x = normalizer(x, normalize=normalize)
if align is not None:
warnings.warn('The align argument will be deprecated for this function. Please use the \
analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')
x = aligner(x, align=align)
# initialize model
model = model(**model_params)
# reduce data
x_reduced = reduce_list(x, model)
# return data
if internal or len(x_reduced) > 1:
return x_reduced
else:
return x_reduced[0]
# sub functions
def reduce_list(x, model):
split = np.cumsum([len(xi) for xi in x])[:-1]
x_r = np.vsplit(model.fit_transform(np.vstack(x)), split)
if len(x) > 1:
return [xi for xi in x_r]
else:
return [x_r[0]]
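# A minimal usage sketch (not part of the original module). It assumes the
# public function defined above is hypertools' ``reduce`` and uses synthetic
# data; the model/parameter choices are illustrative only.
def _example_reduce_usage():
    data = [np.random.rand(20, 10), np.random.rand(25, 10)]
    # string form: pick a supported model by name
    reduced_pca = reduce(data, reduce='PCA', ndims=3)
    # dict form: finer control over the model parameters
    reduced_whitened = reduce(
        data, reduce={'model': 'PCA', 'params': {'whiten': True}}, ndims=3)
    return reduced_pca, reduced_whitened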
|
import asyncio
import upb_lib
from homeassistant.const import CONF_FILE_PATH, CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ADDRESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COMMAND,
ATTR_RATE,
DOMAIN,
EVENT_UPB_SCENE_CHANGED,
)
UPB_PLATFORMS = ["light", "scene"]
async def async_setup(hass: HomeAssistant, hass_config: ConfigType) -> bool:
"""Set up the UPB platform."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up a new config_entry for UPB PIM."""
url = config_entry.data[CONF_HOST]
file = config_entry.data[CONF_FILE_PATH]
upb = upb_lib.UpbPim({"url": url, "UPStartExportFile": file})
upb.connect()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = {"upb": upb}
for component in UPB_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
def _element_changed(element, changeset):
change = changeset.get("last_change")
if change is None:
return
if change.get("command") is None:
return
hass.bus.async_fire(
EVENT_UPB_SCENE_CHANGED,
{
ATTR_COMMAND: change["command"],
ATTR_ADDRESS: element.addr.index,
ATTR_BRIGHTNESS_PCT: change.get("level", -1),
ATTR_RATE: change.get("rate", -1),
},
)
for link in upb.links:
element = upb.links[link]
element.add_callback(_element_changed)
return True
async def async_unload_entry(hass, config_entry):
"""Unload the config_entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in UPB_PLATFORMS
]
)
)
if unload_ok:
upb = hass.data[DOMAIN][config_entry.entry_id]["upb"]
upb.disconnect()
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
class UpbEntity(Entity):
"""Base class for all UPB entities."""
def __init__(self, element, unique_id, upb):
"""Initialize the base of all UPB devices."""
self._upb = upb
self._element = element
element_type = "link" if element.addr.is_link else "device"
self._unique_id = f"{unique_id}_{element_type}_{element.addr}"
@property
def name(self):
"""Name of the element."""
return self._element.name
@property
def unique_id(self):
"""Return unique id of the element."""
return self._unique_id
@property
def should_poll(self) -> bool:
"""Don't poll this device."""
return False
@property
def device_state_attributes(self):
"""Return the default attributes of the element."""
return self._element.as_dict()
@property
def available(self):
"""Is the entity available to be updated."""
return self._upb.is_connected()
def _element_changed(self, element, changeset):
pass
@callback
def _element_callback(self, element, changeset):
"""Handle callback from an UPB element that has changed."""
self._element_changed(element, changeset)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callback for UPB changes and update entity state."""
self._element.add_callback(self._element_callback)
self._element_callback(self._element, {})
class UpbAttachedEntity(UpbEntity):
"""Base class for UPB attached entities."""
@property
def device_info(self):
"""Device info for the entity."""
return {
"name": self._element.name,
"identifiers": {(DOMAIN, self._element.index)},
"sw_version": self._element.version,
"manufacturer": self._element.manufacturer,
"model": self._element.product,
}
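# A minimal sketch (not part of the original integration) of consuming the
# EVENT_UPB_SCENE_CHANGED event fired in async_setup_entry above; the helper
# name is hypothetical and purely illustrative.
def _example_listen_for_scene_changes(hass: HomeAssistant):
    def _handle(event):
        # The event data carries ATTR_COMMAND, ATTR_ADDRESS,
        # ATTR_BRIGHTNESS_PCT and ATTR_RATE, as fired above.
        print(event.data[ATTR_COMMAND], event.data[ATTR_ADDRESS])
    # async_listen returns a callable that removes the listener again.
    return hass.bus.async_listen(EVENT_UPB_SCENE_CHANGED, _handle)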
|
import importlib
from operator import attrgetter
from pathlib import Path
from pprint import pformat
from textwrap import indent
from types import SimpleNamespace
INCLUDES_DIR = Path(__file__).parent.resolve()
CERBERUS_DIR = INCLUDES_DIR.parent.parent / 'cerberus'
def load_module_members(name, path):
module_spec = importlib.util.spec_from_file_location(name, path)
_module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(_module)
return vars(_module)
errors_module = load_module_members('errors', CERBERUS_DIR / 'errors.py')
error_type = errors_module['ErrorDefinition']
error_definitions = []
for name, member in errors_module.items():
if not isinstance(member, error_type):
continue
error_definition = SimpleNamespace(code=member.code, rule=member.rule)
error_definition.name = name
error_definitions.append(error_definition)
error_definitions.sort(key=attrgetter('code'))
with (INCLUDES_DIR / 'error-codes.rst').open('wt') as f:
print(
"""
.. list-table::
:header-rows: 1
* - Code (dec.)
- Code (hex.)
- Name
- Rule""".lstrip(
'\n'
),
file=f,
)
for error_definition in error_definitions:
print(
f"""
* - {error_definition.code}
- {hex(error_definition.code)}
- {error_definition.name}
- {error_definition.rule}""".lstrip(
'\n'
),
file=f,
)
print('Generated table with ErrorDefinitions.')
validator_module = load_module_members('validator', CERBERUS_DIR / 'validator.py')
validator = validator_module['Validator']()
schema_validation_schema = pformat(
validator.rules, width=68
) # width seems w/o effect, use black?
with (INCLUDES_DIR / 'schema-validation-schema.rst').open('wt') as f:
print(
'.. code-block:: python\n\n', indent(schema_validation_schema, ' '), file=f
)
print("Generated schema for a vanilla validator's, well, schema.")
|
from abodepy import Abode
from abodepy.exceptions import AbodeException
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, HTTP_BAD_REQUEST
from homeassistant.core import callback
from .const import DEFAULT_CACHEDB, DOMAIN, LOGGER # pylint: disable=unused-import
CONF_POLLING = "polling"
class AbodeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for Abode."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self.data_schema = {
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if not user_input:
return self._show_form()
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
polling = user_input.get(CONF_POLLING, False)
cache = self.hass.config.path(DEFAULT_CACHEDB)
try:
await self.hass.async_add_executor_job(
Abode, username, password, True, True, True, cache
)
except (AbodeException, ConnectTimeout, HTTPError) as ex:
LOGGER.error("Unable to connect to Abode: %s", str(ex))
if ex.errcode == HTTP_BAD_REQUEST:
return self._show_form({"base": "invalid_auth"})
return self._show_form({"base": "cannot_connect"})
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data={
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_POLLING: polling,
},
)
@callback
def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(self.data_schema),
errors=errors if errors else {},
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
if self._async_current_entries():
LOGGER.warning("Only one configuration of abode is allowed.")
return self.async_abort(reason="single_instance_allowed")
return await self.async_step_user(import_config)
|
import os
import pytest
import random
import string
from queue import Empty
from unittest.mock import Mock, patch
from kombu import messaging
from kombu import Connection, Exchange, Queue
boto3 = pytest.importorskip('boto3')
from kombu.transport import SQS # noqa
from botocore.exceptions import ClientError # noqa
SQS_Channel_sqs = SQS.Channel.sqs
example_predefined_queues = {
'queue-1': {
'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue-1',
'access_key_id': 'a',
'secret_access_key': 'b',
},
'queue-2': {
'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue-2',
'access_key_id': 'c',
'secret_access_key': 'd',
},
}
class SQSMessageMock:
def __init__(self):
"""
Imitate the SQS Message from boto3.
"""
self.body = ""
self.receipt_handle = "receipt_handle_xyz"
class QueueMock:
""" Hold information about a queue. """
def __init__(self, url, creation_attributes=None):
self.url = url
# arguments of boto3.sqs.create_queue
self.creation_attributes = creation_attributes
self.attributes = {'ApproximateNumberOfMessages': '0'}
self.messages = []
def __repr__(self):
return 'QueueMock: {} {} messages'.format(self.url, len(self.messages))
class SQSClientMock:
def __init__(self, QueueName='unittest_queue'):
"""
Imitate the SQS Client from boto3.
"""
self._receive_messages_calls = 0
# _queues doesn't exist on the real client, here for testing.
self._queues = {}
url = self.create_queue(QueueName=QueueName)['QueueUrl']
self.send_message(QueueUrl=url, MessageBody='hello')
def _get_q(self, url):
""" Helper method to quickly get a queue. """
for q in self._queues.values():
if q.url == url:
return q
raise Exception(f"Queue url {url} not found")
def create_queue(self, QueueName=None, Attributes=None):
q = self._queues[QueueName] = QueueMock(
'https://sqs.us-east-1.amazonaws.com/xxx/' + QueueName,
Attributes,
)
return {'QueueUrl': q.url}
def list_queues(self, QueueNamePrefix=None):
""" Return a list of queue urls """
urls = (val.url for key, val in self._queues.items()
if key.startswith(QueueNamePrefix))
return {'QueueUrls': urls}
def get_queue_url(self, QueueName=None):
return self._queues[QueueName]
def send_message(self, QueueUrl=None, MessageBody=None):
for q in self._queues.values():
if q.url == QueueUrl:
handle = ''.join(random.choice(string.ascii_lowercase) for
x in range(10))
q.messages.append({'Body': MessageBody,
'ReceiptHandle': handle})
break
def receive_message(self, QueueUrl=None, MaxNumberOfMessages=1,
WaitTimeSeconds=10):
self._receive_messages_calls += 1
for q in self._queues.values():
if q.url == QueueUrl:
msgs = q.messages[:MaxNumberOfMessages]
q.messages = q.messages[MaxNumberOfMessages:]
return {'Messages': msgs} if msgs else {}
def get_queue_attributes(self, QueueUrl=None, AttributeNames=None):
if 'ApproximateNumberOfMessages' in AttributeNames:
count = len(self._get_q(QueueUrl).messages)
return {'Attributes': {'ApproximateNumberOfMessages': count}}
def purge_queue(self, QueueUrl=None):
for q in self._queues.values():
if q.url == QueueUrl:
q.messages = []
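# A short illustration (not collected by pytest) of how SQSClientMock mirrors
# the boto3 client calls used by the transport; the queue name is arbitrary.
def _example_sqs_client_mock_roundtrip():
    client = SQSClientMock(QueueName='example')
    url = client.create_queue(QueueName='demo')['QueueUrl']
    client.send_message(QueueUrl=url, MessageBody='ping')
    messages = client.receive_message(QueueUrl=url)['Messages']
    return messages[0]['Body']  # -> 'ping'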
class test_Channel:
def handleMessageCallback(self, message):
self.callback_message = message
def setup(self):
"""Mock the back-end SQS classes"""
# Sanity check... if SQS is None, then it did not import and we
# cannot execute our tests.
SQS.Channel._queue_cache.clear()
# Common variables used in the unit tests
self.queue_name = 'unittest'
# Mock the sqs() method that returns an SQSConnection object and
# instead return an SQSConnectionMock() object.
sqs_conn_mock = SQSClientMock()
self.sqs_conn_mock = sqs_conn_mock
predefined_queues_sqs_conn_mocks = {
'queue-1': SQSClientMock(QueueName='queue-1'),
'queue-2': SQSClientMock(QueueName='queue-2'),
}
def mock_sqs():
def sqs(self, queue=None):
if queue in predefined_queues_sqs_conn_mocks:
return predefined_queues_sqs_conn_mocks[queue]
return sqs_conn_mock
return sqs
SQS.Channel.sqs = mock_sqs()
# Set up a task exchange for passing tasks through the queue
self.exchange = Exchange('test_SQS', type='direct')
self.queue = Queue(self.queue_name, self.exchange, self.queue_name)
# Mock up a test SQS Queue with the QueueMock class (and always
# make sure its a clean empty queue)
self.sqs_queue_mock = QueueMock('sqs://' + self.queue_name)
# Now, create our Connection object with the SQS Transport and store
# the connection/channel objects as references for use in these tests.
self.connection = Connection(transport=SQS.Transport)
self.channel = self.connection.channel()
self.queue(self.channel).declare()
self.producer = messaging.Producer(self.channel,
self.exchange,
routing_key=self.queue_name)
# Lastly, make sure that we're set up to 'consume' this queue.
self.channel.basic_consume(self.queue_name,
no_ack=False,
callback=self.handleMessageCallback,
consumer_tag='unittest')
def teardown(self):
# Removes QoS reserved messages so we don't restore msgs on shutdown.
try:
qos = self.channel._qos
except AttributeError:
pass
else:
if qos:
qos._dirty.clear()
qos._delivered.clear()
def test_init(self):
"""kombu.SQS.Channel instantiates correctly with mocked queues"""
assert self.queue_name in self.channel._queue_cache
def test_region(self):
_environ = dict(os.environ)
# when the region is unspecified
connection = Connection(transport=SQS.Transport)
channel = connection.channel()
assert channel.transport_options.get('region') is None
# the default region is us-east-1
assert channel.region == 'us-east-1'
# when boto3 picks a region
os.environ['AWS_DEFAULT_REGION'] = 'us-east-2'
assert boto3.Session().region_name == 'us-east-2'
# the default region should match
connection = Connection(transport=SQS.Transport)
channel = connection.channel()
assert channel.region == 'us-east-2'
# when transport_options are provided
connection = Connection(transport=SQS.Transport, transport_options={
'region': 'us-west-2'
})
channel = connection.channel()
assert channel.transport_options.get('region') == 'us-west-2'
# the specified region should be used
assert connection.channel().region == 'us-west-2'
os.environ.clear()
os.environ.update(_environ)
def test_endpoint_url(self):
url = 'sqs://@localhost:5493'
self.connection = Connection(hostname=url, transport=SQS.Transport)
self.channel = self.connection.channel()
self.channel._sqs = None
expected_endpoint_url = 'http://localhost:5493'
assert self.channel.endpoint_url == expected_endpoint_url
boto3_sqs = SQS_Channel_sqs.__get__(self.channel, SQS.Channel)
assert boto3_sqs()._endpoint.host == expected_endpoint_url
def test_none_hostname_persists(self):
conn = Connection(hostname=None, transport=SQS.Transport)
assert conn.hostname == conn.clone().hostname
def test_entity_name(self):
assert self.channel.entity_name('foo') == 'foo'
assert self.channel.entity_name('foo.bar-baz*qux_quux') == \
'foo-bar-baz_qux_quux'
assert self.channel.entity_name('abcdef.fifo') == 'abcdef.fifo'
def test_new_queue(self):
queue_name = 'new_unittest_queue'
self.channel._new_queue(queue_name)
assert queue_name in self.sqs_conn_mock._queues.keys()
# For cleanup purposes, delete the queue and the queue file
self.channel._delete(queue_name)
def test_new_queue_custom_creation_attributes(self):
self.connection.transport_options['sqs-creation-attributes'] = {
'KmsMasterKeyId': 'alias/aws/sqs',
}
queue_name = 'new_custom_attribute_queue'
self.channel._new_queue(queue_name)
assert queue_name in self.sqs_conn_mock._queues.keys()
queue = self.sqs_conn_mock._queues[queue_name]
assert 'KmsMasterKeyId' in queue.creation_attributes
assert queue.creation_attributes['KmsMasterKeyId'] == 'alias/aws/sqs'
# For cleanup purposes, delete the queue and the queue file
self.channel._delete(queue_name)
def test_botocore_config_override(self):
expected_connect_timeout = 5
client_config = {'connect_timeout': expected_connect_timeout}
self.connection = Connection(
transport=SQS.Transport,
transport_options={'client-config': client_config},
)
self.channel = self.connection.channel()
self.channel._sqs = None
boto3_sqs = SQS_Channel_sqs.__get__(self.channel, SQS.Channel)
botocore_config = boto3_sqs()._client_config
assert botocore_config.connect_timeout == expected_connect_timeout
def test_dont_create_duplicate_new_queue(self):
        # All queue names start with "q", except "unittest_queue",
        # which is definitely out of the cache when get_all_queues returns
        # the first 1000 queues sorted by name.
queue_name = 'unittest_queue'
# This should not create a new queue.
self.channel._new_queue(queue_name)
assert queue_name in self.sqs_conn_mock._queues.keys()
queue = self.sqs_conn_mock._queues[queue_name]
# The queue originally had 1 message in it.
assert 1 == len(queue.messages)
assert 'hello' == queue.messages[0]['Body']
def test_delete(self):
queue_name = 'new_unittest_queue'
self.channel._new_queue(queue_name)
self.channel._delete(queue_name)
assert queue_name not in self.channel._queue_cache
def test_get_from_sqs(self):
# Test getting a single message
message = 'my test message'
self.producer.publish(message)
result = self.channel._get(self.queue_name)
assert 'body' in result.keys()
# Now test getting many messages
for i in range(3):
message = f'message: {i}'
self.producer.publish(message)
self.channel._get_bulk(self.queue_name, max_if_unlimited=3)
assert len(self.sqs_conn_mock._queues[self.queue_name].messages) == 0
def test_get_with_empty_list(self):
with pytest.raises(Empty):
self.channel._get(self.queue_name)
def test_get_bulk_raises_empty(self):
with pytest.raises(Empty):
self.channel._get_bulk(self.queue_name)
def test_messages_to_python(self):
from kombu.asynchronous.aws.sqs.message import Message
kombu_message_count = 3
json_message_count = 3
# Create several test messages and publish them
for i in range(kombu_message_count):
message = 'message: %s' % i
self.producer.publish(message)
# json formatted message NOT created by kombu
for i in range(json_message_count):
message = {'foo': 'bar'}
self.channel._put(self.producer.routing_key, message)
q_url = self.channel._new_queue(self.queue_name)
# Get the messages now
kombu_messages = []
for m in self.sqs_conn_mock.receive_message(
QueueUrl=q_url,
MaxNumberOfMessages=kombu_message_count)['Messages']:
m['Body'] = Message(body=m['Body']).decode()
kombu_messages.append(m)
json_messages = []
for m in self.sqs_conn_mock.receive_message(
QueueUrl=q_url,
MaxNumberOfMessages=json_message_count)['Messages']:
m['Body'] = Message(body=m['Body']).decode()
json_messages.append(m)
# Now convert them to payloads
kombu_payloads = self.channel._messages_to_python(
kombu_messages, self.queue_name,
)
json_payloads = self.channel._messages_to_python(
json_messages, self.queue_name,
)
# We got the same number of payloads back, right?
assert len(kombu_payloads) == kombu_message_count
assert len(json_payloads) == json_message_count
# Make sure they're payload-style objects
for p in kombu_payloads:
assert 'properties' in p
for p in json_payloads:
assert 'properties' in p
def test_put_and_get(self):
message = 'my test message'
self.producer.publish(message)
results = self.queue(self.channel).get().payload
assert message == results
def test_redelivered(self):
self.channel.sqs().change_message_visibility = \
Mock(name='change_message_visibility')
message = {
'redelivered': True,
'properties': {'delivery_tag': 'test_message_id'}
}
self.channel._put(self.producer.routing_key, message)
self.sqs_conn_mock.change_message_visibility.assert_called_once()
def test_put_and_get_bulk(self):
# With QoS.prefetch_count = 0
message = 'my test message'
self.producer.publish(message)
self.channel.connection._deliver = Mock(name='_deliver')
self.channel._get_bulk(self.queue_name)
self.channel.connection._deliver.assert_called_once()
def test_puts_and_get_bulk(self):
# Generate 8 messages
message_count = 8
# Set the prefetch_count to 5
self.channel.qos.prefetch_count = 5
# Now, generate all the messages
for i in range(message_count):
message = 'message: %s' % i
self.producer.publish(message)
        # Count how many messages are retrieved the first time. Should
        # be 5 (prefetch_count).
self.channel.connection._deliver = Mock(name='_deliver')
self.channel._get_bulk(self.queue_name)
assert self.channel.connection._deliver.call_count == 5
for i in range(5):
self.channel.qos.append(Mock(name=f'message{i}'), i)
# Now, do the get again, the number of messages returned should be 1.
self.channel.connection._deliver.reset_mock()
self.channel._get_bulk(self.queue_name)
self.channel.connection._deliver.assert_called_once()
def test_drain_events_with_empty_list(self):
def mock_can_consume():
return False
self.channel.qos.can_consume = mock_can_consume
with pytest.raises(Empty):
self.channel.drain_events()
def test_drain_events_with_prefetch_5(self):
# Generate 20 messages
message_count = 20
prefetch_count = 5
current_delivery_tag = [1]
# Set the prefetch_count to 5
self.channel.qos.prefetch_count = prefetch_count
self.channel.connection._deliver = Mock(name='_deliver')
def on_message_delivered(message, queue):
current_delivery_tag[0] += 1
self.channel.qos.append(message, current_delivery_tag[0])
self.channel.connection._deliver.side_effect = on_message_delivered
# Now, generate all the messages
for i in range(message_count):
self.producer.publish('message: %s' % i)
# Now drain all the events
for i in range(1000):
try:
self.channel.drain_events(timeout=0)
except Empty:
break
else:
assert False, 'disabled infinite loop'
self.channel.qos._flush()
assert len(self.channel.qos._delivered) == prefetch_count
assert self.channel.connection._deliver.call_count == prefetch_count
def test_drain_events_with_prefetch_none(self):
# Generate 20 messages
message_count = 20
expected_receive_messages_count = 3
current_delivery_tag = [1]
# Set the prefetch_count to None
self.channel.qos.prefetch_count = None
self.channel.connection._deliver = Mock(name='_deliver')
def on_message_delivered(message, queue):
current_delivery_tag[0] += 1
self.channel.qos.append(message, current_delivery_tag[0])
self.channel.connection._deliver.side_effect = on_message_delivered
# Now, generate all the messages
for i in range(message_count):
self.producer.publish('message: %s' % i)
# Now drain all the events
for i in range(1000):
try:
self.channel.drain_events(timeout=0)
except Empty:
break
else:
assert False, 'disabled infinite loop'
assert self.channel.connection._deliver.call_count == message_count
# How many times was the SQSConnectionMock receive_message method
# called?
assert (expected_receive_messages_count ==
self.sqs_conn_mock._receive_messages_calls)
    def test_basic_ack(self):
"""Test that basic_ack calls the delete_message properly"""
message = {
'sqs_message': {
'ReceiptHandle': '1'
},
'sqs_queue': 'testing_queue'
}
mock_messages = Mock()
mock_messages.delivery_info = message
self.channel.qos.append(mock_messages, 1)
self.channel.sqs().delete_message = Mock()
self.channel.basic_ack(1)
self.sqs_conn_mock.delete_message.assert_called_with(
QueueUrl=message['sqs_queue'],
ReceiptHandle=message['sqs_message']['ReceiptHandle']
)
assert {1} == self.channel.qos._dirty
@patch('kombu.transport.virtual.base.Channel.basic_ack')
@patch('kombu.transport.virtual.base.Channel.basic_reject')
def test_basic_ack_with_mocked_channel_methods(self, basic_reject_mock,
basic_ack_mock):
"""Test that basic_ack calls the delete_message properly"""
message = {
'sqs_message': {
'ReceiptHandle': '1'
},
'sqs_queue': 'testing_queue'
}
mock_messages = Mock()
mock_messages.delivery_info = message
self.channel.qos.append(mock_messages, 1)
self.channel.sqs().delete_message = Mock()
self.channel.basic_ack(1)
self.sqs_conn_mock.delete_message.assert_called_with(
QueueUrl=message['sqs_queue'],
ReceiptHandle=message['sqs_message']['ReceiptHandle']
)
basic_ack_mock.assert_called_with(1)
assert not basic_reject_mock.called
@patch('kombu.transport.virtual.base.Channel.basic_ack')
@patch('kombu.transport.virtual.base.Channel.basic_reject')
def test_basic_ack_without_sqs_message(self, basic_reject_mock,
basic_ack_mock):
"""Test that basic_ack calls the delete_message properly"""
message = {
'sqs_queue': 'testing_queue'
}
mock_messages = Mock()
mock_messages.delivery_info = message
self.channel.qos.append(mock_messages, 1)
self.channel.sqs().delete_message = Mock()
self.channel.basic_ack(1)
assert not self.sqs_conn_mock.delete_message.called
basic_ack_mock.assert_called_with(1)
assert not basic_reject_mock.called
@patch('kombu.transport.virtual.base.Channel.basic_ack')
@patch('kombu.transport.virtual.base.Channel.basic_reject')
def test_basic_ack_invalid_receipt_handle(self, basic_reject_mock,
basic_ack_mock):
"""Test that basic_ack calls the delete_message properly"""
message = {
'sqs_message': {
'ReceiptHandle': '2'
},
'sqs_queue': 'testing_queue'
}
error_response = {
'Error': {
'Code': 'InvalidParameterValue',
'Message': 'Value 2 for parameter ReceiptHandle is invalid.'
' Reason: The receipt handle has expired.'
}
}
operation_name = 'DeleteMessage'
mock_messages = Mock()
mock_messages.delivery_info = message
self.channel.qos.append(mock_messages, 2)
self.channel.sqs().delete_message = Mock()
self.channel.sqs().delete_message.side_effect = ClientError(
error_response=error_response,
operation_name=operation_name
)
self.channel.basic_ack(2)
self.sqs_conn_mock.delete_message.assert_called_with(
QueueUrl=message['sqs_queue'],
ReceiptHandle=message['sqs_message']['ReceiptHandle']
)
basic_reject_mock.assert_called_with(2)
assert not basic_ack_mock.called
def test_predefined_queues_primes_queue_cache(self):
connection = Connection(transport=SQS.Transport, transport_options={
'predefined_queues': example_predefined_queues,
})
channel = connection.channel()
assert 'queue-1' in channel._queue_cache
assert 'queue-2' in channel._queue_cache
def test_predefined_queues_new_queue_raises_if_queue_not_exists(self):
connection = Connection(transport=SQS.Transport, transport_options={
'predefined_queues': example_predefined_queues,
})
channel = connection.channel()
with pytest.raises(SQS.UndefinedQueueException):
channel._new_queue('queue-99')
def test_predefined_queues_get_from_sqs(self):
connection = Connection(transport=SQS.Transport, transport_options={
'predefined_queues': example_predefined_queues,
})
channel = connection.channel()
def message_to_python(message, queue_name, queue):
return message
channel._message_to_python = Mock(side_effect=message_to_python)
queue_name = "queue-1"
exchange = Exchange('test_SQS', type='direct')
p = messaging.Producer(channel, exchange, routing_key=queue_name)
queue = Queue(queue_name, exchange, queue_name)
queue(channel).declare()
# Getting a single message
p.publish('message')
result = channel._get(queue_name)
assert 'Body' in result.keys()
# Getting many messages
for i in range(3):
p.publish(f'message: {i}')
channel.connection._deliver = Mock(name='_deliver')
channel._get_bulk(queue_name, max_if_unlimited=3)
channel.connection._deliver.assert_called()
assert len(channel.sqs(queue_name)._queues[queue_name].messages) == 0
|
import unittest
from mock import patch, mock_open, MagicMock, sentinel
import os
import sys
import json
import httpretty
import acd_cli
from acdcli.cache import db
from .test_helper import gen_file, gen_folder, gen_bunch_of_nodes
cache_path = os.path.join(os.path.dirname(__file__), 'dummy_files')
os.environ['ACD_CLI_CACHE_PATH'] = cache_path
try:
from importlib import reload
except ImportError:
from imp import reload
def run_main() -> int:
try:
acd_cli.main()
except SystemExit as e:
return e.code
class ActionTestCase(unittest.TestCase):
stdout = sys.stdout
def setUp(self):
reload(acd_cli)
sys.argv = [acd_cli._app_name, '-nw']
self.cache = db.NodeCache(cache_path)
def tearDown(self):
db.NodeCache.remove_db_file(cache_path)
# tests
@patch('sys.stdout.write')
def testHelp(self, print_):
sys.argv.append('-h')
self.assertEqual(run_main(), 0)
def testClearCache(self):
sys.argv.append('cc')
self.assertEqual(run_main(), None)
def testClearCacheNonExist(self):
self.cache.remove_db_file()
sys.argv.append('cc')
self.assertEqual(run_main(), None)
# listing
@patch('sys.stdout.write')
def testTree(self, print_):
files, folders = gen_bunch_of_nodes(50)
self.cache.insert_nodes(files + folders)
sys.argv.extend(['tree', '-t'])
self.assertEqual(run_main(), None)
self.assertEqual(len(print_.mock_calls), 100)
@patch('sys.stdout.write')
def testList(self, print_):
db.NodeCache(cache_path)
folder = gen_folder([])
files = [gen_file([folder]) for _ in range(50)]
self.cache.insert_nodes(files + [folder])
sys.argv.extend(['ls', '-t', '/'])
self.assertEqual(run_main(), None)
self.assertEqual(len(print_.mock_calls), 100)
# find actions
# transfer actions
# create
# trashing
# move/rename, resolve
# child ops
# stats
# FUSE
# @httpretty.activate
# def testMount(self):
# httpretty. \
# register_uri(httpretty.GET, acd_cli.acd_client.metadata_url + 'account/quota',
# body=json.dumps({'available:': 100, 'quota': 100}))
#
# sys.argv.extend(['-d', 'mount', '-i', '0',
# os.path.join(os.path.dirname(__file__), 'dummy_files/mountpoint')])
# self.cache.insert_nodes([gen_folder()])
# self.assertEqual(run_main(), None)
def testUnmount(self):
sys.argv.append('umount')
self.assertEqual(run_main(), 0)
# undocumented actions
def testInit(self):
sys.argv.append('init')
self.cache.insert_nodes([gen_folder()])
self.assertEqual(run_main(), None)
# misc
def testCheckCacheEmpty(self):
sys.argv.extend(['ls', '/'])
self.assertEqual(run_main(), acd_cli.INIT_FAILED_RETVAL)
def testCheckCacheNonEmpty(self):
folder = gen_folder()
self.cache.insert_nodes([folder])
sys.argv.extend(['ls', '/'])
self.assertEqual(run_main(), None)
# helper functions
|
from typing import Any
from homeassistant.components.scene import Scene
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import NEW_SCENE
from .gateway import get_gateway_from_config_entry
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up scenes for deCONZ component."""
gateway = get_gateway_from_config_entry(hass, config_entry)
@callback
def async_add_scene(scenes):
"""Add scene from deCONZ."""
entities = [DeconzScene(scene, gateway) for scene in scenes]
if entities:
async_add_entities(entities)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_SCENE), async_add_scene
)
)
async_add_scene(gateway.api.scenes.values())
class DeconzScene(Scene):
"""Representation of a deCONZ scene."""
def __init__(self, scene, gateway):
"""Set up a scene."""
self._scene = scene
self.gateway = gateway
async def async_added_to_hass(self):
"""Subscribe to sensors events."""
self.gateway.deconz_ids[self.entity_id] = self._scene.deconz_id
async def async_will_remove_from_hass(self) -> None:
"""Disconnect scene object when removed."""
del self.gateway.deconz_ids[self.entity_id]
self._scene = None
async def async_activate(self, **kwargs: Any) -> None:
"""Activate the scene."""
await self._scene.async_set_state({})
@property
def name(self):
"""Return the name of the scene."""
return self._scene.full_name
|
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from tests.components.homekit_controller.common import setup_test_component
CURRENT_STATE = ("security-system", "security-system-state.current")
TARGET_STATE = ("security-system", "security-system-state.target")
def create_security_system_service(accessory):
"""Define a security-system characteristics as per page 219 of HAP spec."""
service = accessory.add_service(ServicesTypes.SECURITY_SYSTEM)
cur_state = service.add_char(CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT)
cur_state.value = 0
targ_state = service.add_char(CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET)
targ_state.value = 0
    # According to the spec, a battery-level characteristic is normally
    # part of a separate service. However, as the code was written (which
    # predates this test), the battery level has to be part of the
    # security-system service, as it is here.
targ_state = service.add_char(CharacteristicsTypes.BATTERY_LEVEL)
targ_state.value = 50
async def test_switch_change_alarm_state(hass, utcnow):
"""Test that we can turn a HomeKit alarm on and off again."""
helper = await setup_test_component(hass, create_security_system_service)
await hass.services.async_call(
"alarm_control_panel",
"alarm_arm_home",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
assert helper.characteristics[TARGET_STATE].value == 0
await hass.services.async_call(
"alarm_control_panel",
"alarm_arm_away",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
assert helper.characteristics[TARGET_STATE].value == 1
await hass.services.async_call(
"alarm_control_panel",
"alarm_arm_night",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
assert helper.characteristics[TARGET_STATE].value == 2
await hass.services.async_call(
"alarm_control_panel",
"alarm_disarm",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
assert helper.characteristics[TARGET_STATE].value == 3
async def test_switch_read_alarm_state(hass, utcnow):
"""Test that we can read the state of a HomeKit alarm accessory."""
helper = await setup_test_component(hass, create_security_system_service)
helper.characteristics[CURRENT_STATE].value = 0
state = await helper.poll_and_get_state()
assert state.state == "armed_home"
assert state.attributes["battery_level"] == 50
helper.characteristics[CURRENT_STATE].value = 1
state = await helper.poll_and_get_state()
assert state.state == "armed_away"
helper.characteristics[CURRENT_STATE].value = 2
state = await helper.poll_and_get_state()
assert state.state == "armed_night"
helper.characteristics[CURRENT_STATE].value = 3
state = await helper.poll_and_get_state()
assert state.state == "disarmed"
helper.characteristics[CURRENT_STATE].value = 4
state = await helper.poll_and_get_state()
assert state.state == "triggered"
|
import logging
from libpurecool.const import Dyson360EyeMode, PowerMode
from libpurecool.dyson_360_eye import Dyson360Eye
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumEntity,
)
from homeassistant.helpers.icon import icon_for_battery_level
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
ATTR_CLEAN_ID = "clean_id"
ATTR_FULL_CLEAN_TYPE = "full_clean_type"
ATTR_POSITION = "position"
DYSON_360_EYE_DEVICES = "dyson_360_eye_devices"
SUPPORT_DYSON = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_STOP
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson 360 Eye robot vacuum platform."""
_LOGGER.debug("Creating new Dyson 360 Eye robot vacuum")
if DYSON_360_EYE_DEVICES not in hass.data:
hass.data[DYSON_360_EYE_DEVICES] = []
# Get Dyson Devices from parent component
for device in [d for d in hass.data[DYSON_DEVICES] if isinstance(d, Dyson360Eye)]:
dyson_entity = Dyson360EyeDevice(device)
hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity)
add_entities(hass.data[DYSON_360_EYE_DEVICES])
return True
class Dyson360EyeDevice(VacuumEntity):
"""Dyson 360 Eye robot vacuum device."""
def __init__(self, device):
"""Dyson 360 Eye robot vacuum device."""
_LOGGER.debug("Creating device %s", device.name)
self._device = device
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._device.add_message_listener(self.on_message)
def on_message(self, message):
"""Handle a new messages that was received from the vacuum."""
_LOGGER.debug("Message received for %s device: %s", self.name, message)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def status(self):
"""Return the status of the vacuum cleaner."""
dyson_labels = {
Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged",
Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused",
Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home",
Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning",
Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked",
Dyson360EyeMode.FAULT_REPLACE_ON_DOCK: "Error - Replace device on dock",
Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished",
            Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Needs charging",
}
return dyson_labels.get(self._device.state.state, self._device.state.state)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self._device.state.battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
speed_labels = {PowerMode.MAX: "Max", PowerMode.QUIET: "Quiet"}
return speed_labels[self._device.state.power_mode]
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return ["Quiet", "Max"]
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
return {ATTR_POSITION: str(self._device.state.position)}
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._device.state.state in [
Dyson360EyeMode.FULL_CLEAN_INITIATED,
Dyson360EyeMode.FULL_CLEAN_ABORTED,
Dyson360EyeMode.FULL_CLEAN_RUNNING,
]
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DYSON
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
charging = self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGING]
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging
)
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
_LOGGER.debug("Turn on device %s", self.name)
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
self._device.resume()
else:
self._device.start()
def turn_off(self, **kwargs):
"""Turn the vacuum off and return to home."""
_LOGGER.debug("Turn off device %s", self.name)
self._device.pause()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
_LOGGER.debug("Stop device %s", self.name)
self._device.pause()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
_LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name)
power_modes = {"Quiet": PowerMode.QUIET, "Max": PowerMode.MAX}
self._device.set_power_mode(power_modes[fan_speed])
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
_LOGGER.debug("Resume device %s", self.name)
self._device.resume()
elif self._device.state.state in [
Dyson360EyeMode.INACTIVE_CHARGED,
Dyson360EyeMode.INACTIVE_CHARGING,
]:
_LOGGER.debug("Start device %s", self.name)
self._device.start()
else:
_LOGGER.debug("Pause device %s", self.name)
self._device.pause()
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
_LOGGER.debug("Return to base device %s", self.name)
self._device.abort()
|
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.const import CONF_PORT
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
CONF_SERVER = "server"
CONF_BROADCAST = "broadcast"
INTERFACE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_SERVER): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_BROADCAST): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: {LIGHT_DOMAIN: vol.Schema(vol.All(cv.ensure_list, [INTERFACE_SCHEMA]))}},
extra=vol.ALLOW_EXTRA,
)
DATA_LIFX_MANAGER = "lifx_manager"
async def async_setup(hass, config):
"""Set up the LIFX component."""
conf = config.get(DOMAIN)
hass.data[DOMAIN] = conf or {}
if conf is not None:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up LIFX from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, LIGHT_DOMAIN)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.data.pop(DATA_LIFX_MANAGER).cleanup()
await hass.config_entries.async_forward_entry_unload(entry, LIGHT_DOMAIN)
return True
|
import pytest
from mne.viz.backends._utils import _get_colormap_from_array, _check_color
def test_get_colormap_from_array():
"""Test setting a colormap."""
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
cmap = _get_colormap_from_array()
assert isinstance(cmap, LinearSegmentedColormap)
cmap = _get_colormap_from_array(colormap='viridis')
assert isinstance(cmap, ListedColormap)
cmap = _get_colormap_from_array(colormap=[1, 1, 1],
normalized_colormap=True)
assert isinstance(cmap, ListedColormap)
cmap = _get_colormap_from_array(colormap=[255, 255, 255],
normalized_colormap=False)
assert isinstance(cmap, ListedColormap)
def test_check_color():
"""Test color format."""
assert _check_color('red') == (1., 0., 0.)
assert _check_color((0., 1., 0., 1.)) == (0., 1., 0., 1.)
assert _check_color((0, 0, 255, 255)) == (0, 0, 255, 255)
with pytest.raises(ValueError, match='RGB or RGBA'):
_check_color([255, 0])
with pytest.raises(ValueError, match='out of range'):
_check_color([256, 0, 0])
with pytest.raises(ValueError, match='out of range'):
_check_color([-1.0, 0.0, 0.0])
with pytest.raises(TypeError, match='Expected data type'):
_check_color(['foo', 'bar', 'foo'])
with pytest.raises(TypeError, match='Expected type'):
_check_color(None)
|
from collections import defaultdict
from functools import partial
import numpy as np
from ..bem import _check_origin
from ..io.pick import pick_info, pick_types
from ..io import _loc_to_coil_trans, _coil_trans_to_loc, BaseRaw
from ..transforms import _find_vector_rotation
from ..utils import (logger, verbose, check_fname, _check_fname, _pl,
_ensure_int, _check_option, _validate_type, _reg_pinv)
from .maxwell import (_col_norm_pinv, _trans_sss_basis, _prep_mf_coils,
_get_grad_point_coilsets, _read_cross_talk,
_prep_fine_cal)
@verbose
def compute_fine_calibration(raw, n_imbalance=3, t_window=10., ext_order=2,
origin=(0., 0., 0.), cross_talk=None,
calibration=None, verbose=None):
"""Compute fine calibration from empty-room data.
Parameters
----------
raw : instance of Raw
The raw data to use. Should be from an empty-room recording,
and all channels should be good.
n_imbalance : int
Can be 1 or 3 (default), indicating the number of gradiometer
imbalance components. Only used if gradiometers are present.
t_window : float
Time window to use for surface normal rotation in seconds.
Default is 10.
%(maxwell_ext)s
Default is 2, which is lower than the default (3) for
:func:`mne.preprocessing.maxwell_filter` because it tends to yield
more stable parameter estimates.
%(maxwell_origin)s
%(maxwell_cross)s
calibration : dict | None
Dictionary with existing calibration. If provided, the magnetometer
imbalances and adjusted normals will be used and only the gradiometer
imbalances will be estimated (see step 2 in Notes below).
%(verbose)s
Returns
-------
calibration : dict
Fine calibration data.
count : int
The number of good segments used to compute the magnetometer
parameters.
See Also
--------
mne.preprocessing.maxwell_filter
Notes
-----
This algorithm proceeds in two steps, both optimizing the fit between the
data and a reconstruction of the data based only on an external multipole
expansion:
1. Estimate magnetometer normal directions and scale factors. All
coils (mag and matching grad) are rotated by the adjusted normal
direction.
2. Estimate gradiometer imbalance factors. These add point magnetometers
in just the gradiometer difference direction or in all three directions
(depending on ``n_imbalance``).
Magnetometer normal and coefficient estimation (1) is typically the most
time consuming step. Gradiometer imbalance parameters (2) can be
iteratively reestimated (for example, first using ``n_imbalance=1`` then
subsequently ``n_imbalance=3``) by passing the previous ``calibration``
output to the ``calibration`` input in the second call.
    MaxFilter processes at most 120 seconds of data, so consider cropping
    your raw instance prior to processing. It also checks that a minimal
    number of usable segments (``count``, default 5) was included in the
    estimate.
.. versionadded:: 0.21
"""
n_imbalance = _ensure_int(n_imbalance, 'n_imbalance')
_check_option('n_imbalance', n_imbalance, (1, 3))
_validate_type(raw, BaseRaw, 'raw')
ext_order = _ensure_int(ext_order, 'ext_order')
origin = _check_origin(origin, raw.info, 'meg', disp=True)
_check_option("raw.info['bads']", raw.info['bads'], ([],))
picks = pick_types(raw.info, meg=True, ref_meg=False)
if raw.info['dev_head_t'] is not None:
raise ValueError('info["dev_head_t"] is not None, suggesting that the '
'data are not from an empty-room recording')
info = pick_info(raw.info, picks) # make a copy and pick MEG channels
mag_picks = pick_types(info, meg='mag', exclude=())
grad_picks = pick_types(info, meg='grad', exclude=())
# Get cross-talk
ctc, _ = _read_cross_talk(cross_talk, info['ch_names'])
# Check fine cal
_validate_type(calibration, (dict, None), 'calibration')
#
# 1. Rotate surface normals using magnetometer information (if present)
#
cals = np.ones(len(info['ch_names']))
time_idxs = raw.time_as_index(
np.arange(0., raw.times[-1], t_window))
if len(time_idxs) <= 1:
time_idxs = np.array([0, len(raw.times)], int)
else:
time_idxs[-1] = len(raw.times)
count = 0
locs = np.array([ch['loc'] for ch in info['chs']])
zs = locs[mag_picks, -3:].copy()
if calibration is not None:
_, calibration, _ = _prep_fine_cal(info, calibration)
for pi, pick in enumerate(mag_picks):
idx = calibration['ch_names'].index(info['ch_names'][pick])
cals[pick] = calibration['imb_cals'][idx]
zs[pi] = calibration['locs'][idx][-3:]
elif len(mag_picks) > 0:
cal_list = list()
z_list = list()
logger.info('Adjusting normals for %s magnetometers '
'(averaging over %s time intervals)'
% (len(mag_picks), len(time_idxs) - 1))
for start, stop in zip(time_idxs[:-1], time_idxs[1:]):
logger.info(' Processing interval %0.3f - %0.3f sec'
% (start / info['sfreq'], stop / info['sfreq']))
data = raw[picks, start:stop][0]
if ctc is not None:
data = ctc.dot(data)
z, cal, good = _adjust_mag_normals(info, data, origin, ext_order)
if good:
z_list.append(z)
cal_list.append(cal)
count = len(cal_list)
if count == 0:
raise RuntimeError('No usable segments found')
cals[:] = np.mean(cal_list, axis=0)
zs[:] = np.mean(z_list, axis=0)
if len(mag_picks) > 0:
for ii, new_z in enumerate(zs):
z_loc = locs[mag_picks[ii]]
# Find sensors with same NZ and R0 (should be three for VV)
idxs = _matched_loc_idx(z_loc, locs)
# Rotate the direction vectors to the plane defined by new normal
_rotate_locs(locs, idxs, new_z)
for ci, loc in enumerate(locs):
info['chs'][ci]['loc'][:] = loc
del calibration, zs
#
# 2. Estimate imbalance parameters (always done)
#
if len(grad_picks) > 0:
extra = 'X direction' if n_imbalance == 1 else ('XYZ directions')
        logger.info('Computing imbalance for %s gradiometers (%s)'
% (len(grad_picks), extra))
imb_list = list()
for start, stop in zip(time_idxs[:-1], time_idxs[1:]):
logger.info(' Processing interval %0.3f - %0.3f sec'
% (start / info['sfreq'], stop / info['sfreq']))
data = raw[picks, start:stop][0]
if ctc is not None:
data = ctc.dot(data)
out = _estimate_imbalance(info, data, cals,
n_imbalance, origin, ext_order)
imb_list.append(out)
imb = np.mean(imb_list, axis=0)
else:
imb = np.zeros((len(info['ch_names']), n_imbalance))
#
# Put in output structure
#
assert len(np.intersect1d(mag_picks, grad_picks)) == 0
imb_cals = [cals[ii:ii + 1] if ii in mag_picks else imb[ii]
for ii in range(len(info['ch_names']))]
calibration = dict(ch_names=info['ch_names'], locs=locs, imb_cals=imb_cals)
return calibration, count
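# A minimal workflow sketch (not part of this module); the file name is
# hypothetical and reading the data follows the public MNE API
# (mne.io.read_raw_fif). It assumes an empty-room recording with no bad
# channels, as required by compute_fine_calibration above.
def _example_fine_calibration_workflow():
    import mne
    raw = mne.io.read_raw_fif('empty_room_raw.fif').crop(0, 120).load_data()
    calibration, count = compute_fine_calibration(raw, n_imbalance=1)
    # Re-estimate the gradiometer imbalances using the first-pass output,
    # as suggested in the Notes section of the docstring above.
    calibration, count = compute_fine_calibration(
        raw, n_imbalance=3, calibration=calibration)
    write_fine_calibration('fine_cal.dat', calibration)
    # The resulting file can then be passed to task recordings, e.g.
    # mne.preprocessing.maxwell_filter(task_raw, calibration='fine_cal.dat').
    return calibration, count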
def _matched_loc_idx(mag_loc, all_loc):
return np.where([np.allclose(mag_loc[-3:], loc[-3:]) and
np.allclose(mag_loc[:3], loc[:3]) for loc in all_loc])[0]
def _rotate_locs(locs, idxs, new_z):
new_z = new_z / np.linalg.norm(new_z)
old_z = locs[idxs[0]][-3:]
old_z = old_z / np.linalg.norm(old_z)
rot = _find_vector_rotation(old_z, new_z)
for ci in idxs:
this_trans = _loc_to_coil_trans(locs[ci])
this_trans[:3, :3] = np.dot(rot, this_trans[:3, :3])
locs[ci][:] = _coil_trans_to_loc(this_trans)
np.testing.assert_allclose(locs[ci][-3:], new_z, atol=1e-4)
def _vector_angle(x, y):
"""Get the angle between two vectors in degrees."""
return np.abs(np.arccos(
np.clip((x * y).sum(axis=-1) /
(np.linalg.norm(x, axis=-1) *
np.linalg.norm(y, axis=-1)), -1, 1.)))
def _adjust_mag_normals(info, data, origin, ext_order):
"""Adjust coil normals using magnetometers and empty-room data."""
from scipy.optimize import fmin_cobyla
# in principle we could allow using just mag or mag+grad, but MF uses
# just mag so let's follow suit
mag_scale = 100.
picks_use = pick_types(info, meg='mag', exclude='bads')
picks_meg = pick_types(info, meg=True, exclude=())
picks_mag_orig = pick_types(info, meg='mag', exclude='bads')
info = pick_info(info, picks_use) # copy
data = data[picks_use]
cals = np.ones((len(data), 1))
angles = np.zeros(len(cals))
picks_mag = pick_types(info, meg='mag')
data[picks_mag] *= mag_scale
# Transform variables so we're only dealing with good mags
exp = dict(int_order=0, ext_order=ext_order, origin=origin)
all_coils = _prep_mf_coils(info, ignore_ref=True)
S_tot = _trans_sss_basis(exp, all_coils, coil_scale=mag_scale)
first_err = _data_err(data, S_tot, cals)
count = 0
# two passes: first do the worst, then do all in order
zs = np.array([ch['loc'][-3:] for ch in info['chs']])
zs /= np.linalg.norm(zs, axis=-1, keepdims=True)
orig_zs = zs.copy()
match_idx = dict()
locs = np.array([ch['loc'] for ch in info['chs']])
for pick in picks_mag:
match_idx[pick] = _matched_loc_idx(locs[pick], locs)
counts = defaultdict(lambda: 0)
for ki, kind in enumerate(('worst first', 'in order')):
logger.info(f' Magnetometer normal adjustment ({kind}) ...')
S_tot = _trans_sss_basis(exp, all_coils, coil_scale=mag_scale)
for pick in picks_mag:
err = _data_err(data, S_tot, cals, axis=1)
            # First pass: do worst; second pass: do all in order (up to 3x/sensor)
if ki == 0:
order = list(np.argsort(err[picks_mag]))
cal_idx = 0
while len(order) > 0:
cal_idx = picks_mag[order.pop(-1)]
if counts[cal_idx] < 3:
break
if err[cal_idx] < 2.5:
break # move on to second loop
else:
cal_idx = pick
counts[cal_idx] += 1
assert cal_idx in picks_mag
count += 1
old_z = zs[cal_idx].copy()
objective = partial(
_cal_sss_target, old_z=old_z, all_coils=all_coils,
cal_idx=cal_idx, data=data, cals=cals, match_idx=match_idx,
S_tot=S_tot, origin=origin, ext_order=ext_order)
# Figure out the additive term for z-component
zs[cal_idx] = fmin_cobyla(
objective, old_z, cons=(), rhobeg=1e-3, rhoend=1e-4,
disp=False)
# Do in-place adjustment to all_coils
cals[cal_idx] = 1. / np.linalg.norm(zs[cal_idx])
zs[cal_idx] *= cals[cal_idx]
for idx in match_idx[cal_idx]:
_rotate_coil(zs[cal_idx], old_z, all_coils, idx, inplace=True)
# Recalculate S_tot, taking into account rotations
S_tot = _trans_sss_basis(exp, all_coils)
            # Report results
old_err = err[cal_idx]
new_err = _data_err(data, S_tot, cals, idx=cal_idx)
angles[cal_idx] = np.abs(np.rad2deg(_vector_angle(
zs[cal_idx], orig_zs[cal_idx])))
ch_name = info['ch_names'][cal_idx]
logger.debug(
f' Optimization step {count:3d} | '
f'{ch_name} ({counts[cal_idx]}) | '
f'res {old_err:5.2f}→{new_err:5.2f}% | '
f'×{cals[cal_idx, 0]:0.3f} | {angles[cal_idx]:0.2f}°')
last_err = _data_err(data, S_tot, cals)
# Chunk is usable if all angles and errors are both small
reason = list()
max_angle = np.max(angles)
if max_angle >= 5.:
reason.append(f'max angle {max_angle:0.2f} >= 5°')
each_err = _data_err(data, S_tot, cals, axis=-1)[picks_mag]
n_bad = (each_err > 5.).sum()
if n_bad:
reason.append(f'{n_bad} residual{_pl(n_bad)} > 5%')
reason = ', '.join(reason)
if reason:
reason = f' ({reason})'
good = not bool(reason)
assert np.allclose(np.linalg.norm(zs, axis=1), 1.)
logger.info(f' Fit mismatch {first_err:0.2f}→{last_err:0.2f}%')
logger.info(f' Data segment {"" if good else "un"}usable{reason}')
# Reformat zs and cals to be the n_mags (including bads)
assert zs.shape == (len(data), 3)
assert cals.shape == (len(data), 1)
imb_cals = np.ones(len(picks_meg))
imb_cals[picks_mag_orig] = cals[:, 0]
return zs, imb_cals, good
def _data_err(data, S_tot, cals, idx=None, axis=None):
if idx is None:
idx = slice(None)
S_tot = S_tot / cals
data_model = np.dot(
np.dot(S_tot[idx], _col_norm_pinv(S_tot.copy())[0]), data)
err = 100 * (np.linalg.norm(data_model - data[idx], axis=axis) /
np.linalg.norm(data[idx], axis=axis))
return err
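# _data_err above reports a relative reconstruction error in percent, i.e.
# 100 * ||S pinv(S) x - x|| / ||x|| (with column-norm scaling handled by
# _col_norm_pinv). A tiny self-contained sketch of the same kind of quantity
# with plain NumPy, illustrative only and unused by this module:
def _example_relative_error_percent():
    rng = np.random.RandomState(0)
    S = rng.randn(10, 3)
    x = rng.randn(10, 5)
    x_model = S @ np.linalg.pinv(S) @ x
    return 100 * np.linalg.norm(x_model - x) / np.linalg.norm(x)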
def _rotate_coil(new_z, old_z, all_coils, idx, inplace=False):
"""Adjust coils."""
# Turn NX and NY to the plane determined by NZ
old_z = old_z / np.linalg.norm(old_z)
new_z = new_z / np.linalg.norm(new_z)
rot = _find_vector_rotation(old_z, new_z) # additional coil rotation
this_sl = all_coils[5][idx]
this_rmag = np.dot(rot, all_coils[0][this_sl].T).T
this_cosmag = np.dot(rot, all_coils[1][this_sl].T).T
if inplace:
all_coils[0][this_sl] = this_rmag
all_coils[1][this_sl] = this_cosmag
subset = (this_rmag, this_cosmag, np.zeros(this_rmag.shape[0], int),
1, all_coils[4][[idx]], {0: this_sl})
return subset
def _cal_sss_target(new_z, old_z, all_coils, cal_idx, data, cals,
S_tot, origin, ext_order, match_idx):
"""Evaluate objective function for SSS-based magnetometer calibration."""
cals[cal_idx] = 1. / np.linalg.norm(new_z)
exp = dict(int_order=0, ext_order=ext_order, origin=origin)
S_tot = S_tot.copy()
# Rotate necessary coils properly and adjust correct element in c
for idx in match_idx[cal_idx]:
this_coil = _rotate_coil(new_z, old_z, all_coils, idx)
# Replace correct row of S_tot with new value
S_tot[idx] = _trans_sss_basis(exp, this_coil)
# Get the GOF
return _data_err(data, S_tot, cals, idx=cal_idx)
def _estimate_imbalance(info, data, cals, n_imbalance, origin, ext_order):
"""Estimate gradiometer imbalance parameters."""
mag_scale = 100.
n_iterations = 3
mag_picks = pick_types(info, meg='mag', exclude=())
grad_picks = pick_types(info, meg='grad', exclude=())
data = data.copy()
data[mag_picks, :] *= mag_scale
del mag_picks
grad_imb = np.zeros((len(grad_picks), n_imbalance))
exp = dict(origin=origin, int_order=0, ext_order=ext_order)
all_coils = _prep_mf_coils(info, ignore_ref=True)
grad_point_coils = _get_grad_point_coilsets(
info, n_imbalance, ignore_ref=True)
S_orig = _trans_sss_basis(exp, all_coils, coil_scale=mag_scale)
S_orig /= cals[:, np.newaxis]
# Compute point gradiometers for each grad channel
this_cs = np.array([mag_scale], float)
S_pt = np.array([_trans_sss_basis(exp, coils, None, this_cs)
for coils in grad_point_coils])
for k in range(n_iterations):
S_tot = S_orig.copy()
# In theory we could zero out the homogeneous components with:
# S_tot[grad_picks, :3] = 0
# But in practice it doesn't seem to matter
S_recon = S_tot[grad_picks]
# Add influence of point magnetometers
S_tot[grad_picks, :] += np.einsum('ij,ijk->jk', grad_imb.T, S_pt)
# Compute multipolar moments
mm = np.dot(_col_norm_pinv(S_tot.copy())[0], data)
# Use good channels to recalculate
prev_imb = grad_imb.copy()
data_recon = np.dot(S_recon, mm)
assert S_pt.shape == (n_imbalance, len(grad_picks), S_tot.shape[1])
khi_pts = (S_pt @ mm).transpose(1, 2, 0)
assert khi_pts.shape == (len(grad_picks), data.shape[1], n_imbalance)
residual = data[grad_picks] - data_recon
assert residual.shape == (len(grad_picks), data.shape[1])
d = (residual[:, np.newaxis, :] @ khi_pts)[:, 0]
assert d.shape == (len(grad_picks), n_imbalance)
dinv, _, _ = _reg_pinv(khi_pts.swapaxes(-1, -2) @ khi_pts, rcond=1e-6)
assert dinv.shape == (len(grad_picks), n_imbalance, n_imbalance)
grad_imb[:] = (d[:, np.newaxis] @ dinv)[:, 0]
# This code is equivalent but hits a np.linalg.pinv bug on old NumPy:
# grad_imb[:] = np.sum( # dot product across the time dim
# np.linalg.pinv(khi_pts) * residual[:, np.newaxis], axis=-1)
deltas = (np.linalg.norm(grad_imb - prev_imb) /
max(np.linalg.norm(grad_imb), np.linalg.norm(prev_imb)))
logger.debug(f' Iteration {k + 1}/{n_iterations}: '
f'max ∆ = {100 * deltas.max():7.3f}%')
imb = np.zeros((len(data), n_imbalance))
imb[grad_picks] = grad_imb
return imb
def read_fine_calibration(fname):
"""Read fine calibration information from a .dat file.
The fine calibration typically includes improved sensor locations,
calibration coefficients, and gradiometer imbalance information.
Parameters
----------
fname : str
The filename.
Returns
-------
calibration : dict
Fine calibration information. Key-value pairs are:
- ``ch_names``
List of str of the channel names.
- ``locs``
Coil location and orientation parameters.
- ``imb_cals``
For magnetometers, the calibration coefficients.
For gradiometers, one or three imbalance parameters.
"""
# Read new sensor locations
fname = _check_fname(fname, overwrite='read', must_exist=True)
check_fname(fname, 'cal', ('.dat',))
ch_names, locs, imb_cals = list(), list(), list()
with open(fname, 'r') as fid:
for line in fid:
if line[0] in '#\n':
continue
vals = line.strip().split()
if len(vals) not in [14, 16]:
raise RuntimeError('Error parsing fine calibration file, '
'should have 14 or 16 entries per line '
'but found %s on line:\n%s'
% (len(vals), line))
# `vals` contains channel number
ch_name = vals[0]
if len(ch_name) in (3, 4): # heuristic for Neuromag fix
try:
ch_name = int(ch_name)
except ValueError: # something other than e.g. 113 or 2642
pass
else:
ch_name = 'MEG' + '%04d' % ch_name
# (x, y, z), x-norm 3-vec, y-norm 3-vec, z-norm 3-vec
# and 1 or 3 imbalance terms
ch_names.append(ch_name)
locs.append(np.array(vals[1:13], float))
imb_cals.append(np.array(vals[13:], float))
locs = np.array(locs)
return dict(ch_names=ch_names, locs=locs, imb_cals=imb_cals)
def write_fine_calibration(fname, calibration):
"""Write fine calibration information to a .dat file.
Parameters
----------
fname : str
The filename to write out.
calibration : dict
Fine calibration information.
"""
fname = _check_fname(fname, overwrite=True)
check_fname(fname, 'cal', ('.dat',))
keys = ('ch_names', 'locs', 'imb_cals')
with open(fname, 'wb') as cal_file:
for ch_name, loc, imb_cal in zip(*(calibration[key] for key in keys)):
cal_line = np.concatenate([loc, imb_cal]).round(6)
cal_line = ' '.join(f'{c:0.6f}' for c in cal_line)
cal_file.write(f'{ch_name} {cal_line}\n'.encode('ASCII'))
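# A minimal round-trip sketch of the two helpers above (illustrative only;
# it assumes ``fname`` points at an existing fine calibration .dat file --
# both filenames used here are hypothetical).
def _fine_calibration_roundtrip_example(fname='sss_cal.dat'):
    """Read a fine calibration file and write an identical copy."""
    calibration = read_fine_calibration(fname)
    # ch_names, locs and imb_cals are aligned per channel.
    assert len(calibration['ch_names']) == len(calibration['imb_cals'])
    assert calibration['locs'].shape == (len(calibration['ch_names']), 12)
    write_fine_calibration('sss_cal_copy.dat', calibration)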
|
import unittest
import numpy as np
import chainer
from chainer.backends import cuda
from chainer.functions import relu
from chainer import testing
from chainermn import create_communicator
from chainercv.links import Conv2DBNActiv
from chainercv.utils.testing import attr
def _add_one(x):
return x + 1
@testing.parameterize(*testing.product({
'dilate': [1, 2],
'args_style': ['explicit', 'None', 'omit'],
'activ': ['relu', 'add_one', None],
}))
class TestConv2DBNActiv(unittest.TestCase):
in_channels = 1
out_channels = 1
ksize = 3
stride = 1
pad = 1
def setUp(self):
if self.activ == 'relu':
activ = relu
elif self.activ == 'add_one':
activ = _add_one
elif self.activ is None:
activ = None
self.x = np.random.uniform(
-1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
self.gy = np.random.uniform(
-1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)
# Convolution is the identity function.
initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
dtype=np.float32).reshape((1, 1, 3, 3))
bn_kwargs = {'decay': 0.8}
initial_bias = 0
if self.args_style == 'explicit':
self.l = Conv2DBNActiv(
self.in_channels, self.out_channels, self.ksize,
self.stride, self.pad, self.dilate,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'None':
self.l = Conv2DBNActiv(
None, self.out_channels, self.ksize, self.stride, self.pad,
self.dilate, initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'omit':
self.l = Conv2DBNActiv(
self.out_channels, self.ksize, stride=self.stride,
pad=self.pad, dilate=self.dilate, initialW=initialW,
initial_bias=initial_bias, activ=activ, bn_kwargs=bn_kwargs)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
        # Make the batch normalization the identity function.
self.l.bn.avg_var[:] = 1
self.l.bn.avg_mean[:] = 0
with chainer.using_config('train', False):
y = self.l(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.array, self.l.xp.ndarray)
if self.dilate == 1:
_x_data = x_data
elif self.dilate == 2:
_x_data = x_data[:, :, 1:-1, 1:-1]
if self.activ == 'relu':
np.testing.assert_almost_equal(
cuda.to_cpu(y.array), np.maximum(cuda.to_cpu(_x_data), 0),
decimal=4
)
elif self.activ == 'add_one':
np.testing.assert_almost_equal(
cuda.to_cpu(y.array), cuda.to_cpu(_x_data) + 1,
decimal=4
)
elif self.activ is None:
np.testing.assert_almost_equal(
cuda.to_cpu(y.array), cuda.to_cpu(_x_data),
decimal=4
)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
if self.dilate == 1:
y.grad = y_grad
elif self.dilate == 2:
y.grad = y_grad[:, :, 1:-1, 1:-1]
y.backward()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.mpi
class TestConv2DMultiNodeBNActiv(unittest.TestCase):
in_channels = 1
out_channels = 1
ksize = 3
stride = 1
pad = 1
dilate = 1
def setUp(self):
self.x = np.random.uniform(
-1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
self.gy = np.random.uniform(
-1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)
# Convolution is the identity function.
initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
dtype=np.float32).reshape((1, 1, 3, 3))
bn_kwargs = {'decay': 0.8, 'comm': create_communicator('naive')}
initial_bias = 0
activ = relu
self.l = Conv2DBNActiv(
self.in_channels, self.out_channels, self.ksize, self.stride,
self.pad, self.dilate, initialW=initialW,
initial_bias=initial_bias, activ=activ, bn_kwargs=bn_kwargs)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
        # Make the batch normalization the identity function.
self.l.bn.avg_var[:] = 1
self.l.bn.avg_mean[:] = 0
with chainer.using_config('train', False):
y = self.l(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.array, self.l.xp.ndarray)
np.testing.assert_almost_equal(
cuda.to_cpu(y.array), np.maximum(cuda.to_cpu(x_data), 0),
decimal=4
)
def test_multi_node_batch_normalization_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_multi_node_batch_normalization_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = y_grad
y.backward()
def test_multi_node_batch_normalization_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_multi_node_batch_normalization_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
|
import asyncio
import logging
from pykodi import CannotConnectError, InvalidAuthError, Kodi, get_kodi_connection
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_WS_PORT,
DATA_CONNECTION,
DATA_KODI,
DATA_REMOVE_LISTENER,
DATA_VERSION,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["media_player"]
async def async_setup(hass, config):
"""Set up the Kodi integration."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Kodi from a config entry."""
conn = get_kodi_connection(
entry.data[CONF_HOST],
entry.data[CONF_PORT],
entry.data[CONF_WS_PORT],
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
entry.data[CONF_SSL],
session=async_get_clientsession(hass),
)
try:
await conn.connect()
kodi = Kodi(conn)
await kodi.ping()
raw_version = (await kodi.get_application_properties(["version"]))["version"]
except CannotConnectError as error:
raise ConfigEntryNotReady from error
except InvalidAuthError as error:
_LOGGER.error(
"Login to %s failed: [%s]",
entry.data[CONF_HOST],
error,
)
return False
async def _close(event):
await conn.close()
remove_stop_listener = hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
version = f"{raw_version['major']}.{raw_version['minor']}"
hass.data[DOMAIN][entry.entry_id] = {
DATA_CONNECTION: conn,
DATA_KODI: kodi,
DATA_REMOVE_LISTENER: remove_stop_listener,
DATA_VERSION: version,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
data = hass.data[DOMAIN].pop(entry.entry_id)
await data[DATA_CONNECTION].close()
data[DATA_REMOVE_LISTENER]()
return unload_ok
|
import logging
from typing import Any, Dict, List, Optional
from toonapi import Agreement, Toon, ToonError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.config_entry_oauth2_flow import AbstractOAuth2FlowHandler
from .const import CONF_AGREEMENT, CONF_AGREEMENT_ID, CONF_MIGRATE, DOMAIN
class ToonFlowHandler(AbstractOAuth2FlowHandler, domain=DOMAIN):
"""Handle a Toon config flow."""
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
DOMAIN = DOMAIN
VERSION = 2
agreements: Optional[List[Agreement]] = None
data: Optional[Dict[str, Any]] = None
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
async def async_oauth_create_entry(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Test connection and load up agreements."""
self.data = data
toon = Toon(
token=self.data["token"]["access_token"],
session=async_get_clientsession(self.hass),
)
try:
self.agreements = await toon.agreements()
except ToonError:
return self.async_abort(reason="connection_error")
if not self.agreements:
return self.async_abort(reason="no_agreements")
return await self.async_step_agreement()
async def async_step_import(
self, config: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Start a configuration flow based on imported data.
This step is merely here to trigger "discovery" when the `toon`
integration is listed in the user configuration, or when migrating from
the version 1 schema.
"""
if config is not None and CONF_MIGRATE in config:
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context.update({CONF_MIGRATE: config[CONF_MIGRATE]})
else:
await self._async_handle_discovery_without_unique_id()
return await self.async_step_user()
async def async_step_agreement(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Select Toon agreement to add."""
if len(self.agreements) == 1:
return await self._create_entry(self.agreements[0])
agreements_list = [
f"{agreement.street} {agreement.house_number}, {agreement.city}"
for agreement in self.agreements
]
if user_input is None:
return self.async_show_form(
step_id="agreement",
data_schema=vol.Schema(
{vol.Required(CONF_AGREEMENT): vol.In(agreements_list)}
),
)
agreement_index = agreements_list.index(user_input[CONF_AGREEMENT])
return await self._create_entry(self.agreements[agreement_index])
async def _create_entry(self, agreement: Agreement) -> Dict[str, Any]:
if ( # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
CONF_MIGRATE in self.context
):
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
await self.hass.config_entries.async_remove(self.context[CONF_MIGRATE])
await self.async_set_unique_id(agreement.agreement_id)
self._abort_if_unique_id_configured()
self.data[CONF_AGREEMENT_ID] = agreement.agreement_id
return self.async_create_entry(
title=f"{agreement.street} {agreement.house_number}, {agreement.city}",
data=self.data,
)
|
from __future__ import print_function
import argparse
import os
import re
import sys
import six
from Crypto.Hash import MD5
def get_hash(fileobj):
h = MD5.new()
chunk_size = 8192
while True:
chunk = fileobj.read(chunk_size)
if len(chunk) == 0:
break
h.update(chunk)
return h.hexdigest()
def check_list(fileobj):
correct = True
for line in fileobj:
if line.strip() == "":
continue
match = re.match(r'(\w+)[ \t]+(.+)', line)
try:
with open(match.group(2), 'rb') as f1:
if match.group(1) == get_hash(f1):
print(match.group(2) + ': Pass')
else:
print(match.group(2) + ': Fail')
correct = False
        except (AttributeError, IOError):
            # Malformed line (no regex match) or unreadable file.
            print('Invalid format.')
correct = False
return correct
def make_file(txt):
f = six.BytesIO()
if isinstance(txt, six.binary_type):
f.write(txt)
else:
f.write(txt.encode("utf-8"))
f.seek(0)
return f
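# A quick self-check of the helpers above (illustrative sketch only; the
# function is defined here for demonstration and never called by the script).
def _hash_self_test():
    """get_hash() over an in-memory buffer built with make_file()."""
    # MD5 of the ASCII string "hello".
    assert get_hash(make_file('hello')) == '5d41402abc4b2a76b9719d911017c592'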
ap = argparse.ArgumentParser()
ap.add_argument(
'-c',
'--check',
action='store_true',
default=False,
help='''Check a file with md5 hashes and file names for a match. format: hash filename'''
)
ap.add_argument('file', action='store', nargs='*', help='String or file to hash.')
args = ap.parse_args(sys.argv[1:])
if args.check:
if args.file:
for arg in args.file:
if os.path.isfile(arg):
s = check_list(open(arg))
if s:
sys.exit(0)
else:
sys.exit(1)
else:
check_list(make_file(sys.stdin.read()))
else:
if args.file:
for arg in args.file:
if os.path.isfile(arg):
# hash file
with open(arg, 'rb') as f:
print(get_hash(f) + ' ' + arg)
elif arg == "-":
# read from stdin
print(get_hash(make_file(sys.stdin.read())))
else:
# hash arg
                # TODO: should we really do this? It does not seem like normal md5sum behavior
print(get_hash(make_file(arg)))
else:
print(get_hash(make_file(sys.stdin.read())))
|
import re
from django.http import HttpResponse
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from lxml.etree import XMLSyntaxError
from translate.misc.multistring import multistring
from translate.storage.aresource import AndroidResourceFile
from translate.storage.csvl10n import csvfile
from translate.storage.jsonl10n import JsonFile
from translate.storage.mo import mofile
from translate.storage.po import pofile
from translate.storage.poxliff import PoXliffFile
from translate.storage.properties import stringsfile
from translate.storage.tbx import tbxfile
from translate.storage.tmx import tmxfile
from translate.storage.xliff import xlifffile
import weblate
from weblate.formats.external import XlsxFormat
from weblate.formats.ttkit import TTKitFormat
from weblate.trans.util import split_plural, xliff_string_to_rich
from weblate.utils.site import get_site_url
# Map to remove control characters except newlines and tabs
_CHARMAP = dict.fromkeys(x for x in range(32) if x not in (9, 10, 13))
DASHES = re.compile("--+")
class BaseExporter:
content_type = "text/plain"
extension = "txt"
name = ""
verbose = ""
set_id = False
def __init__(
self,
project=None,
source_language=None,
language=None,
url=None,
translation=None,
fieldnames=None,
):
if translation is not None:
self.plural = translation.plural
self.project = translation.component.project
self.source_language = translation.component.source_language
self.language = translation.language
self.url = get_site_url(translation.get_absolute_url())
else:
self.project = project
self.language = language
self.source_language = source_language
self.plural = language.plural
self.url = url
self.fieldnames = fieldnames
@staticmethod
def supports(translation):
return True
@cached_property
def storage(self):
storage = self.get_storage()
storage.setsourcelanguage(self.source_language.code)
storage.settargetlanguage(self.language.code)
return storage
def string_filter(self, text):
return text
def handle_plurals(self, plurals):
if len(plurals) == 1:
return self.string_filter(plurals[0])
return multistring([self.string_filter(plural) for plural in plurals])
@classmethod
def get_identifier(cls):
return cls.name
def get_storage(self):
return self.storage_class()
def add(self, unit, word):
unit.target = word
def add_glossary_term(self, word):
"""Add glossary term."""
unit = self.storage.UnitClass(self.string_filter(word.source))
self.add(unit, self.string_filter(word.target))
self.storage.addunit(unit)
def add_units(self, units):
for unit in units:
self.add_unit(unit)
def build_unit(self, unit):
output = self.storage.UnitClass(self.handle_plurals(unit.get_source_plurals()))
self.add(output, self.handle_plurals(unit.get_target_plurals()))
return output
def add_note(self, output, note: str, origin: str):
output.addnote(note, origin=origin)
def add_unit(self, unit):
output = self.build_unit(unit)
# Propagate source language
if hasattr(output, "setsource"):
output.setsource(output.source, sourcelang=self.source_language.code)
# Location needs to be set prior to ID to avoid overwrite
# on some formats (for example xliff)
for location in unit.location.split():
if location:
output.addlocation(location)
# Store context as context and ID
context = self.string_filter(unit.context)
if context:
output.setcontext(context)
if self.set_id:
output.setid(context)
elif self.set_id:
# Use checksum based ID on formats requiring it
output.setid(unit.checksum)
# Store note
note = self.string_filter(unit.note)
if note:
self.add_note(output, note, origin="developer")
# In Weblate explanation
note = self.string_filter(unit.source_unit.explanation)
if note:
self.add_note(output, note, origin="developer")
# Comments
for comment in unit.unresolved_comments:
self.add_note(output, comment.comment, origin="translator")
# Suggestions
for suggestion in unit.suggestions:
self.add_note(
output,
"Suggested in Weblate: {}".format(
", ".join(split_plural(suggestion.target))
),
origin="translator",
)
# Store flags
if unit.all_flags:
self.store_flags(output, unit.all_flags)
# Store fuzzy flag
if unit.fuzzy:
output.markfuzzy(True)
self.storage.addunit(output)
def get_response(self, filetemplate="{project}-{language}.{extension}"):
filename = filetemplate.format(
project=self.project.slug,
language=self.language.code,
extension=self.extension,
)
response = HttpResponse(content_type=f"{self.content_type}; charset=utf-8")
response["Content-Disposition"] = f"attachment; filename={filename}"
# Save to response
response.write(self.serialize())
return response
def serialize(self):
"""Return storage content."""
return TTKitFormat.serialize(self.storage)
def store_flags(self, output, flags):
return
class PoExporter(BaseExporter):
name = "po"
content_type = "text/x-po"
extension = "po"
verbose = _("gettext PO")
storage_class = pofile
def store_flags(self, output, flags):
for flag in flags.items():
output.settypecomment(flags.format_flag(flag))
def get_storage(self):
store = super().get_storage()
plural = self.plural
# Set po file header
store.updateheader(
add=True,
language=self.language.code,
x_generator=f"Weblate {weblate.VERSION}",
project_id_version=f"{self.language.name} ({self.project.name})",
plural_forms=plural.plural_form,
language_team=f"{self.language.name} <{self.url}>",
)
return store
class XMLExporter(BaseExporter):
"""Wrapper for XML based exporters to strip control characters."""
def get_storage(self):
return self.storage_class(
sourcelanguage=self.source_language.code,
targetlanguage=self.language.code,
)
def string_filter(self, text):
return text.translate(_CHARMAP)
def add(self, unit, word):
unit.settarget(word, self.language.code)
class PoXliffExporter(XMLExporter):
name = "xliff"
content_type = "application/x-xliff+xml"
extension = "xlf"
set_id = True
verbose = _("XLIFF with gettext extensions")
storage_class = PoXliffFile
def store_flags(self, output, flags):
if flags.has_value("max-length"):
output.xmlelement.set("maxwidth", str(flags.get_value("max-length")))
output.xmlelement.set("weblate-flags", flags.format())
def handle_plurals(self, plurals):
if len(plurals) == 1:
return self.string_filter(plurals[0])
return multistring([self.string_filter(plural) for plural in plurals])
def build_unit(self, unit):
output = super().build_unit(unit)
try:
converted_source = xliff_string_to_rich(unit.get_source_plurals())
converted_target = xliff_string_to_rich(unit.get_target_plurals())
except (XMLSyntaxError, TypeError):
return output
output.rich_source = converted_source
output.set_rich_target(converted_target, self.language.code)
return output
class XliffExporter(PoXliffExporter):
name = "xliff11"
content_type = "application/x-xliff+xml"
extension = "xlf"
set_id = True
verbose = _("XLIFF 1.1")
storage_class = xlifffile
class TBXExporter(XMLExporter):
name = "tbx"
content_type = "application/x-tbx"
extension = "tbx"
verbose = _("TBX")
storage_class = tbxfile
class TMXExporter(XMLExporter):
name = "tmx"
content_type = "application/x-tmx"
extension = "tmx"
verbose = _("TMX")
storage_class = tmxfile
class MoExporter(PoExporter):
name = "mo"
content_type = "application/x-gettext-catalog"
extension = "mo"
verbose = _("gettext MO")
storage_class = mofile
def __init__(
self,
project=None,
source_language=None,
language=None,
url=None,
translation=None,
fieldnames=None,
):
super().__init__(
project=project,
source_language=source_language,
language=language,
url=url,
translation=translation,
fieldnames=fieldnames,
)
# Detect storage properties
self.monolingual = False
self.use_context = False
if translation:
self.monolingual = translation.component.has_template()
if self.monolingual:
unit = next(translation.store.content_units, None)
self.use_context = unit is not None and not unit.template.source
def store_flags(self, output, flags):
return
def add_unit(self, unit):
        # We do not store untranslated units
if not unit.translated:
return
# Parse properties from unit
if self.monolingual:
if self.use_context:
source = ""
context = unit.context
else:
source = unit.context
context = ""
else:
source = self.handle_plurals(unit.get_source_plurals())
context = unit.context
# Actually create the unit and set attributes
output = self.storage.UnitClass(source)
output.target = self.handle_plurals(unit.get_target_plurals())
if context:
            # setcontext() does not work on mounit, so set msgctxt directly
output.msgctxt = [context]
# Add unit to the storage
self.storage.addunit(output)
@staticmethod
def supports(translation):
return translation.component.file_format == "po"
class CVSBaseExporter(BaseExporter):
storage_class = csvfile
def get_storage(self):
return self.storage_class(fieldnames=self.fieldnames)
class CSVExporter(CVSBaseExporter):
name = "csv"
content_type = "text/csv"
extension = "csv"
verbose = _("CSV")
def string_filter(self, text):
"""Avoid Excel interpreting text as formula.
This is really bad idea, implemented in Excel, as this change leads to
displaying additional ' in all other tools, but this seems to be what most
people have gotten used to. Hopefully these characters are not widely used at
first position of translatable strings, so that harm is reduced.
Reverse for this is in weblate.formats.ttkit.CSVUnit.unescape_csv
"""
if text and text[0] in ("=", "+", "-", "@", "|", "%"):
return "'{}'".format(text.replace("|", "\\|"))
return text
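# Illustrative sketch only (not part of Weblate): the escaping above quotes
# strings whose first character Excel would treat as a formula trigger; the
# helper name below is hypothetical.
def _csv_escape_example():
    """Demonstrate CSVExporter.string_filter on a formula-like string."""
    # string_filter() does not use ``self``, so it can be exercised directly.
    assert CSVExporter.string_filter(None, "=SUM(A1)") == "'=SUM(A1)'"
    assert CSVExporter.string_filter(None, "plain text") == "plain text"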
class XlsxExporter(CVSBaseExporter):
name = "xlsx"
content_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
extension = "xlsx"
verbose = _("Excel Open XML")
def serialize(self):
"""Return storage content."""
return XlsxFormat.serialize(self.storage)
class MonolingualExporter(BaseExporter):
"""Base class for monolingual exports."""
@staticmethod
def supports(translation):
return translation.component.has_template()
def build_unit(self, unit):
output = self.storage.UnitClass(unit.context)
output.setid(unit.context)
self.add(output, self.handle_plurals(unit.get_target_plurals()))
return output
class JSONExporter(MonolingualExporter):
storage_class = JsonFile
name = "json"
content_type = "application/json"
extension = "json"
verbose = _("JSON")
class AndroidResourceExporter(MonolingualExporter):
storage_class = AndroidResourceFile
name = "aresource"
content_type = "application/xml"
extension = "xml"
verbose = _("Android String Resource")
def add(self, unit, word):
# Need to have storage to handle plurals
unit._store = self.storage
super().add(unit, word)
def string_filter(self, text):
return text.translate(_CHARMAP)
def add_note(self, output, note: str, origin: str):
        # Collapse "--" runs to "-" and pad a trailing "-" with a space, as
        # neither "--" nor a trailing "-" is allowed inside an XML comment.
note = DASHES.sub("-", note)
if note.endswith("-"):
note += " "
super().add_note(output, note, origin)
class StringsExporter(MonolingualExporter):
storage_class = stringsfile
name = "strings"
content_type = "text/plain"
extension = "strings"
verbose = _("iOS strings")
|
import logging
from typing import List
from gi.repository import Gio, GObject, Gtk
log = logging.getLogger(__name__)
def map_widgets_into_lists(widget, widgetnames):
"""Put sequentially numbered widgets into lists.
Given an object with widgets self.button0, self.button1, ...,
after a call to object.map_widgets_into_lists(["button"])
object.button == [self.button0, self.button1, ...]
"""
for item in widgetnames:
i, lst = 0, []
        while True:
key = "%s%i" % (item, i)
try:
val = getattr(widget, key)
except AttributeError:
if i == 0:
log.critical(
f"Tried to map missing attribute {key}")
break
lst.append(val)
i += 1
setattr(widget, item, lst)
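# A minimal sketch of the mapping above (illustrative only; the demo class
# and function names are hypothetical and not part of Meld).
def _map_widgets_example():
    """After the call, ``demo.button`` is the list [button0, button1]."""
    class Demo:
        def __init__(self):
            self.button0 = object()
            self.button1 = object()
    demo = Demo()
    map_widgets_into_lists(demo, ["button"])
    assert demo.button == [demo.button0, demo.button1]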
# The functions `extract_accel_from_menu_item` and `extract_accels_from_menu`
# are converted straight from GTK+'s GtkApplication handling. I don't
# understand why these aren't public API, but here we are.
def extract_accel_from_menu_item(
model: Gio.MenuModel, item: int, app: Gtk.Application):
accel, action, target = None, None, None
more, it = True, model.iterate_item_attributes(item)
while more:
more, key, value = it.get_next()
if key == 'action':
action = value.get_string()
elif key == 'accel':
accel = value.get_string()
# TODO: Handle targets
if accel and action:
detailed_action_name = Gio.Action.print_detailed_name(action, target)
app.set_accels_for_action(detailed_action_name, [accel])
def extract_accels_from_menu(model: Gio.MenuModel, app: Gtk.Application):
for i in range(model.get_n_items()):
extract_accel_from_menu_item(model, i, app)
more, it = True, model.iterate_item_links(i)
while more:
more, name, submodel = it.get_next()
if submodel:
extract_accels_from_menu(submodel, app)
def make_multiobject_property_action(
obj_list: List[GObject.Object], prop_name: str) -> Gio.PropertyAction:
"""Construct a property action linked to multiple objects
This is useful for creating actions linked to a GObject property,
where changing the property via the action should affect multiple
GObjects.
As an example, changing the text wrapping mode of a file comparison
pane should change the wrapping mode for *all* panes.
"""
source, *targets = obj_list
action = Gio.PropertyAction.new(prop_name, source, prop_name)
for target in targets:
source.bind_property(prop_name, target, prop_name)
return action
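# A minimal sketch of the helper above (illustrative only; _DemoPane is a
# hypothetical stand-in for e.g. text views sharing a wrap-mode setting).
class _DemoPane(GObject.Object):
    """Hypothetical object exposing a shared boolean property (sketch only)."""
    active = GObject.Property(type=bool, default=False)
def _multiobject_property_example() -> Gio.PropertyAction:
    """Bind ``active`` across three demo panes and return the action."""
    panes = [_DemoPane() for _ in range(3)]
    action = make_multiobject_property_action(panes, "active")
    # Changing the property on the first (source) pane propagates to the rest.
    panes[0].props.active = True
    assert all(pane.props.active for pane in panes)
    return action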
|
import tensornetwork
from examples.sat import sat_tensornetwork
def test_sanity_check():
nodes = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
])
count = tensornetwork.contractors.greedy(nodes).tensor
assert count == 7
def test_dual_clauses():
nodes = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, -2, 3),
])
count = tensornetwork.contractors.greedy(nodes).tensor
assert count == 6
def test_many_clauses():
nodes = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, 2, -3),
(1, -2, 3),
(1, -2, -3),
(-1, 2, 3),
(-1, 2, -3),
(-1, -2, 3),
(-1, -2, -3),
])
count = tensornetwork.contractors.greedy(nodes).tensor
assert count == 0
def test_four_variables():
nodes = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, 2, 4),
])
count = tensornetwork.contractors.greedy(nodes).tensor
assert count == 13
def test_four_variables_four_clauses():
nodes = sat_tensornetwork.sat_count_tn([
(1, 2, 3),
(1, 2, 4),
(-3, -4, 2),
(-1, 3, -2),
])
count = tensornetwork.contractors.greedy(nodes).tensor
assert count == 9
def test_single_variable():
nodes = sat_tensornetwork.sat_count_tn([
(1, 1, 1),
])
count = tensornetwork.contractors.greedy(nodes).tensor
assert count == 1
def test_solutions():
edge_order = sat_tensornetwork.sat_tn([
(1, 2, -3),
])
solutions = tensornetwork.contractors.greedy(
tensornetwork.reachable(edge_order[0].node1), edge_order).tensor
assert solutions[0][0][0] == 1
# Only unaccepted value.
assert solutions[0][0][1] == 0
assert solutions[0][1][0] == 1
assert solutions[0][1][1] == 1
assert solutions[1][0][0] == 1
assert solutions[1][0][1] == 1
assert solutions[1][1][0] == 1
assert solutions[1][1][1] == 1
|
import numpy as np
import os
from six.moves.urllib import request
import unittest
from chainer import testing
from chainercv.evaluations import eval_detection_coco
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCOSingleClass(unittest.TestCase):
def setUp(self):
self.pred_bboxes = np.array([[[0, 0, 10, 10], [0, 0, 20, 20]]])
self.pred_labels = np.array([[0, 0]])
self.pred_scores = np.array([[0.8, 0.9]])
self.gt_bboxes = np.array([[[0, 0, 10, 9]]])
self.gt_labels = np.array([[0, 0]])
def test_crowded(self):
result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
self.pred_scores,
self.gt_bboxes, self.gt_labels,
gt_crowdeds=[[True]])
# When the only ground truth is crowded, nothing is evaluated.
# In that case, all the results are nan.
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=all/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.50/area=all/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.75/area=all/max_dets=100']))
def test_area_not_supplied(self):
result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
self.pred_scores,
self.gt_bboxes, self.gt_labels)
self.assertFalse(
'map/iou=0.50:0.95/area=small/max_dets=100' in result)
self.assertFalse(
'map/iou=0.50:0.95/area=medium/max_dets=100' in result)
self.assertFalse(
'map/iou=0.50:0.95/area=large/max_dets=100' in result)
def test_area_specified(self):
result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
self.pred_scores,
self.gt_bboxes, self.gt_labels,
gt_areas=[[2048]])
self.assertFalse(
np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCOSomeClassNonExistent(unittest.TestCase):
def setUp(self):
self.pred_bboxes = np.array([[[0, 0, 10, 10], [0, 0, 20, 20]]])
self.pred_labels = np.array([[1, 2]])
self.pred_scores = np.array([[0.8, 0.9]])
self.gt_bboxes = np.array([[[0, 0, 10, 9]]])
self.gt_labels = np.array([[1, 2]])
def test(self):
result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
self.pred_scores,
self.gt_bboxes, self.gt_labels)
self.assertEqual(
result['ap/iou=0.50:0.95/area=all/max_dets=100'].shape, (3,))
self.assertTrue(
np.isnan(result['ap/iou=0.50:0.95/area=all/max_dets=100'][0]))
self.assertEqual(
np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100'][1:]),
result['map/iou=0.50:0.95/area=all/max_dets=100'])
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCO(unittest.TestCase):
@classmethod
def setUpClass(cls):
base_url = 'https://chainercv-models.preferred.jp/tests'
cls.dataset = np.load(request.urlretrieve(os.path.join(
base_url, 'eval_detection_coco_dataset_2017_10_16.npz'))[0],
allow_pickle=True)
cls.result = np.load(request.urlretrieve(os.path.join(
base_url, 'eval_detection_coco_result_2017_10_16.npz'))[0],
allow_pickle=True)
def test_eval_detection_coco(self):
pred_bboxes = self.result['bboxes']
pred_labels = self.result['labels']
pred_scores = self.result['scores']
gt_bboxes = self.dataset['bboxes']
gt_labels = self.dataset['labels']
gt_areas = self.dataset['areas']
gt_crowdeds = self.dataset['crowdeds']
result = eval_detection_coco(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_areas, gt_crowdeds)
expected = {
'map/iou=0.50:0.95/area=all/max_dets=100': 0.5069852,
'map/iou=0.50/area=all/max_dets=100': 0.69937725,
'map/iou=0.75/area=all/max_dets=100': 0.57538619,
'map/iou=0.50:0.95/area=small/max_dets=100': 0.58562572,
'map/iou=0.50:0.95/area=medium/max_dets=100': 0.51939969,
'map/iou=0.50:0.95/area=large/max_dets=100': 0.5013979,
'mar/iou=0.50:0.95/area=all/max_dets=1': 0.38919373,
'mar/iou=0.50:0.95/area=all/max_dets=10': 0.59606053,
'mar/iou=0.50:0.95/area=all/max_dets=100': 0.59773394,
'mar/iou=0.50:0.95/area=small/max_dets=100': 0.63981096,
'mar/iou=0.50:0.95/area=medium/max_dets=100': 0.5664206,
'mar/iou=0.50:0.95/area=large/max_dets=100': 0.5642906
}
non_existent_labels = np.setdiff1d(
np.arange(max(result['existent_labels'])),
result['existent_labels'])
for key, item in expected.items():
non_mean_key = key[1:]
self.assertIsInstance(result[non_mean_key], np.ndarray)
self.assertEqual(result[non_mean_key].shape, (80,))
self.assertTrue(
np.all(np.isnan(result[non_mean_key][non_existent_labels])))
np.testing.assert_almost_equal(
result[key], expected[key], decimal=5)
testing.run_module(__name__, __file__)
|
import functools
import time
from homeassistant.components.device_tracker import DOMAIN, SOURCE_TYPE_ROUTER
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core import discovery
from .core.const import (
CHANNEL_POWER_CONFIGURATION,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
from .sensor import Battery
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Zigbee Home Automation device tracker from config entry."""
entities_to_create = hass.data[DATA_ZHA][DOMAIN]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities, async_add_entities, entities_to_create
),
)
hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
@STRICT_MATCH(channel_names=CHANNEL_POWER_CONFIGURATION)
class ZHADeviceScannerEntity(ScannerEntity, ZhaEntity):
"""Represent a tracked device."""
def __init__(self, unique_id, zha_device, channels, **kwargs):
"""Initialize the ZHA device tracker."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._battery_channel = self.cluster_channels.get(CHANNEL_POWER_CONFIGURATION)
self._connected = False
self._keepalive_interval = 60
self._should_poll = True
self._battery_level = None
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
if self._battery_channel:
self.async_accept_signal(
self._battery_channel,
SIGNAL_ATTR_UPDATED,
self.async_battery_percentage_remaining_updated,
)
async def async_update(self):
"""Handle polling."""
if self.zha_device.last_seen is None:
self._connected = False
else:
difference = time.time() - self.zha_device.last_seen
if difference > self._keepalive_interval:
self._connected = False
else:
self._connected = True
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
return self._connected
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_ROUTER
@callback
def async_battery_percentage_remaining_updated(self, attr_id, attr_name, value):
"""Handle tracking."""
        if attr_name != "battery_percentage_remaining":
return
self.debug("battery_percentage_remaining updated: %s", value)
self._connected = True
self._battery_level = Battery.formatter(value)
self.async_write_ha_state()
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return self._battery_level
|
import posixpath
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
PACKAGE_NAME = 'iperf'
IPERF_TAR = 'iperf-2.0.13.tar.gz'
IPERF_URL = 'https://sourceforge.net/projects/iperf2/files/iperf-2.0.13.tar.gz'
IPERF_DIR = '%s/iperf-2.0.13' % linux_packages.INSTALL_DIR
def _Install(vm):
"""Installs the iperf package on the VM."""
vm.Install('build_tools')
vm.Install('wget')
vm.RemoteCommand('wget -O %s/%s %s' %
(linux_packages.INSTALL_DIR, IPERF_TAR, IPERF_URL))
vm.RemoteCommand('cd %s; tar xvf %s; cd %s; '
'./configure; make; sudo make install' %
(linux_packages.INSTALL_DIR, IPERF_TAR, IPERF_DIR))
def YumInstall(vm):
"""Installs the iperf package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the iperf package on the VM."""
_Install(vm)
|
import pytest
import voluptuous as vol
from withings_api.common import UnauthorizedException
import homeassistant.components.webhook as webhook
from homeassistant.components.withings import CONFIG_SCHEMA, DOMAIN, async_setup, const
from homeassistant.components.withings.common import ConfigEntryWithingsApi, DataManager
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_EXTERNAL_URL,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_METRIC,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.setup import async_setup_component
from .common import (
ComponentFactory,
async_get_flow_for_user_id,
get_data_manager_by_user_id,
new_profile_config,
)
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
def config_schema_validate(withings_config) -> dict:
"""Assert a schema config succeeds."""
hass_config = {const.DOMAIN: withings_config}
return CONFIG_SCHEMA(hass_config)
def config_schema_assert_fail(withings_config) -> None:
"""Assert a schema config will fail."""
    with pytest.raises(vol.error.MultipleInvalid):
        config_schema_validate(withings_config)
def test_config_schema_basic_config() -> None:
"""Test schema."""
config_schema_validate(
{
CONF_CLIENT_ID: "my_client_id",
CONF_CLIENT_SECRET: "my_client_secret",
const.CONF_USE_WEBHOOK: True,
}
)
def test_config_schema_client_id() -> None:
"""Test schema."""
config_schema_assert_fail({CONF_CLIENT_SECRET: "my_client_secret"})
config_schema_assert_fail(
{CONF_CLIENT_SECRET: "my_client_secret", CONF_CLIENT_ID: ""}
)
config_schema_validate(
{CONF_CLIENT_SECRET: "my_client_secret", CONF_CLIENT_ID: "my_client_id"}
)
def test_config_schema_client_secret() -> None:
"""Test schema."""
config_schema_assert_fail({CONF_CLIENT_ID: "my_client_id"})
config_schema_assert_fail({CONF_CLIENT_ID: "my_client_id", CONF_CLIENT_SECRET: ""})
config_schema_validate(
{CONF_CLIENT_ID: "my_client_id", CONF_CLIENT_SECRET: "my_client_secret"}
)
def test_config_schema_use_webhook() -> None:
"""Test schema."""
config_schema_validate(
{CONF_CLIENT_ID: "my_client_id", CONF_CLIENT_SECRET: "my_client_secret"}
)
config = config_schema_validate(
{
CONF_CLIENT_ID: "my_client_id",
CONF_CLIENT_SECRET: "my_client_secret",
const.CONF_USE_WEBHOOK: True,
}
)
assert config[const.DOMAIN][const.CONF_USE_WEBHOOK] is True
config = config_schema_validate(
{
CONF_CLIENT_ID: "my_client_id",
CONF_CLIENT_SECRET: "my_client_secret",
const.CONF_USE_WEBHOOK: False,
}
)
assert config[const.DOMAIN][const.CONF_USE_WEBHOOK] is False
config_schema_assert_fail(
{
CONF_CLIENT_ID: "my_client_id",
CONF_CLIENT_SECRET: "my_client_secret",
const.CONF_USE_WEBHOOK: "A",
}
)
async def test_async_setup_no_config(hass: HomeAssistant) -> None:
"""Test method."""
hass.async_create_task = MagicMock()
await async_setup(hass, {})
hass.async_create_task.assert_not_called()
@pytest.mark.parametrize(
["exception"],
[
[UnauthorizedException("401")],
[UnauthorizedException("401")],
[Exception("401, this is the message")],
],
)
async def test_auth_failure(
hass: HomeAssistant, component_factory: ComponentFactory, exception: Exception
) -> None:
"""Test auth failure."""
person0 = new_profile_config(
"person0",
0,
api_response_user_get_device=exception,
api_response_measure_get_meas=exception,
api_response_sleep_get_summary=exception,
)
await component_factory.configure_component(profile_configs=(person0,))
assert not async_get_flow_for_user_id(hass, person0.user_id)
await component_factory.setup_profile(person0.user_id)
data_manager = get_data_manager_by_user_id(hass, person0.user_id)
await data_manager.poll_data_update_coordinator.async_refresh()
flows = async_get_flow_for_user_id(hass, person0.user_id)
assert flows
assert len(flows) == 1
flow = flows[0]
assert flow["handler"] == const.DOMAIN
assert flow["context"]["profile"] == person0.profile
assert flow["context"]["userid"] == person0.user_id
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], user_input={}
)
assert result
assert result["type"] == "external"
assert result["handler"] == const.DOMAIN
assert result["step_id"] == "auth"
await component_factory.unload(person0)
async def test_set_config_unique_id(
hass: HomeAssistant, component_factory: ComponentFactory
) -> None:
"""Test upgrading configs to use a unique id."""
person0 = new_profile_config("person0", 0)
await component_factory.configure_component(profile_configs=(person0,))
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"token": {"userid": "my_user_id"}, "profile": person0.profile},
)
with patch("homeassistant.components.withings.async_get_data_manager") as mock:
data_manager: DataManager = MagicMock(spec=DataManager)
data_manager.poll_data_update_coordinator = MagicMock(
spec=DataUpdateCoordinator
)
data_manager.poll_data_update_coordinator.last_update_success = True
mock.return_value = data_manager
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.unique_id == "my_user_id"
async def test_set_convert_unique_id_to_string(hass: HomeAssistant) -> None:
"""Test upgrading configs to use a unique id."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
"token": {"userid": 1234},
"auth_implementation": "withings",
"profile": "person0",
},
)
config_entry.add_to_hass(hass)
hass_config = {
HA_DOMAIN: {
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_METRIC,
CONF_EXTERNAL_URL: "http://127.0.0.1:8080/",
},
const.DOMAIN: {
CONF_CLIENT_ID: "my_client_id",
CONF_CLIENT_SECRET: "my_client_secret",
const.CONF_USE_WEBHOOK: False,
},
}
with patch(
"homeassistant.components.withings.common.ConfigEntryWithingsApi",
spec=ConfigEntryWithingsApi,
):
await async_process_ha_core_config(hass, hass_config.get(HA_DOMAIN))
assert await async_setup_component(hass, HA_DOMAIN, {})
assert await async_setup_component(hass, webhook.DOMAIN, hass_config)
assert await async_setup_component(hass, const.DOMAIN, hass_config)
await hass.async_block_till_done()
assert config_entry.unique_id == "1234"
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('instance-1')
def test_distro(host):
f = host.file('/etc/debian_version')
assert f.is_file
def test_cpus(host):
cpus = host.ansible("setup")['ansible_facts']['ansible_processor_vcpus']
assert 1 == int(cpus)
def test_memory(host):
total_memory = host.ansible(
"setup")['ansible_facts']['ansible_memtotal_mb']
assert (1024 / 2) <= int(total_memory) <= 1024
def test_has_shared_directory(host):
f = host.file('/vagrant')
assert f.is_directory
def test_internal_interface(host):
assert '192.168.0.1' in host.interface('eth2').addresses
|
from datetime import datetime, timedelta
from typing import Optional
from pytest import fixture
from homeassistant.components.directv.media_player import (
ATTR_MEDIA_CURRENTLY_RECORDING,
ATTR_MEDIA_RATING,
ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME,
)
from homeassistant.components.media_player import DEVICE_CLASS_RECEIVER
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_TITLE,
DOMAIN as MP_DOMAIN,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SERVICE_PLAY_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.components.directv import setup_integration
from tests.test_util.aiohttp import AiohttpClientMocker
ATTR_UNIQUE_ID = "unique_id"
CLIENT_ENTITY_ID = f"{MP_DOMAIN}.client"
MAIN_ENTITY_ID = f"{MP_DOMAIN}.host"
MUSIC_ENTITY_ID = f"{MP_DOMAIN}.music_client"
RESTRICTED_ENTITY_ID = f"{MP_DOMAIN}.restricted_client"
STANDBY_ENTITY_ID = f"{MP_DOMAIN}.standby_client"
UNAVAILABLE_ENTITY_ID = f"{MP_DOMAIN}.unavailable_client"
# pylint: disable=redefined-outer-name
@fixture
def mock_now() -> datetime:
"""Fixture for dtutil.now."""
return dt_util.utcnow()
async def async_turn_on(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_ON, data)
async def async_turn_off(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_OFF, data)
async def async_media_pause(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PAUSE, data)
async def async_media_play(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PLAY, data)
async def async_media_stop(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for stop."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_STOP, data)
async def async_media_next_track(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for next track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
async def async_media_previous_track(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for prev track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
async def async_play_media(
hass: HomeAssistantType,
media_type: str,
media_id: str,
entity_id: Optional[str] = None,
enqueue: Optional[str] = None,
) -> None:
"""Send the media player the command for playing media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
if enqueue:
data[ATTR_MEDIA_ENQUEUE] = enqueue
await hass.services.async_call(MP_DOMAIN, SERVICE_PLAY_MEDIA, data)
async def test_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test setup with basic config."""
await setup_integration(hass, aioclient_mock)
assert hass.states.get(MAIN_ENTITY_ID)
assert hass.states.get(CLIENT_ENTITY_ID)
assert hass.states.get(UNAVAILABLE_ENTITY_ID)
async def test_unique_id(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test unique id."""
await setup_integration(hass, aioclient_mock)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
main = entity_registry.async_get(MAIN_ENTITY_ID)
assert main.device_class == DEVICE_CLASS_RECEIVER
assert main.unique_id == "028877455858"
client = entity_registry.async_get(CLIENT_ENTITY_ID)
assert client.device_class == DEVICE_CLASS_RECEIVER
assert client.unique_id == "2CA17D1CD30X"
unavailable_client = entity_registry.async_get(UNAVAILABLE_ENTITY_ID)
assert unavailable_client.device_class == DEVICE_CLASS_RECEIVER
assert unavailable_client.unique_id == "9XXXXXXXXXX9"
async def test_supported_features(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test supported features."""
await setup_integration(hass, aioclient_mock)
# Features supported for main DVR
state = hass.states.get(MAIN_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
# Feature supported for clients.
state = hass.states.get(CLIENT_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
async def test_check_attributes(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test attributes."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "17016356"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MOVIE
assert state.attributes.get(ATTR_MEDIA_DURATION) == 7200
assert state.attributes.get(ATTR_MEDIA_POSITION) == 4437
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Snow Bride"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("HALLHD", "312")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "312"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-G"
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2020, 3, 21, 13, 0, tzinfo=dt_util.UTC
)
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "4405732"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_TVSHOW
assert state.attributes.get(ATTR_MEDIA_DURATION) == 1791
assert state.attributes.get(ATTR_MEDIA_POSITION) == 263
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Tyler's Ultimate"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) == "Spaghetti and Clam Sauce"
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("FOODHD", "231")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "231"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "No Rating"
assert state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2010, 7, 5, 15, 0, 8, tzinfo=dt_util.UTC
)
state = hass.states.get(MUSIC_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "76917562"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MUSIC
assert state.attributes.get(ATTR_MEDIA_DURATION) == 86400
assert state.attributes.get(ATTR_MEDIA_POSITION) == 15050
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Sparkle In Your Eyes"
assert state.attributes.get(ATTR_MEDIA_ARTIST) == "Gerald Albright"
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) == "Slam Dunk (2014)"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("MCSJ", "851")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "851"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-PG"
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2020, 3, 21, 10, 0, 0, tzinfo=dt_util.UTC
)
state = hass.states.get(STANDBY_ENTITY_ID)
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_MEDIA_DURATION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None
assert state.attributes.get(ATTR_MEDIA_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_ARTIST) is None
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) is None
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
state = hass.states.get(RESTRICTED_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_MEDIA_DURATION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None
assert state.attributes.get(ATTR_MEDIA_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_ARTIST) is None
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) is None
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
state = hass.states.get(UNAVAILABLE_ENTITY_ID)
assert state.state == STATE_UNAVAILABLE
async def test_attributes_paused(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
):
"""Test attributes while paused."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(CLIENT_ENTITY_ID)
last_updated = state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
# Test to make sure that ATTR_MEDIA_POSITION_UPDATED_AT is not
# updated if TV is paused.
with patch(
"homeassistant.util.dt.utcnow", return_value=mock_now + timedelta(minutes=5)
):
await async_media_pause(hass, CLIENT_ENTITY_ID)
await hass.async_block_till_done()
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PAUSED
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) == last_updated
async def test_main_services(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test the different services."""
await setup_integration(hass, aioclient_mock)
with patch("directv.DIRECTV.remote") as remote_mock:
await async_turn_off(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("poweroff", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_turn_on(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("poweron", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_pause(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("pause", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_play(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("play", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_next_track(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("ffwd", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_previous_track(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("rew", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_stop(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("stop", "0")
with patch("directv.DIRECTV.tune") as tune_mock:
await async_play_media(hass, "channel", 312, MAIN_ENTITY_ID)
await hass.async_block_till_done()
tune_mock.assert_called_once_with("312", "0")
|
from collections import abc
from typing import Dict, Hashable, Mapping, Sequence, Set
def compare_paths_lt(x, y):
min_length = min(len(x), len(y))
if x[:min_length] == y[:min_length]:
return len(x) == min_length
for i in range(min_length):
a, b = x[i], y[i]
for _type in (int, str, tuple):
if isinstance(a, _type):
if isinstance(b, _type):
break
else:
return True
if a == b:
continue
elif a < b:
return True
else:
return False
raise RuntimeError
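# Added clarifying note (not in the original source): paths are sequences of
# document keys/indexes. When the two elements at a position are of different
# types among int, str and tuple, the left-hand path is treated as smaller;
# otherwise ordinary ordering applies, e.g. compare_paths_lt((0, 'a'), (0, 'b'))
# is True and compare_paths_lt((0, 'b'), (0, 'a')) is False.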
def drop_item_from_tuple(t, i):
return t[:i] + t[i + 1 :]
def mapping_to_frozenset(schema: Mapping) -> frozenset:
"""
    Be aware that this treats any sequence type with equal members as equal. As it
    is used to identify equality of schemas, this can be considered okay, as
    definitions are semantically equal regardless of the container type.
"""
schema_copy = {} # type: Dict[Hashable, Hashable]
for key, value in schema.items():
if isinstance(value, abc.Mapping):
schema_copy[key] = mapping_to_frozenset(value)
elif isinstance(value, Sequence):
value = list(value)
for i, item in enumerate(value):
if isinstance(item, abc.Mapping):
value[i] = mapping_to_frozenset(item)
schema_copy[key] = tuple(value)
elif isinstance(value, Set):
schema_copy[key] = frozenset(value)
elif isinstance(value, Hashable):
schema_copy[key] = value
else:
raise TypeError("All schema contents must be hashable.")
return frozenset(schema_copy.items())
def quote_string(value):
if isinstance(value, str):
return '"%s"' % value
else:
return value
class readonly_classproperty(property):
def __get__(self, instance, owner):
return super().__get__(owner)
def __set__(self, instance, value):
raise RuntimeError('This is a readonly class property.')
def __delete__(self, instance):
raise RuntimeError('This is a readonly class property.')
def schema_hash(schema: Mapping) -> int:
return hash(mapping_to_frozenset(schema))
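# Hedged usage sketch (added, not part of the original module): it illustrates
# the note above that sequence container types are normalized, so schemas that
# differ only in list vs. tuple values hash identically.
if __name__ == "__main__":
    assert schema_hash({"allowed": [1, 2, 3]}) == schema_hash({"allowed": (1, 2, 3)})
    assert quote_string("ok") == '"ok"' and quote_string(3) == 3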
|
import logging
from pyjoin import get_devices, send_notification
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DEVICE_ID = "device_id"
CONF_DEVICE_IDS = "device_ids"
CONF_DEVICE_NAMES = "device_names"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_DEVICE_ID): cv.string,
vol.Optional(CONF_DEVICE_IDS): cv.string,
vol.Optional(CONF_DEVICE_NAMES): cv.string,
}
)
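# Added example configuration.yaml entry (illustration only; the platform key
# is assumed to be the integration's notify platform name):
#
# notify:
#   - platform: joaoapps_join
#     api_key: YOUR_API_KEY
#     device_names: "Pixel,Tablet"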
def get_service(hass, config, discovery_info=None):
"""Get the Join notification service."""
api_key = config.get(CONF_API_KEY)
device_id = config.get(CONF_DEVICE_ID)
device_ids = config.get(CONF_DEVICE_IDS)
device_names = config.get(CONF_DEVICE_NAMES)
if api_key:
if not get_devices(api_key):
_LOGGER.error("Error connecting to Join. Check the API key")
return False
if device_id is None and device_ids is None and device_names is None:
_LOGGER.error(
"No device was provided. Please specify device_id"
", device_ids, or device_names"
)
return False
return JoinNotificationService(api_key, device_id, device_ids, device_names)
class JoinNotificationService(BaseNotificationService):
"""Implement the notification service for Join."""
def __init__(self, api_key, device_id, device_ids, device_names):
"""Initialize the service."""
self._api_key = api_key
self._device_id = device_id
self._device_ids = device_ids
self._device_names = device_names
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA) or {}
send_notification(
device_id=self._device_id,
device_ids=self._device_ids,
device_names=self._device_names,
text=message,
title=title,
icon=data.get("icon"),
smallicon=data.get("smallicon"),
image=data.get("image"),
sound=data.get("sound"),
notification_id=data.get("notification_id"),
url=data.get("url"),
tts=data.get("tts"),
tts_language=data.get("tts_language"),
vibration=data.get("vibration"),
actions=data.get("actions"),
api_key=self._api_key,
)
|
import configparser
import os
import requests
from nikola import utils
from nikola.plugin_categories import Command
LOGGER = utils.get_logger('subtheme')
def _check_for_theme(theme, themes):
for t in themes:
if t.endswith(os.sep + theme):
return True
return False
class CommandSubTheme(Command):
"""Given a swatch name from bootswatch.com and a parent theme, creates a custom theme."""
name = "subtheme"
doc_usage = "[options]"
doc_purpose = "given a swatch name from bootswatch.com or hackerthemes.com and a parent theme, creates a custom"\
" theme"
cmd_options = [
{
'name': 'name',
'short': 'n',
'long': 'name',
'default': 'custom',
'type': str,
'help': 'New theme name',
},
{
'name': 'swatch',
'short': 's',
'default': '',
'type': str,
            'help': 'Name of the swatch from bootswatch.com or hackerthemes.com.'
},
{
'name': 'parent',
'short': 'p',
'long': 'parent',
'default': 'bootstrap4',
'help': 'Parent theme name',
},
]
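    # Added usage note: running, for example,
    #   nikola subtheme -s flatly -p bootstrap4 -n custom
    # downloads the swatch CSS into themes/custom/assets/css and writes a
    # custom.theme file that inherits from the bootstrap4 parent theme.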
def _execute(self, options, args):
"""Given a swatch name and a parent theme, creates a custom theme."""
name = options['name']
swatch = options['swatch']
if not swatch:
LOGGER.error('The -s option is mandatory')
return 1
parent = options['parent']
version = '4'
# Check which Bootstrap version to use
themes = utils.get_theme_chain(parent, self.site.themes_dirs)
if _check_for_theme('bootstrap', themes) or _check_for_theme('bootstrap-jinja', themes):
version = '2'
elif _check_for_theme('bootstrap3', themes) or _check_for_theme('bootstrap3-jinja', themes):
version = '3'
elif _check_for_theme('bootstrap4', themes) or _check_for_theme('bootstrap4-jinja', themes):
version = '4'
elif not _check_for_theme('bootstrap4', themes) and not _check_for_theme('bootstrap4-jinja', themes):
LOGGER.warning(
'"subtheme" only makes sense for themes that use bootstrap')
elif _check_for_theme('bootstrap3-gradients', themes) or _check_for_theme('bootstrap3-gradients-jinja', themes):
LOGGER.warning(
'"subtheme" doesn\'t work well with the bootstrap3-gradients family')
LOGGER.info("Creating '{0}' theme from '{1}' and '{2}'".format(
name, swatch, parent))
utils.makedirs(os.path.join('themes', name, 'assets', 'css'))
for fname in ('bootstrap.min.css', 'bootstrap.css'):
if swatch in [
'bubblegum', 'business-tycoon', 'charming', 'daydream',
'executive-suite', 'good-news', 'growth', 'harbor', 'hello-world',
'neon-glow', 'pleasant', 'retro', 'vibrant-sea', 'wizardry']: # Hackerthemes
LOGGER.info(
                    'Hackertheme-based subthemes often require you to use a custom font for full effect.')
if version != '4':
LOGGER.error(
'The hackertheme subthemes are only available for Bootstrap 4.')
return 1
if fname == 'bootstrap.css':
url = 'https://raw.githubusercontent.com/HackerThemes/theme-machine/master/dist/{swatch}/css/bootstrap4-{swatch}.css'.format(
swatch=swatch)
else:
url = 'https://raw.githubusercontent.com/HackerThemes/theme-machine/master/dist/{swatch}/css/bootstrap4-{swatch}.min.css'.format(
swatch=swatch)
else: # Bootswatch
url = 'https://bootswatch.com'
if version:
url += '/' + version
url = '/'.join((url, swatch, fname))
LOGGER.info("Downloading: " + url)
r = requests.get(url)
if r.status_code > 299:
                LOGGER.error('Error {0} getting {1}'.format(r.status_code, url))
return 1
data = r.text
with open(os.path.join('themes', name, 'assets', 'css', fname),
'w+') as output:
output.write(data)
with open(os.path.join('themes', name, '%s.theme' % name), 'w+') as output:
parent_theme_data_path = utils.get_asset_path(
'%s.theme' % parent, themes)
cp = configparser.ConfigParser()
cp.read(parent_theme_data_path)
cp['Theme']['parent'] = parent
cp['Family'] = {'family': cp['Family']['family']}
cp.write(output)
LOGGER.info(
'Theme created. Change the THEME setting to "{0}" to use it.'.format(name))
|
import pytest
from decimal import Decimal
from shop.money.money_maker import MoneyMaker
from testshop.models import Commodity
EUR = MoneyMaker('EUR')
@pytest.mark.django_db
def test_field_filter(commodity_factory):
commodity = commodity_factory(unit_price='12.34')
assert list(Commodity.objects.filter(unit_price='12.34')) == [commodity]
assert list(Commodity.objects.filter(unit_price=Decimal('12.34'))) == [commodity]
assert list(Commodity.objects.filter(unit_price=EUR('12.34'))) == [commodity]
assert list(Commodity.objects.filter(unit_price__gt='12.33')) == [commodity]
assert list(Commodity.objects.filter(unit_price__gt=EUR('12.33'))) == [commodity]
assert list(Commodity.objects.filter(unit_price__gt='12.34')) == []
assert list(Commodity.objects.filter(unit_price__gte='12.34')) == [commodity]
assert list(Commodity.objects.filter(unit_price__lt='12.35')) == [commodity]
assert list(Commodity.objects.filter(unit_price__lt=EUR('12.35'))) == [commodity]
assert list(Commodity.objects.filter(unit_price__lt='12.34')) == []
assert list(Commodity.objects.filter(unit_price__lte='12.34')) == [commodity]
|
from kombu.utils.encoding import ensure_bytes
import zlib
try:
import lzma
except ImportError: # pragma: no cover
# TODO: Drop fallback to backports once we drop Python 2.7 support
try:
from backports import lzma
except ImportError: # pragma: no cover
lzma = None
_aliases = {}
_encoders = {}
_decoders = {}
__all__ = ('register', 'encoders', 'get_encoder',
'get_decoder', 'compress', 'decompress')
def register(encoder, decoder, content_type, aliases=None):
"""Register new compression method.
Arguments:
encoder (Callable): Function used to compress text.
decoder (Callable): Function used to decompress previously
compressed text.
content_type (str): The mime type this compression method
identifies as.
aliases (Sequence[str]): A list of names to associate with
this compression method.
"""
_encoders[content_type] = encoder
_decoders[content_type] = decoder
if aliases:
_aliases.update((alias, content_type) for alias in aliases)
def encoders():
"""Return a list of available compression methods."""
return list(_encoders)
def get_encoder(t):
"""Get encoder by alias name."""
t = _aliases.get(t, t)
return _encoders[t], t
def get_decoder(t):
"""Get decoder by alias name."""
return _decoders[_aliases.get(t, t)]
def compress(body, content_type):
"""Compress text.
Arguments:
body (AnyStr): The text to compress.
content_type (str): mime-type of compression method to use.
"""
encoder, content_type = get_encoder(content_type)
return encoder(ensure_bytes(body)), content_type
def decompress(body, content_type):
"""Decompress compressed text.
Arguments:
body (AnyStr): Previously compressed text to uncompress.
content_type (str): mime-type of compression method used.
"""
return get_decoder(content_type)(body)
register(zlib.compress,
zlib.decompress,
'application/x-gzip', aliases=['gzip', 'zlib'])
try:
import bz2
except ImportError:
pass # No bz2 support
else:
register(bz2.compress,
bz2.decompress,
'application/x-bz2', aliases=['bzip2', 'bzip'])
try:
import brotli
except ImportError: # pragma: no cover
pass
else:
register(brotli.compress,
brotli.decompress,
'application/x-brotli', aliases=['brotli'])
# TODO: Drop condition once we drop Python 2.7 support
if lzma: # pragma: no cover
register(lzma.compress,
lzma.decompress,
'application/x-lzma', aliases=['lzma', 'xz'])
try:
import zstandard as zstd
except ImportError: # pragma: no cover
pass
else:
def zstd_compress(body):
c = zstd.ZstdCompressor()
return c.compress(body)
def zstd_decompress(body):
d = zstd.ZstdDecompressor()
return d.decompress(body)
register(zstd_compress,
zstd_decompress,
'application/zstd', aliases=['zstd', 'zstandard'])
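# Hedged usage sketch (added): round-trip a payload through the zlib codec,
# addressed here by its registered alias.
if __name__ == '__main__':
    payload, ctype = compress(b'x' * 1024, 'zlib')
    assert ctype == 'application/x-gzip'
    assert decompress(payload, ctype) == b'x' * 1024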
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from drbd import DRBDCollector
##########################################################################
class TestDRBDCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DRBDCollector', {})
self.collector = DRBDCollector(config, None)
def test_import(self):
self.assertTrue(DRBDCollector)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from cpu import CPUCollector
##########################################################################
class TestCPUCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('CPUCollector', {
'interval': 10,
'normalize': False
})
self.collector = CPUCollector(config, None)
def test_import(self):
self.assertTrue(CPUCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_stat(self, publish_mock, open_mock):
CPUCollector.PROC = '/proc/stat'
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/stat')
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu 100 200 300 400 500 0 0 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'cpu 110 220 330 440 550 0 0 0 0 0')))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {
'total.idle': 4.0,
'total.iowait': 5.0,
'total.nice': 2.0,
'total.system': 3.0,
'total.user': 1.0
})
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
CPUCollector.PROC = self.getFixturePath('proc_stat_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
CPUCollector.PROC = self.getFixturePath('proc_stat_2')
self.collector.collect()
metrics = {
'total.idle': 2440.8,
'total.iowait': 0.2,
'total.nice': 0.0,
'total.system': 0.2,
'total.user': 0.4
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_ec2_data(self, publish_mock):
self.collector.config['interval'] = 30
patch_open = patch('os.path.isdir', Mock(return_value=True))
patch_open.start()
CPUCollector.PROC = self.getFixturePath('ec2_stat_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
CPUCollector.PROC = self.getFixturePath('ec2_stat_2')
self.collector.collect()
patch_open.stop()
metrics = {
'total.idle': 68.4,
'total.iowait': 0.6,
'total.nice': 0.0,
'total.system': 13.7,
'total.user': 16.666666666666668
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_473(self, publish_mock):
"""
No cpu value should ever be over 100
"""
self.collector.config['interval'] = 60
patch_open = patch('os.path.isdir', Mock(return_value=True))
patch_open.start()
CPUCollector.PROC = self.getFixturePath('473_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
CPUCollector.PROC = self.getFixturePath('473_2')
self.collector.collect()
patch_open.stop()
totals = {}
for call in publish_mock.mock_calls:
call = call[1]
if call[0][:6] == 'total.':
continue
if call[1] > 100:
raise ValueError("metric %s: %s should not be over 100!" % (
call[0], call[1]))
k = call[0][:4]
totals[k] = totals.get(k, 0) + call[1]
for t in totals:
# Allow rounding errors
if totals[t] >= 101:
raise ValueError(
"metric total for %s: %s should not be over 100!" % (
t, totals[t]))
class TestCPUCollectorNormalize(CollectorTestCase):
def setUp(self):
config = get_collector_config('CPUCollector', {
'interval': 1,
'normalize': True,
})
self.collector = CPUCollector(config, None)
self.num_cpu = 2
# first measurement
self.input_base = {
'user': 100,
'nice': 200,
'system': 300,
'idle': 400,
}
# second measurement
self.input_next = {
'user': 110,
'nice': 220,
'system': 330,
'idle': 440,
}
# expected increment, divided by number of CPUs
# for example, user should be 10/2 = 5
self.expected = {
'total.user': 5.0,
'total.nice': 10.0,
'total.system': 15.0,
'total.idle': 20.0,
}
# convert an input dict with values to a string that might come from
# /proc/stat
def input_dict_to_proc_string(self, cpu_id, dict_):
return ("cpu%s %i %i %i %i 0 0 0 0 0 0" %
(cpu_id,
dict_['user'],
dict_['nice'],
dict_['system'],
dict_['idle'],
)
)
@patch.object(Collector, 'publish')
def test_should_work_proc_stat(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
"\n".join([self.input_dict_to_proc_string('', self.input_base),
self.input_dict_to_proc_string('0', self.input_base),
self.input_dict_to_proc_string('1', self.input_base),
])
)))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
"\n".join([self.input_dict_to_proc_string('', self.input_next),
self.input_dict_to_proc_string('0', self.input_next),
self.input_dict_to_proc_string('1', self.input_next),
])
)))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, self.expected)
@patch.object(Collector, 'publish')
@patch('cpu.os')
@patch('cpu.psutil')
def test_should_work_psutil(self, psutil_mock, os_mock, publish_mock):
os_mock.access.return_value = False
total = Mock(**self.input_base)
cpu_time = [Mock(**self.input_base),
Mock(**self.input_base),
]
psutil_mock.cpu_times.side_effect = [cpu_time, total]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
total = Mock(**self.input_next)
cpu_time = [Mock(**self.input_next),
Mock(**self.input_next),
]
psutil_mock.cpu_times.side_effect = [cpu_time, total]
self.collector.collect()
self.assertPublishedMany(publish_mock, self.expected)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import STATE_OFF
FULL_SUPPORT = SUPPORT_SET_SPEED | SUPPORT_OSCILLATE | SUPPORT_DIRECTION
LIMITED_SUPPORT = SUPPORT_SET_SPEED
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the demo fan platform."""
async_add_entities(
[
DemoFan(hass, "fan1", "Living Room Fan", FULL_SUPPORT),
DemoFan(hass, "fan2", "Ceiling Fan", LIMITED_SUPPORT),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoFan(FanEntity):
"""A demonstration fan component."""
def __init__(
self, hass, unique_id: str, name: str, supported_features: int
) -> None:
"""Initialize the entity."""
self.hass = hass
self._unique_id = unique_id
self._supported_features = supported_features
self._speed = STATE_OFF
self._oscillating = None
self._direction = None
self._name = name
if supported_features & SUPPORT_OSCILLATE:
self._oscillating = False
if supported_features & SUPPORT_DIRECTION:
self._direction = "forward"
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self) -> str:
"""Get entity name."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo fan."""
return False
@property
def speed(self) -> str:
"""Return the current speed."""
return self._speed
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [STATE_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity."""
if speed is None:
speed = SPEED_MEDIUM
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Turn off the entity."""
self.oscillate(False)
self.set_speed(STATE_OFF)
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
self._speed = speed
self.schedule_update_ha_state()
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self._direction = direction
self.schedule_update_ha_state()
def oscillate(self, oscillating: bool) -> None:
"""Set oscillation."""
self._oscillating = oscillating
self.schedule_update_ha_state()
@property
def current_direction(self) -> str:
"""Fan direction."""
return self._direction
@property
def oscillating(self) -> bool:
"""Oscillating."""
return self._oscillating
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
|
from __future__ import unicode_literals
from binascii import hexlify
def rsa_encode(item):
"""rsa algorithm and need modify code"""
# replace your "n" and "e" hex value
n_modulus_hex_string = 'd7190a042cd2db97ebc2ab4da366f2a7085556ed613b5a39c9fdd2bb2595d1dc'
e_exponent_hex_string = '1001'
if len(item) < 1:
print("[!]exception item:[%s]" % item)
return item
public_modulus = int(n_modulus_hex_string, 16)
public_exponent = int(e_exponent_hex_string, 16)
item = int(hexlify(item[::-1].encode('utf-8')).decode(), 16)
item = pow(item, public_exponent, public_modulus)
return '%X' % item
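# Hedged usage sketch (added): textbook RSA with the placeholder key above; the
# ciphertext comes back as an uppercase hex string.
if __name__ == '__main__':
    print(rsa_encode('secret'))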
|
import os
import unittest
from perfkitbenchmarker.linux_packages import netperf
class NetperfParseHistogramTestCase(unittest.TestCase):
def setUp(self):
data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
result_path = os.path.join(data_dir, 'netperf_results.txt')
with open(result_path) as results_file:
self.netperf_output = results_file.read()
def testParsesHistogram(self):
expected = {
300: 5771, 400: 118948, 500: 7121, 600: 639, 700: 199, 800: 90,
900: 53, 1000: 149, 2000: 31, 3000: 11, 4000: 8, 5000: 1, 6000: 1,
7000: 1, 9000: 1
}
hist = netperf.ParseHistogram(self.netperf_output)
self.assertEqual(hist, expected)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import unittest
from perfkitbenchmarker import errors
from perfkitbenchmarker.linux_packages import ycsb
import six
from six.moves import range
def open_data_file(filename):
path = os.path.join(os.path.dirname(__file__), '..', 'data', filename)
with open(path) as fp:
return fp.read()
class SimpleResultParserTestCase(unittest.TestCase):
maxDiff = None
def setUp(self):
super(SimpleResultParserTestCase, self).setUp()
self.contents = open_data_file('ycsb-test-run.dat')
self.results = ycsb.ParseResults(self.contents, 'histogram')
def testCommandLineSet(self):
self.assertEqual('Command line: -db com.yahoo.ycsb.BasicDB '
'-P workloads/workloada -t', self.results['command_line'])
def testClientSet(self):
self.assertEqual('YCSB Client 0.1', self.results['client'])
def testUpdateStatisticsParsed(self):
self.assertDictEqual(
{
'group': 'update',
'statistics': {
'Operations': 531,
'Return=0': 531,
'AverageLatency(ms)': .0659774011299435,
'MinLatency(ms)': 0.042,
'MaxLatency(ms)': .345,
'95thPercentileLatency(ms)': 0,
'99thPercentileLatency(ms)': 0
},
'histogram': [(0, 530), (19, 1)],
},
dict(self.results['groups']['update']))
def testReadStatisticsParsed(self):
self.assertDictEqual(
{
'group': 'read',
'statistics': {
'Operations': 469,
'Return=0': 469,
'AverageLatency(ms)': 0.03847761194029851,
'MinLatency(ms)': 0.034,
'MaxLatency(ms)': 0.102,
'95thPercentileLatency(ms)': 0,
'99thPercentileLatency(ms)': 0
},
'histogram': [(0, 469)],
},
dict(self.results['groups']['read']))
def testOverallStatisticsParsed(self):
self.assertDictEqual(
{
'statistics': {
'RunTime(ms)': 80.0,
'Throughput(ops/sec)': 12500.0
},
'group': 'overall',
'histogram': []
},
self.results['groups']['overall'])
class DetailedResultParserTestCase(unittest.TestCase):
def setUp(self):
super(DetailedResultParserTestCase, self).setUp()
self.contents = open_data_file('ycsb-test-run-2.dat')
self.results = ycsb.ParseResults(self.contents, 'histogram')
def testPercentilesFromHistogram_read(self):
hist = self.results['groups']['read']['histogram']
percentiles = ycsb._PercentilesFromHistogram(hist)
self.assertEqual(1, percentiles['p50'])
self.assertEqual(7, percentiles['p99'])
def testPercentilesFromHistogram_update(self):
hist = self.results['groups']['update']['histogram']
percentiles = ycsb._PercentilesFromHistogram(hist)
self.assertEqual(1, percentiles['p50'])
self.assertEqual(7, percentiles['p99'])
class BadResultParserTestCase(unittest.TestCase):
def testBadTestRun(self):
contents = open_data_file('ycsb-test-run-3.dat')
self.assertRaises(errors.Benchmarks.KnownIntermittentError,
ycsb.ParseResults, contents, 'histogram')
class WeightedQuantileTestCase(unittest.TestCase):
def testEvenlyWeightedSamples(self):
x = list(range(1, 101)) # 1-100
weights = [1 for _ in x]
self.assertEqual(50, ycsb._WeightedQuantile(x, weights, 0.50))
self.assertEqual(75, ycsb._WeightedQuantile(x, weights, 0.75))
self.assertEqual(90, ycsb._WeightedQuantile(x, weights, 0.90))
self.assertEqual(95, ycsb._WeightedQuantile(x, weights, 0.95))
self.assertEqual(99, ycsb._WeightedQuantile(x, weights, 0.99))
self.assertEqual(100, ycsb._WeightedQuantile(x, weights, 1))
def testLowWeight(self):
x = [1, 4]
weights = [99, 1]
for i in range(100):
self.assertEqual(1, ycsb._WeightedQuantile(x, weights, i / 100.0))
self.assertEqual(4, ycsb._WeightedQuantile(x, weights, 0.995))
def testMidWeight(self):
x = [0, 1.2, 4]
weights = [1, 98, 1]
for i in range(2, 99):
self.assertAlmostEqual(1.2, ycsb._WeightedQuantile(x, weights, i / 100.0))
self.assertEqual(4, ycsb._WeightedQuantile(x, weights, 0.995))
class ParseWorkloadTestCase(unittest.TestCase):
def testParsesEmptyString(self):
self.assertDictEqual({}, ycsb._ParseWorkload(''))
def testIgnoresComment(self):
self.assertDictEqual({}, ycsb._ParseWorkload('#\n'))
self.assertDictEqual({},
ycsb._ParseWorkload('#recordcount = 10\n'
'# columnfamily=cf'))
self.assertDictEqual({'recordcount': '10'},
ycsb._ParseWorkload('#Sample!\nrecordcount = 10'))
def testParsesSampleWorkload(self):
contents = open_data_file('ycsb_workloada')
actual = ycsb._ParseWorkload(contents)
expected = {
'recordcount': '1000',
'operationcount': '1000',
'workload': 'com.yahoo.ycsb.workloads.CoreWorkload',
'readallfields': 'true',
'readproportion': '0.5',
'updateproportion': '0.5',
'scanproportion': '0',
'insertproportion': '0',
'requestdistribution': 'zipfian'
}
self.assertDictEqual(expected, actual)
class CombineResultsTestCase(unittest.TestCase):
def testGroupMissing(self):
r1 = {
'client': '',
'command_line': '',
'groups': {
'read': {
'group': 'read',
'statistics': {'Operations': 100,
'Return=0': 100},
'histogram': []
}
}
}
r2 = {
'client': '',
'command_line': '',
'groups': {
'read': {
'group': 'read',
'statistics': {'Operations': 96, 'Return=0': 94,
'Return=-1': 2},
'histogram': []
},
'update': {
'group': 'update',
'statistics': {'Operations': 100,
'AverageLatency(ms)': 25},
'histogram': []
}
}
}
combined = ycsb._CombineResults([r1, r2], 'histogram', {})
six.assertCountEqual(self, ['read', 'update'], combined['groups'])
six.assertCountEqual(self, ['Operations', 'Return=0', 'Return=-1'],
combined['groups']['read']['statistics'])
read_stats = combined['groups']['read']['statistics']
self.assertEqual({'Operations': 196, 'Return=0': 194, 'Return=-1': 2},
read_stats)
def testDropUnaggregatedFromSingleResult(self):
r = {
'client': '',
'command_line': '',
'groups': {
'read': {
'group': 'read',
'statistics': {'AverageLatency(ms)': 21},
'histogram': []
}
}
}
r_copy = copy.deepcopy(r)
self.assertEqual(r, r_copy)
combined = ycsb._CombineResults([r], 'histogram', {})
self.assertEqual(r, r_copy)
r['groups']['read']['statistics'] = {}
self.assertEqual(r, combined)
class HdrLogsParserTestCase(unittest.TestCase):
def testParseHdrLogFile(self):
rawlog = """
#[StartTime: 1523565997 (seconds since epoch), Thu Apr 12 20:46:37 UTC 2018]
Value Percentile TotalCount 1/(1-Percentile)
314.000 0.000000000000 2 1.00
853.000 0.100000000000 49955 1.11
949.000 0.200000000000 100351 1.25
949.000 0.210000000000 100351 1.27
1033.000 0.300000000000 150110 1.43
#[Mean = 1651.145, StdDeviation = 851.707]
#[Max = 203903.000, Total count = 499019]
#[Buckets = 8, SubBuckets = 2048]
"""
actual = ycsb.ParseHdrLogFile(rawlog)
expected = [(0.0, 0.314, 2), (10.0, 0.853, 49953),
(20.0, 0.949, 50396), (30.0, 1.033, 49759)]
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
from django.utils.functional import cached_property
from weblate_language_data.countries import DEFAULT_LANGS
from zeep import Client
from weblate.machinery.base import MachineTranslation
MST_API_URL = "http://api.terminology.microsoft.com/Terminology.svc"
MST_WSDL_URL = f"{MST_API_URL}?wsdl"
class MicrosoftTerminologyService(MachineTranslation):
"""The Microsoft Terminology Service API.
Allows you to programmatically access the terminology, definitions and user
interface (UI) strings available on the MS Language Portal through a web service
(SOAP).
"""
name = "Microsoft Terminology"
SERVICE = None
@cached_property
def soap(self):
if MicrosoftTerminologyService.SERVICE is None:
MicrosoftTerminologyService.SERVICE = Client(MST_WSDL_URL)
return MicrosoftTerminologyService.SERVICE
def soap_req(self, name, **kwargs):
return getattr(self.soap.service, name)(**kwargs)
def download_languages(self):
"""Get list of supported languages."""
languages = self.soap_req("GetLanguages")
if not languages:
return []
return [lang["Code"] for lang in languages]
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Download list of possible translations from the service."""
args = {
"text": text,
"from": source,
"to": language,
"maxTranslations": 20,
"sources": ["Terms", "UiStrings"],
"searchOperator": "AnyWord",
}
result = self.soap_req("GetTranslations", **args)
# It can return None in some error cases
if not result:
return
for item in result:
target = item["Translations"]["Translation"][0]["TranslatedText"]
source = item["OriginalText"]
yield {
"text": target,
"quality": self.comparer.similarity(text, source),
"service": self.name,
"source": source,
}
def map_language_code(self, code):
"""Convert language to service specific code.
Add country part of locale if missing.
"""
code = super().map_language_code(code).replace("_", "-").lower()
if "-" not in code:
for lang in DEFAULT_LANGS:
if lang.split("_")[0] == code:
return lang.replace("_", "-").lower()
return code
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan import utils
import gin
import tensorflow as tf
def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):
"""Checks the shapes and ranks of logits and prediction tensors.
Args:
d_real: prediction for real points, values in [0, 1], shape [batch_size, 1].
d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1].
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
Raises:
ValueError: if the ranks or shapes are mismatched.
"""
def _check_pair(a, b):
if a != b:
raise ValueError("Shape mismatch: %s vs %s." % (a, b))
if len(a) != 2 or len(b) != 2:
raise ValueError("Rank: expected 2, got %s and %s" % (len(a), len(b)))
if (d_real is not None) and (d_fake is not None):
_check_pair(d_real.shape.as_list(), d_fake.shape.as_list())
if (d_real_logits is not None) and (d_fake_logits is not None):
_check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())
if (d_real is not None) and (d_real_logits is not None):
_check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())
@gin.configurable(whitelist=[])
def non_saturating(d_real_logits, d_fake_logits, d_real=None, d_fake=None):
"""Returns the discriminator and generator loss for Non-saturating loss.
Args:
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
d_real: ignored.
d_fake: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("non_saturating_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_real_logits, labels=tf.ones_like(d_real_logits),
name="cross_entropy_d_real"))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits),
name="cross_entropy_d_fake"))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_fake_logits, labels=tf.ones_like(d_fake_logits),
name="cross_entropy_g"))
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable(whitelist=[])
def wasserstein(d_real_logits, d_fake_logits, d_real=None, d_fake=None):
"""Returns the discriminator and generator loss for Wasserstein loss.
Args:
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
d_real: ignored.
d_fake: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("wasserstein_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = -tf.reduce_mean(d_real_logits)
d_loss_fake = tf.reduce_mean(d_fake_logits)
d_loss = d_loss_real + d_loss_fake
g_loss = -d_loss_fake
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable(whitelist=[])
def least_squares(d_real, d_fake, d_real_logits=None, d_fake_logits=None):
"""Returns the discriminator and generator loss for the least-squares loss.
Args:
d_real: prediction for real points, values in [0, 1], shape [batch_size, 1].
d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1].
d_real_logits: ignored.
d_fake_logits: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("least_square_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = tf.reduce_mean(tf.square(d_real - 1.0))
d_loss_fake = tf.reduce_mean(tf.square(d_fake))
d_loss = 0.5 * (d_loss_real + d_loss_fake)
g_loss = 0.5 * tf.reduce_mean(tf.square(d_fake - 1.0))
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable(whitelist=[])
def hinge(d_real_logits, d_fake_logits, d_real=None, d_fake=None):
"""Returns the discriminator and generator loss for the hinge loss.
Args:
d_real_logits: logits for real points, shape [batch_size, 1].
d_fake_logits: logits for fake points, shape [batch_size, 1].
d_real: ignored.
d_fake: ignored.
Returns:
A tuple consisting of the discriminator loss, discriminator's loss on the
real samples and fake samples, and the generator's loss.
"""
with tf.name_scope("hinge_loss"):
check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits)
d_loss_real = tf.reduce_mean(tf.nn.relu(1.0 - d_real_logits))
d_loss_fake = tf.reduce_mean(tf.nn.relu(1.0 + d_fake_logits))
d_loss = d_loss_real + d_loss_fake
g_loss = - tf.reduce_mean(d_fake_logits)
return d_loss, d_loss_real, d_loss_fake, g_loss
@gin.configurable("loss", whitelist=["fn"])
def get_losses(fn=non_saturating, **kwargs):
"""Returns the losses for the discriminator and generator."""
return utils.call_with_accepted_args(fn, **kwargs)
|
import pytest
from homeassistant.components.camera import (
DOMAIN as CAMERA_DOMAIN,
SERVICE_DISABLE_MOTION,
SERVICE_ENABLE_MOTION,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_STREAMING,
async_get_image,
)
from homeassistant.components.demo import DOMAIN
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
ENTITY_CAMERA = "camera.demo_camera"
@pytest.fixture(autouse=True)
async def demo_camera(hass):
"""Initialize a demo camera platform."""
assert await async_setup_component(
hass, CAMERA_DOMAIN, {CAMERA_DOMAIN: {"platform": DOMAIN}}
)
await hass.async_block_till_done()
async def test_init_state_is_streaming(hass):
"""Demo camera initialize as streaming."""
state = hass.states.get(ENTITY_CAMERA)
assert state.state == STATE_STREAMING
with patch(
"homeassistant.components.demo.camera.Path.read_bytes", return_value=b"ON"
) as mock_read_bytes:
image = await async_get_image(hass, ENTITY_CAMERA)
assert mock_read_bytes.call_count == 1
assert image.content == b"ON"
async def test_turn_on_state_back_to_streaming(hass):
"""After turn on state back to streaming."""
state = hass.states.get(ENTITY_CAMERA)
assert state.state == STATE_STREAMING
await hass.services.async_call(
CAMERA_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_CAMERA}, blocking=True
)
state = hass.states.get(ENTITY_CAMERA)
assert state.state == STATE_IDLE
await hass.services.async_call(
CAMERA_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_CAMERA}, blocking=True
)
state = hass.states.get(ENTITY_CAMERA)
assert state.state == STATE_STREAMING
async def test_turn_off_image(hass):
"""After turn off, Demo camera raise error."""
await hass.services.async_call(
CAMERA_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_CAMERA}, blocking=True
)
with pytest.raises(HomeAssistantError) as error:
await async_get_image(hass, ENTITY_CAMERA)
assert error.args[0] == "Camera is off"
async def test_turn_off_invalid_camera(hass):
"""Turn off non-exist camera should quietly fail."""
state = hass.states.get(ENTITY_CAMERA)
assert state.state == STATE_STREAMING
await hass.services.async_call(
CAMERA_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "camera.invalid_camera"},
blocking=True,
)
state = hass.states.get(ENTITY_CAMERA)
assert state.state == STATE_STREAMING
async def test_motion_detection(hass):
"""Test motion detection services."""
# Fetch state and check motion detection attribute
state = hass.states.get(ENTITY_CAMERA)
assert not state.attributes.get("motion_detection")
# Call service to turn on motion detection
await hass.services.async_call(
CAMERA_DOMAIN,
SERVICE_ENABLE_MOTION,
{ATTR_ENTITY_ID: ENTITY_CAMERA},
blocking=True,
)
# Check if state has been updated.
state = hass.states.get(ENTITY_CAMERA)
assert state.attributes.get("motion_detection")
# Call service to turn off motion detection
await hass.services.async_call(
CAMERA_DOMAIN,
SERVICE_DISABLE_MOTION,
{ATTR_ENTITY_ID: ENTITY_CAMERA},
blocking=True,
)
# Check if state has been updated.
state = hass.states.get(ENTITY_CAMERA)
assert not state.attributes.get("motion_detection")
|
from collections import OrderedDict
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
os.environ['is_test_suite'] = 'True'
import classifiers as classifier_tests
import regressors as regressor_tests
training_parameters = {
'model_names': ['DeepLearning', 'GradientBoosting', 'XGB', 'LGBM', 'CatBoost']
}
# Make this an OrderedDict so that we run the tests in a consistent order
test_names = OrderedDict([
('optimize_final_model_regression', regressor_tests.optimize_final_model_regression),
# ('getting_single_predictions_regression', regressor_tests.getting_single_predictions_regression),
# ('feature_learning_getting_single_predictions_regression', regressor_tests.feature_learning_getting_single_predictions_regression),
# ('categorical_ensembling_regression', regressor_tests.categorical_ensembling_regression),
# ('feature_learning_categorical_ensembling_getting_single_predictions_regression', regressor_tests.feature_learning_categorical_ensembling_getting_single_predictions_regression)
])
def test_generator():
for model_name in training_parameters['model_names']:
for test_name, test in test_names.items():
test_model_name = model_name + 'Regressor'
test.description = str(test_model_name) + '_' + test_name
yield test, test_model_name
|
from homeassistant.components.sensor import DEVICE_CLASS_TIMESTAMP, DOMAIN
from homeassistant.const import DATA_MEGABYTES
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.dt as dt_util
from .const import DOMAIN as UNIFI_DOMAIN
from .unifi_client import UniFiClient
RX_SENSOR = "rx"
TX_SENSOR = "tx"
UPTIME_SENSOR = "uptime"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up sensors for UniFi integration."""
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.entities[DOMAIN] = {
RX_SENSOR: set(),
TX_SENSOR: set(),
UPTIME_SENSOR: set(),
}
@callback
def items_added(
clients: set = controller.api.clients, devices: set = controller.api.devices
) -> None:
"""Update the values of the controller."""
if controller.option_allow_bandwidth_sensors:
            add_bandwidth_entities(controller, async_add_entities, clients)
if controller.option_allow_uptime_sensors:
add_uptime_entities(controller, async_add_entities, clients)
for signal in (controller.signal_update, controller.signal_options_update):
controller.listeners.append(async_dispatcher_connect(hass, signal, items_added))
items_added()
@callback
def add_bandwidth_entities(controller, async_add_entities, clients):
"""Add new sensor entities from the controller."""
sensors = []
for mac in clients:
for sensor_class in (UniFiRxBandwidthSensor, UniFiTxBandwidthSensor):
if mac in controller.entities[DOMAIN][sensor_class.TYPE]:
continue
client = controller.api.clients[mac]
sensors.append(sensor_class(client, controller))
if sensors:
async_add_entities(sensors)
@callback
def add_uptime_entities(controller, async_add_entities, clients):
"""Add new sensor entities from the controller."""
sensors = []
for mac in clients:
if mac in controller.entities[DOMAIN][UniFiUpTimeSensor.TYPE]:
continue
client = controller.api.clients[mac]
sensors.append(UniFiUpTimeSensor(client, controller))
if sensors:
async_add_entities(sensors)
class UniFiBandwidthSensor(UniFiClient):
"""UniFi bandwidth sensor base class."""
DOMAIN = DOMAIN
@property
def name(self) -> str:
"""Return the name of the client."""
return f"{super().name} {self.TYPE.upper()}"
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity."""
return DATA_MEGABYTES
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if not self.controller.option_allow_bandwidth_sensors:
await self.remove_item({self.client.mac})
class UniFiRxBandwidthSensor(UniFiBandwidthSensor):
"""Receiving bandwidth sensor."""
TYPE = RX_SENSOR
@property
def state(self) -> int:
"""Return the state of the sensor."""
if self._is_wired:
return self.client.wired_rx_bytes / 1000000
return self.client.rx_bytes / 1000000
class UniFiTxBandwidthSensor(UniFiBandwidthSensor):
"""Transmitting bandwidth sensor."""
TYPE = TX_SENSOR
@property
def state(self) -> int:
"""Return the state of the sensor."""
if self._is_wired:
return self.client.wired_tx_bytes / 1000000
return self.client.tx_bytes / 1000000
class UniFiUpTimeSensor(UniFiClient):
"""UniFi uptime sensor."""
DOMAIN = DOMAIN
TYPE = UPTIME_SENSOR
@property
def device_class(self) -> str:
"""Return device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def name(self) -> str:
"""Return the name of the client."""
return f"{super().name} {self.TYPE.capitalize()}"
@property
def state(self) -> int:
"""Return the uptime of the client."""
return dt_util.utc_from_timestamp(float(self.client.uptime)).isoformat()
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if not self.controller.option_allow_uptime_sensors:
await self.remove_item({self.client.mac})
|
from molecule import logger
from molecule.driver import base
from molecule import util
LOG = logger.get_logger(__name__)
class Openstack(base.Base):
"""
The class responsible for managing `OpenStack`_ instances. `OpenStack`_
is `not` the default driver used in Molecule.
Molecule leverages Ansible's `openstack_module`_, by mapping variables
from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``.
.. _`openstack_module`: https://docs.ansible.com/ansible/latest/os_server_module.html
.. code-block:: yaml
driver:
name: openstack
platforms:
- name: instance
.. code-block:: bash
$ pip install molecule[openstack]
Change the options passed to the ssh client.
.. code-block:: yaml
driver:
name: openstack
ssh_connection_options:
- -o ControlPath=~/.ansible/cp/%r@%h-%p
.. important::
        Molecule does not merge lists; when overriding, the developer must
        provide all options.
Provide a list of files Molecule will preserve, relative to the scenario
ephemeral directory, after any ``destroy`` subcommand execution.
.. code-block:: yaml
driver:
name: openstack
safe_files:
- foo
.. _`OpenStack`: https://www.openstack.org
""" # noqa
def __init__(self, config):
super(Openstack, self).__init__(config)
self._name = 'openstack'
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def login_cmd_template(self):
connection_options = ' '.join(self.ssh_connection_options)
return ('ssh {{address}} '
'-l {{user}} '
'-p {{port}} '
'-i {{identity_file}} '
'{}').format(connection_options)
@property
def default_safe_files(self):
return [
self.instance_config,
]
@property
def default_ssh_connection_options(self):
return self._get_ssh_connection_options()
def login_options(self, instance_name):
d = {'instance': instance_name}
return util.merge_dicts(d, self._get_instance_config(instance_name))
def ansible_connection_options(self, instance_name):
try:
d = self._get_instance_config(instance_name)
return {
'ansible_user': d['user'],
'ansible_host': d['address'],
'ansible_port': d['port'],
'ansible_private_key_file': d['identity_file'],
'connection': 'ssh',
'ansible_ssh_common_args':
' '.join(self.ssh_connection_options),
}
except StopIteration:
return {}
except IOError:
            # Instance has yet to be provisioned, therefore the
# instance_config is not on disk.
return {}
def _get_instance_config(self, instance_name):
instance_config_dict = util.safe_load_file(
self._config.driver.instance_config)
return next(item for item in instance_config_dict
if item['instance'] == instance_name)
def sanity_checks(self):
# FIXME(decentral1se): Implement sanity checks
pass
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
from perfkitbenchmarker import errors
import six
_SPEC_REGISTRY = {}
def GetSpecClass(base_class, **kwargs):
"""Returns the subclass with the corresponding attributes.
Args:
base_class: The base class of the resource to return
(e.g. BaseVmSpec).
**kwargs: Every attribute/value of the subclass's ATTRS that were
used to register the subclass.
Raises:
Exception: If no class could be found with matching attributes.
"""
key = [base_class.__name__]
key += sorted(kwargs.items())
return _SPEC_REGISTRY.get(tuple(key), base_class)
class BaseSpecMetaClass(type):
"""Metaclass that allows each BaseSpec derived class to have its own decoders.
"""
def __init__(cls, name, bases, dct):
super(BaseSpecMetaClass, cls).__init__(name, bases, dct)
cls._init_decoders_lock = threading.Lock()
cls._decoders = collections.OrderedDict()
cls._required_options = set()
if (all(hasattr(cls, attr) for attr in cls.SPEC_ATTRS) and
cls.SPEC_TYPE):
key = [cls.SPEC_TYPE]
key += sorted([(attr, getattr(cls, attr)) for attr in cls.SPEC_ATTRS])
if tuple(key) in _SPEC_REGISTRY:
raise Exception('Subclasses of %s must define unique values for the '
'attrs: %s.' % (cls.SPEC_TYPE, cls.SPEC_ATTRS))
_SPEC_REGISTRY[tuple(key)] = cls
class BaseSpec(six.with_metaclass(BaseSpecMetaClass, object)):
"""Object decoded from a YAML config."""
# The name of the spec class that will be extended with auto-registered
# subclasses.
SPEC_TYPE = None
# A list of the attributes that are used to register the subclasses.
SPEC_ATTRS = ['CLOUD']
# Each derived class has its own copy of the following three variables. They
# are initialized by BaseSpecMetaClass.__init__ and later populated by
# _InitDecoders when the first instance of the derived class is created.
_init_decoders_lock = None # threading.Lock that protects the next two vars.
_decoders = None # dict mapping config option name to ConfigOptionDecoder.
_required_options = None # set of strings. Required config options.
def __init__(self, component_full_name, flag_values=None, **kwargs):
"""Initializes a BaseSpec.
Translates keyword arguments via the class's decoders and assigns the
corresponding instance attribute. Derived classes can register decoders
for additional attributes by overriding _GetOptionDecoderConstructions
and can add support for additional flags by overriding _ApplyFlags.
Args:
component_full_name: string. Fully qualified name of the configurable
component containing the config options.
flag_values: None or flags.FlagValues. Runtime flags that may override
the provided config option values in kwargs.
**kwargs: dict mapping config option names to provided values.
Raises:
errors.Config.MissingOption: If a config option is required, but a value
was not provided in kwargs.
errors.Config.UnrecognizedOption: If an unrecognized config option is
provided with a value in kwargs.
"""
if not self._decoders:
self._InitDecoders()
if flag_values:
self._ApplyFlags(kwargs, flag_values)
missing_options = self._required_options.difference(kwargs)
if missing_options:
raise errors.Config.MissingOption(
'Required options were missing from {0}: {1}.'.format(
component_full_name, ', '.join(sorted(missing_options))))
unrecognized_options = frozenset(kwargs).difference(self._decoders)
if unrecognized_options:
raise errors.Config.UnrecognizedOption(
'Unrecognized options were found in {0}: {1}.'.format(
component_full_name, ', '.join(sorted(unrecognized_options))))
self._DecodeAndInit(component_full_name, kwargs, self._decoders,
flag_values)
@classmethod
def _InitDecoders(cls):
"""Creates a ConfigOptionDecoder for each config option.
Populates cls._decoders and cls._required_options.
"""
with cls._init_decoders_lock:
if not cls._decoders:
constructions = cls._GetOptionDecoderConstructions()
for option, decoder_construction in sorted(
six.iteritems(constructions)):
decoder_class, init_args = decoder_construction
decoder = decoder_class(option=option, **init_args)
cls._decoders[option] = decoder
if decoder.required:
cls._required_options.add(option)
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
pass
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Can be overridden by derived classes to add options or impose additional
requirements on existing options.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
return {}
def _DecodeAndInit(self, component_full_name, config, decoders, flag_values):
"""Initializes spec attributes from provided config option values.
Args:
component_full_name: string. Fully qualified name of the configurable
component containing the config options.
config: dict mapping option name string to option value.
decoders: OrderedDict mapping option name string to ConfigOptionDecoder.
flag_values: flags.FlagValues. Runtime flags that may override provided
config option values. These flags have already been applied to the
current config, but they may be passed to the decoders for propagation
to deeper spec constructors.
"""
assert isinstance(decoders, collections.OrderedDict), (
'decoders must be an OrderedDict. The order in which options are '
'decoded must be guaranteed.')
for option, decoder in six.iteritems(decoders):
if option in config:
value = decoder.Decode(config[option], component_full_name, flag_values)
else:
value = decoder.default
setattr(self, option, value)
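# Hedged illustration (added, not part of the original module): a derived spec
# typically registers itself by defining SPEC_TYPE plus values for SPEC_ATTRS
# and describing its options via _GetOptionDecoderConstructions. The decoder
# class name below is an assumption for illustration only.
#
#   class ExampleVmSpec(BaseSpec):
#       SPEC_TYPE = 'BaseVmSpec'
#       CLOUD = 'ExampleCloud'
#
#       @classmethod
#       def _GetOptionDecoderConstructions(cls):
#           return {'zone': (option_decoders.StringDecoder, {'default': None})}
#
# GetSpecClass(BaseVmSpec, CLOUD='ExampleCloud') would then return ExampleVmSpec.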
|
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.misc import utilcmds
from qutebrowser.api import cmdutils
from qutebrowser.utils import objreg
def test_repeat_command_initial(mocker, mode_manager):
"""Test repeat_command first-time behavior.
If :repeat-command is called initially, it should err, because there's
nothing to repeat.
"""
objreg_mock = mocker.patch('qutebrowser.misc.utilcmds.objreg')
objreg_mock.get.return_value = mode_manager
with pytest.raises(cmdutils.CommandError,
match="You didn't do anything yet."):
utilcmds.repeat_command(win_id=0)
class FakeWindow:
"""Mock class for window_only."""
def __init__(self, deleted=False):
self.closed = False
self.deleted = deleted
def close(self):
"""Flag as closed."""
self.closed = True
def test_window_only(mocker, monkeypatch):
"""Verify that window_only doesn't close the current or deleted windows."""
test_windows = {0: FakeWindow(), 1: FakeWindow(True), 2: FakeWindow()}
winreg_mock = mocker.patch('qutebrowser.misc.utilcmds.objreg')
winreg_mock.window_registry = test_windows
sip_mock = mocker.patch('qutebrowser.misc.utilcmds.sip')
sip_mock.isdeleted.side_effect = lambda window: window.deleted
utilcmds.window_only(current_win_id=0)
assert not test_windows[0].closed
assert not test_windows[1].closed
assert test_windows[2].closed
@pytest.fixture
def tabbed_browser(stubs, win_registry):
tb = stubs.TabbedBrowserStub()
objreg.register('tabbed-browser', tb, scope='window', window=0)
yield tb
objreg.delete('tabbed-browser', scope='window', window=0)
def test_version(tabbed_browser, qapp):
utilcmds.version(win_id=0)
assert tabbed_browser.loaded_url == QUrl('qute://version/')
|
import random
import string
from django.core.exceptions import ValidationError
from django.db import models
class Author(models.Model):
name = models.CharField(max_length=100)
birthday = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
def full_clean(self, exclude=None, validate_unique=True):
super().full_clean(exclude, validate_unique)
if exclude is None:
exclude = []
else:
exclude = list(exclude)
if 'name' not in exclude and self.name == '123':
raise ValidationError({'name': "'123' is not a valid value"})
class Category(models.Model):
name = models.CharField(
max_length=100,
unique=True,
)
def __str__(self):
return self.name
class Book(models.Model):
name = models.CharField('Book name', max_length=100)
author = models.ForeignKey(Author, blank=True, null=True, on_delete=models.CASCADE)
author_email = models.EmailField('Author email', max_length=75, blank=True)
imported = models.BooleanField(default=False)
published = models.DateField('Published', blank=True, null=True)
published_time = models.TimeField('Time published', blank=True, null=True)
price = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
added = models.DateTimeField(blank=True, null=True)
categories = models.ManyToManyField(Category, blank=True)
def __str__(self):
return self.name
class Parent(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Child(models.Model):
parent = models.ForeignKey(Parent, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
def __str__(self):
return '%s - child of %s' % (self.name, self.parent.name)
class Profile(models.Model):
user = models.OneToOneField('auth.User', on_delete=models.CASCADE)
is_private = models.BooleanField(default=True)
class Entry(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
class Role(models.Model):
user = models.OneToOneField('auth.User', on_delete=models.CASCADE, null=True)
class Person(models.Model):
role = models.ForeignKey(Role, on_delete=models.CASCADE)
class WithDefault(models.Model):
name = models.CharField('Default', max_length=75, blank=True,
default='foo_bar')
def random_name():
chars = string.ascii_lowercase
return ''.join(random.SystemRandom().choice(chars) for _ in range(100))
class WithDynamicDefault(models.Model):
name = models.CharField('Dyn Default', max_length=100,
default=random_name)
class WithFloatField(models.Model):
f = models.FloatField(blank=True, null=True)
class EBook(Book):
"""Book proxy model to have a separate admin url access and name"""
class Meta:
proxy = True
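# Hedged usage sketch (not part of the original test models): assuming a
# configured Django settings module and applied migrations for this app,
# Author's overridden full_clean() rejects the literal name '123' unless the
# 'name' field is excluded from validation.
def _author_full_clean_example():
    author = Author(name='123')
    try:
        author.full_clean()  # raises ValidationError because name == '123'
    except ValidationError:
        pass
    author.full_clean(exclude=['name'])  # 'name' is skipped, so no error is raised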
|
from pygal.adapters import none_to_zero, positive
from pygal.graph.graph import Graph
from pygal.util import alter, cut, decorate
class Funnel(Graph):
"""Funnel graph class"""
_adapters = [positive, none_to_zero]
def _value_format(self, value):
"""Format value for dual value display."""
return super(Funnel, self)._value_format(value and abs(value))
def funnel(self, serie):
"""Draw a funnel slice"""
serie_node = self.svg.serie(serie)
fmt = lambda x: '%f %f' % x
for i, poly in enumerate(serie.points):
metadata = serie.metadata.get(i)
val = self._format(serie, i)
funnels = decorate(
self.svg, self.svg.node(serie_node['plot'], class_="funnels"),
metadata
)
alter(
self.svg.node(
funnels,
'polygon',
points=' '.join(map(fmt, map(self.view, poly))),
class_='funnel reactive tooltip-trigger'
), metadata
)
# Poly center from label
x, y = self.view((
self._center(self._x_pos[serie.index]),
sum([point[1] for point in poly]) / len(poly)
))
self._tooltip_data(
funnels, val, x, y, 'centered', self._get_x_label(serie.index)
)
self._static_value(serie_node, val, x, y, metadata)
def _center(self, x):
return x - 1 / (2 * self._order)
def _compute(self):
"""Compute y min and max and y scale and set labels"""
self._x_pos = [
(x + 1) / self._order for x in range(self._order)
] if self._order != 1 else [.5] # Center if only one value
previous = [[self.zero, self.zero] for i in range(self._len)]
for i, serie in enumerate(self.series):
y_height = -sum(serie.safe_values) / 2
all_x_pos = [0] + self._x_pos
serie.points = []
for j, value in enumerate(serie.values):
poly = []
poly.append((all_x_pos[i], previous[j][0]))
poly.append((all_x_pos[i], previous[j][1]))
previous[j][0] = y_height
y_height = previous[j][1] = y_height + value
poly.append((all_x_pos[i + 1], previous[j][1]))
poly.append((all_x_pos[i + 1], previous[j][0]))
serie.points.append(poly)
val_max = max(list(map(sum, cut(self.series, 'values'))) + [self.zero])
self._box.ymin = -val_max
self._box.ymax = val_max
if self.range and self.range[0] is not None:
self._box.ymin = self.range[0]
if self.range and self.range[1] is not None:
self._box.ymax = self.range[1]
def _compute_x_labels(self):
self._x_labels = list(
zip(
self.x_labels and map(self._x_format, self.x_labels) or [
serie.title['title']
if isinstance(serie.title, dict) else serie.title or ''
for serie in self.series
], map(self._center, self._x_pos)
)
)
def _plot(self):
"""Plot the funnel"""
for serie in self.series:
self.funnel(serie)
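# Hedged usage sketch (not part of the pygal source): rendering a small funnel,
# assuming this class is exposed as pygal.Funnel as in released pygal versions.
def _funnel_usage_example():
    import pygal
    chart = pygal.Funnel(title='Conversion funnel')
    chart.add('Visits', [1000])
    chart.add('Signups', [400])
    chart.add('Purchases', [120])
    return chart.render()  # the rendered SVG document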
|
from tqdm import tqdm
from . import units
from .chain_transform import chain_transform
from matchzoo import DataPack
from matchzoo.engine.base_preprocessor import BasePreprocessor
from .build_vocab_unit import build_vocab_unit
tqdm.pandas()
class CDSSMPreprocessor(BasePreprocessor):
"""CDSSM Model preprocessor."""
def __init__(self,
fixed_length_left: int = 10,
fixed_length_right: int = 40,
with_word_hashing: bool = True):
"""
CDSSM Model preprocessor.
The word hashing step can eat up a lot of memory. To work around
this problem, set `with_word_hashing` to `False` and use a
:class:`matchzoo.DynamicDataGenerator` with a
:class:`matchzoo.preprocessors.units.WordHashing`.
:param fixed_length_left: Fixed length of the left text.
:param fixed_length_right: Fixed length of the right text.
:param with_word_hashing: Include a word hashing step if `True`.
Example:
>>> import matchzoo as mz
>>> train_data = mz.datasets.toy.load_data()
>>> test_data = mz.datasets.toy.load_data(stage='test')
>>> cdssm_preprocessor = mz.preprocessors.CDSSMPreprocessor()
>>> train_data_processed = cdssm_preprocessor.fit_transform(
... train_data, verbose=0
... )
>>> type(train_data_processed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
>>> test_data_transformed = cdssm_preprocessor.transform(test_data,
... verbose=0)
>>> type(test_data_transformed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
"""
super().__init__()
self._fixed_length_left = fixed_length_left
self._fixed_length_right = fixed_length_right
self._left_fixedlength_unit = units.FixedLength(
self._fixed_length_left,
pad_value='0', pad_mode='post'
)
self._right_fixedlength_unit = units.FixedLength(
self._fixed_length_right,
pad_value='0', pad_mode='post'
)
self._with_word_hashing = with_word_hashing
def fit(self, data_pack: DataPack, verbose: int = 1):
"""
Fit pre-processing context for transformation.
:param verbose: Verbosity.
:param data_pack: Data_pack to be preprocessed.
:return: class:`CDSSMPreprocessor` instance.
"""
fit_units = self._default_units() + [units.NgramLetter()]
func = chain_transform(fit_units)
data_pack = data_pack.apply_on_text(func, verbose=verbose)
vocab_unit = build_vocab_unit(data_pack, verbose=verbose)
self._context['vocab_unit'] = vocab_unit
vocab_size = len(vocab_unit.state['term_index'])
self._context['input_shapes'] = [
(self._fixed_length_left, vocab_size),
(self._fixed_length_right, vocab_size)
]
return self
def transform(self, data_pack: DataPack, verbose: int = 1) -> DataPack:
"""
Apply transformation on data, create `letter-ngram` representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
data_pack = data_pack.copy()
func = chain_transform(self._default_units())
data_pack.apply_on_text(func, inplace=True, verbose=verbose)
data_pack.apply_on_text(self._left_fixedlength_unit.transform,
mode='left', inplace=True, verbose=verbose)
data_pack.apply_on_text(self._right_fixedlength_unit.transform,
mode='right', inplace=True, verbose=verbose)
post_units = [units.NgramLetter(reduce_dim=False)]
if self._with_word_hashing:
term_index = self._context['vocab_unit'].state['term_index']
post_units.append(units.WordHashing(term_index))
data_pack.apply_on_text(chain_transform(post_units),
inplace=True, verbose=verbose)
return data_pack
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
units.Tokenize(),
units.Lowercase(),
units.PuncRemoval(),
units.StopRemoval(),
]
@property
def with_word_hashing(self):
"""`with_word_hashing` getter."""
return self._with_word_hashing
@with_word_hashing.setter
def with_word_hashing(self, value):
"""`with_word_hashing` setter."""
self._with_word_hashing = value
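# Hedged usage sketch (not part of the original module): deferring word hashing
# as the class docstring suggests. Assumes matchzoo's toy dataset and that the
# hashing unit is rebuilt later from the fitted vocabulary's `term_index`.
def _deferred_word_hashing_example():
    import matchzoo as mz
    preprocessor = mz.preprocessors.CDSSMPreprocessor(with_word_hashing=False)
    train = preprocessor.fit_transform(mz.datasets.toy.load_data(), verbose=0)
    term_index = preprocessor.context['vocab_unit'].state['term_index']
    hashing_unit = mz.preprocessors.units.WordHashing(term_index)
    return train, hashing_unit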
|
import datetime
from sandman2.model import db, Model
INVALID_ACTION_MESSAGE = 'Invalid action'
class User(db.Model, Model):
"""A user of the blogging application."""
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
email = db.Column(db.String, unique=True)
@staticmethod
def is_valid_get(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_post(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_patch(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_put(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
@staticmethod
def is_valid_delete(request, resource):
"""Return error message in all cases (just for testing)."""
return INVALID_ACTION_MESSAGE
class Blog(db.Model, Model):
"""An online weblog."""
__tablename__ = 'blog'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String)
subheader = db.Column(db.String, nullable=True)
creator_id = db.Column(db.Integer, db.ForeignKey('user.id'))
creator = db.relationship(User)
class Post(db.Model, Model):
"""An individual blog post."""
__tablename__ = 'post'
id = db.Column(db.Numeric, primary_key=True)
title = db.Column(db.String)
content = db.Column(db.String)
posted_at = db.Column(db.DateTime, default=datetime.datetime.now)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
author = db.relationship(User)
|
from __future__ import unicode_literals
from lib.data.data import pystrs
from lib.fun.decorator import magic
from lib.fun.fun import range_compatible
def pid4_magic(*args):
"""chinese id card last 4 digit"""
posrule = lambda _: str(_) if _ >= 10 else "0" + str(_)
value1516 = " ".join(posrule(x) for x in range_compatible(1, 100))
post18 = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "X")
value1718 = ""
if pystrs.default_sex == pystrs.sex_range[0]:
rand = ("1", "3", "5", "7", "9")
for _ in rand:
for _p in post18:
value1718 += _ + _p + " "
elif pystrs.default_sex == pystrs.sex_range[1]:
rand = ("0", "2", "4", "6", "8")
for _ in rand:
for _p in post18:
value1718 += _ + _p + " "
elif pystrs.default_sex == pystrs.sex_range[2]:
rand = " ".join(str(_) for _ in range_compatible(0, 10))
for _ in rand.split(" "):
for _p in post18:
value1718 += _ + _p + " "
@magic
def pid4():
for v1516 in value1516.split(" "):
for v1718 in value1718.split(" "):
if v1718 != "":
yield "".join(v1516 + v1718)
|
from __future__ import unicode_literals
from lib.fun.fun import walk_pure_file
from lib.data.data import paths, pystrs, pyoptions
def sedb_tricks():
for line in walk_pure_file(paths.sedbtrick_path, pure=False):
trick = line.split(pyoptions.key_value_split)
if len(trick) == 2:
if trick[0].strip() == pystrs.sedb_trick_prefix:
for _ in trick[1].strip().split(pyoptions.trick_split):
pyoptions.sedb_trick_pre.append(_)
elif trick[0].strip() == pystrs.sedb_trick_suffix:
for _ in trick[1].strip().split(pyoptions.trick_split):
pyoptions.sedb_trick_suf.append(_)
elif trick[0].strip() == pystrs.sedb_trick_middle:
for _ in trick[1].strip().split(pyoptions.trick_split):
pyoptions.sedb_trick_mid.append(_)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from perfkitbenchmarker import errors
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec
from perfkitbenchmarker.providers.azure import flags as azure_flags
import six
class MemoryDecoder(option_decoders.StringDecoder):
"""Verifies and decodes a config option value specifying a memory size."""
_CONFIG_MEMORY_PATTERN = re.compile(r'([0-9.]+)([GM]iB)')
def Decode(self, value, component_full_name, flag_values):
"""Decodes memory size in MiB from a string.
The value specified in the config must be a string representation of the
memory size expressed in MiB or GiB. It must be an integer number of MiB.
Examples: "1280MiB", "7.5GiB".
Args:
value: The value specified in the config.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
int. Memory size in MiB.
Raises:
errors.Config.InvalidValue upon invalid input value.
"""
string = super(MemoryDecoder, self).Decode(value, component_full_name,
flag_values)
match = self._CONFIG_MEMORY_PATTERN.match(string)
if not match:
raise errors.Config.InvalidValue(
'Invalid {0} value: "{1}". Examples of valid values: "1280MiB", '
'"7.5GiB".'.format(self._GetOptionFullName(component_full_name),
string))
try:
memory_value = float(match.group(1))
except ValueError:
raise errors.Config.InvalidValue(
'Invalid {0} value: "{1}". "{2}" is not a valid float.'.format(
self._GetOptionFullName(component_full_name), string,
match.group(1)))
memory_units = match.group(2)
if memory_units == 'GiB':
memory_value *= 1024
memory_mib_int = int(memory_value)
if memory_value != memory_mib_int:
raise errors.Config.InvalidValue(
'Invalid {0} value: "{1}". The specified size must be an integer '
'number of MiB.'.format(self._GetOptionFullName(component_full_name),
string))
return memory_mib_int
class CustomMachineTypeSpec(spec.BaseSpec):
"""Properties of a custom machine type.
Attributes:
cpus: int. Number of vCPUs.
memory: string. Representation of the size of memory, expressed in MiB or
GiB. Must be an integer number of MiB (e.g. "1280MiB", "7.5GiB").
"""
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(CustomMachineTypeSpec, cls)._GetOptionDecoderConstructions()
result.update({'cpus': (option_decoders.IntDecoder, {'min': 1}),
'memory': (MemoryDecoder, {})})
return result
class MachineTypeDecoder(option_decoders.TypeVerifier):
"""Decodes the machine_type option of a VM config."""
def __init__(self, **kwargs):
super(MachineTypeDecoder, self).__init__((six.string_types + (dict,)),
**kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Decodes the machine_type option of a VM config.
Args:
value: Either a string name of a machine type or a dict containing
'cpu' and 'memory' keys describing a custom VM.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
If value is a string, returns it unmodified. Otherwise, returns the
decoded CustomMachineTypeSpec.
Raises:
errors.Config.InvalidValue upon invalid input value.
"""
super(MachineTypeDecoder, self).Decode(value, component_full_name,
flag_values)
if isinstance(value, six.string_types):
return value
return CustomMachineTypeSpec(self._GetOptionFullName(component_full_name),
flag_values=flag_values, **value)
class AzureMachineTypeDecoder(option_decoders.TypeVerifier):
"""Decodes the machine_type option of a VM config."""
def __init__(self, **kwargs):
super(AzureMachineTypeDecoder, self).__init__(six.string_types + (dict,),
**kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Decodes the machine_type option of a VM config.
Args:
value: Either a string name of a machine type or a dict containing
'compute_units' and 'tier' keys describing a machine type.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
If value is a string, returns it unmodified. Otherwise, returns the
decoded AzurePerformanceTierDecoder.
Raises:
errors.Config.InvalidValue upon invalid input value.
"""
super(AzureMachineTypeDecoder, self).Decode(value, component_full_name,
flag_values)
if isinstance(value, six.string_types):
return value
return AzurePerformanceTierDecoder(
self._GetOptionFullName(component_full_name),
flag_values=flag_values, **value)
class AzurePerformanceTierDecoder(spec.BaseSpec):
"""Properties of a An Azure custom machine type.
Attributes:
compute_units: int. Number of compute units.
tier: Basic, Standard or Premium
"""
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(
AzurePerformanceTierDecoder, cls)._GetOptionDecoderConstructions()
# https://docs.microsoft.com/en-us/azure/virtual-machines/windows/acu
# https://docs.microsoft.com/en-us/azure/sql-database/sql-database-service-tiers
result.update({'compute_units': (option_decoders.IntDecoder, {'min': 50}),
'tier': (option_decoders.EnumDecoder, {
'valid_values': azure_flags.VALID_TIERS})})
return result
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values.
May be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
if flag_values['azure_tier'].present:
config_values['tier'] = flag_values.azure_tier
if flag_values['azure_compute_units'].present:
config_values['compute_units'] = flag_values.azure_compute_units
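# Hedged standalone sketch (not part of PerfKit Benchmarker): the same
# MiB/GiB-to-MiB conversion that MemoryDecoder performs, minus the
# option-decoder plumbing, e.g. "7.5GiB" -> 7680 and "1280MiB" -> 1280.
def _parse_memory_mib(value):
    match = re.match(r'([0-9.]+)([GM]iB)', value)
    if not match:
        raise ValueError('expected e.g. "1280MiB" or "7.5GiB", got %r' % value)
    amount = float(match.group(1))
    if match.group(2) == 'GiB':
        amount *= 1024
    if amount != int(amount):
        raise ValueError('%r is not an integer number of MiB' % value)
    return int(amount)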
|
from collections import namedtuple
import os
import statistics
import sys
import tempfile
import time
from unittest_mixins.mixins import make_file
import coverage
from coverage.backward import import_local_file
from tests.helpers import SuperModuleCleaner
class StressResult(namedtuple('StressResult', ['files', 'calls', 'lines', 'baseline', 'covered'])):
@property
def overhead(self):
return self.covered - self.baseline
TEST_FILE = """\
def parent(call_count, line_count):
for _ in range(call_count):
child(line_count)
def child(line_count):
for i in range(line_count):
x = 1
"""
def mk_main(file_count, call_count, line_count):
lines = []
lines.extend(
"import test{}".format(idx) for idx in range(file_count)
)
lines.extend(
"test{}.parent({}, {})".format(idx, call_count, line_count) for idx in range(file_count)
)
return "\n".join(lines)
class StressTest(object):
def __init__(self):
self.module_cleaner = SuperModuleCleaner()
def _run_scenario(self, file_count, call_count, line_count):
self.module_cleaner.clean_local_file_imports()
for idx in range(file_count):
make_file('test{}.py'.format(idx), TEST_FILE)
make_file('testmain.py', mk_main(file_count, call_count, line_count))
# Run it once just to get the disk caches loaded up.
import_local_file("testmain")
self.module_cleaner.clean_local_file_imports()
# Run it to get the baseline time.
start = time.perf_counter()
import_local_file("testmain")
baseline = time.perf_counter() - start
self.module_cleaner.clean_local_file_imports()
# Run it to get the covered time.
start = time.perf_counter()
cov = coverage.Coverage()
cov.start()
try: # pragma: nested
# Import the Python file, executing it.
import_local_file("testmain")
finally: # pragma: nested
# Stop coverage.py.
covered = time.perf_counter() - start
stats = cov._collector.tracers[0].get_stats()
if stats:
stats = stats.copy()
cov.stop()
return baseline, covered, stats
def _compute_overhead(self, file_count, call_count, line_count):
baseline, covered, stats = self._run_scenario(file_count, call_count, line_count)
#print("baseline = {:.2f}, covered = {:.2f}".format(baseline, covered))
# Empirically determined to produce the same numbers as the collected
# stats from get_stats(), with Python 3.6.
actual_file_count = 17 + file_count
actual_call_count = file_count * call_count + 156 * file_count + 85
actual_line_count = (
2 * file_count * call_count * line_count +
3 * file_count * call_count +
769 * file_count +
345
)
if stats is not None:
assert actual_file_count == stats['files']
assert actual_call_count == stats['calls']
assert actual_line_count == stats['lines']
print("File counts", file_count, actual_file_count, stats['files'])
print("Call counts", call_count, actual_call_count, stats['calls'])
print("Line counts", line_count, actual_line_count, stats['lines'])
print()
return StressResult(
actual_file_count,
actual_call_count,
actual_line_count,
baseline,
covered,
)
fixed = 200
numlo = 100
numhi = 100
step = 50
runs = 5
def count_operations(self):
def operations(thing):
for _ in range(self.runs):
for n in range(self.numlo, self.numhi+1, self.step):
kwargs = {
"file_count": self.fixed,
"call_count": self.fixed,
"line_count": self.fixed,
}
kwargs[thing+"_count"] = n
yield kwargs['file_count'] * kwargs['call_count'] * kwargs['line_count']
ops = sum(sum(operations(thing)) for thing in ["file", "call", "line"])
print("{:.1f}M operations".format(ops/1e6))
def check_coefficients(self):
# For checking the calculation of actual stats:
for f in range(1, 6):
for c in range(1, 6):
for l in range(1, 6):
_, _, stats = self._run_scenario(f, c, l)
print("{0},{1},{2},{3[files]},{3[calls]},{3[lines]}".format(f, c, l, stats))
def stress_test(self):
# For checking the overhead for each component:
def time_thing(thing):
per_thing = []
pct_thing = []
for _ in range(self.runs):
for n in range(self.numlo, self.numhi+1, self.step):
kwargs = {
"file_count": self.fixed,
"call_count": self.fixed,
"line_count": self.fixed,
}
kwargs[thing+"_count"] = n
res = self._compute_overhead(**kwargs)
per_thing.append(res.overhead / getattr(res, "{}s".format(thing)))
pct_thing.append(res.covered / res.baseline * 100)
out = "Per {}: ".format(thing)
out += "mean = {:9.3f}us, stddev = {:8.3f}us, ".format(
statistics.mean(per_thing)*1e6, statistics.stdev(per_thing)*1e6
)
out += "min = {:9.3f}us, ".format(min(per_thing)*1e6)
out += "pct = {:6.1f}%, stddev = {:6.1f}%".format(
statistics.mean(pct_thing), statistics.stdev(pct_thing)
)
print(out)
time_thing("file")
time_thing("call")
time_thing("line")
if __name__ == '__main__':
with tempfile.TemporaryDirectory(prefix="coverage_stress_") as tempdir:
print("Working in {}".format(tempdir))
os.chdir(tempdir)
sys.path.insert(0, ".")
StressTest().stress_test()
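# Hedged illustration (not part of coverage.py): the empirically derived
# formulas used in _compute_overhead, pulled out so the constants are easy to
# sanity-check. The constants are specific to the Python 3.6 measurements
# mentioned above.
def _expected_stats(file_count, call_count, line_count):
    files = 17 + file_count
    calls = file_count * call_count + 156 * file_count + 85
    lines = (
        2 * file_count * call_count * line_count
        + 3 * file_count * call_count
        + 769 * file_count
        + 345
    )
    return files, calls, lines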
|
import re
from datetime import timedelta
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from weblate.checks.base import SourceCheck
from weblate.utils.state import STATE_EMPTY, STATE_FUZZY
# Matches "(s)" followed by a non-word character or by the end of the string
PLURAL_MATCH = re.compile(r"\(s\)(\W|\Z)")
class OptionalPluralCheck(SourceCheck):
"""Check for not used plural form."""
check_id = "optional_plural"
name = _("Unpluralised")
description = _("The string is used as plural, but not using plural forms")
def check_source_unit(self, source, unit):
if len(source) > 1:
return False
return len(PLURAL_MATCH.findall(source[0])) > 0
class EllipsisCheck(SourceCheck):
"""Check for using "..." instead of "…"."""
check_id = "ellipsis"
name = _("Ellipsis")
description = _(
"The string uses three dots (...) " "instead of an ellipsis character (…)"
)
def check_source_unit(self, source, unit):
return "..." in source[0]
class MultipleFailingCheck(SourceCheck):
"""Check whether there are more failing checks on this translation."""
check_id = "multiple_failures"
name = _("Multiple failing checks")
description = _("The translations in several languages have failing checks")
def check_source_unit(self, source, unit):
from weblate.checks.models import Check
related = Check.objects.filter(unit__in=unit.unit_set.exclude(pk=unit.id))
return related.count() >= 2
class LongUntranslatedCheck(SourceCheck):
check_id = "long_untranslated"
name = _("Long untranslated")
description = _("The string has not been translated for a long time")
def check_source_unit(self, source, unit):
if unit.timestamp > timezone.now() - timedelta(days=90):
return False
states = list(unit.unit_set.values_list("state", flat=True))
total = len(states)
not_translated = states.count(STATE_EMPTY) + states.count(STATE_FUZZY)
translated_percent = 100 * (total - not_translated) / total
return (
total
and 2 * translated_percent
< unit.translation.component.stats.lazy_translated_percent
)
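# Hedged illustration (not part of Weblate): strings the PLURAL_MATCH pattern
# above does and does not flag, runnable without the Django/Weblate machinery.
def _plural_match_examples():
    assert PLURAL_MATCH.findall("Remove file(s)?")        # "(s)" before a non-word char
    assert PLURAL_MATCH.findall("Delete the message(s)")  # "(s)" at the end of the string
    assert not PLURAL_MATCH.findall("offset(s)1")         # followed by a word char: no match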
|
from stash.tests.stashtest import StashTestCase
class CowsayTests(StashTestCase):
"""tests for cowsay"""
def test_help(self):
"""test help output"""
output = self.run_command("cowsay --help", exitcode=0)
self.assertIn("cowsay", output)
self.assertIn("--help", output)
self.assertIn("usage:", output)
def test_singleline_1(self):
"""test for correct text in output"""
output = self.run_command("cowsay test", exitcode=0)
self.assertIn("test", output)
self.assertNotIn("Hello, World!", output)
self.assertEqual(output.count("<"), 1)
self.assertEqual(output.count(">"), 1)
def test_singleline_2(self):
"""test for correct text in output"""
output = self.run_command("cowsay Hello, World!", exitcode=0)
self.assertIn("Hello, World!", output)
self.assertNotIn("test", output)
self.assertEqual(output.count("<"), 1)
self.assertEqual(output.count(">"), 1)
def test_stdin_read(self):
"""test 'echo test | cowsay' printing 'test'"""
output = self.run_command("echo test | cowsay", exitcode=0)
self.assertIn("test", output)
self.assertNotIn("Hello, World!", output)
def test_stdin_ignore(self):
"""test 'echo test | cowsay Hello, World!' printing 'Hello World!'"""
output = self.run_command("echo test | cowsay Hello, World!", exitcode=0)
self.assertIn("Hello, World!", output)
self.assertNotIn("test", output)
def test_multiline_1(self):
"""test for correct multiline output"""
output = self.run_command("cowsay Hello,\\nWorld!", exitcode=0)
self.assertIn("Hello,", output)
self.assertIn("World!", output)
self.assertNotIn("Hello,\nWorld!", output) # text should be splitted allong the lines
self.assertIn("/", output)
self.assertIn("\\", output)
self.assertNotIn("<", output)
self.assertNotIn(">", output)
def test_multiline_2(self):
"""test for correct multiline output"""
output = self.run_command("cowsay Hello,\\nWorld!\\nPython4Ever", exitcode=0)
self.assertIn("Hello,", output)
self.assertIn("World!", output)
self.assertIn("Python4Ever", output)
self.assertNotIn("Hello,\nWorld!\nPython4Ever", output) # text should be splitted allong the lines
self.assertIn("/", output)
self.assertIn("\\", output)
self.assertIn("|", output)
self.assertNotIn("<", output)
self.assertNotIn(">", output)
|
from typing import Dict, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN, const
CONDITION_TYPES = {"is_hvac_mode", "is_preset_mode"}
HVAC_MODE_CONDITION = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): "is_hvac_mode",
vol.Required(const.ATTR_HVAC_MODE): vol.In(const.HVAC_MODES),
}
)
PRESET_MODE_CONDITION = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): "is_preset_mode",
vol.Required(const.ATTR_PRESET_MODE): str,
}
)
CONDITION_SCHEMA = vol.Any(HVAC_MODE_CONDITION, PRESET_MODE_CONDITION)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions for Climate devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_hvac_mode",
}
)
if (
state
and state.attributes[ATTR_SUPPORTED_FEATURES] & const.SUPPORT_PRESET_MODE
):
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_preset_mode",
}
)
return conditions
@callback
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == "is_hvac_mode":
attribute = const.ATTR_HVAC_MODE
else:
attribute = const.ATTR_PRESET_MODE
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
state = hass.states.get(config[ATTR_ENTITY_ID])
return state and state.attributes.get(attribute) == config[attribute]
return test_is_state
async def async_get_condition_capabilities(hass, config):
"""List condition capabilities."""
state = hass.states.get(config[CONF_ENTITY_ID])
condition_type = config[CONF_TYPE]
fields = {}
if condition_type == "is_hvac_mode":
hvac_modes = state.attributes[const.ATTR_HVAC_MODES] if state else []
fields[vol.Required(const.ATTR_HVAC_MODE)] = vol.In(hvac_modes)
elif condition_type == "is_preset_mode":
if state:
preset_modes = state.attributes.get(const.ATTR_PRESET_MODES, [])
else:
preset_modes = []
fields[vol.Required(const.ATTR_PRESET_MODE)] = vol.In(preset_modes)
return {"extra_fields": vol.Schema(fields)}
|
from django.test import TestCase
from mots_vides import stop_words
from zinnia import comparison
from zinnia.comparison import CachedModelVectorBuilder
from zinnia.comparison import ModelVectorBuilder
from zinnia.comparison import pearson_score
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_entry_signals
class ComparisonTestCase(TestCase):
"""Test cases for comparison tools"""
def setUp(self):
english_stop_words = stop_words('english')
self.original_stop_words = comparison.STOP_WORDS
comparison.STOP_WORDS = english_stop_words
disconnect_entry_signals()
def tearDown(self):
comparison.STOP_WORDS = self.original_stop_words
def test_raw_dataset(self):
params = {'title': 'My entry 1', 'content': 'My content 1.',
'tags': 'zinnia, test', 'slug': 'my-entry-1'}
Entry.objects.create(**params)
params = {'title': 'My entry 2', 'content': 'My content 2.',
'tags': 'zinnia, test', 'slug': 'my-entry-2'}
Entry.objects.create(**params)
v = ModelVectorBuilder(queryset=Entry.objects.all(), fields=['title'])
with self.assertNumQueries(1):
self.assertEqual(len(v.raw_dataset), 2)
self.assertEqual(sorted(v.raw_dataset.values()),
[['entry'], ['entry']])
v = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'content', 'tags'])
self.assertEqual(sorted(v.raw_dataset.values()),
[['entry', 'content', 'zinnia', 'test'],
['entry', 'content', 'zinnia', 'test']])
v = ModelVectorBuilder(queryset=Entry.objects.all().order_by('-pk'),
fields=['title'], limit=1)
self.assertEqual(list(v.raw_dataset.values()), [['entry']])
def test_column_dataset(self):
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'excerpt', 'content'])
with self.assertNumQueries(1):
self.assertEqual(vectors.dataset, {})
self.assertEqual(vectors.columns, [])
params = {'title': 'My entry 1 (01)', 'content':
'This is my first content 1 (01)',
'slug': 'my-entry-1'}
e1 = Entry.objects.create(**params)
params = {'title': 'My entry 2 (02)', 'content':
'My second content entry 2 (02)',
'slug': 'my-entry-2'}
e2 = Entry.objects.create(**params)
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'excerpt', 'content'])
self.assertEqual(vectors.columns, ['01', '02', 'content', 'entry'])
self.assertEqual(vectors.dataset[e1.pk], [2, 0, 1, 1])
self.assertEqual(vectors.dataset[e2.pk], [0, 2, 1, 2])
def test_pearson_score(self):
self.assertRaises(ZeroDivisionError, pearson_score,
[42], [42])
self.assertRaises(ZeroDivisionError, pearson_score,
[2, 2, 2], [1, 1, 1])
self.assertEqual(pearson_score([0, 1, 2], [0, 1, 2]), 1.0)
self.assertEqual(pearson_score([0, 1, 3], [0, 1, 2]),
0.9819805060619656)
self.assertEqual(pearson_score([0, 1, 2], [0, 1, 3]),
0.9819805060619656)
self.assertEqual(pearson_score([2, 0, 0, 0], [0, 1, 1, 1]),
-1)
def test_compute_related(self):
class VirtualVectorBuilder(ModelVectorBuilder):
dataset = {1: [1, 2, 3],
2: [1, 5, 7],
3: [2, 8, 3],
4: [1, 8, 3],
5: [7, 3, 5]}
v = VirtualVectorBuilder()
self.assertEqual(v.compute_related('error'), [])
self.assertEqual(v.compute_related(1),
[(2, 0.9819805060619659),
(4, 0.2773500981126146),
(3, 0.15554275420956382),
(5, -0.5)])
self.assertEqual(v.compute_related(2),
[(1, 0.9819805060619659),
(4, 0.4539206495016019),
(3, 0.33942211665106525),
(5, -0.6546536707079772)])
self.assertEqual(v.compute_related(3),
[(4, 0.9922153572367627),
(2, 0.33942211665106525),
(1, 0.15554275420956382),
(5, -0.9332565252573828)])
self.assertEqual(v.compute_related(4),
[(3, 0.9922153572367627),
(2, 0.4539206495016019),
(1, 0.2773500981126146),
(5, -0.9707253433941511)])
v.dataset[2] = [0, 0, 0]
self.assertEqual(v.compute_related(1),
[(4, 0.2773500981126146),
(3, 0.15554275420956382),
(5, -0.5)])
def test_get_related(self):
params = {'title': 'My entry 01', 'content':
'This is my first content 01',
'slug': 'my-entry-1'}
e1 = Entry.objects.create(**params)
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'content'])
with self.assertNumQueries(1):
self.assertEqual(vectors.get_related(e1, 10), [])
params = {'title': 'My entry 02', 'content':
'My second content entry 02',
'slug': 'my-entry-2'}
e2 = Entry.objects.create(**params)
with self.assertNumQueries(0):
self.assertEqual(vectors.get_related(e1, 10), [])
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'content'])
with self.assertNumQueries(2):
self.assertEqual(vectors.get_related(e1, 10), [e2])
with self.assertNumQueries(1):
self.assertEqual(vectors.get_related(e1, 10), [e2])
def test_cached_vector_builder(self):
params = {'title': 'My entry number 1',
'content': 'My content number 1',
'slug': 'my-entry-1'}
e1 = Entry.objects.create(**params)
v = CachedModelVectorBuilder(
queryset=Entry.objects.all(), fields=['title', 'content'])
with self.assertNumQueries(1):
self.assertEqual(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEqual(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEqual(v.get_related(e1, 5), [])
for i in range(1, 3):
params = {'title': 'My entry %s' % i,
'content': 'My content %s' % i,
'slug': 'my-entry-%s' % i}
Entry.objects.create(**params)
v = CachedModelVectorBuilder(
queryset=Entry.objects.all(), fields=['title', 'content'])
with self.assertNumQueries(0):
self.assertEqual(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEqual(v.get_related(e1, 5), [])
v.cache_flush()
with self.assertNumQueries(2):
self.assertEqual(len(v.get_related(e1, 5)), 2)
with self.assertNumQueries(0):
self.assertEqual(len(v.get_related(e1, 5)), 2)
with self.assertNumQueries(0):
self.assertEqual(len(v.columns), 3)
v = CachedModelVectorBuilder(
queryset=Entry.objects.all(), fields=['title', 'content'])
with self.assertNumQueries(0):
self.assertEqual(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEqual(len(v.get_related(e1, 5)), 2)
def test_raw_clean(self):
v = ModelVectorBuilder(queryset=Entry.objects.none(), fields=['title'])
self.assertEqual(v.raw_clean('<p>HTML Content</p>'),
['html', 'content'])
self.assertEqual(v.raw_clean('<p>An HTML Content</p>'),
['html', 'content'])
self.assertEqual(v.raw_clean('<p>An HTML Content 2</p>'),
['html', 'content'])
self.assertEqual(v.raw_clean('<p>!HTML Content ?</p>'),
['html', 'content'])
|
import asyncio
from copy import deepcopy
import logging
from blinkpy.auth import Auth
from blinkpy.blinkpy import Blink
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.components.blink.const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
PLATFORMS,
SERVICE_REFRESH,
SERVICE_SAVE_VIDEO,
SERVICE_SEND_PIN,
)
from homeassistant.const import CONF_FILENAME, CONF_NAME, CONF_PIN, CONF_SCAN_INTERVAL
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
SERVICE_SAVE_VIDEO_SCHEMA = vol.Schema(
{vol.Required(CONF_NAME): cv.string, vol.Required(CONF_FILENAME): cv.string}
)
SERVICE_SEND_PIN_SCHEMA = vol.Schema({vol.Optional(CONF_PIN): cv.string})
def _blink_startup_wrapper(hass, entry):
"""Startup wrapper for blink."""
blink = Blink()
auth_data = deepcopy(dict(entry.data))
blink.auth = Auth(auth_data, no_prompt=True)
blink.refresh_rate = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
if blink.start():
blink.setup_post_verify()
elif blink.auth.check_key_required():
_LOGGER.debug("Attempting a reauth flow")
_reauth_flow_wrapper(hass, auth_data)
return blink
def _reauth_flow_wrapper(hass, data):
"""Reauth flow wrapper."""
hass.add_job(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=data
)
)
persistent_notification.async_create(
hass,
"Blink configuration migrated to a new version. Please go to the integrations page to re-configure (such as sending a new 2FA key).",
"Blink Migration",
)
async def async_setup(hass, config):
"""Set up a Blink component."""
hass.data[DOMAIN] = {}
return True
async def async_migrate_entry(hass, entry):
"""Handle migration of a previous version config entry."""
data = {**entry.data}
if entry.version == 1:
data.pop("login_response", None)
await hass.async_add_executor_job(_reauth_flow_wrapper, hass, data)
return False
return True
async def async_setup_entry(hass, entry):
"""Set up Blink via config entry."""
_async_import_options_from_data_if_missing(hass, entry)
hass.data[DOMAIN][entry.entry_id] = await hass.async_add_executor_job(
_blink_startup_wrapper, hass, entry
)
if not hass.data[DOMAIN][entry.entry_id].available:
raise ConfigEntryNotReady
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
def blink_refresh(event_time=None):
"""Call blink to refresh info."""
hass.data[DOMAIN][entry.entry_id].refresh(force_cache=True)
async def async_save_video(call):
"""Call save video service handler."""
await async_handle_save_video_service(hass, entry, call)
def send_pin(call):
"""Call blink to send new pin."""
pin = call.data[CONF_PIN]
hass.data[DOMAIN][entry.entry_id].auth.send_auth_key(
hass.data[DOMAIN][entry.entry_id],
pin,
)
hass.services.async_register(DOMAIN, SERVICE_REFRESH, blink_refresh)
hass.services.async_register(
DOMAIN, SERVICE_SAVE_VIDEO, async_save_video, schema=SERVICE_SAVE_VIDEO_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_SEND_PIN, send_pin, schema=SERVICE_SEND_PIN_SCHEMA
)
return True
@callback
def _async_import_options_from_data_if_missing(hass, entry):
options = dict(entry.options)
if CONF_SCAN_INTERVAL not in entry.options:
options[CONF_SCAN_INTERVAL] = entry.data.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
hass.config_entries.async_update_entry(entry, options=options)
async def async_unload_entry(hass, entry):
"""Unload Blink entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if not unload_ok:
return False
hass.data[DOMAIN].pop(entry.entry_id)
if len(hass.data[DOMAIN]) != 0:
return True
hass.services.async_remove(DOMAIN, SERVICE_REFRESH)
hass.services.async_remove(DOMAIN, SERVICE_SAVE_VIDEO)
hass.services.async_remove(DOMAIN, SERVICE_SEND_PIN)
return True
async def async_handle_save_video_service(hass, entry, call):
"""Handle save video service calls."""
camera_name = call.data[CONF_NAME]
video_path = call.data[CONF_FILENAME]
if not hass.config.is_allowed_path(video_path):
_LOGGER.error("Can't write %s, no access to path!", video_path)
return
def _write_video(camera_name, video_path):
"""Call video write."""
all_cameras = hass.data[DOMAIN][entry.entry_id].cameras
if camera_name in all_cameras:
all_cameras[camera_name].video_to_file(video_path)
try:
await hass.async_add_executor_job(_write_video, camera_name, video_path)
except OSError as err:
_LOGGER.error("Can't write image to file: %s", err)
|
from flask import current_app
from lemur.plugins.base import plugins
class Metrics(object):
"""
:param app: The Flask application object. Defaults to None.
"""
_providers = []
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Initializes the application with the extension.
:param app: The Flask application object.
"""
self._providers = app.config.get("METRIC_PROVIDERS", [])
def send(self, metric_name, metric_type, metric_value, *args, **kwargs):
for provider in self._providers:
current_app.logger.debug(
"Sending metric '{metric}' to the {provider} provider.".format(
metric=metric_name, provider=provider
)
)
p = plugins.get(provider)
p.submit(metric_name, metric_type, metric_value, *args, **kwargs)
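# Hedged usage sketch (not part of Lemur): wiring the extension into a Flask
# app. METRIC_PROVIDERS is left empty here so send() is a no-op; with real
# provider plugin slugs configured, each plugin's submit() would be called.
def _metrics_usage_example():
    from flask import Flask
    app = Flask(__name__)
    app.config["METRIC_PROVIDERS"] = []
    metrics = Metrics(app)
    with app.app_context():
        metrics.send("certificates_issued", "counter", 1)
    return metrics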
|