from unittest import TestCase
import pandas as pd
from scattertext.CorpusFromParsedDocuments import CorpusFromParsedDocuments
from scattertext.WhitespaceNLP import whitespace_nlp
from scattertext.representations.Word2VecFromParsedCorpus import Word2VecFromParsedCorpus, \
Word2VecFromParsedCorpusBigrams
from scattertext.test.test_corpusFromPandas import get_docs_categories
class TestWord2VecFromParsedCorpus(TestCase):
@classmethod
def setUp(cls):
cls.categories, cls.documents = get_docs_categories()
cls.parsed_docs = []
for doc in cls.documents:
cls.parsed_docs.append(whitespace_nlp(doc))
cls.df = pd.DataFrame({'category': cls.categories,
'author': ['a', 'a', 'c', 'c', 'c',
'c', 'd', 'd', 'e', 'e'],
'parsed': cls.parsed_docs,
'document_lengths': [len(doc) for doc in cls.documents]})
cls.corpus = CorpusFromParsedDocuments(cls.df, 'category', 'parsed').build()
def test_make(self):
gensim_is_present_and_working = False
try:
from gensim.models import word2vec
gensim_is_present_and_working = True
except Exception:
pass
if gensim_is_present_and_working:
with self.assertRaises(Exception):
Word2VecFromParsedCorpus(3)
Word2VecFromParsedCorpus(self.corpus)
Word2VecFromParsedCorpus(self.corpus, word2vec.Word2Vec())
def test_train(self):
gensim_is_present_and_working = False
try:
from gensim.models import word2vec
gensim_is_present_and_working = True
except Exception:
pass
if gensim_is_present_and_working:
Word2VecFromParsedCorpus(self.corpus).train()
def test_bigrams(self):
gensim_is_present_and_working = False
try:
from gensim.models import word2vec
gensim_is_present_and_working = True
except Exception:
pass
if gensim_is_present_and_working:
Word2VecFromParsedCorpusBigrams(self.corpus).train()
|
import pandas as pd
from scipy.stats import beta, norm
from scattertext.termranking.OncePerDocFrequencyRanker import OncePerDocFrequencyRanker
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class BetaPosterior(CorpusBasedTermScorer):
'''
Beta Posterior Scoring. Code adapted from
https://github.com/serinachang5/gender-associations/blob/master/score_words.py (Chang 2019).
Serina Chang and Kathleen McKeown. Automatically Inferring Gender Associations from Language. To appear
in Empirical Methods in Natural Language Processing (EMNLP) 2019 (Short Paper).
Method was originally introduced in
David Bamman, Jacob Eisenstein, and Tyler Schnoebelen. GENDER IDENTITY AND LEXICAL VARIATION IN SOCIAL MEDIA. 2014.
Direct quote from Bamman (2014)
Identifying gender markers. Our goal is to identify words that are used with
unusual frequency by authors of a single gender. Assume that each term has an
unknown likelihood f_i, indicating the proportion of authors who use term i. For
gender j, there are N_j authors, of whom k_ji use term i; the total count of the term i
is k_i. We ask whether the count k_ji is significantly larger than expected. Assuming
a non-informative prior distribution on f_i, the posterior distribution (conditioned on
the observations k_i and N) is Beta(k_i, N - k_i). The distribution of the gender-specific
counts can be described by an integral over all possible f_i. This integral defines the
Beta-Binomial distribution (Gelman, Carlin, Stern, and Rubin 2004), and has a
closed form solution. We mark a term as having a significant gender association if
the cumulative distribution at the count k_ji is p < .05.
```
>>> term_scorer = BetaPosterior(corpus).set_categories('Positive', ['Negative'], ['Plot']).get_score_df()
```
'''
def __init__(self, corpus, *args, **kwargs):
CorpusBasedTermScorer.__init__(self, corpus, *args, **kwargs)
self.set_term_ranker(OncePerDocFrequencyRanker)
def _set_scorer_args(self, **kwargs):
pass
def get_scores(self, *args):
return self.get_score_df()['score']
def get_score_df(self):
'''
:return: pd.DataFrame
'''
term_freq_df = self.term_ranker_.get_ranks('')
cat_freq_df = pd.DataFrame({
'cat': term_freq_df[self.category_name],
'ncat': term_freq_df[self.not_category_names].sum(axis=1),
})
if self.neutral_category_names:
cat_freq_df['neut'] = term_freq_df[self.neutral_category_names].sum(axis=1)
cat_freq_df['all'] = cat_freq_df.sum(axis=1)
N = cat_freq_df['all'].sum()
catN = cat_freq_df['cat'].sum()
ncatN = cat_freq_df['ncat'].sum()
cat_freq_df['cat_pct'] = cat_freq_df['cat'] * 1. / catN
cat_freq_df['ncat_pct'] = cat_freq_df['ncat'] * 1. / ncatN
def row_beta_posterior(row):
return pd.Series({
'cat_p': beta(row['all'], N - row['all']).sf(row['cat'] * 1. / catN),
'ncat_p': beta(row['all'], N - row['all']).sf(row['ncat'] * 1. / ncatN),
})
p_val_df = cat_freq_df.apply(row_beta_posterior, axis=1)
cat_freq_df['cat_p'] = p_val_df['cat_p']
cat_freq_df['ncat_p'] = p_val_df['ncat_p']
cat_freq_df['cat_z'] = norm.ppf(p_val_df['cat_p'])
cat_freq_df['ncat_z'] = norm.ppf(p_val_df['ncat_p'])
cat_freq_df['score'] = None
cat_freq_df.loc[cat_freq_df['cat_pct'] == cat_freq_df['ncat_pct'], 'score'] = 0
cat_freq_df.loc[cat_freq_df['cat_pct'] < cat_freq_df['ncat_pct'], 'score'] = cat_freq_df['ncat_z']
cat_freq_df.loc[cat_freq_df['cat_pct'] > cat_freq_df['ncat_pct'], 'score'] = -cat_freq_df['cat_z']
return cat_freq_df
def get_name(self):
return "Beta Posterior"
|
import functools
import logging
from docker_registry.core import compat
json = compat.json
from .. import storage
from .. import toolkit
from . import cache
from . import config
import flask
import requests
logger = logging.getLogger(__name__)
cfg = config.load()
def is_mirror():
return bool(cfg.mirroring and cfg.mirroring.source)
def _response_headers(base):
headers = {}
if not base:
return headers
for k, v in base.iteritems():
if k.lower() == 'content-encoding':
continue
headers[k.lower()] = v
logger.warn(headers)
return headers
def lookup_source(path, stream=False, source=None):
if not source:
if not is_mirror():
return
source = cfg.mirroring.source
source_url = '{0}{1}'.format(source, path)
headers = {}
for k, v in flask.request.headers.iteritems():
if k.lower() != 'location' and k.lower() != 'host':
headers[k] = v
logger.debug('Request: GET {0}\nHeaders: {1}\nArgs: {2}'.format(
source_url, headers, flask.request.args
))
source_resp = requests.get(
source_url,
params=flask.request.args,
headers=headers,
cookies=flask.request.cookies,
stream=stream
)
if source_resp.status_code != 200:
logger.debug('Source responded to request with non-200'
' status')
logger.debug('Response: {0}\n{1}\n'.format(
source_resp.status_code, source_resp.text
))
return None
return source_resp
def source_lookup_tag(f):
@functools.wraps(f)
def wrapper(namespace, repository, *args, **kwargs):
mirroring_cfg = cfg.mirroring
resp = f(namespace, repository, *args, **kwargs)
if not is_mirror():
return resp
source = mirroring_cfg.source
tags_cache_ttl = mirroring_cfg.tags_cache_ttl
if resp.status_code != 404:
logger.debug('Status code is not 404, no source '
'lookup required')
return resp
if not cache.redis_conn:
# No tags cache; proxy the source response directly without caching
logger.warning('mirroring: Tags cache is disabled, please set a '
'valid `cache\' directive in the config.')
source_resp = lookup_source(
flask.request.path, stream=False, source=source
)
if not source_resp:
return resp
headers = _response_headers(source_resp.headers)
return toolkit.response(data=source_resp.content, headers=headers,
raw=True)
store = storage.load()
request_path = flask.request.path
if request_path.endswith('/tags'):
# client GETs a list of tags
tag_path = store.tag_path(namespace, repository)
else:
# client GETs a single tag
tag_path = store.tag_path(namespace, repository, kwargs['tag'])
try:
data = cache.redis_conn.get('{0}:{1}'.format(
cache.cache_prefix, tag_path
))
except cache.redis.exceptions.ConnectionError as e:
data = None
logger.warning("Diff queue: Redis connection error: {0}".format(
e
))
if data is not None:
return toolkit.response(data=data, raw=True)
source_resp = lookup_source(
flask.request.path, stream=False, source=source
)
if not source_resp:
return resp
data = source_resp.content
headers = _response_headers(source_resp.headers)
try:
cache.redis_conn.setex('{0}:{1}'.format(
cache.cache_prefix, tag_path
), tags_cache_ttl, data)
except cache.redis.exceptions.ConnectionError as e:
logger.warning("Diff queue: Redis connection error: {0}".format(
e
))
return toolkit.response(data=data, headers=headers,
raw=True)
return wrapper
def source_lookup(cache=False, stream=False, index_route=False,
merge_results=False):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
mirroring_cfg = cfg.mirroring
resp = f(*args, **kwargs)
if not is_mirror():
return resp
source = mirroring_cfg.source
if index_route and mirroring_cfg.source_index:
source = mirroring_cfg.source_index
logger.debug('Source provided, registry acts as mirror')
if resp.status_code != 404 and not merge_results:
logger.debug('Status code is not 404, no source '
'lookup required')
return resp
source_resp = lookup_source(
flask.request.path, stream=stream, source=source
)
if not source_resp:
return resp
store = storage.load()
headers = _response_headers(source_resp.headers)
if index_route and 'x-docker-endpoints' in headers:
headers['x-docker-endpoints'] = toolkit.get_endpoints()
if not stream:
logger.debug('JSON data found on source, writing response')
resp_data = source_resp.content
if merge_results:
mjson = json.loads(resp_data)
pjson = json.loads(resp.data)
for mr in mjson["results"]:
replaced = False
for pi, pr in enumerate(pjson["results"]):
if pr["name"] == mr["name"]:
pjson["results"][pi] = mr
replaced = True
if not replaced:
pjson["results"].extend([mr])
pjson['num_results'] = len(pjson["results"])
resp_data = json.dumps(pjson)
if cache:
store_mirrored_data(
resp_data, flask.request.url_rule.rule, kwargs,
store
)
return toolkit.response(
data=resp_data,
headers=headers,
raw=True
)
logger.debug('Layer data found on source, preparing to '
'stream response...')
layer_path = store.image_layer_path(kwargs['image_id'])
return _handle_mirrored_layer(source_resp, layer_path, store,
headers)
return wrapper
return decorator
def _handle_mirrored_layer(source_resp, layer_path, store, headers):
sr = toolkit.SocketReader(source_resp)
tmp, hndlr = storage.temp_store_handler()
sr.add_handler(hndlr)
def generate():
for chunk in sr.iterate(store.buffer_size):
yield chunk
# FIXME: this could be done outside of the request context
tmp.seek(0)
store.stream_write(layer_path, tmp)
tmp.close()
return flask.Response(generate(), headers=dict(headers))
def store_mirrored_data(data, endpoint, args, store):
logger.debug('Endpoint: {0}'.format(endpoint))
path_method, arglist = ({
'/v1/images/<image_id>/json': ('image_json_path', ('image_id',)),
'/v1/images/<image_id>/ancestry': (
'image_ancestry_path', ('image_id',)
),
'/v1/repositories/<path:repository>/json': (
'registry_json_path', ('namespace', 'repository')
),
}).get(endpoint, (None, None))
if not path_method:
return
logger.debug('Path method: {0}'.format(path_method))
pm_args = {}
for arg in arglist:
pm_args[arg] = args[arg]
logger.debug('Path method args: {0}'.format(pm_args))
storage_path = getattr(store, path_method)(**pm_args)
logger.debug('Storage path: {0}'.format(storage_path))
store.put_content(storage_path, data)
|
def get_from_xsettings():
from ReText.xsettings import get_xsettings, XSettingsError
try:
xsettings = get_xsettings()
except XSettingsError:
return
if b'Net/IconThemeName' in xsettings:
return xsettings[b'Net/IconThemeName'].decode()
if b'Net/FallbackIconTheme' in xsettings:
return xsettings[b'Net/FallbackIconTheme'].decode()
def get_from_gsettings():
try:
from gi.repository import Gio
except ImportError:
return
schema = 'org.gnome.desktop.interface'
if schema in Gio.Settings.list_schemas():
settings = Gio.Settings.new(schema)
return settings.get_string('icon-theme')
def get_from_gtk():
try:
from gi import require_version
require_version('Gtk', '3.0')
from gi.repository import Gtk
except (ImportError, ValueError):
return
settings = Gtk.Settings.get_default()
return settings.get_property('gtk-icon-theme-name')
def get_icon_theme():
return (get_from_xsettings()
or get_from_gsettings()
or get_from_gtk())
|
import errno
import fcntl
import json
import os
import threading
import time
from subprocess import PIPE
from subprocess import Popen
import mock
import service_configuration_lib
from behave import given
from behave import then
from behave import when
from itest_utils import get_service_connection_string
from kazoo.exceptions import NodeExistsError
from steps.setup_steps import modify_configs
from paasta_tools.deployd.master import DEAD_DEPLOYD_WORKER_MESSAGE
from paasta_tools.marathon_tools import list_all_marathon_app_ids
from paasta_tools.marathon_tools import load_marathon_service_config_no_cache
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import ZookeeperPool
@given("paasta-deployd is running")
def start_deployd(context):
try:
os.makedirs("/nail/etc/services")
except OSError as e:
if e.errno != errno.EEXIST:
raise
with ZookeeperPool() as zk:
try:
zk.create("/autoscaling")
except NodeExistsError:
pass
context.zk_hosts = "%s/mesos-testcluster" % get_service_connection_string(
"zookeeper"
)
context.soa_dir = "/nail/etc/services"
if not hasattr(context, "daemon"):
context.daemon = Popen("paasta-deployd", stderr=PIPE)
output = context.daemon.stderr.readline().decode("utf-8")
start = time.time()
timeout = start + 60
while "Startup finished!" not in output:
output = context.daemon.stderr.readline().decode("utf-8")
if not output:
raise Exception("deployd exited prematurely")
print(output.rstrip("\n"))
if time.time() > timeout:
raise Exception("deployd never ran")
context.num_workers_crashed = 0
def dont_let_stderr_buffer():
while True:
line = context.daemon.stderr.readline()
if not line:
return
if DEAD_DEPLOYD_WORKER_MESSAGE.encode("utf-8") in line:
context.num_workers_crashed += 1
print(f"deployd stderr: {line}")
threading.Thread(target=dont_let_stderr_buffer).start()
time.sleep(5)
@then("no workers should have crashed")
def no_workers_should_crash(context):
if context.num_workers_crashed > 0:
raise Exception(
f"Expected no workers to crash, found {context.num_workers_crashed} stderr lines matching {DEAD_DEPLOYD_WORKER_MESSAGE!r}"
)
@then("paasta-deployd can be stopped")
def stop_deployd(context):
context.daemon.terminate()
context.daemon.wait()
@then("a second deployd does not become leader")
def start_second_deployd(context):
context.daemon1 = Popen("paasta-deployd", stderr=PIPE)
output = context.daemon1.stderr.readline().decode("utf-8")
fd = context.daemon1.stderr
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
for i in range(0, 5):
try:
output = context.daemon1.stderr.readline().decode("utf-8")
print(output.rstrip("\n"))
assert "This node is elected as leader" not in output
except IOError:
pass
time.sleep(1)
@then("a second deployd becomes leader")
def second_deployd_is_leader(context):
try:
output = context.daemon1.stderr.readline().decode("utf-8")
except IOError:
output = ""
start = time.time()
timeout = start + 60
while "This node is elected as leader" not in output:
try:
output = context.daemon1.stderr.readline().decode("utf-8")
except IOError:
output = ""
if output:
print(output.rstrip("\n"))
if time.time() > timeout:
raise Exception("Timed out waiting for second deployd leader")
time.sleep(1)
context.daemon1.terminate()
context.daemon1.wait()
@then('we should see "{service_instance}" listed in marathon after {seconds:d} seconds')
def check_app_running(context, service_instance, seconds):
service, instance, _, _ = decompose_job_id(service_instance)
service_configuration_lib._yaml_cache = {}
context.marathon_config = load_marathon_service_config_no_cache(
service, instance, context.cluster
)
context.app_id = context.marathon_config.format_marathon_app_dict()["id"]
step = 5
attempts = 0
context.current_client = context.marathon_clients.get_current_client_for_service(
context.marathon_config
)
while (attempts * step) < seconds:
if context.app_id in list_all_marathon_app_ids(context.current_client):
break
time.sleep(step)
attempts += 1
assert context.app_id in list_all_marathon_app_ids(context.current_client)
context.old_app_id = context.app_id
@then("we should not see the old version listed in marathon after {seconds:d} seconds")
def check_app_not_running(context, seconds):
step = 5
attempts = 0
while (attempts * step) < seconds:
if context.old_app_id not in list_all_marathon_app_ids(context.current_client):
return
time.sleep(step)
attempts += 1
assert context.old_app_id not in list_all_marathon_app_ids(context.current_client)
@then("we set a new command for our service instance to {cmd}")
def set_cmd(context, cmd):
context.cmd = cmd
@then('the appid for "{service_instance}" should have changed')
def check_sha_changed(context, service_instance):
service, instance, _, _ = decompose_job_id(service_instance)
service_configuration_lib._yaml_cache = {}
context.marathon_config = load_marathon_service_config_no_cache(
service, instance, context.cluster
)
assert context.app_id != context.marathon_config.format_marathon_app_dict()["id"]
@given(
'we have a secret called "{secret_name}" for the service "{service}" with signature "{signature}"'
)
def create_secret_json_file(context, secret_name, service, signature):
secret = {
"environments": {
"devc": {"ciphertext": "ScrambledNonsense", "signature": signature}
}
}
if not os.path.exists(os.path.join(context.soa_dir, service, "secrets")):
os.makedirs(os.path.join(context.soa_dir, service, "secrets"))
with open(
os.path.join(context.soa_dir, service, "secrets", f"{secret_name}.json"), "w"
) as secret_file:
json.dump(secret, secret_file)
@given(
'we set the an environment variable called "{var}" to "{val}" for '
'service "{service}" and instance "{instance}" for framework "{framework}"'
)
def add_env_var(context, var, val, service, instance, framework):
field = "env"
value = {var: val}
modify_configs(context, field, framework, service, instance, value)
@when('we set some arbitrary data at "{zookeeper_path}" in ZK')
def zookeeper_write_bogus_key(context, zookeeper_path):
with mock.patch.object(
SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
):
with ZookeeperPool() as zookeeper_client:
zookeeper_client.ensure_path(zookeeper_path)
zookeeper_client.set(zookeeper_path, b"WHATEVER")
@given("we remove autoscaling ZK keys for test-service")
def zookeeper_rmr_keys(context):
context.zk_hosts = "%s/mesos-testcluster" % get_service_connection_string(
"zookeeper"
)
with mock.patch.object(
SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
):
with ZookeeperPool() as zookeeper_client:
zookeeper_client.delete("/autoscaling/test-service", recursive=True)
|
from datetime import timedelta
import logging
from pyfido import FidoClient
from pyfido.client import PyFidoError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
DATA_KILOBITS,
TIME_MINUTES,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
PRICE = "CAD"
MESSAGES = "messages"
DEFAULT_NAME = "Fido"
REQUESTS_TIMEOUT = 15
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
SENSOR_TYPES = {
"fido_dollar": ["Fido dollar", PRICE, "mdi:cash-usd"],
"balance": ["Balance", PRICE, "mdi:cash-usd"],
"data_used": ["Data used", DATA_KILOBITS, "mdi:download"],
"data_limit": ["Data limit", DATA_KILOBITS, "mdi:download"],
"data_remaining": ["Data remaining", DATA_KILOBITS, "mdi:download"],
"text_used": ["Text used", MESSAGES, "mdi:message-text"],
"text_limit": ["Text limit", MESSAGES, "mdi:message-text"],
"text_remaining": ["Text remaining", MESSAGES, "mdi:message-text"],
"mms_used": ["MMS used", MESSAGES, "mdi:message-image"],
"mms_limit": ["MMS limit", MESSAGES, "mdi:message-image"],
"mms_remaining": ["MMS remaining", MESSAGES, "mdi:message-image"],
"text_int_used": ["International text used", MESSAGES, "mdi:message-alert"],
"text_int_limit": ["International text limit", MESSAGES, "mdi:message-alert"],
"text_int_remaining": ["International remaining", MESSAGES, "mdi:message-alert"],
"talk_used": ["Talk used", TIME_MINUTES, "mdi:cellphone"],
"talk_limit": ["Talk limit", TIME_MINUTES, "mdi:cellphone"],
"talk_remaining": ["Talk remaining", TIME_MINUTES, "mdi:cellphone"],
"other_talk_used": ["Other Talk used", TIME_MINUTES, "mdi:cellphone"],
"other_talk_limit": ["Other Talk limit", TIME_MINUTES, "mdi:cellphone"],
"other_talk_remaining": ["Other Talk remaining", TIME_MINUTES, "mdi:cellphone"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_VARIABLES): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Fido sensor."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
httpsession = hass.helpers.aiohttp_client.async_get_clientsession()
fido_data = FidoData(username, password, httpsession)
ret = await fido_data.async_update()
if ret is False:
return
name = config.get(CONF_NAME)
sensors = []
for number in fido_data.client.get_phone_numbers():
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(FidoSensor(fido_data, variable, name, number))
async_add_entities(sensors, True)
class FidoSensor(Entity):
"""Implementation of a Fido sensor."""
def __init__(self, fido_data, sensor_type, name, number):
"""Initialize the sensor."""
self.client_name = name
self._number = number
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.fido_data = fido_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._number} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {"number": self._number}
async def async_update(self):
"""Get the latest data from Fido and update the state."""
await self.fido_data.async_update()
if self.type == "balance":
if self.fido_data.data.get(self.type) is not None:
self._state = round(self.fido_data.data[self.type], 2)
else:
if self.fido_data.data.get(self._number, {}).get(self.type) is not None:
self._state = self.fido_data.data[self._number][self.type]
self._state = round(self._state, 2)
class FidoData:
"""Get data from Fido."""
def __init__(self, username, password, httpsession):
"""Initialize the data object."""
self.client = FidoClient(username, password, REQUESTS_TIMEOUT, httpsession)
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Fido."""
try:
await self.client.fetch_data()
except PyFidoError as exp:
_LOGGER.error("Error on receive last Fido data: %s", exp)
return False
# Update data
self.data = self.client.get_data()
return True
|
import copy
import json
from hatasmota.const import CONF_MAC
from hatasmota.utils import config_get_state_online, get_topic_tele_will
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from .test_common import DEFAULT_CONFIG
from tests.async_mock import call
from tests.common import async_fire_mqtt_message
async def test_availability_poll_state_once(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test several entities send a single message to update state."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["rl"][1] = 1
config["swc"][0] = 1
config["swc"][1] = 1
poll_payload_relay = ""
poll_payload_switch = "8"
poll_topic_relay = "tasmota_49A3BC/cmnd/STATE"
poll_topic_switch = "tasmota_49A3BC/cmnd/STATUS"
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Device online, verify poll for state
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_has_calls(
[
call(poll_topic_relay, poll_payload_relay, 0, False),
call(poll_topic_switch, poll_payload_switch, 0, False),
],
any_order=True,
)
|
import os
import flexx
from flexx import flx
# todo: support icons in widgets like Button, TabWidget, etc.
# todo: support fontawesome icons
fname = os.path.join(os.path.dirname(flexx.__file__), 'resources', 'flexx.ico')
black_png = ('data:image/png;base64,'
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAIUlEQVR42mNgY'
'GD4TyEeTAacOHGCKDxqwKgBtDVgaGYmAD/v6XAYiQl7AAAAAElFTkSuQmCC')
class Icons1(flx.Widget):
def init(self):
flx.Button(text='Not much to see here ...')
class Icons2(flx.Widget):
def init(self):
self.set_title('Icon demo')
self.set_icon(black_png)
flx.Button(text='Not much to see here ...')
if __name__ == '__main__':
# Select application icon. Can be a url, a relative url to a shared asset,
# a base64 encoded image, or a local filename. Note that the local filename
# works for setting the application icon in a desktop-like app, but not for
# a web app. File types can be ico or png.
# << Uncomment any of the lines below >>
# icon = None # use default
# icon = 'https://assets-cdn.github.com/favicon.ico'
# icon = flx.assets.add_shared_data('ico.icon', open(fname, 'rb').read())
icon = black_png
# icon = fname
m = flx.App(Icons1, title='Icon demo', icon=icon).launch('app')
flx.start()
|
import os
import re
import subprocess
import sys
import xml.dom.minidom
import diamond.collector
class GridEngineCollector(diamond.collector.Collector):
"""Diamond collector for Grid Engine performance data
"""
class QueueStatsEntry:
def __init__(self, name=None, load=None, used=None, resv=None,
available=None, total=None, temp_disabled=None,
manual_intervention=None):
self.name = name
self.load = load
self.used = used
self.resv = resv
self.available = available
self.total = total
self.temp_disabled = temp_disabled
self.manual_intervention = manual_intervention
class StatsParser(object):
def __init__(self, document):
self.dom = xml.dom.minidom.parseString(document.strip())
def get_tag_text(self, node, tag_name):
el = node.getElementsByTagName(tag_name)[0]
return self.get_text(el)
def get_text(self, node):
rc = []
for node in node.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
class QueueStatsParser(StatsParser):
def __init__(self, document):
self.dom = xml.dom.minidom.parseString(document.strip())
def parse(self):
cluster_queue_summaries = self.dom.getElementsByTagName(
"cluster_queue_summary")
return [
self._parse_cluster_stats_entry(node)
for node in cluster_queue_summaries]
def _parse_cluster_stats_entry(self, node):
name = self.get_tag_text(node, "name")
load = float(self.get_tag_text(node, "load"))
used = int(self.get_tag_text(node, "used"))
resv = int(self.get_tag_text(node, "resv"))
available = int(self.get_tag_text(node, "available"))
total = int(self.get_tag_text(node, "total"))
temp_disabled = int(self.get_tag_text(node, "temp_disabled"))
manual_intervention = int(self.get_tag_text(
node,
"manual_intervention"))
return GridEngineCollector.QueueStatsEntry(
name=name,
load=load,
used=used,
resv=resv,
available=available,
total=total,
temp_disabled=temp_disabled,
manual_intervention=manual_intervention)
def process_config(self):
super(GridEngineCollector, self).process_config()
os.environ['SGE_ROOT'] = self.config['sge_root']
def get_default_config_help(self):
config_help = super(GridEngineCollector,
self).get_default_config_help()
config_help.update({
'bin_path': "The path to Grid Engine's qstat",
'sge_root': "The SGE_ROOT value to provide to qstat"
})
return config_help
def get_default_config(self):
config = super(GridEngineCollector, self).get_default_config()
config.update({
'bin_path': '/opt/gridengine/bin/lx-amd64/qstat',
'path': 'gridengine',
'sge_root': self._sge_root(),
})
return config
def collect(self):
"""Collect statistics from Grid Engine via qstat.
"""
self._collect_queue_stats()
def _capture_output(self, cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
bytestr = p.communicate()[0]
output = bytestr.decode(sys.getdefaultencoding())
return output
def _collect_queue_stats(self):
output = self._queue_stats_xml()
parser = self.QueueStatsParser(output)
for cq in parser.parse():
name = self._sanitize(cq.name)
prefix = 'queues.%s' % (name)
metrics = ['load', 'used', 'resv', 'available', 'total',
'temp_disabled', 'manual_intervention']
for metric in metrics:
path = '%s.%s' % (prefix, metric)
value = getattr(cq, metric)
self.publish(path, value)
def _queue_stats_xml(self):
bin_path = self.config['bin_path']
return self._capture_output([bin_path, '-g', 'c', '-xml'])
def _sanitize(self, s):
"""Sanitize the name of a metric to remove unwanted chars
"""
return re.sub(r"[^\w-]", "_", s)
def _sge_root(self):
sge_root = os.environ.get('SGE_ROOT')
if sge_root:
return sge_root
else:
return '/opt/gridengine'
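A small usage sketch of the QueueStatsParser defined above; the XML below is a hand-written sample shaped like `qstat -g c -xml` output (queue name and numbers are made up), not captured from a real Grid Engine installation:
```
sample_xml = """<?xml version='1.0'?>
<job_info>
  <cluster_queue_summary>
    <name>all.q</name>
    <load>0.25</load>
    <used>8</used>
    <resv>0</resv>
    <available>24</available>
    <total>32</total>
    <temp_disabled>0</temp_disabled>
    <manual_intervention>0</manual_intervention>
  </cluster_queue_summary>
</job_info>"""

parser = GridEngineCollector.QueueStatsParser(sample_xml)
for entry in parser.parse():
    # One QueueStatsEntry per <cluster_queue_summary> element.
    print(entry.name, entry.load, entry.used, entry.available, entry.total)
```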
|
import tempfile
import os
from os.path import join, dirname, abspath
import re
from sys import version_info
from six import integer_types
from logilab.common import attrdict
from logilab.common.compat import StringIO
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.optik_ext import OptionValueError
from logilab.common.configuration import Configuration, OptionError, \
OptionsManagerMixIn, OptionsProviderMixIn, Method, read_old_config, \
merge_options
DATA = join(dirname(abspath(__file__)), 'data')
OPTIONS = [('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}),
('value', {'type': 'string', 'metavar': '<string>', 'short': 'v'}),
('multiple', {'type': 'csv', 'default': ['yop', 'yep'],
'metavar': '<comma separated values>',
'help': 'you can also document the option'}),
('number', {'type': 'int', 'default':2, 'metavar':'<int>', 'help': 'boom'}),
('bytes', {'type': 'bytes', 'default':'1KB', 'metavar':'<bytes>'}),
('choice', {'type': 'choice', 'default':'yo', 'choices': ('yo', 'ye'),
'metavar':'<yo|ye>'}),
('multiple-choice', {'type': 'multiple_choice', 'default':['yo', 'ye'],
'choices': ('yo', 'ye', 'yu', 'yi', 'ya'),
'metavar':'<yo|ye>'}),
('named', {'type':'named', 'default':Method('get_named'),
'metavar': '<key=val>'}),
('diffgroup', {'type':'string', 'default':'pouet', 'metavar': '<key=val>',
'group': 'agroup'}),
('reset-value', {'type': 'string', 'metavar': '<string>', 'short': 'r',
'dest':'value'}),
('opt-b-1', {'type': 'string', 'metavar': '<string>', 'group': 'bgroup'}),
('opt-b-2', {'type': 'string', 'metavar': '<string>', 'group': 'bgroup'}),
]
class MyConfiguration(Configuration):
"""test configuration"""
def get_named(self):
return {'key': 'val'}
class ConfigurationTC(TestCase):
def setUp(self):
self.cfg = MyConfiguration(name='test', options=OPTIONS, usage='Just do it ! (tm)')
def test_default(self):
cfg = self.cfg
self.assertEqual(cfg['dothis'], True)
self.assertEqual(cfg['value'], None)
self.assertEqual(cfg['multiple'], ['yop', 'yep'])
self.assertEqual(cfg['number'], 2)
self.assertEqual(cfg['bytes'], 1024)
self.assertIsInstance(cfg['bytes'], integer_types)
self.assertEqual(cfg['choice'], 'yo')
self.assertEqual(cfg['multiple-choice'], ['yo', 'ye'])
self.assertEqual(cfg['named'], {'key': 'val'})
def test_base(self):
cfg = self.cfg
cfg.set_option('number', '0')
self.assertEqual(cfg['number'], 0)
self.assertRaises(OptionValueError, cfg.set_option, 'number', 'youpi')
self.assertRaises(OptionValueError, cfg.set_option, 'choice', 'youpi')
self.assertRaises(OptionValueError, cfg.set_option, 'multiple-choice', ('yo', 'y', 'ya'))
cfg.set_option('multiple-choice', 'yo, ya')
self.assertEqual(cfg['multiple-choice'], ['yo', 'ya'])
self.assertEqual(cfg.get('multiple-choice'), ['yo', 'ya'])
self.assertEqual(cfg.get('whatever'), None)
def test_load_command_line_configuration(self):
cfg = self.cfg
args = cfg.load_command_line_configuration(['--choice', 'ye', '--number', '4',
'--multiple=1,2,3', '--dothis=n',
'--bytes=10KB',
'other', 'arguments'])
self.assertEqual(args, ['other', 'arguments'])
self.assertEqual(cfg['dothis'], False)
self.assertEqual(cfg['multiple'], ['1', '2', '3'])
self.assertEqual(cfg['number'], 4)
self.assertEqual(cfg['bytes'], 10240)
self.assertEqual(cfg['choice'], 'ye')
self.assertEqual(cfg['value'], None)
args = cfg.load_command_line_configuration(['-v', 'duh'])
self.assertEqual(args, [])
self.assertEqual(cfg['value'], 'duh')
self.assertEqual(cfg['dothis'], False)
self.assertEqual(cfg['multiple'], ['1', '2', '3'])
self.assertEqual(cfg['number'], 4)
self.assertEqual(cfg['bytes'], 10240)
self.assertEqual(cfg['choice'], 'ye')
def test_load_configuration(self):
cfg = self.cfg
args = cfg.load_configuration(choice='ye', number='4',
multiple='1,2,3', dothis='n',
multiple_choice=('yo', 'ya'))
self.assertEqual(cfg['dothis'], False)
self.assertEqual(cfg['multiple'], ['1', '2', '3'])
self.assertEqual(cfg['number'], 4)
self.assertEqual(cfg['choice'], 'ye')
self.assertEqual(cfg['value'], None)
self.assertEqual(cfg['multiple-choice'], ('yo', 'ya'))
def test_load_configuration_file_case_insensitive(self):
file = tempfile.mktemp()
stream = open(file, 'w')
try:
stream.write("""[Test]
dothis=no
#value=
# you can also document the option
multiple=yop,yepii
# boom
number=3
bytes=1KB
choice=yo
multiple-choice=yo,ye
named=key:val
[agroup]
diffgroup=zou
""")
stream.close()
self.cfg.load_file_configuration(file)
self.assertEqual(self.cfg['dothis'], False)
self.assertEqual(self.cfg['value'], None)
self.assertEqual(self.cfg['multiple'], ['yop', 'yepii'])
self.assertEqual(self.cfg['diffgroup'], 'zou')
finally:
os.remove(file)
def test_option_order(self):
""" Check that options are taken into account in the command line order
and not in the order they are defined in the Configuration object.
"""
file = tempfile.mktemp()
stream = open(file, 'w')
try:
stream.write("""[Test]
reset-value=toto
value=tata
""")
stream.close()
self.cfg.load_file_configuration(file)
finally:
os.remove(file)
self.assertEqual(self.cfg['value'], 'tata')
def test_unsupported_options(self):
file = tempfile.mktemp()
stream = open(file, 'w')
try:
stream.write("""[Test]
whatever=toto
value=tata
""")
stream.close()
self.cfg.load_file_configuration(file)
finally:
os.remove(file)
self.assertEqual(self.cfg['value'], 'tata')
self.assertRaises(OptionError, self.cfg.__getitem__, 'whatever')
def test_generate_config(self):
stream = StringIO()
self.cfg.generate_config(stream)
self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
#value=
# you can also document the option
multiple=yop,yep
# boom
number=2
bytes=1KB
choice=yo
multiple-choice=yo,ye
named=key:val
#reset-value=
[AGROUP]
diffgroup=pouet
[BGROUP]
#opt-b-1=
#opt-b-2=""")
def test_generate_config_with_space_string(self):
self.cfg['value'] = ' '
stream = StringIO()
self.cfg.generate_config(stream)
self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
value=' '
# you can also document the option
multiple=yop,yep
# boom
number=2
bytes=1KB
choice=yo
multiple-choice=yo,ye
named=key:val
reset-value=' '
[AGROUP]
diffgroup=pouet
[BGROUP]
#opt-b-1=
#opt-b-2=""")
def test_generate_config_with_multiline_string(self):
self.cfg['value'] = 'line1\nline2\nline3'
stream = StringIO()
self.cfg.generate_config(stream)
self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
value=
line1
line2
line3
# you can also document the option
multiple=yop,yep
# boom
number=2
bytes=1KB
choice=yo
multiple-choice=yo,ye
named=key:val
reset-value=
line1
line2
line3
[AGROUP]
diffgroup=pouet
[BGROUP]
#opt-b-1=
#opt-b-2=""")
def test_roundtrip(self):
cfg = self.cfg
f = tempfile.mktemp()
stream = open(f, 'w')
try:
self.cfg['dothis'] = False
self.cfg['multiple'] = ["toto", "tata"]
self.cfg['number'] = 3
self.cfg['bytes'] = 2048
cfg.generate_config(stream)
stream.close()
new_cfg = MyConfiguration(name='test', options=OPTIONS)
new_cfg.load_file_configuration(f)
self.assertEqual(cfg['dothis'], new_cfg['dothis'])
self.assertEqual(cfg['multiple'], new_cfg['multiple'])
self.assertEqual(cfg['number'], new_cfg['number'])
self.assertEqual(cfg['bytes'], new_cfg['bytes'])
self.assertEqual(cfg['choice'], new_cfg['choice'])
self.assertEqual(cfg['value'], new_cfg['value'])
self.assertEqual(cfg['multiple-choice'], new_cfg['multiple-choice'])
finally:
os.remove(f)
def test_setitem(self):
self.assertRaises(OptionValueError,
self.cfg.__setitem__, 'multiple-choice', ('a', 'b'))
self.cfg['multiple-choice'] = ('yi', 'ya')
self.assertEqual(self.cfg['multiple-choice'], ('yi', 'ya'))
def test_help(self):
self.cfg.add_help_section('bonus', 'a nice additional help')
help = self.cfg.help().strip()
# at least in python 2.4.2 the output is:
# ' -v <string>, --value=<string>'
# it is not unlikely some optik/optparse versions do print -v<string>
# so accept both
help = help.replace(' -v <string>, ', ' -v<string>, ')
help = re.sub('[ ]*(\r?\n)', '\\1', help)
USAGE = """Usage: Just do it ! (tm)
Options:
-h, --help show this help message and exit
--dothis=<y or n>
-v<string>, --value=<string>
--multiple=<comma separated values>
you can also document the option [current: yop,yep]
--number=<int> boom [current: 2]
--bytes=<bytes>
--choice=<yo|ye>
--multiple-choice=<yo|ye>
--named=<key=val>
-r <string>, --reset-value=<string>
Agroup:
--diffgroup=<key=val>
Bgroup:
--opt-b-1=<string>
--opt-b-2=<string>
Bonus:
a nice additional help"""
if version_info < (2, 4):
USAGE = """usage: Just do it ! (tm)
options:
-h, --help show this help message and exit
--dothis=<y or n>
-v<string>, --value=<string>
--multiple=<comma separated values>
you can also document the option
--number=<int>
--choice=<yo|ye>
--multiple-choice=<yo|ye>
--named=<key=val>
Bonus:
a nice additional help
"""
elif version_info < (2, 5):
# 'usage' header is not capitalized in this version
USAGE = USAGE.replace('Usage: ', 'usage: ')
self.assertMultiLineEqual(help, USAGE)
def test_manpage(self):
pkginfo = {}
with open(join(DATA, '__pkginfo__.py')) as fobj:
exec(fobj.read(), pkginfo)
self.cfg.generate_manpage(attrdict(pkginfo), stream=StringIO())
def test_rewrite_config(self):
changes = [('renamed', 'renamed', 'choice'),
('moved', 'named', 'old', 'test'),
]
read_old_config(self.cfg, changes, join(DATA, 'test.ini'))
stream = StringIO()
self.cfg.generate_config(stream)
self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
value=' '
# you can also document the option
multiple=yop
# boom
number=2
bytes=1KB
choice=yo
multiple-choice=yo,ye
named=key:val
reset-value=' '
[AGROUP]
diffgroup=pouet
[BGROUP]
#opt-b-1=
#opt-b-2=""")
class Linter(OptionsManagerMixIn, OptionsProviderMixIn):
options = (
('profile', {'type' : 'yn', 'metavar' : '<y_or_n>',
'default': False,
'help' : 'Profiled execution.'}),
)
def __init__(self):
OptionsManagerMixIn.__init__(self, usage="")
OptionsProviderMixIn.__init__(self)
self.register_options_provider(self)
self.load_provider_defaults()
class RegrTC(TestCase):
def setUp(self):
self.linter = Linter()
def test_load_defaults(self):
self.linter.load_command_line_configuration([])
self.assertEqual(self.linter.config.profile, False)
def test_register_options_multiple_groups(self):
"""ensure multiple option groups can be registered at once"""
config = Configuration()
self.assertEqual(config.options, ())
new_options = (
('option1', {'type': 'string', 'help': '',
'group': 'g1', 'level': 2}),
('option2', {'type': 'string', 'help': '',
'group': 'g1', 'level': 2}),
('option3', {'type': 'string', 'help': '',
'group': 'g2', 'level': 2}),
)
config.register_options(new_options)
self.assertEqual(config.options, new_options)
class MergeTC(TestCase):
def test_merge1(self):
merged = merge_options([('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}),
('dothis', {'type':'yn', 'action': 'store', 'default': False, 'metavar': '<y or n>'}),
])
self.assertEqual(len(merged), 1)
self.assertEqual(merged[0][0], 'dothis')
self.assertEqual(merged[0][1]['default'], True)
def test_merge2(self):
merged = merge_options([('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}),
('value', {'type': 'string', 'metavar': '<string>', 'short': 'v'}),
('dothis', {'type':'yn', 'action': 'store', 'default': False, 'metavar': '<y or n>'}),
])
self.assertEqual(len(merged), 2)
self.assertEqual(merged[0][0], 'value')
self.assertEqual(merged[1][0], 'dothis')
self.assertEqual(merged[1][1]['default'], True)
if __name__ == '__main__':
unittest_main()
|
import unittest
import mock
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from apply_bpe import isolate_glossary, BPE
class TestIsolateGlossaryFunction(unittest.TestCase):
def setUp(self):
self.glossary = 'like'
def _run_test_case(self, test_case):
orig, expected = test_case
out = isolate_glossary(orig, self.glossary)
self.assertEqual(out, expected)
def test_empty_string(self):
orig = ''
exp = ['']
test_case = (orig, exp)
self._run_test_case(test_case)
def test_no_glossary(self):
orig = 'word'
exp = ['word']
test_case = (orig, exp)
self._run_test_case(test_case)
def test_isolated_glossary(self):
orig = 'like'
exp = ['like']
test_case = (orig, exp)
self._run_test_case(test_case)
def test_word_one_side(self):
orig = 'likeword'
exp = ['like', 'word']
test_case = (orig, exp)
self._run_test_case(test_case)
def test_words_both_sides(self):
orig = 'wordlikeword'
exp = ['word', 'like', 'word']
test_case = (orig, exp)
self._run_test_case(test_case)
def test_back_to_back_glossary(self):
orig = 'likelike'
exp = ['like', 'like']
test_case = (orig, exp)
self._run_test_case(test_case)
def test_multiple_glossaries(self):
orig = 'wordlikewordlike'
exp = ['word', 'like', 'word', 'like']
test_case = (orig, exp)
self._run_test_case(test_case)
class TestBPEIsolateGlossariesMethod(unittest.TestCase):
def setUp(self):
amock = mock.MagicMock()
amock.readline.return_value = 'something'
glossaries = ['like', 'Manuel', 'USA']
self.bpe = BPE(amock, glossaries=glossaries)
def _run_test_case(self, test_case):
orig, expected = test_case
out = self.bpe._isolate_glossaries(orig)
self.assertEqual(out, expected)
def test_multiple_glossaries(self):
orig = 'wordlikeUSAwordManuelManuelwordUSA'
exp = ['word', 'like', 'USA', 'word', 'Manuel', 'Manuel', 'word', 'USA']
test_case = (orig, exp)
self._run_test_case(test_case)
class TestRegexIsolateGlossaries(unittest.TestCase):
def setUp(self):
amock = mock.MagicMock()
amock.readline.return_value = 'something'
glossaries = [r"<country>\w*</country>", r"<name>\w*</name>", r"\d+"]
self.bpe = BPE(amock, glossaries=glossaries)
def _run_test_case(self, test_case):
orig, expected = test_case
out = self.bpe._isolate_glossaries(orig)
self.assertEqual(out, expected)
def test_regex_glossaries(self):
orig = 'wordlike<country>USA</country>word10001word<name>Manuel</name>word<country>USA</country>'
exp = ['wordlike', '<country>USA</country>', 'word', '10001', 'word', '<name>Manuel</name>', 'word', '<country>USA</country>']
test_case = (orig, exp)
self._run_test_case(test_case)
def encode_mock(segment, x2, x3, x4, x5, x6, x7, glosses, dropout):
if glosses.match(segment):
return (segment,)
else:
l = len(segment)
return (segment[:l//2], segment[l//2:])
class TestBPESegmentMethod(unittest.TestCase):
def setUp(self):
amock = mock.MagicMock()
amock.readline.return_value = 'something'
glossaries = ['like', 'Manuel', 'USA']
self.bpe = BPE(amock, glossaries=glossaries)
@mock.patch('apply_bpe.encode', side_effect=encode_mock)
def _run_test_case(self, test_case, encode_function):
orig, expected = test_case
out = self.bpe.segment(orig)
self.assertEqual(out, expected)
def test_multiple_glossaries(self):
orig = 'wordlikeword likeManuelword'
exp = 'wo@@ rd@@ like@@ wo@@ rd like@@ Manuel@@ wo@@ rd'
test_case = (orig, exp)
self._run_test_case(test_case)
if __name__ == '__main__':
unittest.main()
|
import re
import warnings
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import numpy as np
import pandas as pd
from pandas.errors import OutOfBoundsDatetime
from ..core import indexing
from ..core.common import contains_cftime_datetimes
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.variable import Variable
from .variables import (
SerializationWarning,
VariableCoder,
lazy_elemwise_func,
pop_to,
safe_setitem,
unpack_for_decoding,
unpack_for_encoding,
)
# standard calendars recognized by cftime
_STANDARD_CALENDARS = {"standard", "gregorian", "proleptic_gregorian"}
_NS_PER_TIME_DELTA = {
"us": int(1e3),
"ms": int(1e6),
"s": int(1e9),
"m": int(1e9) * 60,
"h": int(1e9) * 60 * 60,
"D": int(1e9) * 60 * 60 * 24,
}
TIME_UNITS = frozenset(
["days", "hours", "minutes", "seconds", "milliseconds", "microseconds"]
)
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith("s"):
units = "%ss" % units
return {
"microseconds": "us",
"milliseconds": "ms",
"seconds": "s",
"minutes": "m",
"hours": "h",
"days": "D",
}[units]
def _ensure_padded_year(ref_date):
# Reference dates without a padded year (e.g. since 1-1-1 or since 2-3-4)
# are ambiguous (is it YMD or DMY?). This can lead to some very odd
# behaviour e.g. pandas (via dateutil) parses '1-1-1 00:00:0.0' as
# '2001-01-01 00:00:00' (because it assumes a) DMY and b) that year 1 is
# shorthand for 2001 (like 02 would be shorthand for year 2002)).
# Here we ensure that there is always a four-digit year, with the
# assumption being that year comes first if we get something ambiguous.
matches_year = re.match(r".*\d{4}.*", ref_date)
if matches_year:
# all good, return
return ref_date
# No four-digit strings, assume the first digits are the year and pad
# appropriately
matches_start_digits = re.match(r"(\d+)(.*)", ref_date)
ref_year, everything_else = [s for s in matches_start_digits.groups()]
ref_date_padded = "{:04d}{}".format(int(ref_year), everything_else)
warning_msg = (
f"Ambiguous reference date string: {ref_date}. The first value is "
"assumed to be the year hence will be padded with zeros to remove "
f"the ambiguity (the padded reference date string is: {ref_date_padded}). "
"To remove this message, remove the ambiguity by padding your reference "
"date strings with zeros."
)
warnings.warn(warning_msg, SerializationWarning)
return ref_date_padded
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace. It also ensures that the year is padded with zeros
# so it will be correctly understood by pandas (via dateutil).
matches = re.match(r"(.+) since (.+)", units)
if not matches:
raise ValueError(f"invalid time units: {units}")
delta_units, ref_date = [s.strip() for s in matches.groups()]
ref_date = _ensure_padded_year(ref_date)
return delta_units, ref_date
def _decode_cf_datetime_dtype(data, units, calendar, use_cftime):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(indexing.as_indexable(data))
example_value = np.concatenate(
[first_n_items(values, 1) or [0], last_item(values) or [0]]
)
try:
result = decode_cf_datetime(example_value, units, calendar, use_cftime)
except Exception:
calendar_msg = (
"the default calendar" if calendar is None else "calendar %r" % calendar
)
msg = (
f"unable to decode time units {units!r} with {calendar_msg!r}. Try "
"opening your dataset with decode_times=False or installing cftime "
"if it is not installed."
)
raise ValueError(msg)
else:
dtype = getattr(result, "dtype", np.dtype("object"))
return dtype
def _decode_datetime_with_cftime(num_dates, units, calendar):
import cftime
return np.asarray(
cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True)
)
def _decode_datetime_with_pandas(flat_num_dates, units, calendar):
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime(
"Cannot decode times from a non-standard calendar, {!r}, using "
"pandas.".format(calendar)
)
delta, ref_date = _unpack_netcdf_time_units(units)
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
# fixes: https://github.com/pydata/pandas/issues/14068
# these lines check if the lowest or the highest value in dates
# cause an OutOfBoundsDatetime (Overflow) error
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning)
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
# make _NS_PER_TIME_DELTA an array to ensure type upcasting
flat_num_dates_ns_int = (
flat_num_dates.astype(np.float64) * _NS_PER_TIME_DELTA[delta]
).astype(np.int64)
return (pd.to_timedelta(flat_num_dates_ns_int, "ns") + ref_date).values
def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that the time unit in `units` must not be smaller than microseconds
or larger than days.
See also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = "standard"
if use_cftime is None:
try:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
except (KeyError, OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(float), units, calendar
)
if (
dates[np.nanargmin(num_dates)].year < 1678
or dates[np.nanargmax(num_dates)].year >= 2262
):
if calendar in _STANDARD_CALENDARS:
warnings.warn(
"Unable to decode time axis into full "
"numpy.datetime64 objects, continuing using "
"cftime.datetime objects instead, reason: dates out "
"of range",
SerializationWarning,
stacklevel=3,
)
else:
if calendar in _STANDARD_CALENDARS:
dates = cftime_to_nptime(dates)
elif use_cftime:
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(float), units, calendar
)
else:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
return dates.reshape(num_dates.shape)
def to_timedelta_unboxed(value, **kwargs):
if LooseVersion(pd.__version__) < "0.25.0":
result = pd.to_timedelta(value, **kwargs, box=False)
else:
result = pd.to_timedelta(value, **kwargs).to_numpy()
assert result.dtype == "timedelta64[ns]"
return result
def to_datetime_unboxed(value, **kwargs):
if LooseVersion(pd.__version__) < "0.25.0":
result = pd.to_datetime(value, **kwargs, box=False)
else:
result = pd.to_datetime(value, **kwargs).to_numpy()
assert result.dtype == "datetime64[ns]"
return result
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
result = to_timedelta_unboxed(num_timedeltas.ravel(), unit=units)
return result.reshape(num_timedeltas.shape)
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit in ["days", "hours", "minutes", "seconds"]:
delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]
unit_delta = np.timedelta64(delta_ns, "ns")
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return "seconds"
def infer_calendar_name(dates):
"""Given an array of datetimes, infer the CF calendar name"""
if np.asarray(dates).dtype == "datetime64[ns]":
return "proleptic_gregorian"
else:
return np.asarray(dates).ravel()[0].calendar
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = np.asarray(dates).ravel()
if np.asarray(dates).dtype == "datetime64[ns]":
dates = to_datetime_unboxed(dates)
dates = dates[pd.notnull(dates)]
reference_date = dates[0] if len(dates) > 0 else "1970-01-01"
reference_date = pd.Timestamp(reference_date)
else:
reference_date = dates[0] if len(dates) > 0 else "1970-01-01"
reference_date = format_cftime_datetime(reference_date)
unique_timedeltas = np.unique(np.diff(dates))
if unique_timedeltas.dtype == np.dtype("O"):
# Convert to np.timedelta64 objects using pandas to work around a
# NumPy casting bug: https://github.com/numpy/numpy/issues/11096
unique_timedeltas = to_timedelta_unboxed(unique_timedeltas)
units = _infer_time_units_from_diff(unique_timedeltas)
return f"{units} since {reference_date}"
def format_cftime_datetime(date):
"""Converts a cftime.datetime object to a string with the format:
YYYY-MM-DD HH:MM:SS.UUUUUU
"""
return "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}".format(
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
date.microsecond,
)
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes', 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = to_timedelta_unboxed(np.asarray(deltas).ravel())
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
def cftime_to_nptime(times):
"""Given an array of cftime.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype="M8[ns]")
for i, t in np.ndenumerate(times):
try:
# Use pandas.Timestamp in place of datetime.datetime, because
# NumPy casts it safely to np.datetime64[ns] for dates outside
# 1678 to 2262 (this is not currently the case for
# datetime.datetime).
dt = pd.Timestamp(
t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond
)
except ValueError as e:
raise ValueError(
"Cannot convert date {} to a date in the "
"standard calendar. Reason: {}.".format(t, e)
)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = "{} since {}".format(delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_cftime(dates, units, calendar):
"""Fallback method for encoding dates using cftime.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
import cftime
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype("M8[us]").astype(datetime)
def encode_datetime(d):
return np.nan if d is None else cftime.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
cftime.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = infer_calendar_name(dates)
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == "O":
# parse with cftime instead
raise OutOfBoundsDatetime
assert dates.dtype == "datetime64[ns]"
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype("timedelta64[ns]")
ref_date = pd.Timestamp(ref_date)
# If the ref_date Timestamp is timezone-aware, convert to UTC and
# make it timezone-naive (GH 2649).
if ref_date.tz is not None:
ref_date = ref_date.tz_convert(None)
# Wrap the dates in a DatetimeIndex to do the subtraction to ensure
# an OverflowError is raised if the ref_date is too far away from
# dates to be encoded (GH 2272).
num = (pd.DatetimeIndex(dates.ravel()) - ref_date) / time_delta
num = num.values.reshape(dates.shape)
except (OutOfBoundsDatetime, OverflowError):
num = _encode_datetime_with_cftime(dates, units, calendar)
num = cast_to_int_if_safe(num)
return (num, units, calendar)
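# Rough usage sketch (input values and the exact inferred strings are
# assumed): with no units or calendar given, both are inferred from the data,
# so two consecutive daily datetime64 values would come back roughly as
# (array([0, 1]), 'days since 2000-01-01 00:00:00', 'proleptic_gregorian').
#
#   >>> num, units, calendar = encode_cf_datetime(
#   ...     np.array(["2000-01-01", "2000-01-02"], dtype="datetime64[ns]"))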
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
num = np.where(pd.isnull(timedeltas), np.nan, num)
num = cast_to_int_if_safe(num)
return (num, units)
class CFDatetimeCoder(VariableCoder):
def __init__(self, use_cftime=None):
self.use_cftime = use_cftime
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if np.issubdtype(data.dtype, np.datetime64) or contains_cftime_datetimes(
variable
):
(data, units, calendar) = encode_cf_datetime(
data, encoding.pop("units", None), encoding.pop("calendar", None)
)
safe_setitem(attrs, "units", units, name=name)
safe_setitem(attrs, "calendar", calendar, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if "units" in attrs and "since" in attrs["units"]:
units = pop_to(attrs, encoding, "units")
calendar = pop_to(attrs, encoding, "calendar")
dtype = _decode_cf_datetime_dtype(data, units, calendar, self.use_cftime)
transform = partial(
decode_cf_datetime,
units=units,
calendar=calendar,
use_cftime=self.use_cftime,
)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding)
class CFTimedeltaCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if np.issubdtype(data.dtype, np.timedelta64):
data, units = encode_cf_timedelta(data, encoding.pop("units", None))
safe_setitem(attrs, "units", units, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if "units" in attrs and attrs["units"] in TIME_UNITS:
units = pop_to(attrs, encoding, "units")
transform = partial(decode_cf_timedelta, units=units)
dtype = np.dtype("timedelta64[ns]")
data = lazy_elemwise_func(data, transform, dtype=dtype)
return Variable(dims, data, attrs, encoding)
|
from distutils.core import setup
from setuptools import find_packages
import os
import re
import io
packages = find_packages('app')
LONGDOC = """
git-webhook is a web app based on
Python Flask + SQLAlchemy + Celery + Redis + React.
It aims to make deploying a git webhook platform easy,
and currently supports Github / GitLab / Gogs / GitOsc.
How to deploy & run?
> pip install git-webhook
1. gitwebhook config : will init config into HOME dir, then modify it
2. gitwebhook runserver : run web server, with default port 18340
3. gitwebhook celery : run celery task
> then visit ip:18340
"""
def read(*names, **kwargs):
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
).read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(name='git-webhook',
version=find_version('app/__init__.py'),
      description=(u'A tool built with Python Flask + SQLAlchemy + Celery + '
                   u'Redis + React for quickly setting up and using WebHooks '
                   u'for automated deployment and operations; '
                   u'supports Github / GitLab / Gogs / GitOsc.'),
long_description=LONGDOC,
author='hustcc',
author_email='[email protected]',
url='https://github.com/hustcc',
license='MIT',
install_requires=[
'flask==0.11.1',
'flask-sqlalchemy==2.1',
'pymysql==0.7.9',
'jinja2==2.8',
'github-flask==3.1.3',
'eventlet==0.19.0',
'paramiko==2.0.2',
'celery==3.1.24',
'redis==2.10.5',
'schema==0.6.5',
'validators==0.11.0',
'flask-socketio==2.7.2',
'Flask-Script==2.0.5',
],
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development',
'Topic :: Utilities'
],
keywords='git, webhook, ci, GitHub, GitLab, Gogs, GitOsc',
include_package_data=True,
packages=['app'],
py_modules=['manage'],
zip_safe=False,
entry_points={
'console_scripts': ['gitwebhook=manage:run']
})
|
import logging
import paasta_tools.paastaapi.models as paastamodels
from paasta_tools.api import client
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_instances
from paasta_tools.utils import _log_audit
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import PaastaColors
log = logging.getLogger(__name__)
def add_subparser(subparsers):
autoscale_parser = subparsers.add_parser(
"autoscale",
help="Manually scale a service up and down manually, bypassing the normal autoscaler",
)
autoscale_parser.add_argument(
"-s", "--service", help="Service that you want to stop. Like 'example_service'."
).completer = lazy_choices_completer(list_services)
autoscale_parser.add_argument(
"-i",
"--instance",
help="Instance of the service that you want to stop. Like 'main' or 'canary'.",
required=True,
).completer = lazy_choices_completer(list_instances)
autoscale_parser.add_argument(
"-c",
"--cluster",
help="The PaaSTA cluster that has the service instance you want to stop. Like 'norcal-prod'.",
required=True,
).completer = lazy_choices_completer(list_clusters)
autoscale_parser.add_argument(
"--set", help="Set the number to scale to. Must be an Int.", type=int
)
autoscale_parser.set_defaults(command=paasta_autoscale)
def paasta_autoscale(args):
log.setLevel(logging.DEBUG)
service = figure_out_service_name(args)
api = client.get_paasta_oapi_client(cluster=args.cluster, http_res=True)
if not api:
print("Could not connect to paasta api. Maybe you misspelled the cluster?")
return 1
try:
if args.set is None:
log.debug("Getting the current autoscaler count...")
res, status, _ = api.autoscaler.get_autoscaler_count(
service=service, instance=args.instance, _return_http_data_only=False
)
else:
log.debug(f"Setting desired instances to {args.set}.")
msg = paastamodels.AutoscalerCountMsg(desired_instances=int(args.set))
res, status, _ = api.autoscaler.update_autoscaler_count(
service=service,
instance=args.instance,
autoscaler_count_msg=msg,
_return_http_data_only=False,
)
_log_audit(
action="manual-scale",
action_details=str(msg),
service=service,
instance=args.instance,
cluster=args.cluster,
)
except api.api_error as exc:
status = exc.status
if not 200 <= status <= 299:
print(
PaastaColors.red(
f"ERROR: '{args.instance}' is not configured to autoscale, "
f"so paasta autoscale could not scale it up on demand. "
f"If you want to be able to boost this service, please configure autoscaling for the service "
f"in its config file by setting min and max instances. Example: \n"
f"{args.instance}:\n"
f" min_instances: 5\n"
f" max_instances: 50"
)
)
return 0
log.debug(f"Res: {res} Http: {status}")
print(res.desired_instances)
return 0
|
from pathlib import Path
import keras
import matchzoo as mz
_glove_embedding_url = "http://nlp.stanford.edu/data/glove.6B.zip"
def load_glove_embedding(dimension: int = 50) -> mz.embedding.Embedding:
"""
Return the pretrained glove embedding.
    :param dimension: the size of embedding dimension, the value can be
        50, 100, 200, or 300.
:return: The :class:`mz.embedding.Embedding` object.
"""
file_name = 'glove.6B.' + str(dimension) + 'd.txt'
file_path = (Path(mz.USER_DATA_DIR) / 'glove').joinpath(file_name)
if not file_path.exists():
keras.utils.data_utils.get_file('glove_embedding',
_glove_embedding_url,
extract=True,
cache_dir=mz.USER_DATA_DIR,
cache_subdir='glove')
return mz.embedding.load_from_file(file_path=str(file_path), mode='glove')
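# Minimal usage sketch (the dimension is assumed to be one of the published
# glove.6B sizes; the zip is only downloaded on the first call):
#
#   >>> embedding = load_glove_embedding(dimension=100)
#   >>> isinstance(embedding, mz.embedding.Embedding)
#   True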
|
from qutebrowser.api import cmdutils, apitypes, message, config
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def zoom_in(tab: apitypes.Tab, count: int = 1, quiet: bool = False) -> None:
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
quiet: Don't show a zoom level message.
"""
try:
perc = tab.zoom.apply_offset(count)
except ValueError as e:
raise cmdutils.CommandError(e)
if not quiet:
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def zoom_out(tab: apitypes.Tab, count: int = 1, quiet: bool = False) -> None:
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
quiet: Don't show a zoom level message.
"""
try:
perc = tab.zoom.apply_offset(-count)
except ValueError as e:
raise cmdutils.CommandError(e)
if not quiet:
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def zoom(tab: apitypes.Tab,
level: str = None,
count: int = None,
quiet: bool = False) -> None:
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
level: The zoom percentage to set.
count: The zoom percentage to set.
quiet: Don't show a zoom level message.
"""
if count is not None:
int_level = count
elif level is not None:
try:
int_level = int(level.rstrip('%'))
except ValueError:
raise cmdutils.CommandError("zoom: Invalid int value {}"
.format(level))
else:
int_level = int(config.val.zoom.default)
try:
tab.zoom.set_factor(int_level / 100)
except ValueError:
raise cmdutils.CommandError("Can't zoom {}%!".format(int_level))
if not quiet:
message.info("Zoom level: {}%".format(int_level), replace=True)
|
from hole.exceptions import HoleError
from homeassistant.components.pi_hole.const import CONF_LOCATION
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from tests.async_mock import AsyncMock, MagicMock, patch
ZERO_DATA = {
"ads_blocked_today": 0,
"ads_percentage_today": 0,
"clients_ever_seen": 0,
"dns_queries_today": 0,
"domains_being_blocked": 0,
"queries_cached": 0,
"queries_forwarded": 0,
"status": "disabled",
"unique_clients": 0,
"unique_domains": 0,
}
HOST = "1.2.3.4"
PORT = 80
LOCATION = "location"
NAME = "Pi hole"
API_KEY = "apikey"
SSL = False
VERIFY_SSL = True
CONF_DATA = {
CONF_HOST: f"{HOST}:{PORT}",
CONF_LOCATION: LOCATION,
CONF_NAME: NAME,
CONF_API_KEY: API_KEY,
CONF_SSL: SSL,
CONF_VERIFY_SSL: VERIFY_SSL,
}
CONF_CONFIG_FLOW = {
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_LOCATION: LOCATION,
CONF_NAME: NAME,
CONF_API_KEY: API_KEY,
CONF_SSL: SSL,
CONF_VERIFY_SSL: VERIFY_SSL,
}
SWITCH_ENTITY_ID = "switch.pi_hole"
def _create_mocked_hole(raise_exception=False):
mocked_hole = MagicMock()
type(mocked_hole).get_data = AsyncMock(
side_effect=HoleError("") if raise_exception else None
)
type(mocked_hole).enable = AsyncMock()
type(mocked_hole).disable = AsyncMock()
mocked_hole.data = ZERO_DATA
return mocked_hole
def _patch_init_hole(mocked_hole):
return patch("homeassistant.components.pi_hole.Hole", return_value=mocked_hole)
def _patch_config_flow_hole(mocked_hole):
return patch(
"homeassistant.components.pi_hole.config_flow.Hole", return_value=mocked_hole
)
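# Sketch of how these helpers are typically combined in a config flow test
# (the surrounding async test function, imports and result assertions are
# assumed):
#
#   mocked_hole = _create_mocked_hole()
#   with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
#       result = await hass.config_entries.flow.async_init(
#           "pi_hole", context={"source": "user"}, data=CONF_CONFIG_FLOW
#       )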
|
import warnings
import numpy as np
from . import dtypes, nputils, utils
from .duck_array_ops import _dask_or_eager_func, count, fillna, isnull, where_method
from .pycompat import dask_array_type
try:
import dask.array as dask_array
from . import dask_array_compat
except ImportError:
dask_array = None
dask_array_compat = None # type: ignore
def _replace_nan(a, val):
"""
    Replace NaN values in `a` with `val`; return the replaced array and the
    NaN mask.
"""
mask = isnull(a)
return where_method(val, mask, a), mask
def _maybe_null_out(result, axis, mask, min_count=1):
"""
xarray version of pandas.core.nanops._maybe_null_out
"""
if axis is not None and getattr(result, "ndim", False):
null_mask = (np.take(mask.shape, axis).prod() - mask.sum(axis) - min_count) < 0
if null_mask.any():
dtype, fill_value = dtypes.maybe_promote(result.dtype)
result = result.astype(dtype)
result[null_mask] = fill_value
elif getattr(result, "dtype", None) not in dtypes.NAT_TYPES:
null_mask = mask.size - mask.sum()
if null_mask < min_count:
result = np.nan
return result
def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):
"""In house nanargmin, nanargmax for object arrays. Always return integer
type
"""
valid_count = count(value, axis=axis)
value = fillna(value, fill_value)
data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)
# TODO This will evaluate dask arrays and might be costly.
if (valid_count == 0).any():
raise ValueError("All-NaN slice encountered")
return data
def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):
""" In house nanmin and nanmax for object array """
valid_count = count(value, axis=axis)
filled_value = fillna(value, fill_value)
data = getattr(np, func)(filled_value, axis=axis, **kwargs)
if not hasattr(data, "dtype"): # scalar case
data = fill_value if valid_count == 0 else data
# we've computed a single min, max value of type object.
# don't let np.array turn a tuple back into an array
return utils.to_0d_object_array(data)
return where_method(data, valid_count != 0)
def nanmin(a, axis=None, out=None):
if a.dtype.kind == "O":
return _nan_minmax_object("min", dtypes.get_pos_infinity(a.dtype), a, axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanmin(a, axis=axis)
def nanmax(a, axis=None, out=None):
if a.dtype.kind == "O":
return _nan_minmax_object("max", dtypes.get_neg_infinity(a.dtype), a, axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanmax(a, axis=axis)
def nanargmin(a, axis=None):
if a.dtype.kind == "O":
fill_value = dtypes.get_pos_infinity(a.dtype)
return _nan_argminmax_object("argmin", fill_value, a, axis=axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanargmin(a, axis=axis)
def nanargmax(a, axis=None):
if a.dtype.kind == "O":
fill_value = dtypes.get_neg_infinity(a.dtype)
return _nan_argminmax_object("argmax", fill_value, a, axis=axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanargmax(a, axis=axis)
def nansum(a, axis=None, dtype=None, out=None, min_count=None):
a, mask = _replace_nan(a, 0)
result = _dask_or_eager_func("sum")(a, axis=axis, dtype=dtype)
if min_count is not None:
return _maybe_null_out(result, axis, mask, min_count)
else:
return result
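# Worked example of the min_count handling above (arrays are illustrative):
# reducing over axis=1 with min_count=2 nulls out any row with fewer than two
# valid (non-NaN) contributors, so
#
#   >>> nansum(np.array([[1.0, np.nan, np.nan], [1.0, 2.0, 3.0]]),
#   ...        axis=1, min_count=2)
#
# would give roughly array([nan, 6.]).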
def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):
""" In house nanmean. ddof argument will be used in _nanvar method """
from .duck_array_ops import _dask_or_eager_func, count, fillna, where_method
valid_count = count(value, axis=axis)
value = fillna(value, 0)
# As dtype inference is impossible for object dtype, we assume float
# https://github.com/dask/dask/issues/3162
if dtype is None and value.dtype.kind == "O":
dtype = value.dtype if value.dtype.kind in ["cf"] else float
data = _dask_or_eager_func("sum")(value, axis=axis, dtype=dtype, **kwargs)
data = data / (valid_count - ddof)
return where_method(data, valid_count != 0)
def nanmean(a, axis=None, dtype=None, out=None):
if a.dtype.kind == "O":
return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if isinstance(a, dask_array_type):
return dask_array.nanmean(a, axis=axis, dtype=dtype)
return np.nanmean(a, axis=axis, dtype=dtype)
def nanmedian(a, axis=None, out=None):
# The dask algorithm works by rechunking to one chunk along axis
# Make sure we trigger the dask error when passing all dimensions
# so that we don't rechunk the entire array to one chunk and
# possibly blow memory
if axis is not None and len(np.atleast_1d(axis)) == a.ndim:
axis = None
return _dask_or_eager_func(
"nanmedian", dask_module=dask_array_compat, eager_module=nputils
)(a, axis=axis)
def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs):
value_mean = _nanmean_ddof_object(
ddof=0, value=value, axis=axis, keepdims=True, **kwargs
)
squared = (value.astype(value_mean.dtype) - value_mean) ** 2
return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0):
if a.dtype.kind == "O":
return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)
return _dask_or_eager_func("nanvar", eager_module=nputils)(
a, axis=axis, dtype=dtype, ddof=ddof
)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0):
return _dask_or_eager_func("nanstd", eager_module=nputils)(
a, axis=axis, dtype=dtype, ddof=ddof
)
def nanprod(a, axis=None, dtype=None, out=None, min_count=None):
a, mask = _replace_nan(a, 1)
result = _dask_or_eager_func("nanprod")(a, axis=axis, dtype=dtype, out=out)
if min_count is not None:
return _maybe_null_out(result, axis, mask, min_count)
else:
return result
def nancumsum(a, axis=None, dtype=None, out=None):
return _dask_or_eager_func("nancumsum", eager_module=nputils)(
a, axis=axis, dtype=dtype
)
def nancumprod(a, axis=None, dtype=None, out=None):
return _dask_or_eager_func("nancumprod", eager_module=nputils)(
a, axis=axis, dtype=dtype
)
|
import logging
import threading
from pyflic import (
ButtonConnectionChannel,
ClickType,
ConnectionStatus,
FlicClient,
ScanWizard,
ScanWizardResult,
)
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_DISCOVERY,
CONF_HOST,
CONF_PORT,
CONF_TIMEOUT,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 3
CLICK_TYPE_SINGLE = "single"
CLICK_TYPE_DOUBLE = "double"
CLICK_TYPE_HOLD = "hold"
CLICK_TYPES = [CLICK_TYPE_SINGLE, CLICK_TYPE_DOUBLE, CLICK_TYPE_HOLD]
CONF_IGNORED_CLICK_TYPES = "ignored_click_types"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 5551
EVENT_NAME = "flic_click"
EVENT_DATA_NAME = "button_name"
EVENT_DATA_ADDRESS = "button_address"
EVENT_DATA_TYPE = "click_type"
EVENT_DATA_QUEUED_TIME = "queued_time"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_IGNORED_CLICK_TYPES): vol.All(
cv.ensure_list, [vol.In(CLICK_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the flic platform."""
# Initialize flic client responsible for
# connecting to buttons and retrieving events
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
discovery = config.get(CONF_DISCOVERY)
try:
client = FlicClient(host, port)
except ConnectionRefusedError:
_LOGGER.error("Failed to connect to flic server")
return
def new_button_callback(address):
"""Set up newly verified button as device in Home Assistant."""
setup_button(hass, config, add_entities, client, address)
client.on_new_verified_button = new_button_callback
if discovery:
start_scanning(config, add_entities, client)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda event: client.close())
# Start the pyflic event handling thread
threading.Thread(target=client.handle_events).start()
def get_info_callback(items):
"""Add entities for already verified buttons."""
addresses = items["bd_addr_of_verified_buttons"] or []
for address in addresses:
setup_button(hass, config, add_entities, client, address)
# Get addresses of already verified buttons
client.get_info(get_info_callback)
def start_scanning(config, add_entities, client):
"""Start a new flic client for scanning and connecting to new buttons."""
scan_wizard = ScanWizard()
def scan_completed_callback(scan_wizard, result, address, name):
"""Restart scan wizard to constantly check for new buttons."""
if result == ScanWizardResult.WizardSuccess:
_LOGGER.info("Found new button %s", address)
elif result != ScanWizardResult.WizardFailedTimeout:
_LOGGER.warning(
"Failed to connect to button %s. Reason: %s", address, result
)
# Restart scan wizard
start_scanning(config, add_entities, client)
scan_wizard.on_completed = scan_completed_callback
client.add_scan_wizard(scan_wizard)
def setup_button(hass, config, add_entities, client, address):
"""Set up a single button device."""
timeout = config.get(CONF_TIMEOUT)
ignored_click_types = config.get(CONF_IGNORED_CLICK_TYPES)
button = FlicButton(hass, client, address, timeout, ignored_click_types)
_LOGGER.info("Connected to button %s", address)
add_entities([button])
class FlicButton(BinarySensorEntity):
"""Representation of a flic button."""
def __init__(self, hass, client, address, timeout, ignored_click_types):
"""Initialize the flic button."""
self._hass = hass
self._address = address
self._timeout = timeout
self._is_down = False
self._ignored_click_types = ignored_click_types or []
self._hass_click_types = {
ClickType.ButtonClick: CLICK_TYPE_SINGLE,
ClickType.ButtonSingleClick: CLICK_TYPE_SINGLE,
ClickType.ButtonDoubleClick: CLICK_TYPE_DOUBLE,
ClickType.ButtonHold: CLICK_TYPE_HOLD,
}
self._channel = self._create_channel()
client.add_connection_channel(self._channel)
def _create_channel(self):
"""Create a new connection channel to the button."""
channel = ButtonConnectionChannel(self._address)
channel.on_button_up_or_down = self._on_up_down
# If all types of clicks should be ignored, skip registering callbacks
if set(self._ignored_click_types) == set(CLICK_TYPES):
return channel
if CLICK_TYPE_DOUBLE in self._ignored_click_types:
# Listen to all but double click type events
channel.on_button_click_or_hold = self._on_click
elif CLICK_TYPE_HOLD in self._ignored_click_types:
# Listen to all but hold click type events
channel.on_button_single_or_double_click = self._on_click
else:
# Listen to all click type events
channel.on_button_single_or_double_click_or_hold = self._on_click
return channel
@property
def name(self):
"""Return the name of the device."""
return f"flic_{self.address.replace(':', '')}"
@property
def address(self):
"""Return the bluetooth address of the device."""
return self._address
@property
def is_on(self):
"""Return true if sensor is on."""
return self._is_down
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {"address": self.address}
def _queued_event_check(self, click_type, time_diff):
"""Generate a log message and returns true if timeout exceeded."""
time_string = f"{time_diff:d} {'second' if time_diff == 1 else 'seconds'}"
if time_diff > self._timeout:
_LOGGER.warning(
"Queued %s dropped for %s. Time in queue was %s",
click_type,
self.address,
time_string,
)
return True
_LOGGER.info(
"Queued %s allowed for %s. Time in queue was %s",
click_type,
self.address,
time_string,
)
return False
def _on_up_down(self, channel, click_type, was_queued, time_diff):
"""Update device state, if event was not queued."""
if was_queued and self._queued_event_check(click_type, time_diff):
return
self._is_down = click_type == ClickType.ButtonDown
self.schedule_update_ha_state()
def _on_click(self, channel, click_type, was_queued, time_diff):
"""Fire click event, if event was not queued."""
# Return if click event was queued beyond allowed timeout
if was_queued and self._queued_event_check(click_type, time_diff):
return
# Return if click event is in ignored click types
hass_click_type = self._hass_click_types[click_type]
if hass_click_type in self._ignored_click_types:
return
self._hass.bus.fire(
EVENT_NAME,
{
EVENT_DATA_NAME: self.name,
EVENT_DATA_ADDRESS: self.address,
EVENT_DATA_QUEUED_TIME: time_diff,
EVENT_DATA_TYPE: hass_click_type,
},
)
def _connection_status_changed(self, channel, connection_status, disconnect_reason):
"""Remove device, if button disconnects."""
if connection_status == ConnectionStatus.Disconnected:
_LOGGER.warning(
"Button (%s) disconnected. Reason: %s", self.address, disconnect_reason
)
|
from elkm1_lib.const import AlarmState, ArmedStatus, ArmLevel, ArmUpState
from elkm1_lib.util import username
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
ATTR_CHANGED_BY,
FORMAT_NUMBER,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import ElkAttachedEntity, create_elk_entities
from .const import (
ATTR_CHANGED_BY_ID,
ATTR_CHANGED_BY_KEYPAD,
ATTR_CHANGED_BY_TIME,
DOMAIN,
ELK_USER_CODE_SERVICE_SCHEMA,
)
DISPLAY_MESSAGE_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
vol.Optional("clear", default=2): vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional("beep", default=False): cv.boolean,
vol.Optional("timeout", default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=65535)
),
vol.Optional("line1", default=""): cv.string,
vol.Optional("line2", default=""): cv.string,
}
)
SERVICE_ALARM_DISPLAY_MESSAGE = "alarm_display_message"
SERVICE_ALARM_ARM_VACATION = "alarm_arm_vacation"
SERVICE_ALARM_ARM_HOME_INSTANT = "alarm_arm_home_instant"
SERVICE_ALARM_ARM_NIGHT_INSTANT = "alarm_arm_night_instant"
SERVICE_ALARM_BYPASS = "alarm_bypass"
SERVICE_ALARM_CLEAR_BYPASS = "alarm_clear_bypass"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ElkM1 alarm platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
elk = elk_data["elk"]
entities = []
create_elk_entities(elk_data, elk.areas, "area", ElkArea, entities)
async_add_entities(entities, True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_ALARM_ARM_VACATION,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_vacation",
)
platform.async_register_entity_service(
SERVICE_ALARM_ARM_HOME_INSTANT,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_home_instant",
)
platform.async_register_entity_service(
SERVICE_ALARM_ARM_NIGHT_INSTANT,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_night_instant",
)
platform.async_register_entity_service(
SERVICE_ALARM_DISPLAY_MESSAGE,
DISPLAY_MESSAGE_SERVICE_SCHEMA,
"async_display_message",
)
platform.async_register_entity_service(
SERVICE_ALARM_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_bypass",
)
platform.async_register_entity_service(
SERVICE_ALARM_CLEAR_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_clear_bypass",
)
class ElkArea(ElkAttachedEntity, AlarmControlPanelEntity, RestoreEntity):
"""Representation of an Area / Partition within the ElkM1 alarm panel."""
def __init__(self, element, elk, elk_data):
"""Initialize Area as Alarm Control Panel."""
super().__init__(element, elk, elk_data)
self._elk = elk
self._changed_by_keypad = None
self._changed_by_time = None
self._changed_by_id = None
self._changed_by = None
self._state = None
async def async_added_to_hass(self):
"""Register callback for ElkM1 changes."""
await super().async_added_to_hass()
if len(self._elk.areas.elements) == 1:
for keypad in self._elk.keypads:
keypad.add_callback(self._watch_keypad)
self._element.add_callback(self._watch_area)
# We do not get changed_by back from resync.
last_state = await self.async_get_last_state()
if not last_state:
return
if ATTR_CHANGED_BY_KEYPAD in last_state.attributes:
self._changed_by_keypad = last_state.attributes[ATTR_CHANGED_BY_KEYPAD]
if ATTR_CHANGED_BY_TIME in last_state.attributes:
self._changed_by_time = last_state.attributes[ATTR_CHANGED_BY_TIME]
if ATTR_CHANGED_BY_ID in last_state.attributes:
self._changed_by_id = last_state.attributes[ATTR_CHANGED_BY_ID]
if ATTR_CHANGED_BY in last_state.attributes:
self._changed_by = last_state.attributes[ATTR_CHANGED_BY]
def _watch_keypad(self, keypad, changeset):
if keypad.area != self._element.index:
return
if changeset.get("last_user") is not None:
self._changed_by_keypad = keypad.name
self._changed_by_time = keypad.last_user_time.isoformat()
self._changed_by_id = keypad.last_user + 1
self._changed_by = username(self._elk, keypad.last_user)
self.async_write_ha_state()
def _watch_area(self, area, changeset):
last_log = changeset.get("last_log")
if not last_log:
return
# user_number only set for arm/disarm logs
if not last_log.get("user_number"):
return
self._changed_by_keypad = None
self._changed_by_id = last_log["user_number"]
self._changed_by = username(self._elk, self._changed_by_id - 1)
self._changed_by_time = last_log["timestamp"]
self.async_write_ha_state()
@property
def code_format(self):
"""Return the alarm code format."""
return FORMAT_NUMBER
@property
def state(self):
"""Return the state of the element."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def device_state_attributes(self):
"""Attributes of the area."""
attrs = self.initial_attrs()
elmt = self._element
attrs["is_exit"] = elmt.is_exit
attrs["timer1"] = elmt.timer1
attrs["timer2"] = elmt.timer2
if elmt.armed_status is not None:
attrs["armed_status"] = ArmedStatus(elmt.armed_status).name.lower()
if elmt.arm_up_state is not None:
attrs["arm_up_state"] = ArmUpState(elmt.arm_up_state).name.lower()
if elmt.alarm_state is not None:
attrs["alarm_state"] = AlarmState(elmt.alarm_state).name.lower()
attrs[ATTR_CHANGED_BY_KEYPAD] = self._changed_by_keypad
attrs[ATTR_CHANGED_BY_TIME] = self._changed_by_time
attrs[ATTR_CHANGED_BY_ID] = self._changed_by_id
return attrs
@property
def changed_by(self):
"""Last change triggered by."""
return self._changed_by
def _element_changed(self, element, changeset):
elk_state_to_hass_state = {
ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
}
if self._element.alarm_state is None:
self._state = None
elif self._area_is_in_alarm_state():
self._state = STATE_ALARM_TRIGGERED
elif self._entry_exit_timer_is_running():
self._state = (
STATE_ALARM_ARMING if self._element.is_exit else STATE_ALARM_PENDING
)
else:
self._state = elk_state_to_hass_state[self._element.armed_status]
def _entry_exit_timer_is_running(self):
return self._element.timer1 > 0 or self._element.timer2 > 0
def _area_is_in_alarm_state(self):
return self._element.alarm_state >= AlarmState.FIRE_ALARM.value
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._element.disarm(int(code))
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
self._element.arm(ArmLevel.ARMED_STAY.value, int(code))
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))
async def async_alarm_arm_night(self, code=None):
"""Send arm night command."""
self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))
async def async_alarm_arm_home_instant(self, code=None):
"""Send arm stay instant command."""
self._element.arm(ArmLevel.ARMED_STAY_INSTANT.value, int(code))
async def async_alarm_arm_night_instant(self, code=None):
"""Send arm night instant command."""
self._element.arm(ArmLevel.ARMED_NIGHT_INSTANT.value, int(code))
async def async_alarm_arm_vacation(self, code=None):
"""Send arm vacation command."""
self._element.arm(ArmLevel.ARMED_VACATION.value, int(code))
async def async_display_message(self, clear, beep, timeout, line1, line2):
"""Display a message on all keypads for the area."""
self._element.display_message(clear, beep, timeout, line1, line2)
async def async_bypass(self, code=None):
"""Bypass all zones in area."""
self._element.bypass(code)
async def async_clear_bypass(self, code=None):
"""Clear bypass for all zones in area."""
self._element.clear_bypass(code)
|
import os
import argparse
import re
import sys
try:
from .common import get_stash_dir
except (ImportError, ValueError):
from common import get_stash_dir
DEFAULT_ENCODING = "utf-8" # encoding to use to set encoding
def is_encoding_line(s):
"""
Check if the given line specifies an encoding.
:param s: line to check
:type s: str
:return: whether the given line specifies an encoding or not
:rtype: bool
"""
return get_encoding_from_line(s) is not None
def get_encoding_from_line(s):
"""
Return the encoding specified in the given line or None if none was specified.
:param s: line to check
:type s: str
:return: the encoding
    :rtype: str or None
"""
exp = "^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)"
m = re.match(exp, s)
if m is None:
return None
else:
return m.groups()[0]
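# Illustrative matches for the PEP 263-style pattern above (example lines
# assumed):
#
#   >>> get_encoding_from_line("# -*- coding: utf-8 -*-\n")
#   'utf-8'
#   >>> get_encoding_from_line("# vim: set fileencoding=latin-1 :\n")
#   'latin-1'
#   >>> get_encoding_from_line("print('hello')\n") is None
#   True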
def get_encoding_of_file(p):
"""
Return the encoding of a file.
:param p: path to file
:type p: str
:return: the encoding of the file or None
:rtype: str or None
"""
with open(p, "r") as fin:
lines = fin.readlines()
i = 0
for line in lines:
i += 1
if i > 2:
# encoding must be specified in the first two lines
return None
if is_encoding_line(line):
return get_encoding_from_line(line)
def list_all_encodings(p, recursive=False, ignore_nonpy=False):
"""
List all files in a directory and their encoding.
:param p: path to directory
:type p: str
:param recursive: whether to descend into subdirectories or not
:type recursive: bool
:param ignore_nonpy: skip files not ending with .py
:type ignore_nonpy: bool
"""
for fn in os.listdir(p):
fp = os.path.join(p, fn)
if os.path.isdir(fp):
if recursive:
list_all_encodings(fp, recursive=recursive, ignore_nonpy=ignore_nonpy)
else:
if (not fn.endswith(".py") and ignore_nonpy):
# skip
continue
show_file_encoding(fp)
def show_file_encoding(p):
"""
Show the encoding of the file.
:param p: path to the file
:type p: str
"""
enc = get_encoding_of_file(p)
if enc is None:
encs = "---"
else:
encs = enc
print("{fn:20} {enc}".format(fn=os.path.relpath(p), enc=encs))
def set_all_encodings(p, encoding, recursive=False, ignore_nonpy=False, force=False):
"""
Set the encoding for all files in a directory.
:param p: path to directory
:type p: str
:param encoding: encoding to set
:type encoding: str
:param recursive: whether to descend into subdirectories or not
:type recursive: bool
:param ignore_nonpy: skip files not ending with .py
:type ignore_nonpy: bool
:param force: even set the encoding of a file if it already has an encoding
:type force: bool
"""
for fn in os.listdir(p):
fp = os.path.join(p, fn)
if os.path.isdir(fp):
if recursive:
set_all_encodings(fp, encoding, recursive=recursive, ignore_nonpy=ignore_nonpy, force=force)
else:
if (not fn.endswith(".py") and ignore_nonpy):
# skip
continue
if (get_encoding_of_file(fp) is not None) and not force:
# skip
print("Skipping '{}', it already has an encoding.".format(fp))
continue
set_file_encoding(fp, encoding)
def set_file_encoding(p, encoding):
"""
Set the encoding of the file.
:param p: path to the file
:type p: str
:param encoding: encoding to set
:type encoding: str
"""
fe = get_encoding_of_file(p)
if fe is None:
# we can add the encoding
to_add = "# -*- coding: {} -*-\n".format(encoding)
with open(p, "r") as fin:
lines = fin.readlines()
if len(lines) == 0:
# file empty, but we should still add
lines = [to_add]
elif lines[0].startswith("#!"):
# add after shebang
lines.insert(1, to_add)
else:
# add at start
lines.insert(0, to_add)
with open(p, "w") as fout:
fout.write("".join(lines))
else:
# we need to overwrite the encoding
to_add = "# -*- coding: {} -*-\n".format(encoding)
with open(p, "r") as fin:
lines = fin.readlines()
was_set = False
for i in range(len(lines)):
line = lines[i]
if is_encoding_line(line):
                # replace the existing encoding line
                lines[i] = to_add
                was_set = True
                break
        if not was_set:
            # we should still set the encoding
            if lines[0].startswith("#!"):
                # add after shebang
                lines.insert(1, to_add)
            else:
                # add at start
                lines.insert(0, to_add)
        with open(p, "w") as fout:
            fout.write("".join(lines))
def remove_all_encodings(p, recursive=False, ignore_nonpy=True):
"""
    Remove the encoding line from all files in a directory.
:param p: path to directory
:type p: str
:param recursive: whether to descend into subdirectories or not
:type recursive: bool
:param ignore_nonpy: skip files not ending with .py
:type ignore_nonpy: bool
"""
for fn in os.listdir(p):
fp = os.path.join(p, fn)
if os.path.isdir(fp):
if recursive:
remove_all_encodings(fp, recursive=recursive, ignore_nonpy=ignore_nonpy)
else:
if (not fn.endswith(".py") and ignore_nonpy):
# skip
continue
if (get_encoding_of_file(fp) is None):
# skip
print("Skipping '{}', it has no encoding.".format(fp))
continue
remove_file_encoding(fp)
def remove_file_encoding(path):
"""
Remove the encoding line from the given file.
:param path: path to remove from
:type path: str
"""
with open(path, "r") as fin:
lines = fin.readlines()
if len(lines) >= 1 and is_encoding_line(lines[0]):
lines.pop(0)
elif len(lines) >= 2 and is_encoding_line(lines[1]):
lines.pop(1)
else:
print("No encoding line found in '{}'!".format(path))
return
with open(path, "w") as fout:
fout.write("".join(lines))
def main():
"""the main function"""
parser = argparse.ArgumentParser(description="encoding tool")
parser.add_argument("action", action="store", help="what to do", choices=["show", "set", "remove"])
parser.add_argument("-p", "--path", action="store", help="path to file(s), defaults to StaSh root")
parser.add_argument("-r", "--recursive", action="store_true", help="descend into subdirectories")
parser.add_argument("--py-only", dest="pyonly", action="store_true", help="ignore non .py files")
parser.add_argument("-f", "--force", action="store_true", help="force the action")
parser.add_argument("-e", "--encoding", action="store", help="encoding to use (required by some actions")
ns = parser.parse_args()
if ns.path is not None:
path = ns.path
else:
path = get_stash_dir()
if ns.encoding is not None:
encoding = ns.encoding
else:
encoding = DEFAULT_ENCODING
if ns.action == "show":
if not os.path.exists(path):
print("Path '{p}' does not exists!".format(p=path))
sys.exit(1)
elif os.path.isdir(path):
list_all_encodings(path, recursive=ns.recursive, ignore_nonpy=ns.pyonly)
else:
show_file_encoding(path)
elif ns.action == "set":
if not os.path.exists(path):
print("Path '{p}' does not exists!".format(p=path))
sys.exit(1)
elif os.path.isdir(path):
set_all_encodings(path, encoding, recursive=ns.recursive, ignore_nonpy=ns.pyonly, force=ns.force)
else:
set_file_encoding(path, encoding)
elif ns.action == "remove":
if not os.path.exists(path):
print("Path '{p}' does not exists!".format(p=path))
sys.exit(1)
elif os.path.isdir(path):
remove_all_encodings(path, recursive=ns.recursive, ignore_nonpy=ns.pyonly)
else:
remove_file_encoding(path)
else:
print("Unknown action: '{}'!".format(ns.action))
sys.exit(2)
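# Example invocations (the command name and paths are assumed):
#
#   encoding show -r                  # list encodings under the StaSh root, recursively
#   encoding set -e utf-8 -p foo.py   # add a coding line to foo.py
#   encoding remove -p foo.py         # strip the coding line again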
if __name__ == "__main__":
main()
|
from homeassistant import data_entry_flow
from homeassistant.components.local_ip.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from tests.common import MockConfigEntry
async def test_config_flow(hass):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
state = hass.states.get(f"sensor.{DOMAIN}")
assert state
async def test_already_setup(hass):
"""Test we abort if already setup."""
MockConfigEntry(
domain=DOMAIN,
data={},
).add_to_hass(hass)
    # Should abort; only a single instance of the integration is allowed
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
|
import pytest
from homeassistant.const import (
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.temperature import display_temp
TEMP = 24.636626
def test_temperature_not_a_number(hass):
"""Test that temperature is a number."""
temp = "Temperature"
with pytest.raises(Exception) as exception:
display_temp(hass, temp, TEMP_CELSIUS, PRECISION_HALVES)
assert f"Temperature is not a number: {temp}" in str(exception.value)
def test_celsius_halves(hass):
"""Test temperature to celsius rounding to halves."""
assert display_temp(hass, TEMP, TEMP_CELSIUS, PRECISION_HALVES) == 24.5
def test_celsius_tenths(hass):
"""Test temperature to celsius rounding to tenths."""
assert display_temp(hass, TEMP, TEMP_CELSIUS, PRECISION_TENTHS) == 24.6
def test_fahrenheit_wholes(hass):
"""Test temperature to fahrenheit rounding to wholes."""
assert display_temp(hass, TEMP, TEMP_FAHRENHEIT, PRECISION_WHOLE) == -4
|
import unittest
from credstash import paddedInt
class TestPadLeft(unittest.TestCase):
def test_zero(self):
i = 0
self.assertEqual(paddedInt(i), "0" * 19)
def test_ten(self):
i = 10
self.assertEqual(paddedInt(i), str(i).zfill(19))
def test_arbitrary_number(self):
i = 98218329123
self.assertEqual(paddedInt(i), str(i).zfill(19))
def test_huge_number(self):
i = 12345678901234567890123
self.assertEqual(paddedInt(i), str(i).zfill(19))
|
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_PRE_COMMIT
from weblate.addons.forms import GenerateForm
from weblate.utils.render import render_template
class GenerateFileAddon(BaseAddon):
events = (EVENT_PRE_COMMIT,)
name = "weblate.generate.generate"
verbose = _("Statistics generator")
description = _(
"Generates a file containing detailed info about the translation status."
)
settings_form = GenerateForm
multiple = True
icon = "poll.svg"
@classmethod
def can_install(cls, component, user):
if not component.translation_set.exists():
return False
return super().can_install(component, user)
def pre_commit(self, translation, author):
filename = self.render_repo_filename(
self.instance.configuration["filename"], translation
)
if not filename:
return
content = render_template(
self.instance.configuration["template"], translation=translation
)
with open(filename, "w") as handle:
handle.write(content)
translation.addon_commit_files.append(filename)
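# Illustrative add-on configuration (the template placeholders are assumed to
# be whatever render_template exposes for a translation):
#
#   {
#       "filename": "stats/{{ language_code }}.json",
#       "template": '{"translated": {{ stats.translated_percent }}}',
#   }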
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
from absl import flags
from absl.testing import _bazelize_command
from absl.testing import absltest
FLAGS = flags.FLAGS
NUM_TEST_METHODS = 8 # Hard-coded, based on absltest_sharding_test_helper.py
class TestShardingTest(absltest.TestCase):
"""Integration tests: Runs a test binary with sharding.
This is done by setting the sharding environment variables.
"""
def setUp(self):
self._test_name = 'absl/testing/tests/absltest_sharding_test_helper'
self._shard_file = None
def tearDown(self):
if self._shard_file is not None and os.path.exists(self._shard_file):
os.unlink(self._shard_file)
def _run_sharded(self, total_shards, shard_index, shard_file=None, env=None):
"""Runs the py_test binary in a subprocess.
Args:
total_shards: int, the total number of shards.
shard_index: int, the shard index.
shard_file: string, if not 'None', the path to the shard file.
This method asserts it is properly created.
env: Environment variables to be set for the py_test binary.
Returns:
(stdout, exit_code) tuple of (string, int).
"""
if env is None:
env = {}
env.update({
'TEST_TOTAL_SHARDS': str(total_shards),
'TEST_SHARD_INDEX': str(shard_index)
})
if 'SYSTEMROOT' in os.environ:
# This is used by the random module on Windows to locate crypto
# libraries.
env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
if shard_file:
self._shard_file = shard_file
env['TEST_SHARD_STATUS_FILE'] = shard_file
if os.path.exists(shard_file):
os.unlink(shard_file)
proc = subprocess.Popen(
args=[_bazelize_command.get_executable_path(self._test_name)],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
stdout = proc.communicate()[0]
if shard_file:
self.assertTrue(os.path.exists(shard_file))
return (stdout, proc.wait())
def _assert_sharding_correctness(self, total_shards):
"""Assert the primary correctness and performance of sharding.
1. Completeness (all methods are run)
2. Partition (each method run at most once)
3. Balance (for performance)
Args:
total_shards: int, total number of shards.
"""
outerr_by_shard = [] # A list of lists of strings
combined_outerr = [] # A list of strings
exit_code_by_shard = [] # A list of ints
for i in range(total_shards):
(out, exit_code) = self._run_sharded(total_shards, i)
method_list = [x for x in out.split('\n') if x.startswith('class')]
outerr_by_shard.append(method_list)
combined_outerr.extend(method_list)
exit_code_by_shard.append(exit_code)
self.assertLen([x for x in exit_code_by_shard if x != 0], 1,
'Expected exactly one failure')
# Test completeness and partition properties.
self.assertLen(combined_outerr, NUM_TEST_METHODS,
'Partition requirement not met')
self.assertLen(set(combined_outerr), NUM_TEST_METHODS,
'Completeness requirement not met')
# Test balance:
for i in range(len(outerr_by_shard)):
self.assertGreaterEqual(len(outerr_by_shard[i]),
(NUM_TEST_METHODS / total_shards) - 1,
'Shard %d of %d out of balance' %
(i, len(outerr_by_shard)))
def test_shard_file(self):
self._run_sharded(3, 1, os.path.join(FLAGS.test_tmpdir, 'shard_file'))
def test_zero_shards(self):
out, exit_code = self._run_sharded(0, 0)
self.assertEqual(1, exit_code)
self.assertGreaterEqual(out.find('Bad sharding values. index=0, total=0'),
0, 'Bad output: %s' % (out))
def test_with_four_shards(self):
self._assert_sharding_correctness(4)
def test_with_one_shard(self):
self._assert_sharding_correctness(1)
def test_with_ten_shards(self):
self._assert_sharding_correctness(10)
def test_sharding_with_randomization(self):
# If we're both sharding *and* randomizing, we need to confirm that we
# randomize within the shard; we use two seeds to confirm we're seeing the
# same tests (sharding is consistent) in a different order.
tests_seen = []
for seed in ('7', '17'):
out, exit_code = self._run_sharded(
2, 0, env={'TEST_RANDOMIZE_ORDERING_SEED': seed})
self.assertEqual(0, exit_code)
tests_seen.append([x for x in out.splitlines() if x.startswith('class')])
first_tests, second_tests = tests_seen # pylint: disable=unbalanced-tuple-unpacking
self.assertEqual(set(first_tests), set(second_tests))
self.assertNotEqual(first_tests, second_tests)
if __name__ == '__main__':
absltest.main()
|
from flexx.util.testing import run_tests_if_main, skipif, skip, raises
from flexx.event.both_tester import run_in_both, this_is_js
from flexx import event
loop = event.loop
class MyObject(event.Component):
floatpair = event.FloatPairProp(settable=True)
enum1 = event.EnumProp(('foo', 'bar', 'spam'), settable=True)
enum2 = event.EnumProp(('foo', 'bar', 'spam'), 'bar', settable=True)
color = event.ColorProp('cyan', settable=True)
@run_in_both(MyObject)
def test_property_FloatPair():
"""
[0.0, 0.0]
[42.0, 42.0]
[3.2, 4.2]
==
? two values, not 3
? 1st value cannot be
? 2nd value cannot be
append failed
----------
[0, 0]
[42, 42]
[3.2, 4.2]
==
? two values, not 3
? 1st value cannot be
? 2nd value cannot be
append failed
"""
# We convert to list when printing, because in JS we cripple the object
# and on Node the repr then includes the crippled methods.
m = MyObject()
print(list(m.floatpair))
m.set_floatpair(42)
loop.iter()
print(list(m.floatpair))
m.set_floatpair((3.2, 4.2))
loop.iter()
print(list(m.floatpair))
print('==')
# Fail - needs scalar or 2-tuple
m.set_floatpair((3.2, 4.2, 1))
loop.iter()
# Fail - needs number
m.set_floatpair(('hi', 1))
loop.iter()
m.set_floatpair((1, 'hi'))
loop.iter()
# Cannot append
try:
m.floatpair.append(9)
except Exception:
print('append failed')
@run_in_both(MyObject)
def test_property_Enum():
"""
FOO
BAR
SPAM
FOO
? TypeError
? Invalid value for enum 'enum1': EGGS
"""
m = MyObject()
print(m.enum1)
print(m.enum2)
m = MyObject(enum1='spam')
print(m.enum1)
m.set_enum1('foo')
loop.iter()
print(m.enum1)
m.set_enum1(3)
loop.iter()
m.set_enum1('eggs')
loop.iter()
@run_in_both(MyObject)
def test_property_Color1():
"""
#00ffff 1.0
[0.0, 1.0, 1.0, 1.0]
rgba(0,255,255,1)
rgba(0,255,255,0.25)
----------
#00ffff 1
[0, 1, 1, 1]
rgba(0,255,255,1)
rgba(0,255,255,0.25)
"""
m = MyObject()
print(m.color.hex, m.color.alpha)
print(list(m.color.t))
print(m.color.css)
m.set_color((0, 1, 1, 0.25))
loop.iter()
print(m.color.css)
@run_in_both(MyObject)
def test_property_Color2():
"""
? #00ffff 1
? #ff8800 1
? #f48404 1
? #ff8800 0.5
? #f48404 0.5
xx
? #00ff00 1
? #ffff00 0.5
xx
? #ffff00 1
? #ff00ff 1
xx
? #ff0000 1
? #00ff00 0.5
"""
m = MyObject()
print(m.color.hex, m.color.alpha)
m.set_color('#f80')
loop.iter()
print(m.color.hex, m.color.alpha)
m.set_color('#f48404')
loop.iter()
print(m.color.hex, m.color.alpha)
m.set_color('#f808')
loop.iter()
print(m.color.hex, m.color.alpha)
m.set_color('#f4840488')
loop.iter()
print(m.color.hex, m.color.alpha)
print('xx')
m.set_color('rgb(0, 255, 0)')
loop.iter()
print(m.color.hex, m.color.alpha)
m.set_color('rgba(255, 255, 0, 0.5)')
loop.iter()
print(m.color.hex, m.color.alpha)
print('xx')
m.set_color('yellow')
loop.iter()
print(m.color.hex, m.color.alpha)
m.set_color('magenta')
loop.iter()
print(m.color.hex, m.color.alpha)
print('xx')
m.set_color((1, 0, 0, 1))
loop.iter()
print(m.color.hex, m.color.alpha)
m.set_color((0, 1, 0, 0.5))
loop.iter()
print(m.color.hex, m.color.alpha)
run_tests_if_main()
# if __name__ == '__main__':
# test_property_Enum()
|
import urwid
import math
import time
UPDATE_INTERVAL = 0.2
def sin100( x ):
"""
A sin function that returns values between 0 and 100 and repeats
after x == 100.
"""
return 50 + 50 * math.sin( x * math.pi / 50 )
class GraphModel:
"""
A class responsible for storing the data that will be displayed
on the graph, and keeping track of which mode is enabled.
"""
data_max_value = 100
def __init__(self):
data = [ ('Saw', list(range(0,100,2))*2),
('Square', [0]*30 + [100]*30),
('Sine 1', [sin100(x) for x in range(100)] ),
('Sine 2', [(sin100(x) + sin100(x*2))/2
for x in range(100)] ),
('Sine 3', [(sin100(x) + sin100(x*3))/2
for x in range(100)] ),
]
self.modes = []
self.data = {}
for m, d in data:
self.modes.append(m)
self.data[m] = d
def get_modes(self):
return self.modes
def set_mode(self, m):
self.current_mode = m
def get_data(self, offset, r):
"""
Return the data in [offset:offset+r], the maximum value
for items returned, and the offset at which the data
repeats.
"""
l = []
d = self.data[self.current_mode]
while r:
offset = offset % len(d)
segment = d[offset:offset+r]
r -= len(segment)
offset += len(segment)
l += segment
return l, self.data_max_value, len(d)
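    # Illustrative trace of the wrap-around above (values assumed): the 'Saw'
    # series holds 100 samples, so
    #
    #   >>> model = GraphModel(); model.set_mode('Saw')
    #   >>> data, max_value, repeat = model.get_data(95, 10)
    #   >>> len(data), max_value, repeat
    #   (10, 100, 100)
    #
    # i.e. the last five samples are followed by the first five again.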
class GraphView(urwid.WidgetWrap):
"""
A class responsible for providing the application's interface and
graph display.
"""
palette = [
('body', 'black', 'light gray', 'standout'),
('header', 'white', 'dark red', 'bold'),
('screen edge', 'light blue', 'dark cyan'),
('main shadow', 'dark gray', 'black'),
('line', 'black', 'light gray', 'standout'),
('bg background','light gray', 'black'),
('bg 1', 'black', 'dark blue', 'standout'),
('bg 1 smooth', 'dark blue', 'black'),
('bg 2', 'black', 'dark cyan', 'standout'),
('bg 2 smooth', 'dark cyan', 'black'),
('button normal','light gray', 'dark blue', 'standout'),
('button select','white', 'dark green'),
('line', 'black', 'light gray', 'standout'),
('pg normal', 'white', 'black', 'standout'),
('pg complete', 'white', 'dark magenta'),
('pg smooth', 'dark magenta','black')
]
graph_samples_per_bar = 10
graph_num_bars = 5
graph_offset_per_second = 5
def __init__(self, controller):
self.controller = controller
self.started = True
self.start_time = None
self.offset = 0
self.last_offset = None
urwid.WidgetWrap.__init__(self, self.main_window())
def get_offset_now(self):
if self.start_time is None:
return 0
if not self.started:
return self.offset
tdelta = time.time() - self.start_time
return int(self.offset + (tdelta*self.graph_offset_per_second))
def update_graph(self, force_update=False):
o = self.get_offset_now()
if o == self.last_offset and not force_update:
return False
self.last_offset = o
gspb = self.graph_samples_per_bar
r = gspb * self.graph_num_bars
d, max_value, repeat = self.controller.get_data( o, r )
l = []
for n in range(self.graph_num_bars):
value = sum(d[n*gspb:(n+1)*gspb])/gspb
# toggle between two bar types
if n & 1:
l.append([0,value])
else:
l.append([value,0])
self.graph.set_data(l,max_value)
# also update progress
if (o//repeat)&1:
# show 100% for first half, 0 for second half
if o%repeat > repeat//2:
prog = 0
else:
prog = 1
else:
prog = float(o%repeat) / repeat
self.animate_progress.set_completion( prog )
return True
def on_animate_button(self, button):
"""Toggle started state and button text."""
if self.started: # stop animation
button.set_label("Start")
self.offset = self.get_offset_now()
self.started = False
self.controller.stop_animation()
else:
button.set_label("Stop")
self.started = True
self.start_time = time.time()
self.controller.animate_graph()
def on_reset_button(self, w):
self.offset = 0
self.start_time = time.time()
self.update_graph(True)
def on_mode_button(self, button, state):
"""Notify the controller of a new mode setting."""
if state:
# The new mode is the label of the button
self.controller.set_mode( button.get_label() )
self.last_offset = None
def on_mode_change(self, m):
"""Handle external mode change by updating radio buttons."""
for rb in self.mode_buttons:
if rb.get_label() == m:
rb.set_state(True, do_callback=False)
break
self.last_offset = None
def on_unicode_checkbox(self, w, state):
self.graph = self.bar_graph( state )
self.graph_wrap._w = self.graph
self.animate_progress = self.progress_bar( state )
self.animate_progress_wrap._w = self.animate_progress
self.update_graph( True )
def main_shadow(self, w):
"""Wrap a shadow and background around widget w."""
bg = urwid.AttrWrap(urwid.SolidFill(u"\u2592"), 'screen edge')
shadow = urwid.AttrWrap(urwid.SolidFill(u" "), 'main shadow')
bg = urwid.Overlay( shadow, bg,
('fixed left', 3), ('fixed right', 1),
('fixed top', 2), ('fixed bottom', 1))
w = urwid.Overlay( w, bg,
('fixed left', 2), ('fixed right', 3),
('fixed top', 1), ('fixed bottom', 2))
return w
def bar_graph(self, smooth=False):
satt = None
if smooth:
satt = {(1,0): 'bg 1 smooth', (2,0): 'bg 2 smooth'}
w = urwid.BarGraph(['bg background','bg 1','bg 2'], satt=satt)
return w
def button(self, t, fn):
w = urwid.Button(t, fn)
w = urwid.AttrWrap(w, 'button normal', 'button select')
return w
def radio_button(self, g, l, fn):
w = urwid.RadioButton(g, l, False, on_state_change=fn)
w = urwid.AttrWrap(w, 'button normal', 'button select')
return w
def progress_bar(self, smooth=False):
if smooth:
return urwid.ProgressBar('pg normal', 'pg complete',
0, 1, 'pg smooth')
else:
return urwid.ProgressBar('pg normal', 'pg complete',
0, 1)
def exit_program(self, w):
raise urwid.ExitMainLoop()
def graph_controls(self):
modes = self.controller.get_modes()
# setup mode radio buttons
self.mode_buttons = []
group = []
for m in modes:
rb = self.radio_button( group, m, self.on_mode_button )
self.mode_buttons.append( rb )
# setup animate button
self.animate_button = self.button( "", self.on_animate_button)
self.on_animate_button( self.animate_button )
self.offset = 0
self.animate_progress = self.progress_bar()
animate_controls = urwid.GridFlow( [
self.animate_button,
self.button("Reset", self.on_reset_button),
], 9, 2, 0, 'center')
if urwid.get_encoding_mode() == "utf8":
unicode_checkbox = urwid.CheckBox(
"Enable Unicode Graphics",
on_state_change=self.on_unicode_checkbox)
else:
unicode_checkbox = urwid.Text(
"UTF-8 encoding not detected")
self.animate_progress_wrap = urwid.WidgetWrap(
self.animate_progress)
l = [ urwid.Text("Mode",align="center"),
] + self.mode_buttons + [
urwid.Divider(),
urwid.Text("Animation",align="center"),
animate_controls,
self.animate_progress_wrap,
urwid.Divider(),
urwid.LineBox( unicode_checkbox ),
urwid.Divider(),
self.button("Quit", self.exit_program ),
]
w = urwid.ListBox(urwid.SimpleListWalker(l))
return w
def main_window(self):
self.graph = self.bar_graph()
self.graph_wrap = urwid.WidgetWrap( self.graph )
vline = urwid.AttrWrap( urwid.SolidFill(u'\u2502'), 'line')
c = self.graph_controls()
w = urwid.Columns([('weight',2,self.graph_wrap),
('fixed',1,vline), c],
dividechars=1, focus_column=2)
w = urwid.Padding(w,('fixed left',1),('fixed right',0))
w = urwid.AttrWrap(w,'body')
w = urwid.LineBox(w)
w = urwid.AttrWrap(w,'line')
w = self.main_shadow(w)
return w
class GraphController:
"""
A class responsible for setting up the model and view and running
the application.
"""
def __init__(self):
self.animate_alarm = None
self.model = GraphModel()
self.view = GraphView( self )
# use the first mode as the default
mode = self.get_modes()[0]
self.model.set_mode( mode )
# update the view
self.view.on_mode_change( mode )
self.view.update_graph(True)
def get_modes(self):
"""Allow our view access to the list of modes."""
return self.model.get_modes()
def set_mode(self, m):
"""Allow our view to set the mode."""
rval = self.model.set_mode( m )
self.view.update_graph(True)
return rval
def get_data(self, offset, range):
"""Provide data to our view for the graph."""
return self.model.get_data( offset, range )
def main(self):
self.loop = urwid.MainLoop(self.view, self.view.palette)
self.loop.run()
def animate_graph(self, loop=None, user_data=None):
"""update the graph and schedule the next update"""
self.view.update_graph()
self.animate_alarm = self.loop.set_alarm_in(
UPDATE_INTERVAL, self.animate_graph)
def stop_animation(self):
"""stop animating the graph"""
if self.animate_alarm:
self.loop.remove_alarm(self.animate_alarm)
self.animate_alarm = None
def main():
GraphController().main()
if '__main__'==__name__:
main()
|
import posixpath
from perfkitbenchmarker import linux_packages
LMBENCH_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'lmbench')
GIT = 'https://github.com/intel/lmbench.git'
COMMIT = '4e4efa113b244b70a1faafd13744578b4edeaeb3'
def _Install(vm):
"""Installs the Lmbench package on the VM."""
vm.Install('build_tools')
vm.RemoteCommand('cd %s && git clone %s && cd %s && git checkout %s' %
(linux_packages.INSTALL_DIR, GIT, 'lmbench', COMMIT))
def YumInstall(vm):
_Install(vm)
def AptInstall(vm):
_Install(vm)
|
import logging
from august.activity import ActivityType
from august.lock import LockStatus
from august.util import update_lock_detail_from_activity
from homeassistant.components.lock import ATTR_CHANGED_BY, LockEntity
from homeassistant.const import ATTR_BATTERY_LEVEL
from homeassistant.core import callback
from homeassistant.helpers.restore_state import RestoreEntity
from .const import DATA_AUGUST, DOMAIN
from .entity import AugustEntityMixin
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up August locks."""
data = hass.data[DOMAIN][config_entry.entry_id][DATA_AUGUST]
devices = []
for lock in data.locks:
_LOGGER.debug("Adding lock for %s", lock.device_name)
devices.append(AugustLock(data, lock))
async_add_entities(devices, True)
class AugustLock(AugustEntityMixin, RestoreEntity, LockEntity):
"""Representation of an August lock."""
def __init__(self, data, device):
"""Initialize the lock."""
super().__init__(data, device)
self._data = data
self._device = device
self._lock_status = None
self._changed_by = None
self._available = False
self._update_from_data()
async def async_lock(self, **kwargs):
"""Lock the device."""
await self._call_lock_operation(self._data.async_lock)
async def async_unlock(self, **kwargs):
"""Unlock the device."""
await self._call_lock_operation(self._data.async_unlock)
async def _call_lock_operation(self, lock_operation):
activities = await lock_operation(self._device_id)
for lock_activity in activities:
update_lock_detail_from_activity(self._detail, lock_activity)
if self._update_lock_status_from_detail():
_LOGGER.debug(
"async_signal_device_id_update (from lock operation): %s",
self._device_id,
)
self._data.async_signal_device_id_update(self._device_id)
def _update_lock_status_from_detail(self):
self._available = self._detail.bridge_is_online
if self._lock_status != self._detail.lock_status:
self._lock_status = self._detail.lock_status
return True
return False
@callback
def _update_from_data(self):
"""Get the latest state of the sensor and update activity."""
lock_activity = self._data.activity_stream.get_latest_device_activity(
self._device_id, [ActivityType.LOCK_OPERATION]
)
if lock_activity is not None:
self._changed_by = lock_activity.operated_by
update_lock_detail_from_activity(self._detail, lock_activity)
self._update_lock_status_from_detail()
@property
def name(self):
"""Return the name of this device."""
return self._device.device_name
@property
def available(self):
"""Return the availability of this sensor."""
return self._available
@property
def is_locked(self):
"""Return true if device is on."""
if self._lock_status is None or self._lock_status is LockStatus.UNKNOWN:
return None
return self._lock_status is LockStatus.LOCKED
@property
def changed_by(self):
"""Last change triggered by."""
return self._changed_by
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attributes = {ATTR_BATTERY_LEVEL: self._detail.battery_level}
if self._detail.keypad is not None:
attributes["keypad_battery_level"] = self._detail.keypad.battery_level
return attributes
async def async_added_to_hass(self):
"""Restore ATTR_CHANGED_BY on startup since it is likely no longer in the activity log."""
await super().async_added_to_hass()
last_state = await self.async_get_last_state()
if not last_state:
return
if ATTR_CHANGED_BY in last_state.attributes:
self._changed_by = last_state.attributes[ATTR_CHANGED_BY]
@property
def unique_id(self) -> str:
"""Get the unique id of the lock."""
return f"{self._device_id:s}_lock"
|
import os.path as op
import pytest
import numpy as np
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx
from mne.preprocessing.nirs import optical_density, tddr
from mne.datasets import testing
fname_nirx_15_2 = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_2_recording')
@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2]))
def test_temporal_derivative_distribution_repair(fname, tmpdir):
"""Test running artifact rejection."""
raw = read_raw_nirx(fname)
raw = optical_density(raw)
    # Add a baseline shift artifact (step change after the first 30 samples)
max_shift = np.max(np.diff(raw._data[0]))
shift_amp = 5 * max_shift
raw._data[0, 0:30] = raw._data[0, 0:30] - (shift_amp)
assert np.max(np.diff(raw._data[0])) > shift_amp
# Ensure that applying the algorithm reduces the step change
raw = tddr(raw)
assert np.max(np.diff(raw._data[0])) < shift_amp
|
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.viz import iter_topography
from mne import io
from mne.time_frequency import psd_welch
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds, freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds) # scale to dB
def my_callback(ax, ch_idx):
"""
This block of code is executed once you click on one of the channel axes
in the plot. To work with the viz internals, this function should only take
two parameters, the axis and the channel or data index.
"""
ax.plot(freqs, psds[ch_idx], color='red')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power (dB)')
for ax, idx in iter_topography(raw.info,
fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white',
on_pick=my_callback):
ax.plot(psds[idx], color='red')
plt.gcf().suptitle('Power spectral densities')
plt.show()
|
import hashlib
import hmac
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import mailgun, webhook
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
API_KEY = "abc123"
@pytest.fixture
async def http_client(hass, aiohttp_client):
"""Initialize a Home Assistant Server for testing this module."""
await async_setup_component(hass, webhook.DOMAIN, {})
return await aiohttp_client(hass.http.app)
@pytest.fixture
async def webhook_id_with_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id."""
await async_setup_component(
hass,
mailgun.DOMAIN,
{mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: "example.com"}},
)
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"mailgun", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
return result["result"].data["webhook_id"]
@pytest.fixture
async def webhook_id_without_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id w/o API key."""
await async_setup_component(hass, mailgun.DOMAIN, {})
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"mailgun", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
return result["result"].data["webhook_id"]
@pytest.fixture
async def mailgun_events(hass):
"""Return a list of mailgun_events triggered."""
events = []
@callback
def handle_event(event):
"""Handle Mailgun event."""
events.append(event)
hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)
return events
async def test_mailgun_webhook_with_missing_signature(
http_client, webhook_id_with_api_key, mailgun_events
):
"""Test that webhook doesn't trigger an event without a signature."""
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={"hello": "mailgun", "signature": {}},
)
assert len(mailgun_events) == event_count
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}", json={"hello": "mailgun"}
)
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_with_different_api_key(
http_client, webhook_id_with_api_key, mailgun_events
):
"""Test that webhook doesn't trigger an event with a wrong signature."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=b"random_api_key",
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_event_with_correct_api_key(
http_client, webhook_id_with_api_key, mailgun_events
):
"""Test that webhook triggers an event after validating a signature."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=bytes(API_KEY, "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_with_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_with_missing_signature_without_api_key(
http_client, webhook_id_without_api_key, mailgun_events
):
"""Test that webhook triggers an event without a signature w/o API key."""
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}",
json={"hello": "mailgun", "signature": {}},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}", json={"hello": "mailgun"}
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_event_without_an_api_key(
http_client, webhook_id_without_api_key, mailgun_events
):
"""Test that webhook triggers an event if there is no api key."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=bytes(API_KEY, "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
|
from homeassistant.components import search
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_search(hass):
"""Test that search works."""
area_reg = await hass.helpers.area_registry.async_get_registry()
device_reg = await hass.helpers.device_registry.async_get_registry()
entity_reg = await hass.helpers.entity_registry.async_get_registry()
living_room_area = area_reg.async_create("Living Room")
# Light strip with 2 lights.
wled_config_entry = MockConfigEntry(domain="wled")
wled_config_entry.add_to_hass(hass)
wled_device = device_reg.async_get_or_create(
config_entry_id=wled_config_entry.entry_id,
name="Light Strip",
identifiers=({"wled", "wled-1"}),
)
device_reg.async_update_device(wled_device.id, area_id=living_room_area.id)
wled_segment_1_entity = entity_reg.async_get_or_create(
"light",
"wled",
"wled-1-seg-1",
suggested_object_id="wled segment 1",
config_entry=wled_config_entry,
device_id=wled_device.id,
)
wled_segment_2_entity = entity_reg.async_get_or_create(
"light",
"wled",
"wled-1-seg-2",
suggested_object_id="wled segment 2",
config_entry=wled_config_entry,
device_id=wled_device.id,
)
# Non related info.
kitchen_area = area_reg.async_create("Kitchen")
hue_config_entry = MockConfigEntry(domain="hue")
hue_config_entry.add_to_hass(hass)
hue_device = device_reg.async_get_or_create(
config_entry_id=hue_config_entry.entry_id,
name="Light Strip",
identifiers=({"hue", "hue-1"}),
)
device_reg.async_update_device(hue_device.id, area_id=kitchen_area.id)
hue_segment_1_entity = entity_reg.async_get_or_create(
"light",
"hue",
"hue-1-seg-1",
suggested_object_id="hue segment 1",
config_entry=hue_config_entry,
device_id=hue_device.id,
)
hue_segment_2_entity = entity_reg.async_get_or_create(
"light",
"hue",
"hue-1-seg-2",
suggested_object_id="hue segment 2",
config_entry=hue_config_entry,
device_id=hue_device.id,
)
await async_setup_component(
hass,
"group",
{
"group": {
"wled": {
"name": "wled",
"entities": [
wled_segment_1_entity.entity_id,
wled_segment_2_entity.entity_id,
],
},
"hue": {
"name": "hue",
"entities": [
hue_segment_1_entity.entity_id,
hue_segment_2_entity.entity_id,
],
},
"wled_hue": {
"name": "wled and hue",
"entities": [
wled_segment_1_entity.entity_id,
wled_segment_2_entity.entity_id,
hue_segment_1_entity.entity_id,
hue_segment_2_entity.entity_id,
],
},
}
},
)
await async_setup_component(
hass,
"scene",
{
"scene": [
{
"name": "scene_wled_seg_1",
"entities": {wled_segment_1_entity.entity_id: "on"},
},
{
"name": "scene_hue_seg_1",
"entities": {hue_segment_1_entity.entity_id: "on"},
},
{
"name": "scene_wled_hue",
"entities": {
wled_segment_1_entity.entity_id: "on",
wled_segment_2_entity.entity_id: "on",
hue_segment_1_entity.entity_id: "on",
hue_segment_2_entity.entity_id: "on",
},
},
]
},
)
await async_setup_component(
hass,
"script",
{
"script": {
"wled": {
"sequence": [
{
"service": "test.script",
"data": {"entity_id": wled_segment_1_entity.entity_id},
},
]
},
"hue": {
"sequence": [
{
"service": "test.script",
"data": {"entity_id": hue_segment_1_entity.entity_id},
},
]
},
}
},
)
assert await async_setup_component(
hass,
"automation",
{
"automation": [
{
"alias": "wled_entity",
"trigger": {"platform": "template", "value_template": "true"},
"action": [
{
"service": "test.script",
"data": {"entity_id": wled_segment_1_entity.entity_id},
},
],
},
{
"alias": "wled_device",
"trigger": {"platform": "template", "value_template": "true"},
"action": [
{
"domain": "light",
"device_id": wled_device.id,
"entity_id": wled_segment_1_entity.entity_id,
"type": "turn_on",
},
],
},
]
},
)
# Explore the graph from every node and make sure we find the same results
expected = {
"config_entry": {wled_config_entry.entry_id},
"area": {living_room_area.id},
"device": {wled_device.id},
"entity": {wled_segment_1_entity.entity_id, wled_segment_2_entity.entity_id},
"scene": {"scene.scene_wled_seg_1", "scene.scene_wled_hue"},
"group": {"group.wled", "group.wled_hue"},
"script": {"script.wled"},
"automation": {"automation.wled_entity", "automation.wled_device"},
}
for search_type, search_id in (
("config_entry", wled_config_entry.entry_id),
("area", living_room_area.id),
("device", wled_device.id),
("entity", wled_segment_1_entity.entity_id),
("entity", wled_segment_2_entity.entity_id),
("scene", "scene.scene_wled_seg_1"),
("group", "group.wled"),
("script", "script.wled"),
("automation", "automation.wled_entity"),
("automation", "automation.wled_device"),
):
searcher = search.Searcher(hass, device_reg, entity_reg)
results = searcher.async_search(search_type, search_id)
# Add the item we searched for, it's omitted from results
results.setdefault(search_type, set()).add(search_id)
assert (
results == expected
), f"Results for {search_type}/{search_id} do not match up"
# For combined things, needs to return everything.
expected_combined = {
"config_entry": {wled_config_entry.entry_id, hue_config_entry.entry_id},
"area": {living_room_area.id, kitchen_area.id},
"device": {wled_device.id, hue_device.id},
"entity": {
wled_segment_1_entity.entity_id,
wled_segment_2_entity.entity_id,
hue_segment_1_entity.entity_id,
hue_segment_2_entity.entity_id,
},
"scene": {
"scene.scene_wled_seg_1",
"scene.scene_hue_seg_1",
"scene.scene_wled_hue",
},
"group": {"group.wled", "group.hue", "group.wled_hue"},
"script": {"script.wled", "script.hue"},
"automation": {"automation.wled_entity", "automation.wled_device"},
}
for search_type, search_id in (
("scene", "scene.scene_wled_hue"),
("group", "group.wled_hue"),
):
searcher = search.Searcher(hass, device_reg, entity_reg)
results = searcher.async_search(search_type, search_id)
# Add the item we searched for, it's omitted from results
results.setdefault(search_type, set()).add(search_id)
assert (
results == expected_combined
), f"Results for {search_type}/{search_id} do not match up"
for search_type, search_id in (
("entity", "automation.non_existing"),
("entity", "scene.non_existing"),
("entity", "group.non_existing"),
("entity", "script.non_existing"),
("entity", "light.non_existing"),
("area", "non_existing"),
("config_entry", "non_existing"),
("device", "non_existing"),
("group", "group.non_existing"),
("scene", "scene.non_existing"),
("script", "script.non_existing"),
("automation", "automation.non_existing"),
):
searcher = search.Searcher(hass, device_reg, entity_reg)
assert searcher.async_search(search_type, search_id) == {}
async def test_ws_api(hass, hass_ws_client):
"""Test WS API."""
assert await async_setup_component(hass, "search", {})
area_reg = await hass.helpers.area_registry.async_get_registry()
device_reg = await hass.helpers.device_registry.async_get_registry()
kitchen_area = area_reg.async_create("Kitchen")
hue_config_entry = MockConfigEntry(domain="hue")
hue_config_entry.add_to_hass(hass)
hue_device = device_reg.async_get_or_create(
config_entry_id=hue_config_entry.entry_id,
name="Light Strip",
identifiers=({"hue", "hue-1"}),
)
device_reg.async_update_device(hue_device.id, area_id=kitchen_area.id)
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "search/related",
"item_type": "device",
"item_id": hue_device.id,
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {
"config_entry": [hue_config_entry.entry_id],
"area": [kitchen_area.id],
}
|
import os
import threading
from typing import List
import av
from homeassistant.core import callback
from .core import PROVIDERS, Segment, StreamOutput
@callback
def async_setup_recorder(hass):
"""Only here so Provider Registry works."""
def recorder_save_worker(file_out: str, segments: List[Segment], container_format: str):
"""Handle saving stream."""
if not os.path.exists(os.path.dirname(file_out)):
os.makedirs(os.path.dirname(file_out), exist_ok=True)
first_pts = {"video": None, "audio": None}
output = av.open(file_out, "w", format=container_format)
output_v = None
output_a = None
for segment in segments:
# Seek to beginning and open segment
segment.segment.seek(0)
source = av.open(segment.segment, "r", format=container_format)
source_v = source.streams.video[0]
# Add output streams
if not output_v:
output_v = output.add_stream(template=source_v)
context = output_v.codec_context
context.flags |= "GLOBAL_HEADER"
if not output_a and len(source.streams.audio) > 0:
source_a = source.streams.audio[0]
output_a = output.add_stream(template=source_a)
# Remux video
for packet in source.demux():
if packet is not None and packet.dts is not None:
if first_pts[packet.stream.type] is None:
first_pts[packet.stream.type] = packet.pts
packet.pts -= first_pts[packet.stream.type]
packet.dts -= first_pts[packet.stream.type]
packet.stream = output_v if packet.stream.type == "video" else output_a
output.mux(packet)
source.close()
output.close()
@PROVIDERS.register("recorder")
class RecorderOutput(StreamOutput):
"""Represents HLS Output formats."""
def __init__(self, stream, timeout: int = 30) -> None:
"""Initialize recorder output."""
super().__init__(stream, timeout)
self.video_path = None
self._segments = []
@property
def name(self) -> str:
"""Return provider name."""
return "recorder"
@property
def format(self) -> str:
"""Return container format."""
return "mp4"
@property
    def audio_codecs(self) -> set:
        """Return desired audio codecs."""
return {"aac", "mp3"}
@property
    def video_codecs(self) -> set:
"""Return desired video codecs."""
return {"hevc", "h264"}
def prepend(self, segments: List[Segment]) -> None:
"""Prepend segments to existing list."""
own_segments = self.segments
segments = [s for s in segments if s.sequence not in own_segments]
self._segments = segments + self._segments
@callback
def _timeout(self, _now=None):
"""Handle recorder timeout."""
self._unsub = None
self.cleanup()
def cleanup(self):
"""Write recording and clean up."""
thread = threading.Thread(
name="recorder_save_worker",
target=recorder_save_worker,
args=(self.video_path, self._segments, self.format),
)
thread.start()
self._segments = []
self._stream.remove_provider(self)
|
from .util import async_init_integration
async def test_air_con(hass):
"""Test creation of aircon climate."""
await async_init_integration(hass)
state = hass.states.get("climate.air_conditioning")
assert state.state == "cool"
expected_attributes = {
"current_humidity": 60.9,
"current_temperature": 24.8,
"fan_mode": "auto",
"fan_modes": ["auto", "high", "medium", "low"],
"friendly_name": "Air Conditioning",
"hvac_action": "cooling",
"hvac_modes": ["off", "auto", "heat", "cool", "heat_cool", "dry", "fan_only"],
"max_temp": 31.0,
"min_temp": 16.0,
"preset_mode": "home",
"preset_modes": ["away", "home"],
"supported_features": 25,
"target_temp_step": 1,
"temperature": 17.8,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_heater(hass):
"""Test creation of heater climate."""
await async_init_integration(hass)
state = hass.states.get("climate.baseboard_heater")
assert state.state == "heat"
expected_attributes = {
"current_humidity": 45.2,
"current_temperature": 20.6,
"friendly_name": "Baseboard Heater",
"hvac_action": "idle",
"hvac_modes": ["off", "auto", "heat"],
"max_temp": 31.0,
"min_temp": 16.0,
"preset_mode": "home",
"preset_modes": ["away", "home"],
"supported_features": 17,
"target_temp_step": 1,
"temperature": 20.5,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_smartac_with_swing(hass):
"""Test creation of smart ac with swing climate."""
await async_init_integration(hass)
state = hass.states.get("climate.air_conditioning_with_swing")
assert state.state == "auto"
expected_attributes = {
"current_humidity": 42.3,
"current_temperature": 20.9,
"fan_mode": "auto",
"fan_modes": ["auto", "high", "medium", "low"],
"friendly_name": "Air Conditioning with swing",
"hvac_action": "heating",
"hvac_modes": ["off", "auto", "heat", "cool", "heat_cool", "dry", "fan_only"],
"max_temp": 30.0,
"min_temp": 16.0,
"preset_mode": "home",
"preset_modes": ["away", "home"],
"swing_modes": ["ON", "OFF"],
"supported_features": 57,
"target_temp_step": 1.0,
"temperature": 20.0,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
|
from homeassistant.setup import ATTR_COMPONENT, EVENT_COMPONENT_LOADED
from tests.async_mock import Mock
from tests.common import mock_platform
async def test_process_integration_platforms(hass):
"""Test processing integrations."""
loaded_platform = Mock()
mock_platform(hass, "loaded.platform_to_check", loaded_platform)
hass.config.components.add("loaded")
event_platform = Mock()
mock_platform(hass, "event.platform_to_check", event_platform)
processed = []
async def _process_platform(hass, domain, platform):
"""Process platform."""
processed.append((domain, platform))
await hass.helpers.integration_platform.async_process_integration_platforms(
"platform_to_check", _process_platform
)
assert len(processed) == 1
assert processed[0][0] == "loaded"
assert processed[0][1] == loaded_platform
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: "event"})
await hass.async_block_till_done()
assert len(processed) == 2
assert processed[1][0] == "event"
assert processed[1][1] == event_platform
|
from typing import Any
from aiopvapi.resources.scene import Scene as PvScene
import voluptuous as vol
from homeassistant.components.scene import Scene
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_PLATFORM
import homeassistant.helpers.config_validation as cv
from .const import (
COORDINATOR,
DEVICE_INFO,
DOMAIN,
HUB_ADDRESS,
PV_API,
PV_ROOM_DATA,
PV_SCENE_DATA,
ROOM_NAME_UNICODE,
STATE_ATTRIBUTE_ROOM_NAME,
)
from .entity import HDEntity
PLATFORM_SCHEMA = vol.Schema(
{vol.Required(CONF_PLATFORM): DOMAIN, vol.Required(HUB_ADDRESS): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import platform from yaml."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: config[HUB_ADDRESS]},
)
)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up powerview scene entries."""
pv_data = hass.data[DOMAIN][entry.entry_id]
room_data = pv_data[PV_ROOM_DATA]
scene_data = pv_data[PV_SCENE_DATA]
pv_request = pv_data[PV_API]
coordinator = pv_data[COORDINATOR]
device_info = pv_data[DEVICE_INFO]
pvscenes = (
PowerViewScene(
PvScene(raw_scene, pv_request), room_data, coordinator, device_info
)
for scene_id, raw_scene in scene_data.items()
)
async_add_entities(pvscenes)
class PowerViewScene(HDEntity, Scene):
"""Representation of a Powerview scene."""
def __init__(self, scene, room_data, coordinator, device_info):
"""Initialize the scene."""
super().__init__(coordinator, device_info, scene.id)
self._scene = scene
self._room_name = room_data.get(scene.room_id, {}).get(ROOM_NAME_UNICODE, "")
@property
def name(self):
"""Return the name of the scene."""
return self._scene.name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {STATE_ATTRIBUTE_ROOM_NAME: self._room_name}
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:blinds"
async def async_activate(self, **kwargs: Any) -> None:
"""Activate scene. Try to get entities into requested state."""
await self._scene.activate()
|
from PyQt5.QtWidgets import QMessageBox
from qutebrowser.utils import log, utils
def _get_name(exc: BaseException) -> str:
"""Get a suitable exception name as a string."""
prefixes = ['qutebrowser', 'builtins']
name = utils.qualname(exc.__class__)
for prefix in prefixes:
if name.startswith(prefix):
name = name[len(prefix) + 1:]
break
return name
def handle_fatal_exc(exc: BaseException,
title: str, *,
no_err_windows: bool,
pre_text: str = '',
post_text: str = '') -> None:
"""Handle a fatal "expected" exception by displaying an error box.
If --no-err-windows is given as argument, the text is logged to the error
logger instead.
Args:
exc: The Exception object being handled.
no_err_windows: Show text in log instead of error window.
title: The title to be used for the error message.
pre_text: The text to be displayed before the exception text.
post_text: The text to be displayed after the exception text.
"""
if no_err_windows:
lines = [
"Handling fatal {} with --no-err-windows!".format(_get_name(exc)),
"",
"title: {}".format(title),
"pre_text: {}".format(pre_text),
"post_text: {}".format(post_text),
"exception text: {}".format(str(exc) or 'none'),
]
log.misc.exception('\n'.join(lines))
else:
log.misc.exception("Fatal exception:")
if pre_text:
msg_text = '{}: {}'.format(pre_text, exc)
else:
msg_text = str(exc)
if post_text:
msg_text += '\n\n{}'.format(post_text)
msgbox = QMessageBox(QMessageBox.Critical, title, msg_text)
msgbox.exec_()
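def _handle_fatal_exc_demo():
    """Minimal usage sketch (illustrative only; the helper name is made up).
    With no_err_windows=True, handle_fatal_exc above writes the exception
    details to the error logger instead of opening a QMessageBox, so this
    path needs no Qt event loop or display.
    """
    try:
        raise ValueError("something went wrong")
    except ValueError as exc:
        handle_fatal_exc(exc, "Demo error", no_err_windows=True,
                         pre_text="while running the demo")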
|
from homeassistant import config_entries
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.entity_registry import async_get_registry as get_ent_reg
from .const import DOMAIN
async def remove_devices(bridge, api_ids, current):
"""Get items that are removed from api."""
removed_items = []
for item_id in current:
if item_id in api_ids:
continue
# Device is removed from Hue, so we remove it from Home Assistant
entity = current[item_id]
removed_items.append(item_id)
await entity.async_remove()
ent_registry = await get_ent_reg(bridge.hass)
if entity.entity_id in ent_registry.entities:
ent_registry.async_remove(entity.entity_id)
dev_registry = await get_dev_reg(bridge.hass)
device = dev_registry.async_get_device(
identifiers={(DOMAIN, entity.device_id)}, connections=set()
)
if device is not None:
dev_registry.async_update_device(
device.id, remove_config_entry_id=bridge.config_entry.entry_id
)
for item_id in removed_items:
del current[item_id]
def create_config_flow(hass, host):
"""Start a config flow."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": host},
)
)
|
import asyncio
from pyotgw.vars import OTGW_ABOUT
from serial import SerialException
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.opentherm_gw.const import (
CONF_FLOOR_TEMP,
CONF_PRECISION,
DOMAIN,
)
from homeassistant.const import CONF_DEVICE, CONF_ID, CONF_NAME, PRECISION_HALVES
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form_user(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.opentherm_gw.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.opentherm_gw.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"pyotgw.pyotgw.connect",
return_value={OTGW_ABOUT: "OpenTherm Gateway 4.2.5"},
) as mock_pyotgw_connect, patch(
"pyotgw.pyotgw.disconnect", return_value=None
) as mock_pyotgw_disconnect:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB0"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Test Entry 1"
assert result2["data"] == {
CONF_NAME: "Test Entry 1",
CONF_DEVICE: "/dev/ttyUSB0",
CONF_ID: "test_entry_1",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_pyotgw_connect.mock_calls) == 1
assert len(mock_pyotgw_disconnect.mock_calls) == 1
async def test_form_import(hass):
"""Test import from existing config."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.opentherm_gw.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.opentherm_gw.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"pyotgw.pyotgw.connect",
return_value={OTGW_ABOUT: "OpenTherm Gateway 4.2.5"},
) as mock_pyotgw_connect, patch(
"pyotgw.pyotgw.disconnect", return_value=None
) as mock_pyotgw_disconnect:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_ID: "legacy_gateway", CONF_DEVICE: "/dev/ttyUSB1"},
)
assert result["type"] == "create_entry"
assert result["title"] == "legacy_gateway"
assert result["data"] == {
CONF_NAME: "legacy_gateway",
CONF_DEVICE: "/dev/ttyUSB1",
CONF_ID: "legacy_gateway",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_pyotgw_connect.mock_calls) == 1
assert len(mock_pyotgw_disconnect.mock_calls) == 1
async def test_form_duplicate_entries(hass):
"""Test duplicate device or id errors."""
flow1 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow3 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.opentherm_gw.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.opentherm_gw.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"pyotgw.pyotgw.connect",
return_value={OTGW_ABOUT: "OpenTherm Gateway 4.2.5"},
) as mock_pyotgw_connect, patch(
"pyotgw.pyotgw.disconnect", return_value=None
) as mock_pyotgw_disconnect:
result1 = await hass.config_entries.flow.async_configure(
flow1["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB0"}
)
result2 = await hass.config_entries.flow.async_configure(
flow2["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB1"}
)
result3 = await hass.config_entries.flow.async_configure(
flow3["flow_id"], {CONF_NAME: "Test Entry 2", CONF_DEVICE: "/dev/ttyUSB0"}
)
assert result1["type"] == "create_entry"
assert result2["type"] == "form"
assert result2["errors"] == {"base": "id_exists"}
assert result3["type"] == "form"
assert result3["errors"] == {"base": "already_configured"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_pyotgw_connect.mock_calls) == 1
assert len(mock_pyotgw_disconnect.mock_calls) == 1
async def test_form_connection_timeout(hass):
"""Test we handle connection timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"pyotgw.pyotgw.connect", side_effect=(asyncio.TimeoutError)
) as mock_connect:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_NAME: "Test Entry 1", CONF_DEVICE: "socket://192.0.2.254:1234"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
assert len(mock_connect.mock_calls) == 1
async def test_form_connection_error(hass):
"""Test we handle serial connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyotgw.pyotgw.connect", side_effect=(SerialException)) as mock_connect:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB0"}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
assert len(mock_connect.mock_calls) == 1
async def test_options_form(hass):
"""Test the options form."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Mock Gateway",
data={
CONF_NAME: "Mock Gateway",
CONF_DEVICE: "/dev/null",
CONF_ID: "mock_gateway",
},
options={},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_FLOOR_TEMP: True, CONF_PRECISION: PRECISION_HALVES},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_PRECISION] == PRECISION_HALVES
assert result["data"][CONF_FLOOR_TEMP] is True
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_PRECISION: 0}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_PRECISION] == 0.0
assert result["data"][CONF_FLOOR_TEMP] is True
|
import atexit
from functools import partial
import json
import os
import os.path as op
import platform
import shutil
import sys
import tempfile
import re
import numpy as np
from .check import _validate_type, _check_pyqt5_version
from ._logging import warn, logger
_temp_home_dir = None
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir : str or None
Directory to use for temporary file storage. None disables
temporary file storage.
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing.
Parameters
----------
memmap_min_size : str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
        Use None to disable memmapping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, str):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values
known_config_types = (
'MNE_3D_OPTION_ANTIALIAS',
'MNE_BROWSE_RAW_SIZE',
'MNE_CACHE_DIR',
'MNE_COREG_ADVANCED_RENDERING',
'MNE_COREG_COPY_ANNOT',
'MNE_COREG_GUESS_MRI_SUBJECT',
'MNE_COREG_HEAD_HIGH_RES',
'MNE_COREG_HEAD_OPACITY',
'MNE_COREG_INTERACTION',
'MNE_COREG_MARK_INSIDE',
'MNE_COREG_PREPARE_BEM',
'MNE_COREG_PROJECT_EEG',
'MNE_COREG_ORIENT_TO_SURFACE',
'MNE_COREG_SCALE_LABELS',
'MNE_COREG_SCALE_BY_DISTANCE',
'MNE_COREG_SCENE_SCALE',
'MNE_COREG_WINDOW_HEIGHT',
'MNE_COREG_WINDOW_WIDTH',
'MNE_COREG_SUBJECTS_DIR',
'MNE_CUDA_DEVICE',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_HF_SEF_PATH',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_MISC_PATH',
'MNE_DATASETS_MTRF_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_MULTIMODAL_PATH',
'MNE_DATASETS_FNIRS_MOTOR_PATH',
'MNE_DATASETS_OPM_PATH',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
'MNE_DATASETS_KILOWORD_PATH',
'MNE_DATASETS_FIELDTRIP_CMC_PATH',
'MNE_DATASETS_PHANTOM_4DBTI_PATH',
'MNE_DATASETS_LIMO_PATH',
'MNE_DATASETS_REFMEG_NOISE_PATH',
'MNE_FORCE_SERIAL',
'MNE_KIT2FIFF_STIM_CHANNELS',
'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
'MNE_LOGGING_LEVEL',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_FTP_TESTS',
'MNE_SKIP_NETWORK_TESTS',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_STIM_CHANNEL',
'MNE_TQDM',
'MNE_USE_CUDA',
'MNE_USE_NUMBA',
'SUBJECTS_DIR',
)
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = (
'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
"""Safely load a config file."""
with open(config_path, 'r') as fid:
try:
config = json.load(fid)
except ValueError:
# No JSON object could be decoded --> corrupt file?
msg = ('The MNE-Python config file (%s) is not a valid JSON '
'file and might be corrupted' % config_path)
if raise_error:
raise RuntimeError(msg)
warn(msg)
config = dict()
return config
def get_config_path(home_dir=None):
r"""Get path to standard mne-python config file.
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%USERPROFILE%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def get_config(key=None, default=None, raise_error=False, home_dir=None,
use_env=True):
"""Read MNE-Python preferences from environment or config file.
Parameters
----------
key : None | str
The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in environment variables or
the path are returned. If key is an empty string, a list of all valid
keys (but not values) is returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
use_env : bool
If True, consider env vars, if available.
If False, only use MNE-Python configuration file values.
.. versionadded:: 0.18
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
"""
_validate_type(key, (str, type(None)), "key", 'string or None')
if key == '':
return known_config_types
# first, check to see if key is in env
if use_env and key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
config = {}
else:
config = _load_config(config_path)
if key is None:
# update config with environment variables
if use_env:
env_keys = (set(config).union(known_config_types).
intersection(os.environ))
config.update({key: os.environ[key] for key in env_keys})
return config
elif raise_error is True and key not in config:
loc_env = 'the environment or in the ' if use_env else ''
meth_env = ('either os.environ["%s"] = VALUE for a temporary '
'solution, or ' % key) if use_env else ''
extra_env = (' You can also set the environment variable before '
'running python.' if use_env else '')
meth_file = ('mne.utils.set_config("%s", VALUE, set_env=True) '
'for a permanent one' % key)
raise KeyError('Key "%s" not found in %s'
'the mne-python config file (%s). '
'Try %s%s.%s'
% (key, loc_env, config_path, meth_env, meth_file,
extra_env))
else:
return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
"""Set a MNE-Python preference key in the config file and environment.
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
set_env : bool
If True (default), update :data:`os.environ` in addition to
updating the MNE-Python config file.
See Also
--------
get_config
"""
_validate_type(key, 'str', "key")
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
_validate_type(value, (str, 'path-like', type(None)), 'value')
if value is not None:
value = str(value)
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
config = _load_config(config_path, raise_error=True)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
if set_env and key in os.environ:
del os.environ[key]
else:
config[key] = value
if set_env:
os.environ[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
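def _config_roundtrip_demo():
    """Minimal usage sketch (illustrative only; the helper name is made up).
    Writes and reads back a known preference key in a throw-away home
    directory so the real ~/.mne configuration file is left untouched.
    """
    demo_home = tempfile.mkdtemp()
    try:
        set_config('MNE_LOGGING_LEVEL', 'info', home_dir=demo_home,
                   set_env=False)
        assert get_config('MNE_LOGGING_LEVEL', home_dir=demo_home,
                          use_env=False) == 'info'
        return get_config_path(home_dir=demo_home)
    finally:
        shutil.rmtree(demo_home, ignore_errors=True)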
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)."""
global _temp_home_dir
if home_dir is None:
home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
home_dir = os.getenv('APPDATA')
else:
home_dir = os.getenv('USERPROFILE')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR.
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
return subjects_dir
def _get_stim_channel(stim_channel, info, raise_error=True):
"""Determine the appropriate stim_channel.
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
    are read. If these are not found, it will fall back to 'STI101' if
    present, then to 'STI 014' if present, and finally to the channels of
    type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
_validate_type(stim_channel, 'str', "Stim channel")
stim_channel = [stim_channel]
for channel in stim_channel:
_validate_type(channel, 'str', "Each provided stim channel")
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
while(ch is not None and ch in info['ch_names']):
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI101' in info['ch_names']: # combination channel for newer systems
return ['STI101']
if 'STI 014' in info['ch_names']: # for older systems
return ['STI 014']
from ..io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
elif raise_error:
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
return stim_channel
def _get_root_dir():
"""Get as close to the repo root as possible."""
root_dir = op.abspath(op.join(op.dirname(__file__), '..'))
up_dir = op.join(root_dir, '..')
if op.isfile(op.join(up_dir, 'setup.py')) and all(
op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')):
root_dir = op.abspath(up_dir)
return root_dir
def _get_numpy_libs():
from ._testing import SilenceStdout
with SilenceStdout(close=False) as capture:
np.show_config()
lines = capture.getvalue().split('\n')
capture.close()
libs = []
for li, line in enumerate(lines):
for key in ('lapack', 'blas'):
if line.startswith('%s_opt_info' % key):
lib = lines[li + 1]
if 'NOT AVAILABLE' in lib:
lib = 'unknown'
else:
try:
lib = lib.split('[')[1].split("'")[1]
except IndexError:
pass # keep whatever it was
libs += ['%s=%s' % (key, lib)]
libs = ', '.join(libs)
return libs
def sys_info(fid=None, show_paths=False):
"""Print the system information for debugging.
This function is useful for printing system information
to help triage bugs.
Parameters
----------
fid : file-like | None
The file to write to. Will be passed to :func:`print()`.
Can be None to use :data:`sys.stdout`.
show_paths : bool
If True, print paths for each module.
Examples
--------
Running this function with no arguments prints an output that is
useful when submitting bug reports::
>>> import mne
>>> mne.sys_info() # doctest: +SKIP
Platform: Linux-4.15.0-1067-aws-x86_64-with-glibc2.2.5
Python: 3.8.1 (default, Feb 2 2020, 08:37:37) [GCC 8.3.0]
Executable: /usr/local/bin/python
CPU: : 36 cores
Memory: 68.7 GB
mne: 0.21.dev0
numpy: 1.19.0 {blas=openblas, lapack=openblas}
scipy: 1.5.1
matplotlib: 3.2.2 {backend=Qt5Agg}
sklearn: 0.23.1
numba: 0.50.1
nibabel: 3.1.1
nilearn: 0.7.0
dipy: 1.1.1
cupy: Not found
pandas: 1.0.5
mayavi: Not found
pyvista: 0.25.3 {pyvistaqt=0.1.1, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 7.0, 256 bits)}
vtk: 9.0.1
PyQt5: 5.15.0
""" # noqa: E501
ljust = 15
platform_str = platform.platform()
if platform.system() == 'Darwin' and sys.version_info[:2] < (3, 8):
# platform.platform() in Python < 3.8 doesn't call
# platform.mac_ver() if we're on Darwin, so we don't get a nice macOS
# version number. Therefore, let's do this manually here.
macos_ver = platform.mac_ver()[0]
macos_architecture = re.findall('Darwin-.*?-(.*)', platform_str)
if macos_architecture:
macos_architecture = macos_architecture[0]
platform_str = f'macOS-{macos_ver}-{macos_architecture}'
del macos_ver, macos_architecture
out = 'Platform:'.ljust(ljust) + platform_str + '\n'
out += 'Python:'.ljust(ljust) + str(sys.version).replace('\n', ' ') + '\n'
out += 'Executable:'.ljust(ljust) + sys.executable + '\n'
out += 'CPU:'.ljust(ljust) + ('%s: ' % platform.processor())
try:
import multiprocessing
except ImportError:
out += ('number of processors unavailable ' +
'(requires "multiprocessing" package)\n')
else:
out += '%s cores\n' % multiprocessing.cpu_count()
out += 'Memory:'.ljust(ljust)
try:
import psutil
except ImportError:
out += 'Unavailable (requires "psutil" package)'
else:
out += '%0.1f GB\n' % (psutil.virtual_memory().total / float(2 ** 30),)
out += '\n'
libs = _get_numpy_libs()
has_3d = False
for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
'numba', 'nibabel', 'nilearn', 'dipy', 'cupy', 'pandas',
'mayavi', 'pyvista', 'vtk', 'PyQt5'):
if mod_name == '':
out += '\n'
continue
if mod_name == 'PyQt5' and not has_3d:
continue
out += ('%s:' % mod_name).ljust(ljust)
try:
mod = __import__(mod_name)
if mod_name == 'mayavi':
# the real test
from mayavi import mlab # noqa, analysis:ignore
except Exception:
out += 'Not found\n'
else:
extra = (' (%s)' % op.dirname(mod.__file__)) if show_paths else ''
if mod_name == 'numpy':
                extra = ' {%s}%s' % (libs, extra)
elif mod_name == 'matplotlib':
                extra = ' {backend=%s}%s' % (mod.get_backend(), extra)
elif mod_name == 'pyvista':
extras = list()
try:
from pyvistaqt import __version__
except Exception:
pass
else:
extras += [f'pyvistaqt={__version__}']
try:
from pyvista import GPUInfo
except ImportError:
pass
else:
gi = GPUInfo()
extras += [f'OpenGL {gi.version} via {gi.renderer}']
if extras:
extra += f' {{{", ".join(extras)}}}'
elif mod_name in ('mayavi', 'vtk'):
has_3d = True
if mod_name == 'vtk':
version = getattr(mod, 'VTK_VERSION', 'VTK_VERSION missing')
elif mod_name == 'PyQt5':
version = _check_pyqt5_version()
else:
version = mod.__version__
out += '%s%s\n' % (version, extra)
print(out, end='', file=fid)
|
from __future__ import print_function, division
from plumbum import colors
from .termsize import get_terminal_size
from .. import cli
import sys
class Image(object):
__slots__ = "size char_ratio".split()
def __init__(self, size=None, char_ratio=2.45):
self.size = size
self.char_ratio = char_ratio
def best_aspect(self, orig, term):
"""Select a best possible size matching the original aspect ratio.
Size is width, height.
The char_ratio option gives the height of each char with respect
to its width, zero for no effect."""
if not self.char_ratio: # Don't use if char ratio is 0
return term
orig_ratio = orig[0] / orig[1] / self.char_ratio
if int(term[1] / orig_ratio) <= term[0]:
new_size = int(term[1] / orig_ratio), term[1]
else:
new_size = term[0], int(term[0] * orig_ratio)
return new_size
def show(self, filename, double=False):
"""Display an image on the command line. Can select a size or show in double resolution."""
import PIL.Image
if double:
return self.show_pil_double(PIL.Image.open(filename))
else:
return self.show_pil(PIL.Image.open(filename))
def _init_size(self, im):
"""Return the expected image size"""
if self.size is None:
term_size = get_terminal_size()
return self.best_aspect(im.size, term_size)
else:
return self.size
def show_pil(self, im):
'Standard show routine'
size = self._init_size(im)
new_im = im.resize(size).convert("RGB")
for y in range(size[1]):
for x in range(size[0] - 1):
pix = new_im.getpixel((x, y))
print(colors.bg.rgb(*pix), ' ', sep='', end='') # u'\u2588'
print(colors.reset, ' ', sep='')
print(colors.reset)
def show_pil_double(self, im):
'Show double resolution on some fonts'
size = self._init_size(im)
size = (size[0], size[1] * 2)
new_im = im.resize(size).convert("RGB")
for y in range(size[1] // 2):
for x in range(size[0] - 1):
pix = new_im.getpixel((x, y * 2))
pixl = new_im.getpixel((x, y * 2 + 1))
print(
colors.bg.rgb(*pixl) & colors.fg.rgb(*pix),
u'\u2580',
sep='',
end='')
print(colors.reset, ' ', sep='')
print(colors.reset)
class ShowImageApp(cli.Application):
'Display an image on the terminal'
double = cli.Flag(
['-d', '--double'],
help="Double resolution (looks good only with some fonts)")
@cli.switch(
['-c', '--colors'], cli.Range(1, 4), help="Level of color, 1-4")
def colors_set(self, n):
colors.use_color = n
size = cli.SwitchAttr(
['-s', '--size'], help="Size, should be in the form 100x150")
ratio = cli.SwitchAttr(
['--ratio'], float, default=2.45, help="Aspect ratio of the font")
@cli.positional(cli.ExistingFile)
def main(self, filename):
size = None
if self.size:
            size = tuple(map(int, self.size.split('x')))
Image(size, self.ratio).show(filename, self.double)
if __name__ == '__main__':
    ShowImageApp.run()
|
OPTIONS = {
"additional_libs": {
"type": "list",
"default": [],
'description': 'Libs for Phantom, to be added to phantom config file in section "module_setup"',
'schema': {
'type': 'string'
}
},
"address": {
'description': 'Address of target. Format: [host]:port, [ipv4]:port, [ipv6]:port. Port is optional. '
'Tank checks each test if port is available',
"type": "string",
"empty": False,
"required": True,
'examples': {'127.0.0.1:8080': '', 'www.w3c.org': ''}
},
'autocases': {
'description': 'Use to automatically tag requests. Requests might be grouped by tag for later analysis.',
'anyof': [
{'type': 'integer'},
{'type': 'string',
'allowed': ['uri', 'uniq']}],
'default': 0,
'values_description': {
'uri': 'tag each request with its uri path, slashes are replaced with underscores',
'uniq': 'tag each request with unique uid',
'<N>': 'use N first uri parts to tag request, slashes are replaced with underscores'
},
'examples': {
            2: '/example/search/hello/help/us?param1=50 -> _example_search',
3: '/example/search/hello/help/us?param1=50 -> _example_search_hello',
'uri': '/example/search/hello/help/us?param1=50 -> _example_search_hello_help_us',
'uniq': '/example/search/hello/help/us?param1=50 -> c98b0520bb6a451c8bc924ed1fd72553'
}
},
"affinity": {
'description': 'Use to set CPU affinity',
"type": "string",
"default": '',
'examples': {
'0-3': 'enable first 4 cores',
'0,1,2,16,17,18': 'enable 6 specified cores'
}
},
'ammo_limit': {
'description': 'Sets the upper limit for the total number of requests',
'type': 'integer',
'default': -1
},
'ammo_type': {
'description': 'Ammo format. Don\'t forget to change ammo_type option if you switch the format of your ammo, otherwise you might get errors',
'type': 'string',
'default': 'phantom',
'allowed': ['phantom', 'uri', 'uripost', 'access'],
'values_description': {
'phantom': 'Use Request-style file. Most versatile, HTTP as is. See tutorial for details',
'uri': 'Use URIs listed in file with headers. Simple but allows for GET requests only. See tutorial for details',
'uripost': 'Use URI-POST file. Allows POST requests with bodies. See tutorial for details',
'access': 'Use access.log from your web server as a source of requests'
},
'tutorial_link': 'http://yandextank.readthedocs.io/en/latest/tutorial.html#preparing-requests'
},
'ammofile': {
'type': 'string',
'default': '',
'description': 'Path to ammo file. Ammo file contains requests to be sent to a server. Can be gzipped',
'tutorial_link': 'http://yandextank.readthedocs.io/en/latest/tutorial.html#preparing-requests',
},
"buffered_seconds": {
"type": "integer",
"default": 2,
'description': 'Aggregator latency'
},
'cache_dir': {
'type': 'string',
'nullable': True,
'default': None,
'description': 'stpd-file cache directory'
},
'chosen_cases': {
'type': 'string',
'default': '',
'description': 'Use only selected cases.'
},
'client_certificate': {
'type': 'string',
'default': '',
'description': 'Path to client SSL certificate'
},
'client_cipher_suites': {
'type': 'string',
'default': '',
'description': 'Cipher list, consists of one or more cipher strings separated by colons (see man ciphers)',
},
'client_key': {
'type': 'string',
'default': '',
'description': 'Path to client\'s certificate\'s private key'
},
'config': {
'type': 'string',
'default': '',
'description': 'Use ready phantom config instead of generated'
},
'connection_test': {
'type': 'boolean',
'default': True,
'description': 'Test TCP socket connection before starting the test'
},
"enum_ammo": {
"type": "boolean",
"default": False
},
'file_cache': {
'type': 'integer',
'default': 8192
},
'force_stepping': {
'type': 'integer',
'default': 0,
'description': 'Ignore cached stpd files, force stepping'
},
'gatling_ip': {
'type': 'string',
'default': ''
},
"header_http": {
"type": "string",
'default': '1.0',
'description': 'HTTP version',
'allowed': ['1.0', '1.1'],
'values_description': {
'1.0': 'http 1.0',
'1.1': 'http 1.1'
}
},
"headers": {
"type": "list",
'default': [],
'description': 'HTTP headers',
'schema': {
'description': 'Format: "Header: Value"',
'type': 'string',
'examples': {'accept: text/html': ''}
}
},
'instances': {
'description': 'Max number of concurrent clients.',
'type': 'integer',
'default': 1000
},
'loop': {
'description': 'Loop over ammo file for the given amount of times.',
'type': 'integer',
'default': -1
},
'method_options': {
'description': 'Additional options for method objects. It is used for Elliptics etc.',
'type': 'string',
'default': ''
},
'method_prefix': {
'description': 'Object\'s type, that has a functionality to create test requests.',
'type': 'string',
'default': 'method_stream'
},
'multi': {
'type': 'list',
'schema': {'type': 'dict'},
'default': [],
'description': 'List of configs for multi-test. All of the options from main config supported. All of them not required and inherited from main config if not specified'
},
'name': {
'description': 'Name of a part in multi config',
'type': 'string',
'required': False
},
'phantom_http_entity': {
'type': 'string',
'default': '8M',
'description': 'Limits the amount of bytes Phantom reads from response.'
},
'phantom_http_field': {
'type': 'string',
'default': '8K',
'description': 'Header size.'
},
'phantom_http_field_num': {
'type': 'integer',
'default': 128,
'description': 'Max number of headers'
},
'phantom_http_line': {
'type': 'string',
'default': '1K',
'description': 'First line length'
},
"phantom_modules_path": {
"type": "string",
"default": "/usr/lib/phantom",
'description': 'Phantom modules path.'
},
"phantom_path": {
'description': 'Path to Phantom binary',
"type": "string",
"default": "phantom"
},
"phout_file": {
"type": "string",
'description': 'deprecated',
"default": ""
},
'port': {
'description': 'Explicit target port, overwrites port defined with address',
'type': 'string',
'default': '',
'regex': r'\d{0,5}'
},
"load_profile": {
        'description': 'Configure your load setting the number of RPS or instances (clients) as a function of time, '
                       'or using a prearranged schedule',
"type": "dict",
'tutorial_link': 'http://yandextank.readthedocs.io/en/latest/tutorial.html#tutorials',
'schema': {
'load_type': {
'required': True,
'description': 'Choose control parameter',
'type': 'string',
'allowed': ['rps', 'instances', 'stpd_file'],
'values_description': {
'rps': 'control the rps rate',
'instances': 'control the number of instances',
'stpd_file': 'use prearranged schedule file'}
},
'schedule': {
'type': 'string',
'required': True,
'description': 'load schedule or path to stpd file',
'examples': {
'line(100,200,10m)': 'linear growth from 100 to 200 instances/rps during 10 minutes',
'const(200,90s)': 'constant load of 200 instances/rps during 90s',
'test_dir/test_backend.stpd': 'path to ready schedule file'},
'validator': 'load_scheme'
}
},
'required': True
},
'source_log_prefix': {
'description': 'Prefix added to class name that reads source data',
'type': 'string',
'default': ''
},
'ssl': {
'description': 'Enable ssl',
'type': 'boolean',
'default': False
},
"threads": {
'description': 'Phantom thread count. When not specified, defaults to <processor cores count> / 2 + 1',
"type": "integer",
"default": None,
"nullable": True
},
'tank_type': {
'description': 'Choose between http and pure tcp guns',
'type': 'string',
'default': 'http',
'allowed': ['http', 'none'],
'values_description': {
'http': 'HTTP gun',
'none': 'TCP gun'
}
},
"timeout": {
'description': 'Response timeout',
"type": "string",
"default": "11s"
},
"uris": {
"type": "list",
'default': [],
'description': 'URI list',
'schema': {
'type': 'string',
'description': 'URI path string'
},
'examples': {
'["/example/search", "/example/search/hello", "/example/search/hello/help"]': ''
}
},
'use_caching': {
'description': 'Enable stpd-file caching for similar tests. Set false to reload ammo file and generate new stpd',
'type': 'boolean',
'default': True
},
"writelog": {
'description': 'Enable verbose request/response logging.',
"type": "string",
"default": "0",
'allowed': ['0', 'all', 'proto_warning', 'proto_error'],
'values_description': {
'0': 'disable',
'all': 'all messages',
'proto_warning': '4xx+5xx+network errors',
'proto_error': '5xx+network errors',
}
}
}
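# Schema applied to each entry of a multi-test: the same options as above, but
# with 'required' and 'default' stripped, since values not given in a multi
# section are inherited from the main config.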
MULTI_OPTIONS = {n: {k: v for k, v in d.items() if k != 'required' and k != 'default'} for n, d in OPTIONS.items()}
MULTI = {
'multi': {
'type': 'list',
'allow_unknown': True,
'schema': {'type': 'dict', 'schema': MULTI_OPTIONS},
'default': []}
}
def compile_schema():
schema = OPTIONS.copy()
schema.update(MULTI)
return schema
SCHEMA = compile_schema()
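# Illustrative sketch (an assumption, not part of the original file): the only
# top-level keys marked required above are 'address' and 'load_profile' (whose
# 'load_type' and 'schedule' are required in turn), so a minimal section that
# should validate against SCHEMA looks roughly like:
#   {'address': '127.0.0.1:8080',
#    'load_profile': {'load_type': 'rps', 'schedule': 'const(200,90s)'}}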
|
import os
from perfkitbenchmarker import data
from perfkitbenchmarker import vm_util
BLAZE_VERSION = '3.0'
BLAZE_TAR = 'blaze-%s.tar.gz' % BLAZE_VERSION
BLAZE_DIR = '%s/blaze-%s' % (vm_util.VM_TMP_DIR, BLAZE_VERSION)
BLAZE_TAR_URL = (
'https://bitbucket.org/blaze-lib/blaze/downloads/%s' % BLAZE_TAR)
CONFIG_TEMPLATE = 'blaze_config.j2'
CONFIG = 'config'
MAX_BLAZE_CACHE_SIZE_IN_B = 100000000
def _Configure(vm):
"""Configure and build blaze library.
See https://bitbucket.org/blaze-lib/blaze/wiki/Configuration%20Files
for more details.
"""
vm.RenderTemplate(
data.ResourcePath(CONFIG_TEMPLATE),
os.path.join(BLAZE_DIR, CONFIG),
{'compiler': 'g++-5',
'compile_flags': ' -DBLAZE_USE_BOOST_THREADS --std=c++14'})
# Adjust cache size
cache_in_KB, _ = vm.RemoteCommand(
'cat /proc/cpuinfo | grep "cache size" | awk \'{print $4}\'')
cache_in_B = int(1024 * float(cache_in_KB.split()[0]))
vm.RemoteCommand(
'sed -i \'s/constexpr size_t cacheSize = 3145728UL;/constexpr '
'size_t cacheSize = %sUL;/g\' %s' % (
min(cache_in_B, MAX_BLAZE_CACHE_SIZE_IN_B - 1), os.path.join(
BLAZE_DIR, 'blaze', 'config', 'CacheSize.h')))
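  # Illustrative note (an assumption about typical /proc/cpuinfo output): a
  # line such as "cache size : 25344 KB" makes the command above print one
  # "25344" per core; the first value becomes 1024 * 25344 = 25952256 bytes,
  # which is capped at MAX_BLAZE_CACHE_SIZE_IN_B - 1 before being patched
  # into CacheSize.h.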
vm.RemoteCommand('cd %s; ./configure %s; make -j %s' % (
BLAZE_DIR, CONFIG, vm.NumCpusForBenchmark()))
def _Install(vm):
"""Installs the blaze package on the VM."""
vm.RemoteCommand(
'cd {tmp_dir}; wget {tar_url}; tar xzvf {tar}'.format(
tmp_dir=vm_util.VM_TMP_DIR,
tar_url=BLAZE_TAR_URL,
tar=BLAZE_TAR))
vm.RemoteCommand('sudo cp -r {blaze_dir}/blaze /usr/local/include'.format(
blaze_dir=BLAZE_DIR))
_Configure(vm)
def YumInstall(vm):
"""Installs the OpenBLAS package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the OpenBLAS package on the VM."""
_Install(vm)
|
import json
from wled import Device as WLEDDevice, WLEDConnectionError
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.components.wled import SCAN_INTERVAL
from homeassistant.components.wled.const import (
ATTR_INTENSITY,
ATTR_PALETTE,
ATTR_PLAYLIST,
ATTR_PRESET,
ATTR_REVERSE,
ATTR_SPEED,
DOMAIN,
SERVICE_EFFECT,
SERVICE_PRESET,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.wled import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_rgb_light_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the creation and values of the WLED lights."""
await init_integration(hass, aioclient_mock)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# First segment of the strip
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.attributes.get(ATTR_EFFECT) == "Solid"
assert state.attributes.get(ATTR_HS_COLOR) == (37.412, 100.0)
assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant"
assert state.attributes.get(ATTR_INTENSITY) == 128
assert state.attributes.get(ATTR_PALETTE) == "Default"
assert state.attributes.get(ATTR_PLAYLIST) is None
assert state.attributes.get(ATTR_PRESET) is None
assert state.attributes.get(ATTR_REVERSE) is False
assert state.attributes.get(ATTR_SPEED) == 32
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_segment_0")
assert entry
assert entry.unique_id == "aabbccddeeff_0"
# Second segment of the strip
state = hass.states.get("light.wled_rgb_light_segment_1")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.attributes.get(ATTR_EFFECT) == "Blink"
assert state.attributes.get(ATTR_HS_COLOR) == (148.941, 100.0)
assert state.attributes.get(ATTR_ICON) == "mdi:led-strip-variant"
assert state.attributes.get(ATTR_INTENSITY) == 64
assert state.attributes.get(ATTR_PALETTE) == "Random Cycle"
assert state.attributes.get(ATTR_PLAYLIST) is None
assert state.attributes.get(ATTR_PRESET) is None
assert state.attributes.get(ATTR_REVERSE) is False
assert state.attributes.get(ATTR_SPEED) == 16
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_segment_1")
assert entry
assert entry.unique_id == "aabbccddeeff_1"
# Test master control of the lightstrip
state = hass.states.get("light.wled_rgb_light_master")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 127
assert state.state == STATE_ON
entry = entity_registry.async_get("light.wled_rgb_light_master")
assert entry
assert entry.unique_id == "aabbccddeeff"
async def test_segment_change_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, caplog
) -> None:
"""Test the change of state of the WLED segments."""
await init_integration(hass, aioclient_mock)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
on=False,
segment_id=0,
transition=50,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_EFFECT: "Chase",
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_RGB_COLOR: [255, 0, 0],
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
brightness=42,
color_primary=(255, 0, 0),
effect="Chase",
on=True,
segment_id=0,
transition=50,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_COLOR_TEMP: 400},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
color_primary=(255, 159, 70),
on=True,
segment_id=0,
)
async def test_master_change_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, caplog
) -> None:
"""Test the change of state of the WLED master light control."""
await init_integration(hass, aioclient_mock)
with patch("wled.WLED.master") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
on=False,
transition=50,
)
with patch("wled.WLED.master") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_ENTITY_ID: "light.wled_rgb_light_master",
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
brightness=42,
on=True,
transition=50,
)
with patch("wled.WLED.master") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_master", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
on=False,
transition=50,
)
with patch("wled.WLED.master") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_BRIGHTNESS: 42,
ATTR_ENTITY_ID: "light.wled_rgb_light_master",
ATTR_TRANSITION: 5,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
brightness=42,
on=True,
transition=50,
)
async def test_dynamically_handle_segments(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test if a new/deleted segment is dynamically added/removed."""
await init_integration(hass, aioclient_mock)
assert hass.states.get("light.wled_rgb_light_master")
assert hass.states.get("light.wled_rgb_light_segment_0")
assert hass.states.get("light.wled_rgb_light_segment_1")
data = json.loads(load_fixture("wled/rgb_single_segment.json"))
device = WLEDDevice(data)
# Test removal if segment went missing, including the master entity
with patch(
"homeassistant.components.wled.WLED.update",
return_value=device,
):
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert hass.states.get("light.wled_rgb_light_segment_0")
assert not hass.states.get("light.wled_rgb_light_segment_1")
assert not hass.states.get("light.wled_rgb_light_master")
# Test adding if segment shows up again, including the master entity
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert hass.states.get("light.wled_rgb_light_master")
assert hass.states.get("light.wled_rgb_light_segment_0")
assert hass.states.get("light.wled_rgb_light_segment_1")
async def test_single_segment_behavior(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, caplog
) -> None:
"""Test the behavior of the integration with a single segment."""
await init_integration(hass, aioclient_mock)
data = json.loads(load_fixture("wled/rgb_single_segment.json"))
device = WLEDDevice(data)
# Test absent master
with patch(
"homeassistant.components.wled.WLED.update",
return_value=device,
):
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert not hass.states.get("light.wled_rgb_light_master")
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.state == STATE_ON
# Test segment brightness takes master into account
device.state.brightness = 100
device.state.segments[0].brightness = 255
with patch(
"homeassistant.components.wled.WLED.update",
return_value=device,
):
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 100
# Test segment is off when master is off
device.state.on = False
with patch(
"homeassistant.components.wled.WLED.update",
return_value=device,
):
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state
assert state.state == STATE_OFF
# Test master is turned off when turning off a single segment
with patch("wled.WLED.master") as master_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_TRANSITION: 5},
blocking=True,
)
await hass.async_block_till_done()
master_mock.assert_called_once_with(
on=False,
transition=50,
)
# Test master is turned on when turning on a single segment, and segment
# brightness is set to 255.
with patch("wled.WLED.master") as master_mock, patch(
"wled.WLED.segment"
) as segment_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_TRANSITION: 5,
ATTR_BRIGHTNESS: 42,
},
blocking=True,
)
await hass.async_block_till_done()
master_mock.assert_called_once_with(on=True, transition=50, brightness=42)
segment_mock.assert_called_once_with(on=True, segment_id=0, brightness=255)
async def test_light_error(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, caplog
) -> None:
"""Test error handling of the WLED lights."""
aioclient_mock.post("http://192.168.1.123:80/json/state", text="", status=400)
await init_integration(hass, aioclient_mock)
with patch("homeassistant.components.wled.WLED.update"):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state.state == STATE_ON
assert "Invalid response from API" in caplog.text
async def test_light_connection_error(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test error handling of the WLED switches."""
await init_integration(hass, aioclient_mock)
with patch("homeassistant.components.wled.WLED.update"), patch(
"homeassistant.components.wled.WLED.segment", side_effect=WLEDConnectionError
):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state.state == STATE_UNAVAILABLE
async def test_rgbw_light(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test RGBW support for WLED."""
await init_integration(hass, aioclient_mock, rgbw=True)
state = hass.states.get("light.wled_rgbw_light")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_HS_COLOR) == (0.0, 100.0)
assert state.attributes.get(ATTR_WHITE_VALUE) == 139
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgbw_light", ATTR_COLOR_TEMP: 400},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
on=True,
segment_id=0,
color_primary=(255, 159, 70, 139),
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.wled_rgbw_light", ATTR_WHITE_VALUE: 100},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
color_primary=(255, 0, 0, 100),
on=True,
segment_id=0,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.wled_rgbw_light",
ATTR_RGB_COLOR: (255, 255, 255),
ATTR_WHITE_VALUE: 100,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
color_primary=(0, 0, 0, 100),
on=True,
segment_id=0,
)
async def test_effect_service(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the effect service of a WLED light."""
await init_integration(hass, aioclient_mock)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{
ATTR_EFFECT: "Rainbow",
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_INTENSITY: 200,
ATTR_PALETTE: "Tiamat",
ATTR_REVERSE: True,
ATTR_SPEED: 100,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
effect="Rainbow",
intensity=200,
palette="Tiamat",
reverse=True,
segment_id=0,
speed=100,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_EFFECT: 9},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
segment_id=0,
effect=9,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_INTENSITY: 200,
ATTR_REVERSE: True,
ATTR_SPEED: 100,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
intensity=200,
reverse=True,
segment_id=0,
speed=100,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{
ATTR_EFFECT: "Rainbow",
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_PALETTE: "Tiamat",
ATTR_REVERSE: True,
ATTR_SPEED: 100,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
effect="Rainbow",
palette="Tiamat",
reverse=True,
segment_id=0,
speed=100,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{
ATTR_EFFECT: "Rainbow",
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_INTENSITY: 200,
ATTR_SPEED: 100,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
effect="Rainbow",
intensity=200,
segment_id=0,
speed=100,
)
with patch("wled.WLED.segment") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{
ATTR_EFFECT: "Rainbow",
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_INTENSITY: 200,
ATTR_REVERSE: True,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
effect="Rainbow",
intensity=200,
reverse=True,
segment_id=0,
)
async def test_effect_service_error(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, caplog
) -> None:
"""Test error handling of the WLED effect service."""
aioclient_mock.post("http://192.168.1.123:80/json/state", text="", status=400)
await init_integration(hass, aioclient_mock)
with patch("homeassistant.components.wled.WLED.update"):
await hass.services.async_call(
DOMAIN,
SERVICE_EFFECT,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_EFFECT: 9},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state.state == STATE_ON
assert "Invalid response from API" in caplog.text
async def test_preset_service(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the preset service of a WLED light."""
await init_integration(hass, aioclient_mock)
with patch("wled.WLED.preset") as light_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_PRESET,
{
ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0",
ATTR_PRESET: 1,
},
blocking=True,
)
await hass.async_block_till_done()
light_mock.assert_called_once_with(
preset=1,
)
async def test_preset_service_error(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, caplog
) -> None:
"""Test error handling of the WLED preset service."""
aioclient_mock.post("http://192.168.1.123:80/json/state", text="", status=400)
await init_integration(hass, aioclient_mock)
with patch("homeassistant.components.wled.WLED.update"):
await hass.services.async_call(
DOMAIN,
SERVICE_PRESET,
{ATTR_ENTITY_ID: "light.wled_rgb_light_segment_0", ATTR_PRESET: 1},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.wled_rgb_light_segment_0")
assert state.state == STATE_ON
assert "Invalid response from API" in caplog.text
|
import os
import os.path
import sys
import json
import atexit
import shutil
import argparse
import tokenize
import functools
import subprocess
from typing import Iterable, Mapping, MutableSequence, Sequence, cast
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
from PyQt5.QtWidgets import QApplication
try:
import hunter
except ImportError:
hunter = None
import qutebrowser
from qutebrowser.api import cmdutils
from qutebrowser.utils import log
from qutebrowser.misc import sessions, ipc, objects
from qutebrowser.mainwindow import prompt
from qutebrowser.completion.models import miscmodels
instance = cast('Quitter', None)
class Quitter(QObject):
"""Utility class to quit/restart the QApplication.
Attributes:
quit_status: The current quitting status.
_is_shutting_down: Whether we're currently shutting down.
_args: The argparse namespace.
"""
shutting_down = pyqtSignal() # Emitted immediately before shut down
def __init__(self, *,
args: argparse.Namespace,
parent: QObject = None) -> None:
super().__init__(parent)
self.quit_status = {
'crash': True,
'tabs': False,
'main': False,
}
self._is_shutting_down = False
self._args = args
def on_last_window_closed(self) -> None:
"""Slot which gets invoked when the last window was closed."""
self.shutdown(last_window=True)
def _compile_modules(self) -> None:
"""Compile all modules to catch SyntaxErrors."""
if os.path.basename(sys.argv[0]) == 'qutebrowser':
# Launched via launcher script
return
elif hasattr(sys, 'frozen'):
return
else:
path = os.path.abspath(os.path.dirname(qutebrowser.__file__))
if not os.path.isdir(path):
# Probably running from a python egg.
return
for dirpath, _dirnames, filenames in os.walk(path):
for fn in filenames:
                if (os.path.splitext(fn)[1] == '.py' and
                        os.path.isfile(os.path.join(dirpath, fn))):
with tokenize.open(os.path.join(dirpath, fn)) as f:
compile(f.read(), fn, 'exec')
def _get_restart_args(
self, pages: Iterable[str] = (),
session: str = None,
override_args: Mapping[str, str] = None
) -> Sequence[str]:
"""Get args to relaunch qutebrowser.
Args:
pages: The pages to re-open.
session: The session to load, or None.
override_args: Argument overrides as a dict.
Return:
The commandline as a list of strings.
"""
if os.path.basename(sys.argv[0]) == 'qutebrowser':
# Launched via launcher script
args = [sys.argv[0]]
elif hasattr(sys, 'frozen'):
args = [sys.executable]
else:
args = [sys.executable, '-m', 'qutebrowser']
# Add all open pages so they get reopened.
page_args: MutableSequence[str] = []
for win in pages:
page_args.extend(win)
page_args.append('')
# Serialize the argparse namespace into json and pass that to the new
# process via --json-args.
# We do this as there's no way to "unparse" the namespace while
# ignoring some arguments.
argdict = vars(self._args)
argdict['session'] = None
argdict['url'] = []
argdict['command'] = page_args[:-1]
argdict['json_args'] = None
# Ensure the given session (or none at all) gets opened.
if session is None:
argdict['session'] = None
argdict['override_restore'] = True
else:
argdict['session'] = session
argdict['override_restore'] = False
# Ensure :restart works with --temp-basedir
if self._args.temp_basedir:
argdict['temp_basedir'] = False
argdict['temp_basedir_restarted'] = True
if override_args is not None:
argdict.update(override_args)
# Dump the data
data = json.dumps(argdict)
args += ['--json-args', data]
log.destroy.debug("args: {}".format(args))
return args
def restart(self, pages: Sequence[str] = (),
session: str = None,
override_args: Mapping[str, str] = None) -> bool:
"""Inner logic to restart qutebrowser.
The "better" way to restart is to pass a session (_restart usually) as
that'll save the complete state.
However we don't do that (and pass a list of pages instead) when we
restart because of an exception, as that's a lot simpler and we don't
want to risk anything going wrong.
Args:
pages: A list of URLs to open.
session: The session to load, or None.
override_args: Argument overrides as a dict.
Return:
True if the restart succeeded, False otherwise.
"""
self._compile_modules()
log.destroy.debug("sys.executable: {}".format(sys.executable))
log.destroy.debug("sys.path: {}".format(sys.path))
log.destroy.debug("sys.argv: {}".format(sys.argv))
log.destroy.debug("frozen: {}".format(hasattr(sys, 'frozen')))
# Save the session if one is given.
if session is not None:
sessions.session_manager.save(session, with_private=True)
# Make sure we're not accepting a connection from the new process
# before we fully exited.
assert ipc.server is not None
ipc.server.shutdown()
# Open a new process and immediately shutdown the existing one
try:
args = self._get_restart_args(pages, session, override_args)
subprocess.Popen(args)
except OSError:
log.destroy.exception("Failed to restart")
return False
else:
return True
def shutdown(self, status: int = 0,
session: sessions.ArgType = None,
last_window: bool = False,
is_restart: bool = False) -> None:
"""Quit qutebrowser.
Args:
status: The status code to exit with.
session: A session name if saving should be forced.
last_window: If the shutdown was triggered due to the last window
closing.
is_restart: If we're planning to restart.
"""
if self._is_shutting_down:
return
self._is_shutting_down = True
log.destroy.debug("Shutting down with status {}, session {}...".format(
status, session))
sessions.shutdown(session, last_window=last_window)
if prompt.prompt_queue.shutdown():
            # If shutdown was called while we were asking a question, we're
            # still in a sub-eventloop (which gets quit now) and not in the
            # main one.
# This means we need to defer the real shutdown to when we're back
# in the real main event loop, or we'll get a segfault.
log.destroy.debug("Deferring real shutdown because question was "
"active.")
QTimer.singleShot(0, functools.partial(self._shutdown_2, status,
is_restart=is_restart))
else:
# If we have no questions to shut down, we are already in the real
# event loop, so we can shut down immediately.
self._shutdown_2(status, is_restart=is_restart)
def _shutdown_2(self, status: int, is_restart: bool) -> None:
"""Second stage of shutdown."""
log.destroy.debug("Stage 2 of shutting down...")
# Tell everything to shut itself down
self.shutting_down.emit()
# Delete temp basedir
if ((self._args.temp_basedir or self._args.temp_basedir_restarted) and
not is_restart):
atexit.register(shutil.rmtree, self._args.basedir,
ignore_errors=True)
# Now we can hopefully quit without segfaults
log.destroy.debug("Deferring QApplication::exit...")
# We use a singleshot timer to exit here to minimize the likelihood of
# segfaults.
QTimer.singleShot(0, functools.partial(self._shutdown_3, status))
def _shutdown_3(self, status: int) -> None:
"""Finally shut down the QApplication."""
log.destroy.debug("Now calling QApplication::exit.")
if 'debug-exit' in objects.debug_flags:
if hunter is None:
print("Not logging late shutdown because hunter could not be "
"imported!", file=sys.stderr)
else:
print("Now logging late shutdown.", file=sys.stderr)
hunter.trace()
QApplication.instance().exit(status)
@cmdutils.register(name='quit')
@cmdutils.argument('session', completion=miscmodels.session)
def quit_(save: bool = False,
session: sessions.ArgType = None) -> None:
"""Quit qutebrowser.
Args:
save: When given, save the open windows even if auto_save.session
is turned off.
session: The name of the session to save.
"""
if session is not None and not save:
raise cmdutils.CommandError("Session name given without --save!")
if save:
if session is None:
session = sessions.default
instance.shutdown(session=session)
else:
instance.shutdown()
@cmdutils.register()
def restart() -> None:
"""Restart qutebrowser while keeping existing tabs open."""
try:
ok = instance.restart(session='_restart')
except sessions.SessionError as e:
log.destroy.exception("Failed to save session!")
raise cmdutils.CommandError("Failed to save session: {}!"
.format(e))
except SyntaxError as e:
log.destroy.exception("Got SyntaxError")
raise cmdutils.CommandError("SyntaxError in {}:{}: {}".format(
e.filename, e.lineno, e))
if ok:
instance.shutdown(is_restart=True)
def init(args: argparse.Namespace) -> None:
"""Initialize the global Quitter instance."""
global instance
qapp = QApplication.instance()
instance = Quitter(args=args, parent=qapp)
instance.shutting_down.connect(log.shutdown_log)
qapp.lastWindowClosed.connect(instance.on_last_window_closed)
|
import posixpath
import re
from absl import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
WRK2_URL = ('https://github.com/giltene/wrk2/archive/'
'c4250acb6921c13f8dccfc162d894bd7135a2979.tar.gz')
WRK2_DIR = posixpath.join(vm_util.VM_TMP_DIR, 'wrk2')
WRK2_PATH = posixpath.join(WRK2_DIR, 'wrk')
FLAGS = flags.FLAGS
flags.DEFINE_bool('wrk2_corrected_latency', True,
'Whether or not response latency is corrected.\n'
                  'If True, wrk2 measures response latency from the time the '
'transmission should have occurred according to the constant '
'throughput configured for the run.\n'
'If False, response latency is the time that actual '
                  'transmission of a request occurred.')
def _Install(vm):
vm.Install('curl')
vm.Install('build_tools')
vm.Install('openssl')
vm.RemoteCommand(
('mkdir -p {0} && '
'curl -L {1} | tar -xzf - -C {0} --strip-components 1').format(
WRK2_DIR, WRK2_URL))
vm.RemoteCommand('make -C {}'.format(WRK2_DIR))
def YumInstall(vm):
_Install(vm)
def AptInstall(vm):
_Install(vm)
def _ParseOutput(output_text):
"""Parses the output of wrk2.
Args:
output_text: str. Output for wrk2
Yields:
(variable_name, value, unit) tuples.
Raises:
ValueError: When requests / latency statistics cannot be found.
"""
inner_pat = r'^\s*(\d+\.\d+)%\s+(\d+\.\d+)(us|ms|s|m)\s*\n'
regex = re.compile(
r'^\s*Latency Distribution \(HdrHistogram - Recorded Latency\)\n'
r'((?:^' + inner_pat + ')+)', re.MULTILINE)
m = regex.search(output_text)
if not m:
raise ValueError('No match for {} in\n{}'.format(regex, output_text))
matches = re.findall(inner_pat, m.group(1), re.MULTILINE)
for percentile, value, unit in matches:
variable = 'p{} latency'.format(percentile.rstrip('0').rstrip('.'))
if unit == 'ms':
value_in_ms = float(value)
elif unit == 'us':
unit = 'ms'
value_in_ms = float(value) / 1000.
elif unit == 's':
unit = 'ms'
value_in_ms = float(value) * 1000.
elif unit == 'm':
unit = 'ms'
value_in_ms = float(value) * 60. * 1000.
else:
raise ValueError('Unknown unit {} for {}'.format(unit, m.group(1)))
yield variable, value_in_ms, unit
# Errors, requests
m = re.search(r'(\d+) requests in \d', output_text)
if not m:
raise ValueError('Request count not found in:\n' + output_text)
requests = int(m.group(1))
yield 'requests', requests, ''
m = re.search(r'Non-2xx or 3xx responses: (\d+)', output_text)
if m:
errors = int(m.group(1))
error_rate = int(m.group(1)) / float(requests)
else:
errors = 0
error_rate = 0
yield 'error_rate', error_rate, ''
yield 'errors', errors, ''
if error_rate > 0.1:
raise ValueError('More than 10% of requests failed.')
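# Illustrative note (an assumption, not output from a real run): a latency
# distribution line such as "50.000%  1.23ms" is yielded by _ParseOutput as
# ('p50 latency', 1.23, 'ms'), while "99.900%  2.50s" is converted to
# ('p99.9 latency', 2500.0, 'ms').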
def Run(vm, target, rate, connections=1, duration=60, script_path=None,
threads=None):
"""Runs wrk against a given target.
Args:
vm: Virtual machine.
target: URL to fetch.
rate: int. Target request rate, in QPS.
connections: Number of concurrent connections.
duration: Duration of the test, in seconds.
script_path: If specified, a lua script to execute.
threads: Number of threads. Defaults to min(connections, num_cores).
Yields:
sample.Sample objects with results.
"""
if threads is None:
threads = min(connections, vm.NumCpusForBenchmark())
cmd = ('{wrk} '
'--rate={rate} '
'--connections={connections} '
'--threads={threads} '
'--duration={duration} '
'--{corrected}').format(
wrk=WRK2_PATH, connections=connections, threads=threads,
rate=rate, duration=duration,
corrected=(
'latency' if FLAGS.wrk2_corrected_latency else 'u_latency'))
if script_path:
cmd += ' --script ' + script_path
cmd += ' ' + target
stdout, _ = vm.RemoteCommand(cmd)
for variable, value, unit in _ParseOutput(stdout):
yield sample.Sample(variable, value, unit,
metadata={'connections': connections,
'threads': threads,
'duration': duration,
'target_rate': rate,
                                  'corrected': FLAGS.wrk2_corrected_latency})
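# Illustrative sketch (an assumption, not part of the original module): with
# target='http://10.0.0.2/', rate=1000, connections=10, duration=60 and an
# 8-core client VM, Run() assembles roughly
#   <WRK2_DIR>/wrk --rate=1000 --connections=10 --threads=8 --duration=60 \
#       --latency http://10.0.0.2/
# switching to --u_latency when --wrk2_corrected_latency=false.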
|
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_FREEBOX
from homeassistant.config_entries import SOURCE_DISCOVERY, SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, PLATFORMS
from .router import FreeboxRouter
_LOGGER = logging.getLogger(__name__)
FREEBOX_SCHEMA = vol.Schema(
{vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PORT): cv.port}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [FREEBOX_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Freebox component."""
conf = config.get(DOMAIN)
async def discovery_dispatch(service, discovery_info):
if conf is None:
host = discovery_info.get("properties", {}).get("api_domain")
port = discovery_info.get("properties", {}).get("https_port")
_LOGGER.info("Discovered Freebox server: %s:%s", host, port)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_DISCOVERY},
data={CONF_HOST: host, CONF_PORT: port},
)
)
discovery.async_listen(hass, SERVICE_FREEBOX, discovery_dispatch)
if conf is None:
return True
for freebox_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=freebox_conf,
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Set up Freebox component."""
router = FreeboxRouter(hass, entry)
await router.setup()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.unique_id] = router
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
# Services
async def async_reboot(call):
"""Handle reboot service call."""
await router.reboot()
hass.services.async_register(DOMAIN, "reboot", async_reboot)
async def async_close_connection(event):
"""Close Freebox connection on HA Stop."""
await router.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_close_connection)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
router = hass.data[DOMAIN].pop(entry.unique_id)
await router.close()
return unload_ok
|
from weblate.checks.same import SameCheck
from weblate.checks.tests.test_checks import CheckTestCase, MockUnit
class SameCheckTest(CheckTestCase):
check = SameCheck()
def setUp(self):
super().setUp()
self.test_good_none = ("%(source)s", "%(source)s", "python-format")
self.test_good_matching = ("source", "translation", "")
self.test_good_ignore = ("alarm", "alarm", "")
self.test_failure_1 = ("retezec", "retezec", "")
def test_same_source_language(self):
unit = MockUnit(code="en")
# Is template
unit.translation.is_template = True
unit.translation.is_source = True
unit.is_source = True
self.assertTrue(self.check.should_skip(unit))
# Is same as source
unit.translation.template = False
self.assertTrue(self.check.should_skip(unit))
# Interlingua special case
unit.translation.language.code = "ia"
self.assertTrue(self.check.should_skip(unit))
def test_same_db_screen(self):
self.assertTrue(
self.check.check_single(
"some long text is here", "some long text is here", MockUnit(code="de")
)
)
self.assertFalse(
self.check.check_single(
"some long text is here",
"some long text is here",
MockUnit(code="de", note="Tag: screen"),
)
)
def test_same_numbers(self):
self.do_test(False, ("1:4", "1:4", ""))
self.do_test(False, ("1, 3, 10", "1, 3, 10", ""))
def test_same_strict(self):
self.do_test(True, ("Linux kernel", "Linux kernel", "strict-same"))
def test_same_multi(self):
self.do_test(False, ("Linux kernel", "Linux kernel", ""))
self.do_test(
True, ("Linux kernel testing image", "Linux kernel testing image", "")
)
self.do_test(False, ("Gettext (PO)", "Gettext (PO)", ""))
self.do_test(
False, ("powerpc, m68k, i386, amd64", "powerpc, m68k, i386, amd64", "")
)
self.do_test(False, ("Fedora & openSUSE", "Fedora & openSUSE", ""))
self.do_test(False, ("n/a mm", "n/a mm", ""))
self.do_test(False, ("i18n", "i18n", ""))
self.do_test(False, ("i18next", "i18next", ""))
def test_same_copyright(self):
self.do_test(
False,
("(c) Copyright 2013 Michal Čihař", "(c) Copyright 2013 Michal Čihař", ""),
)
self.do_test(
False,
("© Copyright 2013 Michal Čihař", "© Copyright 2013 Michal Čihař", ""),
)
def test_same_format(self):
self.do_test(False, ("%d.%m.%Y, %H:%M", "%d.%m.%Y, %H:%M", "php-format"))
self.do_test(True, ("%d bajt", "%d bajt", "php-format"))
self.do_test(False, ("%d table(s)", "%d table(s)", "php-format"))
self.do_test(
False,
("%s %s %s %s %s %s %s", "%s %s %s %s %s %s %s", "c-format"),
)
self.do_test(
False, ("%s %s %s %s %s%s:%s %s ", "%s %s %s %s %s%s:%s %s ", "c-format")
)
self.do_test(False, ("%s%s, %s%s (", "%s%s, %s%s (", "c-format"))
self.do_test(False, ("%s %s Fax: %s", "%s %s Fax: %s", "c-format"))
self.do_test(False, ("%i C", "%i C", "c-format"))
self.do_test(False, ("%Ln C", "%Ln C", "qt-format"))
self.do_test(False, ("%+.2<amount>f C", "%+.2<amount>f C", "ruby-format"))
self.do_test(False, ("%{amount} C", "%{amount} C", "ruby-format"))
def test_same_rst(self):
self.do_test(False, (":ref:`index`", ":ref:`index`", "rst-text"))
self.do_test(
False,
(
":config:option:`$cfg['Servers'][$i]['pmadb']`",
":config:option:`$cfg['Servers'][$i]['pmadb']`",
"rst-text",
),
)
self.do_test(True, ("See :ref:`index`", "See :ref:`index`", "rst-text"))
self.do_test(False, ("``mysql``", "``mysql``", "rst-text"))
self.do_test(True, ("Use ``mysql`` module", "Use ``mysql`` module", "rst-text"))
def test_same_email(self):
self.do_test(False, ("[email protected]", "[email protected]", ""))
self.do_test(True, ("Write [email protected]", "Write [email protected]", ""))
def test_same_url(self):
self.do_test(False, ("https://weblate.org/", "https://weblate.org/", ""))
self.do_test(True, ("See https://weblate.org/", "See https://weblate.org/", ""))
self.do_test(
False,
(
"[2]: http://code.google.com/p/pybluez/",
"[2]: http://code.google.com/p/pybluez/",
"",
),
)
self.do_test(
False,
(
"[2]: https://sourceforge.net/projects/pywin32/",
"[2]: https://sourceforge.net/projects/pywin32/",
"",
),
)
def test_same_channel(self):
self.do_test(False, ("#weblate", "#weblate", ""))
self.do_test(True, ("Please use #weblate", "Please use #weblate", ""))
def test_same_domain(self):
self.do_test(False, ("weblate.org", "weblate.org", ""))
self.do_test(False, ("demo.weblate.org", "demo.weblate.org", ""))
self.do_test(
False, ("#weblate @ irc.freenode.net", "#weblate @ irc.freenode.net", "")
)
self.do_test(
True, ("Please see demo.weblate.org", "Please see demo.weblate.org", "")
)
def test_same_path(self):
self.do_test(
False,
(
"/cgi-bin/koha/catalogue/search.pl?q=",
"/cgi-bin/koha/catalogue/search.pl?q=",
"",
),
)
self.do_test(True, ("File/path/directory", "File/path/directory", ""))
def test_same_template(self):
self.do_test(
False, ("{building}: {description}", "{building}: {description}", "")
)
self.do_test(False, ("@NAME@: @BOO@", "@NAME@: @BOO@", ""))
self.do_test(True, ("{building}: summary", "{building}: summary", ""))
self.do_test(True, ("@NAME@: long text", "@NAME@: long text", ""))
def test_same_lists(self):
self.do_test(False, ("a.,b.,c.,d.", "a.,b.,c.,d.", ""))
self.do_test(False, ("i.,ii.,iii.,iv.", "i.,ii.,iii.,iv.", ""))
def test_same_alphabet(self):
self.do_test(
False,
(
"!\"#$%%&'()*+,-./0123456789:;<=>?@"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`"
"abcdefghijklmnopqrstuvwxyz{|}~",
"!\"#$%%&'()*+,-./0123456789:;<=>?@"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`"
"abcdefghijklmnopqrstuvwxyz{|}~",
"",
),
)
def test_same_uppercase(self):
self.do_test(False, ("RMS", "RMS", ""))
self.do_test(False, ("<primary>RMS</primary>", "<primary>RMS</primary>", ""))
self.do_test(True, ("Who is RMS?", "Who is RMS?", ""))
def test_same_placeholders(self):
self.do_test(True, ("%location%", "%location%", ""))
self.do_test(False, ("%location%", "%location%.", "placeholders:%location%"))
def test_same_project(self):
self.do_test(False, ("MockProject", "MockProject", ""))
self.do_test(False, ("mockcomponent", "mockcomponent", ""))
|
from unittest.mock import Mock, MagicMock
from kombu.asynchronous.aws.sqs.connection import (
AsyncSQSConnection
)
from kombu.asynchronous.aws.ext import boto3
from kombu.asynchronous.aws.sqs.message import AsyncMessage
from kombu.asynchronous.aws.sqs.queue import AsyncQueue
from kombu.utils.uuid import uuid
from t.mocks import PromiseMock
from ..case import AWSCase
class test_AsyncSQSConnection(AWSCase):
def setup(self):
session = boto3.session.Session(
aws_access_key_id='AAA',
aws_secret_access_key='AAAA',
region_name='us-west-2',
)
sqs_client = session.client('sqs')
self.x = AsyncSQSConnection(sqs_client, 'ak', 'sk', http_client=Mock())
self.x.get_object = Mock(name='X.get_object')
self.x.get_status = Mock(name='X.get_status')
self.x.get_list = Mock(name='X.get_list')
self.callback = PromiseMock(name='callback')
sqs_client.get_queue_url = MagicMock(return_value={
'QueueUrl': 'http://aws.com'
})
def test_create_queue(self):
self.x.create_queue('foo', callback=self.callback)
self.x.get_object.assert_called_with(
'CreateQueue', {'QueueName': 'foo'},
callback=self.callback,
)
def test_create_queue__with_visibility_timeout(self):
self.x.create_queue(
'foo', visibility_timeout=33, callback=self.callback,
)
self.x.get_object.assert_called_with(
'CreateQueue', {
'QueueName': 'foo',
'DefaultVisibilityTimeout': '33'
},
callback=self.callback
)
def test_delete_queue(self):
queue = Mock(name='queue')
self.x.delete_queue(queue, callback=self.callback)
self.x.get_status.assert_called_with(
'DeleteQueue', None, queue.id, callback=self.callback,
)
def test_get_queue_attributes(self):
queue = Mock(name='queue')
self.x.get_queue_attributes(
queue, attribute='QueueSize', callback=self.callback,
)
self.x.get_object.assert_called_with(
'GetQueueAttributes', {'AttributeName': 'QueueSize'},
queue.id, callback=self.callback,
)
def test_set_queue_attribute(self):
queue = Mock(name='queue')
self.x.set_queue_attribute(
queue, 'Expires', '3600', callback=self.callback,
)
self.x.get_status.assert_called_with(
'SetQueueAttribute', {
'Attribute.Name': 'Expires',
'Attribute.Value': '3600',
},
queue.id, callback=self.callback,
)
def test_receive_message(self):
queue = Mock(name='queue')
self.x.receive_message(
queue,
self.x.get_queue_url('queue'),
4,
callback=self.callback,
)
self.x.get_list.assert_called_with(
'ReceiveMessage', {'MaxNumberOfMessages': 4},
[('Message', AsyncMessage)],
'http://aws.com', callback=self.callback,
parent=queue,
)
def test_receive_message__with_visibility_timeout(self):
queue = Mock(name='queue')
self.x.receive_message(
queue,
self.x.get_queue_url('queue'),
4,
3666,
callback=self.callback,
)
self.x.get_list.assert_called_with(
'ReceiveMessage', {
'MaxNumberOfMessages': 4,
'VisibilityTimeout': 3666,
},
[('Message', AsyncMessage)],
'http://aws.com', callback=self.callback,
parent=queue,
)
def test_receive_message__with_wait_time_seconds(self):
queue = Mock(name='queue')
self.x.receive_message(
queue,
self.x.get_queue_url('queue'),
4,
wait_time_seconds=303,
callback=self.callback,
)
self.x.get_list.assert_called_with(
'ReceiveMessage', {
'MaxNumberOfMessages': 4,
'WaitTimeSeconds': 303,
},
[('Message', AsyncMessage)],
'http://aws.com', callback=self.callback,
parent=queue,
)
def test_receive_message__with_attributes(self):
queue = Mock(name='queue')
self.x.receive_message(
queue,
self.x.get_queue_url('queue'),
4,
attributes=['foo', 'bar'],
callback=self.callback,
)
self.x.get_list.assert_called_with(
'ReceiveMessage', {
'AttributeName.1': 'foo',
'AttributeName.2': 'bar',
'MaxNumberOfMessages': 4,
},
[('Message', AsyncMessage)],
'http://aws.com', callback=self.callback,
parent=queue,
)
def MockMessage(self, id=None, receipt_handle=None, body=None):
m = Mock(name='message')
m.id = id or uuid()
m.receipt_handle = receipt_handle or uuid()
m._body = body
def _get_body():
return m._body
m.get_body.side_effect = _get_body
def _set_body(value):
m._body = value
m.set_body.side_effect = _set_body
return m
def test_delete_message(self):
queue = Mock(name='queue')
message = self.MockMessage()
self.x.delete_message(queue, message.receipt_handle,
callback=self.callback)
self.x.get_status.assert_called_with(
'DeleteMessage', {'ReceiptHandle': message.receipt_handle},
queue, callback=self.callback,
)
def test_delete_message_batch(self):
queue = Mock(name='queue')
messages = [self.MockMessage('1', 'r1'),
self.MockMessage('2', 'r2')]
self.x.delete_message_batch(queue, messages, callback=self.callback)
self.x.get_object.assert_called_with(
'DeleteMessageBatch', {
'DeleteMessageBatchRequestEntry.1.Id': '1',
'DeleteMessageBatchRequestEntry.1.ReceiptHandle': 'r1',
'DeleteMessageBatchRequestEntry.2.Id': '2',
'DeleteMessageBatchRequestEntry.2.ReceiptHandle': 'r2',
},
queue.id, verb='POST', callback=self.callback,
)
def test_send_message(self):
queue = Mock(name='queue')
self.x.send_message(queue, 'hello', callback=self.callback)
self.x.get_object.assert_called_with(
'SendMessage', {'MessageBody': 'hello'},
queue.id, verb='POST', callback=self.callback,
)
def test_send_message__with_delay_seconds(self):
queue = Mock(name='queue')
self.x.send_message(
queue, 'hello', delay_seconds='303', callback=self.callback,
)
self.x.get_object.assert_called_with(
'SendMessage', {'MessageBody': 'hello', 'DelaySeconds': 303},
queue.id, verb='POST', callback=self.callback,
)
def test_send_message_batch(self):
queue = Mock(name='queue')
messages = [self.MockMessage('1', 'r1', 'A'),
self.MockMessage('2', 'r2', 'B')]
self.x.send_message_batch(
queue, [(m.id, m.get_body(), 303) for m in messages],
callback=self.callback
)
self.x.get_object.assert_called_with(
'SendMessageBatch', {
'SendMessageBatchRequestEntry.1.Id': '1',
'SendMessageBatchRequestEntry.1.MessageBody': 'A',
'SendMessageBatchRequestEntry.1.DelaySeconds': 303,
'SendMessageBatchRequestEntry.2.Id': '2',
'SendMessageBatchRequestEntry.2.MessageBody': 'B',
'SendMessageBatchRequestEntry.2.DelaySeconds': 303,
},
queue.id, verb='POST', callback=self.callback,
)
def test_change_message_visibility(self):
queue = Mock(name='queue')
self.x.change_message_visibility(
queue, 'rcpt', 33, callback=self.callback,
)
self.x.get_status.assert_called_with(
'ChangeMessageVisibility', {
'ReceiptHandle': 'rcpt',
'VisibilityTimeout': 33,
},
queue.id, callback=self.callback,
)
def test_change_message_visibility_batch(self):
queue = Mock(name='queue')
messages = [
(self.MockMessage('1', 'r1'), 303),
(self.MockMessage('2', 'r2'), 909),
]
self.x.change_message_visibility_batch(
queue, messages, callback=self.callback,
)
def preamble(n):
return '.'.join(['ChangeMessageVisibilityBatchRequestEntry', n])
self.x.get_object.assert_called_with(
'ChangeMessageVisibilityBatch', {
preamble('1.Id'): '1',
preamble('1.ReceiptHandle'): 'r1',
preamble('1.VisibilityTimeout'): 303,
preamble('2.Id'): '2',
preamble('2.ReceiptHandle'): 'r2',
preamble('2.VisibilityTimeout'): 909,
},
queue.id, verb='POST', callback=self.callback,
)
def test_get_all_queues(self):
self.x.get_all_queues(callback=self.callback)
self.x.get_list.assert_called_with(
'ListQueues', {}, [('QueueUrl', AsyncQueue)],
callback=self.callback,
)
def test_get_all_queues__with_prefix(self):
self.x.get_all_queues(prefix='kombu.', callback=self.callback)
self.x.get_list.assert_called_with(
'ListQueues', {'QueueNamePrefix': 'kombu.'},
[('QueueUrl', AsyncQueue)],
callback=self.callback,
)
def MockQueue(self, url):
q = Mock(name='Queue')
q.url = url
return q
def test_get_queue(self):
self.x.get_queue('foo', callback=self.callback)
self.x.get_list.assert_called()
on_ready = self.x.get_list.call_args[1]['callback']
queues = [
self.MockQueue('/queues/bar'),
self.MockQueue('/queues/baz'),
self.MockQueue('/queues/foo'),
]
on_ready(queues)
self.callback.assert_called_with(queues[-1])
self.x.get_list.assert_called_with(
'ListQueues', {'QueueNamePrefix': 'foo'},
[('QueueUrl', AsyncQueue)],
callback=on_ready,
)
def test_get_dead_letter_source_queues(self):
queue = Mock(name='queue')
self.x.get_dead_letter_source_queues(queue, callback=self.callback)
self.x.get_list.assert_called_with(
'ListDeadLetterSourceQueues', {'QueueUrl': queue.url},
[('QueueUrl', AsyncQueue)], callback=self.callback,
)
def test_add_permission(self):
queue = Mock(name='queue')
self.x.add_permission(
queue, 'label', 'accid', 'action', callback=self.callback,
)
self.x.get_status.assert_called_with(
'AddPermission', {
'Label': 'label',
'AWSAccountId': 'accid',
'ActionName': 'action',
},
queue.id, callback=self.callback,
)
def test_remove_permission(self):
queue = Mock(name='queue')
self.x.remove_permission(queue, 'label', callback=self.callback)
self.x.get_status.assert_called_with(
'RemovePermission', {'Label': 'label'}, queue.id,
callback=self.callback,
)
|
import os
from babelfish import Language, language_converters
import pytest
from vcr import VCR
from subliminal.providers.tvsubtitles import TVsubtitlesProvider, TVsubtitlesSubtitle
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
cassette_library_dir=os.path.realpath(os.path.realpath(os.path.join('tests', 'cassettes', 'tvsubtitles'))))
@pytest.mark.converter
def test_converter_convert_alpha3_country():
assert language_converters['tvsubtitles'].convert('por', 'BR') == 'br'
@pytest.mark.converter
def test_converter_convert_alpha3():
assert language_converters['tvsubtitles'].convert('ukr') == 'ua'
@pytest.mark.converter
def test_converter_convert_alpha3_alpha2_converter():
assert language_converters['tvsubtitles'].convert('fra') == 'fr'
@pytest.mark.converter
def test_converter_reverse():
assert language_converters['tvsubtitles'].reverse('gr') == ('ell',)
@pytest.mark.converter
def test_converter_reverse_name_converter():
assert language_converters['tvsubtitles'].reverse('en') == ('eng', None, None)
def test_get_matches_format_release_group(episodes):
subtitle = TVsubtitlesSubtitle(Language('fra'), None, 249518, 'The Big Bang Theory', 7, 5, 2007, 'HDTV',
'lol-dimension')
matches = subtitle.get_matches(episodes['bbt_s07e05'])
assert matches == {'series', 'season', 'episode', 'year', 'country', 'source', 'release_group'}
def test_get_matches_format_equivalent_release_group(episodes):
subtitle = TVsubtitlesSubtitle(Language('fra'), None, 249518, 'The Big Bang Theory', 7, 5, 2007, 'HDTV',
'lol')
matches = subtitle.get_matches(episodes['bbt_s07e05'])
assert matches == {'series', 'season', 'episode', 'year', 'country', 'source', 'release_group'}
def test_get_matches_video_codec_resolution(episodes):
subtitle = TVsubtitlesSubtitle(Language('por'), None, 261077, 'Game of Thrones', 3, 10, None, '720p.BluRay',
'x264-DEMAND')
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == {'series', 'season', 'episode', 'year', 'country', 'video_codec', 'resolution'}
def test_get_matches_only_year_country(episodes):
subtitle = TVsubtitlesSubtitle(Language('por'), None, 261077, 'Game of Thrones', 3, 10, None, '1080p.BluRay',
'DEMAND')
matches = subtitle.get_matches(episodes['bbt_s07e05'])
assert matches == {'year', 'country'}
def test_get_matches_no_match(episodes):
subtitle = TVsubtitlesSubtitle(Language('por'), None, 261077, 'Game of Thrones', 3, 10, 2011, '1080p.BluRay',
'DEMAND')
matches = subtitle.get_matches(episodes['house_of_cards_us_s06e01'])
assert matches == set()
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('The Big Bang Theory')
assert show_id == 154
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_incomplete():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('The Big Bang')
assert show_id is None
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_ambiguous():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('New Girl')
assert show_id == 977
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_us():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('House of Cards', 2013)
assert show_id == 1246
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_uk():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('Beautiful People')
assert show_id == 657
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_no_year():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('Dallas')
assert show_id == 646
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_year_in_title():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('Dallas', 2012)
assert show_id == 1127
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_error():
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id('The Big How I Met Your Mother')
assert show_id is None
@pytest.mark.integration
@vcr.use_cassette
def test_get_episode_ids():
expected_episode_ids = {1: 34274, 2: 34275, 3: 34276, 4: 34277, 5: 34849, 6: 34923, 7: 35022, 8: 35023, 9: 35436,
10: 35503, 11: 35887, 12: 36369, 13: 36513, 14: 36610, 15: 36718, 16: 36795, 17: 37152,
18: 37153, 19: 37407, 20: 37863, 21: 38218, 22: 38574, 23: 38686, 24: 38687}
with TVsubtitlesProvider() as provider:
episode_ids = provider.get_episode_ids(154, 5)
assert episode_ids == expected_episode_ids
@pytest.mark.integration
@vcr.use_cassette
def test_get_episode_ids_wrong_season():
with TVsubtitlesProvider() as provider:
episode_ids = provider.get_episode_ids(154, 55)
assert len(episode_ids) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_query(episodes):
video = episodes['bbt_s07e05']
expected_subtitles = {268673, 249733, 249518, 249519, 249714, 32596, 249590, 249592, 249499, 261214}
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id(video.series, video.year)
subtitles = provider.query(show_id, video.series, video.season, video.episode, video.year)
assert {subtitle.subtitle_id for subtitle in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_query_no_year(episodes):
video = episodes['dallas_s01e03']
expected_subtitles = {124753}
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id(video.series, video.year)
subtitles = provider.query(show_id, video.series, video.season, video.episode, video.year)
assert {subtitle.subtitle_id for subtitle in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_series(episodes):
video = episodes['bbt_s07e05']
with TVsubtitlesProvider() as provider:
subtitles = provider.query(155, video.series[:12], video.season, video.episode, video.year)
assert len(subtitles) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_episode(episodes):
video = episodes['bbt_s07e05']
with TVsubtitlesProvider() as provider:
show_id = provider.search_show_id(video.series, video.year)
subtitles = provider.query(show_id, video.series, video.season, 55, video.year)
assert len(subtitles) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles(episodes):
video = episodes['bbt_s07e05']
languages = {Language('eng'), Language('fra')}
expected_subtitles = {249592, 249499, 32596, 249518}
with TVsubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.subtitle_id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
assert subtitles[0].release == 'The Big Bang Theory 7x05 (HDTV.LOL)'
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(episodes):
video = episodes['bbt_s07e05']
languages = {Language('eng'), Language('fra')}
with TVsubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
provider.download_subtitle(subtitles[0])
assert subtitles[0].content is not None
assert subtitles[0].is_valid() is True
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode_alternative_series(episodes):
video = episodes['turn_s03e01']
languages = {Language('fra')}
expected_subtitles = {307588}
with TVsubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.subtitle_id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
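# --- Running these tests (illustrative note, not part of the suite) ---
# The converter and matching tests run offline; the @pytest.mark.integration
# tests replay HTTP traffic from the VCR cassettes configured above. A minimal
# sketch, assuming this file lives at tests/test_tvsubtitles.py (the path is an
# assumption):
#
#     pytest -m "not integration" tests/test_tvsubtitles.py   # offline tests only
#     pytest -m integration tests/test_tvsubtitles.py         # cassette-backed tests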
|
from app.wraps.login_wrap import login_required
from app import app
from app.utils import ResponseUtil, RequestUtil, StringUtil, JsonUtil, AuthUtil
from app.database.model import WebHook, Server, History
from app.tasks import tasks
# get webhook list
@app.route('/api/webhook/list', methods=['GET'])
@login_required()
def api_webhook_list():
# login user
user_id = RequestUtil.get_login_user().get('id', '')
webhooks = AuthUtil.has_auth_webhooks(user_id)
    # convert to JSON
webhooks = [webhook.dict(True) for webhook in webhooks]
return ResponseUtil.standard_response(1, webhooks)
# new webhook
@app.route('/api/webhook/new', methods=['POST'])
@login_required()
def api_webhook_new():
# login user
user_id = RequestUtil.get_login_user().get('id', '')
server_id = RequestUtil.get_parameter('server_id', '')
# server must be added by yourself
if not Server.query.filter_by(id=server_id, user_id=user_id).first():
        return ResponseUtil.standard_response(0, 'Permission denied!')
repo = RequestUtil.get_parameter('repo', '')
branch = RequestUtil.get_parameter('branch', '')
shell = RequestUtil.get_parameter('shell', '')
if not all((repo, branch, shell, server_id)):
        return ResponseUtil.standard_response(0, 'Form data cannot be blank!')
webhook_id = RequestUtil.get_parameter('id', '')
if webhook_id:
webhook = AuthUtil.has_admin_auth(user_id, webhook_id)
if not webhook:
return ResponseUtil \
                .standard_response(0, 'WebHook does not exist or permission denied!')
webhook.repo = repo
webhook.branch = branch
webhook.shell = shell
webhook.server_id = server_id
else:
# new webhook
webhook = WebHook(
repo=repo,
branch=branch,
shell=shell,
server_id=server_id,
user_id=user_id,
key=StringUtil.md5_token()
)
webhook.save()
return ResponseUtil.standard_response(1, webhook.dict(with_key=True))
@app.route('/api/webhook/delete', methods=['POST'])
@login_required()
def api_webhook_delete():
# login user
user_id = RequestUtil.get_login_user().get('id', '')
webhook_id = RequestUtil.get_parameter('webhook_id', '')
    # verify creator permission
webhook = AuthUtil.has_admin_auth(user_id, webhook_id)
if not webhook:
        return ResponseUtil.standard_response(0, 'Permission denied!')
webhook.deleted = True
webhook.save()
return ResponseUtil.standard_response(1, 'Success')
@app.route('/api/webhook/retry', methods=['POST'])
@login_required()
def api_webhook_retry():
# login user
user_id = RequestUtil.get_login_user().get('id', '')
webhook_id = RequestUtil.get_parameter('webhook_id', '')
data = {
'src': 'Manually executed'
}
webhook = WebHook.query.get(webhook_id)
if not webhook:
        return ResponseUtil.standard_response(0, 'WebHook does not exist!')
if not AuthUtil.has_readonly_auth(user_id, webhook_id):
        return ResponseUtil.standard_response(0, 'Permission denied!')
# if webhook.status not in ['3', '4', '5']:
# return ResponseUtil.standard_response(0, 'Webhook is Executing!')
history = History(webhook_id=webhook.id,
data=JsonUtil.object_2_json(data))
history.updateStatus('1')
# status is waiting
webhook.updateStatus('1')
# do the async task
tasks.do_webhook_shell.delay(webhook.id, history.id, data, user_id=user_id)
return ResponseUtil.standard_response(1, webhook.dict())
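# --- Illustrative client usage (not part of this module) ---
# A minimal sketch of how a logged-in client might trigger a retry, assuming
# the app is served at http://localhost:5000 and that the `requests` package
# is available; the base URL, form field handling, and response layout are
# assumptions for illustration only.
#
#     import requests
#
#     session = requests.Session()
#     # ... authenticate first so the @login_required() check passes ...
#     resp = session.post(
#         'http://localhost:5000/api/webhook/retry',
#         data={'webhook_id': 42},  # hypothetical webhook id
#     )
#     print(resp.json())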
|
from typing import Any, Dict
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from .bridge import DynaliteBridge
from .const import DOMAIN, LOGGER
class DynaliteFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Dynalite config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self) -> None:
"""Initialize the Dynalite flow."""
self.host = None
async def async_step_import(self, import_info: Dict[str, Any]) -> Any:
"""Import a new bridge as a config entry."""
LOGGER.debug("Starting async_step_import - %s", import_info)
host = import_info[CONF_HOST]
for entry in self.hass.config_entries.async_entries(DOMAIN):
if entry.data[CONF_HOST] == host:
if entry.data != import_info:
self.hass.config_entries.async_update_entry(entry, data=import_info)
return self.async_abort(reason="already_configured")
# New entry
bridge = DynaliteBridge(self.hass, import_info)
if not await bridge.async_setup():
LOGGER.error("Unable to setup bridge - import info=%s", import_info)
return self.async_abort(reason="no_connection")
LOGGER.debug("Creating entry for the bridge - %s", import_info)
return self.async_create_entry(title=host, data=import_info)
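# --- Illustrative note (YAML layout is an assumption) ---
# async_step_import above receives the bridge configuration that the dynalite
# integration imports from YAML; conceptually, an entry along these lines ends
# up as import_info with CONF_HOST set (the exact key names are assumptions,
# not verified against the integration's schema):
#
#     dynalite:
#       bridges:
#         - host: 192.168.1.42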
|
import sys
import mne
import os.path as op
ANONYMIZE_FILE_PREFIX = 'anon'
def mne_anonymize(fif_fname, out_fname, keep_his, daysback, overwrite):
"""Call *anonymize_info* on fif file and save.
Parameters
----------
fif_fname : str
Raw fif File
out_fname : str | None
Output file name
relative paths are saved relative to parent dir of fif_fname
None will save to parent dir of fif_fname with default prefix
daysback : int | None
Number of days to subtract from all dates.
If None will default to move date of service to Jan 1 2000
keep_his : bool
If True his_id of subject_info will NOT be overwritten.
defaults to False
overwrite : bool
Overwrite output file if it already exists
"""
raw = mne.io.read_raw_fif(fif_fname, allow_maxshield=True)
raw.anonymize(daysback=daysback, keep_his=keep_his)
# determine out_fname
dir_name = op.split(fif_fname)[0]
if out_fname is None:
fif_bname = op.basename(fif_fname)
out_fname = op.join(dir_name,
"{}-{}".format(ANONYMIZE_FILE_PREFIX, fif_bname))
elif not op.isabs(out_fname):
out_fname = op.join(dir_name, out_fname)
raw.save(out_fname, overwrite=overwrite)
def run():
"""Run *mne_anonymize* command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-f", "--file", type="string", dest="file",
help="Name of file to modify.", metavar="FILE",
default=None)
parser.add_option("-o", "--output", type="string", dest="output",
help="Name of anonymized output file."
"`anon-` prefix is added to FILE if not given",
metavar="OUTFILE", default=None)
parser.add_option("--keep_his", dest="keep_his", action="store_true",
help="Keep the HIS tag (not advised)", default=False)
parser.add_option("-d", "--daysback", type="int", dest="daysback",
help="Move dates in file backwards by this many days.",
metavar="N_DAYS", default=None)
parser.add_option("--overwrite", dest="overwrite", action="store_true",
help="Overwrite input file.", default=False)
options, args = parser.parse_args()
if options.file is None:
parser.print_help()
sys.exit(1)
fname = options.file
out_fname = options.output
keep_his = options.keep_his
daysback = options.daysback
overwrite = options.overwrite
if not fname.endswith('.fif'):
raise ValueError('%s does not seem to be a .fif file.' % fname)
mne_anonymize(fname, out_fname, keep_his, daysback, overwrite)
is_main = (__name__ == '__main__')
if is_main:
run()
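# --- Illustrative usage (file names and values are assumptions) ---
# A minimal sketch of both entry points; 'raw.fif' and the 30-day shift are
# example values, not defaults of this module.
#
#     # Programmatic call: writes anon-raw.fif next to the input file
#     mne_anonymize('raw.fif', None, keep_his=False, daysback=30,
#                   overwrite=True)
#
#     # Roughly equivalent command line, assuming this module is exposed as
#     # the `mne anonymize` command:
#     #   $ mne anonymize -f raw.fif -d 30 --overwrite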
|
import asyncio
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple, Union
from zigpy.exceptions import ZigbeeException
import zigpy.zcl.clusters.hvac as hvac
from zigpy.zcl.foundation import Status
from homeassistant.core import callback
from .. import registries, typing as zha_typing
from ..const import (
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_OP,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import retryable_req
from .base import ZigbeeChannel
AttributeUpdateRecord = namedtuple("AttributeUpdateRecord", "attr_id, attr_name, value")
REPORT_CONFIG_CLIMATE = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 25)
REPORT_CONFIG_CLIMATE_DEMAND = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 5)
REPORT_CONFIG_CLIMATE_DISCRETE = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 1)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Dehumidification.cluster_id)
class Dehumidification(ZigbeeChannel):
"""Dehumidification channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Fan.cluster_id)
class FanChannel(ZigbeeChannel):
"""Fan channel."""
_value_attribute = 0
REPORT_CONFIG = ({"attr": "fan_mode", "config": REPORT_CONFIG_OP},)
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
):
"""Init Thermostat channel instance."""
super().__init__(cluster, ch_pool)
self._fan_mode = None
@property
def fan_mode(self) -> Optional[int]:
"""Return current fan mode."""
return self._fan_mode
async def async_set_speed(self, value) -> None:
"""Set the speed of the fan."""
try:
await self.cluster.write_attributes({"fan_mode": value})
except ZigbeeException as ex:
self.error("Could not set speed: %s", ex)
return
async def async_update(self) -> None:
"""Retrieve latest state."""
result = await self.get_attribute_value("fan_mode", from_cache=True)
if result is not None:
self._fan_mode = result
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", 0, "fan_mode", result
)
@callback
def attribute_updated(self, attrid: int, value: Any) -> None:
"""Handle attribute update from fan cluster."""
attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
self.debug(
"Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
)
if attrid == self._value_attribute:
self._fan_mode = value
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, attr_name, value
)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Pump.cluster_id)
class Pump(ZigbeeChannel):
"""Pump channel."""
@registries.CLIMATE_CLUSTERS.register(hvac.Thermostat.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Thermostat.cluster_id)
class ThermostatChannel(ZigbeeChannel):
"""Thermostat channel."""
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Init Thermostat channel instance."""
super().__init__(cluster, ch_pool)
self._init_attrs = {
"abs_min_heat_setpoint_limit": True,
"abs_max_heat_setpoint_limit": True,
"abs_min_cool_setpoint_limit": True,
"abs_max_cool_setpoint_limit": True,
"ctrl_seqe_of_oper": False,
"local_temp": False,
"max_cool_setpoint_limit": True,
"max_heat_setpoint_limit": True,
"min_cool_setpoint_limit": True,
"min_heat_setpoint_limit": True,
"occupancy": False,
"occupied_cooling_setpoint": False,
"occupied_heating_setpoint": False,
"pi_cooling_demand": False,
"pi_heating_demand": False,
"running_mode": False,
"running_state": False,
"system_mode": False,
"unoccupied_heating_setpoint": False,
"unoccupied_cooling_setpoint": False,
}
self._abs_max_cool_setpoint_limit = 3200 # 32C
self._abs_min_cool_setpoint_limit = 1600 # 16C
self._ctrl_seqe_of_oper = 0xFF
self._abs_max_heat_setpoint_limit = 3000 # 30C
self._abs_min_heat_setpoint_limit = 700 # 7C
self._running_mode = None
self._max_cool_setpoint_limit = None
self._max_heat_setpoint_limit = None
self._min_cool_setpoint_limit = None
self._min_heat_setpoint_limit = None
self._local_temp = None
self._occupancy = None
self._occupied_cooling_setpoint = None
self._occupied_heating_setpoint = None
self._pi_cooling_demand = None
self._pi_heating_demand = None
self._running_state = None
self._system_mode = None
self._unoccupied_cooling_setpoint = None
self._unoccupied_heating_setpoint = None
self._report_config = [
{"attr": "local_temp", "config": REPORT_CONFIG_CLIMATE},
{"attr": "occupied_cooling_setpoint", "config": REPORT_CONFIG_CLIMATE},
{"attr": "occupied_heating_setpoint", "config": REPORT_CONFIG_CLIMATE},
{"attr": "unoccupied_cooling_setpoint", "config": REPORT_CONFIG_CLIMATE},
{"attr": "unoccupied_heating_setpoint", "config": REPORT_CONFIG_CLIMATE},
{"attr": "running_mode", "config": REPORT_CONFIG_CLIMATE},
{"attr": "running_state", "config": REPORT_CONFIG_CLIMATE_DEMAND},
{"attr": "system_mode", "config": REPORT_CONFIG_CLIMATE},
{"attr": "occupancy", "config": REPORT_CONFIG_CLIMATE_DISCRETE},
{"attr": "pi_cooling_demand", "config": REPORT_CONFIG_CLIMATE_DEMAND},
{"attr": "pi_heating_demand", "config": REPORT_CONFIG_CLIMATE_DEMAND},
]
@property
def abs_max_cool_setpoint_limit(self) -> int:
"""Absolute maximum cooling setpoint."""
return self._abs_max_cool_setpoint_limit
@property
def abs_min_cool_setpoint_limit(self) -> int:
"""Absolute minimum cooling setpoint."""
return self._abs_min_cool_setpoint_limit
@property
def abs_max_heat_setpoint_limit(self) -> int:
"""Absolute maximum heating setpoint."""
return self._abs_max_heat_setpoint_limit
@property
def abs_min_heat_setpoint_limit(self) -> int:
"""Absolute minimum heating setpoint."""
return self._abs_min_heat_setpoint_limit
@property
def ctrl_seqe_of_oper(self) -> int:
"""Control Sequence of operations attribute."""
return self._ctrl_seqe_of_oper
@property
def max_cool_setpoint_limit(self) -> int:
"""Maximum cooling setpoint."""
if self._max_cool_setpoint_limit is None:
return self.abs_max_cool_setpoint_limit
return self._max_cool_setpoint_limit
@property
def min_cool_setpoint_limit(self) -> int:
"""Minimum cooling setpoint."""
if self._min_cool_setpoint_limit is None:
return self.abs_min_cool_setpoint_limit
return self._min_cool_setpoint_limit
@property
def max_heat_setpoint_limit(self) -> int:
"""Maximum heating setpoint."""
if self._max_heat_setpoint_limit is None:
return self.abs_max_heat_setpoint_limit
return self._max_heat_setpoint_limit
@property
def min_heat_setpoint_limit(self) -> int:
"""Minimum heating setpoint."""
if self._min_heat_setpoint_limit is None:
return self.abs_min_heat_setpoint_limit
return self._min_heat_setpoint_limit
@property
def local_temp(self) -> Optional[int]:
"""Thermostat temperature."""
return self._local_temp
@property
def occupancy(self) -> Optional[int]:
"""Is occupancy detected."""
return self._occupancy
@property
def occupied_cooling_setpoint(self) -> Optional[int]:
"""Temperature when room is occupied."""
return self._occupied_cooling_setpoint
@property
def occupied_heating_setpoint(self) -> Optional[int]:
"""Temperature when room is occupied."""
return self._occupied_heating_setpoint
@property
def pi_cooling_demand(self) -> int:
"""Cooling demand."""
return self._pi_cooling_demand
@property
def pi_heating_demand(self) -> int:
"""Heating demand."""
return self._pi_heating_demand
@property
def running_mode(self) -> Optional[int]:
"""Thermostat running mode."""
return self._running_mode
@property
def running_state(self) -> Optional[int]:
"""Thermostat running state, state of heat, cool, fan relays."""
return self._running_state
@property
def system_mode(self) -> Optional[int]:
"""System mode."""
return self._system_mode
@property
def unoccupied_cooling_setpoint(self) -> Optional[int]:
"""Temperature when room is not occupied."""
return self._unoccupied_cooling_setpoint
@property
def unoccupied_heating_setpoint(self) -> Optional[int]:
"""Temperature when room is not occupied."""
return self._unoccupied_heating_setpoint
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute update cluster."""
attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
self.debug(
"Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
)
setattr(self, f"_{attr_name}", value)
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
AttributeUpdateRecord(attrid, attr_name, value),
)
async def _chunk_attr_read(self, attrs, cached=False):
chunk, attrs = attrs[:4], attrs[4:]
while chunk:
res, fail = await self.cluster.read_attributes(chunk, allow_cache=cached)
self.debug("read attributes: Success: %s. Failed: %s", res, fail)
for attr in chunk:
self._init_attrs.pop(attr, None)
if attr in fail:
continue
if isinstance(attr, str):
setattr(self, f"_{attr}", res[attr])
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
AttributeUpdateRecord(None, attr, res[attr]),
)
chunk, attrs = attrs[:4], attrs[4:]
async def configure_reporting(self):
"""Configure attribute reporting for a cluster.
This also swallows DeliveryError exceptions that are thrown when
devices are unreachable.
"""
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
kwargs["manufacturer"] = self._ch_pool.manufacturer_code
chunk, rest = self._report_config[:4], self._report_config[4:]
while chunk:
attrs = {record["attr"]: record["config"] for record in chunk}
try:
res = await self.cluster.configure_reporting_multiple(attrs, **kwargs)
self._configure_reporting_status(attrs, res[0])
except (ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"failed to set reporting on '%s' cluster for: %s",
self.cluster.ep_attribute,
str(ex),
)
break
chunk, rest = rest[:4], rest[4:]
def _configure_reporting_status(
self, attrs: Dict[Union[int, str], Tuple], res: Union[List, Tuple]
) -> None:
"""Parse configure reporting result."""
if not isinstance(res, list):
# assume default response
self.debug(
"attr reporting for '%s' on '%s': %s",
attrs,
self.name,
res,
)
return
if res[0].status == Status.SUCCESS and len(res) == 1:
self.debug(
"Successfully configured reporting for '%s' on '%s' cluster: %s",
attrs,
self.name,
res,
)
return
failed = [
self.cluster.attributes.get(r.attrid, [r.attrid])[0]
for r in res
if r.status != Status.SUCCESS
]
attrs = {self.cluster.attributes.get(r, [r])[0] for r in attrs}
self.debug(
"Successfully configured reporting for '%s' on '%s' cluster",
attrs - set(failed),
self.name,
)
self.debug(
"Failed to configure reporting for '%s' on '%s' cluster: %s",
failed,
self.name,
res,
)
@retryable_req(delays=(1, 1, 3))
async def async_initialize(self, from_cache):
"""Initialize channel."""
cached = [a for a, cached in self._init_attrs.items() if cached]
uncached = [a for a, cached in self._init_attrs.items() if not cached]
await self._chunk_attr_read(cached, cached=True)
await self._chunk_attr_read(uncached, cached=False)
await super().async_initialize(from_cache)
async def async_set_operation_mode(self, mode) -> bool:
"""Set Operation mode."""
if not await self.write_attributes({"system_mode": mode}):
self.debug("couldn't set '%s' operation mode", mode)
return False
self._system_mode = mode
self.debug("set system to %s", mode)
return True
async def async_set_heating_setpoint(
self, temperature: int, is_away: bool = False
) -> bool:
"""Set heating setpoint."""
if is_away:
data = {"unoccupied_heating_setpoint": temperature}
else:
data = {"occupied_heating_setpoint": temperature}
if not await self.write_attributes(data):
self.debug("couldn't set heating setpoint")
return False
if is_away:
self._unoccupied_heating_setpoint = temperature
else:
self._occupied_heating_setpoint = temperature
self.debug("set heating setpoint to %s", temperature)
return True
async def async_set_cooling_setpoint(
self, temperature: int, is_away: bool = False
) -> bool:
"""Set cooling setpoint."""
if is_away:
data = {"unoccupied_cooling_setpoint": temperature}
else:
data = {"occupied_cooling_setpoint": temperature}
if not await self.write_attributes(data):
self.debug("couldn't set cooling setpoint")
return False
if is_away:
self._unoccupied_cooling_setpoint = temperature
else:
self._occupied_cooling_setpoint = temperature
self.debug("set cooling setpoint to %s", temperature)
return True
async def get_occupancy(self) -> Optional[bool]:
"""Get unreportable occupancy attribute."""
try:
res, fail = await self.cluster.read_attributes(["occupancy"])
self.debug("read 'occupancy' attr, success: %s, fail: %s", res, fail)
if "occupancy" not in res:
return None
self._occupancy = res["occupancy"]
return bool(self.occupancy)
except ZigbeeException as ex:
self.debug("Couldn't read 'occupancy' attribute: %s", ex)
async def write_attributes(self, data, **kwargs):
"""Write attributes helper."""
try:
res = await self.cluster.write_attributes(data, **kwargs)
except ZigbeeException as exc:
self.debug("couldn't write %s: %s", data, exc)
return False
self.debug("wrote %s attrs, Status: %s", data, res)
return self.check_result(res)
@staticmethod
def check_result(res: list) -> bool:
"""Normalize the result."""
if not isinstance(res, list):
return False
return all([record.status == Status.SUCCESS for record in res[0]])
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.UserInterface.cluster_id)
class UserInterface(ZigbeeChannel):
"""User interface (thermostat) channel."""
|
import os
import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert re.search(r'instance-[12]', host.check_output('hostname -s'))
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
import asyncio
from concurrent.futures import ThreadPoolExecutor
import dataclasses
import logging
import sys
import threading
from typing import Any, Dict, Optional
from homeassistant import bootstrap
from homeassistant.core import callback
from homeassistant.helpers.frame import warn_use
#
# Python 3.8 has significantly fewer workers by default
# than Python 3.7. In order to be consistent between
# supported versions, we need to set max_workers.
#
# In most cases the workers are not CPU bound, as they
# are sleeping/blocking while waiting for data from integrations
# to update, so this number should be higher than the default
# use case.
#
MAX_EXECUTOR_WORKERS = 64
@dataclasses.dataclass
class RuntimeConfig:
"""Class to hold the information for running Home Assistant."""
config_dir: str
skip_pip: bool = False
safe_mode: bool = False
verbose: bool = False
log_rotate_days: Optional[int] = None
log_file: Optional[str] = None
log_no_color: bool = False
debug: bool = False
open_ui: bool = False
# In Python 3.8+ proactor policy is the default on Windows
if sys.platform == "win32" and sys.version_info[:2] < (3, 8):
PolicyBase = asyncio.WindowsProactorEventLoopPolicy
else:
PolicyBase = asyncio.DefaultEventLoopPolicy
class HassEventLoopPolicy(PolicyBase): # type: ignore
"""Event loop policy for Home Assistant."""
def __init__(self, debug: bool) -> None:
"""Init the event loop policy."""
super().__init__()
self.debug = debug
@property
def loop_name(self) -> str:
"""Return name of the loop."""
return self._loop_factory.__name__ # type: ignore
def new_event_loop(self) -> asyncio.AbstractEventLoop:
"""Get the event loop."""
loop: asyncio.AbstractEventLoop = super().new_event_loop()
loop.set_exception_handler(_async_loop_exception_handler)
if self.debug:
loop.set_debug(True)
executor = ThreadPoolExecutor(
thread_name_prefix="SyncWorker", max_workers=MAX_EXECUTOR_WORKERS
)
loop.set_default_executor(executor)
loop.set_default_executor = warn_use( # type: ignore
loop.set_default_executor, "sets default executor on the event loop"
)
# Python 3.9+
if hasattr(loop, "shutdown_default_executor"):
return loop
# Copied from Python 3.9 source
def _do_shutdown(future: asyncio.Future) -> None:
try:
executor.shutdown(wait=True)
loop.call_soon_threadsafe(future.set_result, None)
except Exception as ex: # pylint: disable=broad-except
loop.call_soon_threadsafe(future.set_exception, ex)
async def shutdown_default_executor() -> None:
"""Schedule the shutdown of the default executor."""
future = loop.create_future()
thread = threading.Thread(target=_do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
setattr(loop, "shutdown_default_executor", shutdown_default_executor)
return loop
@callback
def _async_loop_exception_handler(_: Any, context: Dict) -> None:
"""Handle all exception inside the core loop."""
kwargs = {}
exception = context.get("exception")
if exception:
kwargs["exc_info"] = (type(exception), exception, exception.__traceback__)
logging.getLogger(__package__).error(
"Error doing job: %s", context["message"], **kwargs # type: ignore
)
async def setup_and_run_hass(runtime_config: RuntimeConfig) -> int:
"""Set up Home Assistant and run."""
hass = await bootstrap.async_setup_hass(runtime_config)
if hass is None:
return 1
return await hass.async_run()
def run(runtime_config: RuntimeConfig) -> int:
"""Run Home Assistant."""
asyncio.set_event_loop_policy(HassEventLoopPolicy(runtime_config.debug))
return asyncio.run(setup_and_run_hass(runtime_config))
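# --- Illustrative usage (hypothetical config dir) ---
# A minimal sketch of launching Home Assistant through this module; the
# "/config" path and the debug flag are example values only.
#
#     if __name__ == "__main__":
#         exit_code = run(RuntimeConfig(config_dir="/config", debug=True))
#         raise SystemExit(exit_code)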
|
from datetime import timedelta
import re
import pytest
from homeassistant.components.frontend import (
CONF_EXTRA_HTML_URL,
CONF_EXTRA_HTML_URL_ES5,
CONF_JS_VERSION,
CONF_THEMES,
DOMAIN,
EVENT_PANELS_UPDATED,
THEMES_STORAGE_KEY,
)
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.loader import async_get_integration
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from tests.async_mock import patch
from tests.common import async_capture_events, async_fire_time_changed
CONFIG_THEMES = {
DOMAIN: {
CONF_THEMES: {
"happy": {"primary-color": "red"},
"dark": {"primary-color": "black"},
}
}
}
@pytest.fixture
def mock_http_client(hass, aiohttp_client):
"""Start the Home Assistant HTTP component."""
hass.loop.run_until_complete(async_setup_component(hass, "frontend", {}))
return hass.loop.run_until_complete(aiohttp_client(hass.http.app))
@pytest.fixture
def mock_http_client_with_themes(hass, aiohttp_client):
"""Start the Home Assistant HTTP component."""
hass.loop.run_until_complete(
async_setup_component(
hass,
"frontend",
{DOMAIN: {CONF_THEMES: {"happy": {"primary-color": "red"}}}},
)
)
return hass.loop.run_until_complete(aiohttp_client(hass.http.app))
@pytest.fixture
def mock_http_client_with_urls(hass, aiohttp_client):
"""Start the Home Assistant HTTP component."""
hass.loop.run_until_complete(
async_setup_component(
hass,
"frontend",
{
DOMAIN: {
CONF_JS_VERSION: "auto",
CONF_EXTRA_HTML_URL: ["https://domain.com/my_extra_url.html"],
CONF_EXTRA_HTML_URL_ES5: [
"https://domain.com/my_extra_url_es5.html"
],
}
},
)
)
return hass.loop.run_until_complete(aiohttp_client(hass.http.app))
@pytest.fixture
def mock_onboarded():
"""Mock that we're onboarded."""
with patch(
"homeassistant.components.onboarding.async_is_onboarded", return_value=True
):
yield
async def test_frontend_and_static(mock_http_client, mock_onboarded):
"""Test if we can get the frontend."""
resp = await mock_http_client.get("")
assert resp.status == 200
assert "cache-control" not in resp.headers
text = await resp.text()
# Test we can retrieve frontend.js
frontendjs = re.search(r"(?P<app>\/frontend_es5\/app.[A-Za-z0-9]{8}.js)", text)
assert frontendjs is not None, text
resp = await mock_http_client.get(frontendjs.groups(0)[0])
assert resp.status == 200
assert "public" in resp.headers.get("cache-control")
async def test_dont_cache_service_worker(mock_http_client):
"""Test that we don't cache the service worker."""
resp = await mock_http_client.get("/service_worker.js")
assert resp.status == 200
assert "cache-control" not in resp.headers
async def test_404(mock_http_client):
"""Test for HTTP 404 error."""
resp = await mock_http_client.get("/not-existing")
assert resp.status == HTTP_NOT_FOUND
async def test_we_cannot_POST_to_root(mock_http_client):
"""Test that POST is not allow to root."""
resp = await mock_http_client.post("/")
assert resp.status == 405
async def test_themes_api(hass, hass_ws_client):
"""Test that /api/themes returns correct data."""
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "default"
assert msg["result"]["default_dark_theme"] is None
assert msg["result"]["themes"] == {
"happy": {"primary-color": "red"},
"dark": {"primary-color": "black"},
}
# safe mode
hass.config.safe_mode = True
await client.send_json({"id": 6, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "safe_mode"
assert msg["result"]["themes"] == {
"safe_mode": {"primary-color": "#db4437", "accent-color": "#ffca28"}
}
async def test_themes_persist(hass, hass_ws_client, hass_storage):
"""Test that theme settings are restores after restart."""
hass_storage[THEMES_STORAGE_KEY] = {
"key": THEMES_STORAGE_KEY,
"version": 1,
"data": {
"frontend_default_theme": "happy",
"frontend_default_dark_theme": "dark",
},
}
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "happy"
assert msg["result"]["default_dark_theme"] == "dark"
async def test_themes_save_storage(hass, hass_storage):
"""Test that theme settings are restores after restart."""
hass_storage[THEMES_STORAGE_KEY] = {
"key": THEMES_STORAGE_KEY,
"version": 1,
"data": {},
}
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "happy"}, blocking=True
)
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "dark", "mode": "dark"}, blocking=True
)
# To trigger the call_later
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=60))
# To execute the save
await hass.async_block_till_done()
assert hass_storage[THEMES_STORAGE_KEY]["data"] == {
"frontend_default_theme": "happy",
"frontend_default_dark_theme": "dark",
}
async def test_themes_set_theme(hass, hass_ws_client):
"""Test frontend.set_theme service."""
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "happy"}, blocking=True
)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "happy"
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "default"}, blocking=True
)
await client.send_json({"id": 6, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "default"
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "happy"}, blocking=True
)
await hass.services.async_call(DOMAIN, "set_theme", {"name": "none"}, blocking=True)
await client.send_json({"id": 7, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "default"
async def test_themes_set_theme_wrong_name(hass, hass_ws_client):
"""Test frontend.set_theme service called with wrong name."""
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "wrong"}, blocking=True
)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_theme"] == "default"
async def test_themes_set_dark_theme(hass, hass_ws_client):
"""Test frontend.set_theme service called with dark mode."""
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "dark", "mode": "dark"}, blocking=True
)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_dark_theme"] == "dark"
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "default", "mode": "dark"}, blocking=True
)
await client.send_json({"id": 6, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_dark_theme"] == "default"
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "none", "mode": "dark"}, blocking=True
)
await client.send_json({"id": 7, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_dark_theme"] is None
async def test_themes_set_dark_theme_wrong_name(hass, hass_ws_client):
"""Test frontend.set_theme service called with mode dark and wrong name."""
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "wrong", "mode": "dark"}, blocking=True
)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["default_dark_theme"] is None
async def test_themes_reload_themes(hass, hass_ws_client):
"""Test frontend.reload_themes service."""
assert await async_setup_component(hass, "frontend", CONFIG_THEMES)
client = await hass_ws_client(hass)
with patch(
"homeassistant.components.frontend.async_hass_config_yaml",
return_value={DOMAIN: {CONF_THEMES: {"sad": {"primary-color": "blue"}}}},
):
await hass.services.async_call(
DOMAIN, "set_theme", {"name": "happy"}, blocking=True
)
await hass.services.async_call(DOMAIN, "reload_themes", blocking=True)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["result"]["themes"] == {"sad": {"primary-color": "blue"}}
assert msg["result"]["default_theme"] == "default"
async def test_missing_themes(hass, hass_ws_client):
"""Test that themes API works when themes are not defined."""
await async_setup_component(hass, "frontend", {})
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "frontend/get_themes"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["default_theme"] == "default"
assert msg["result"]["themes"] == {}
async def test_get_panels(hass, hass_ws_client, mock_http_client):
"""Test get_panels command."""
events = async_capture_events(hass, EVENT_PANELS_UPDATED)
resp = await mock_http_client.get("/map")
assert resp.status == HTTP_NOT_FOUND
hass.components.frontend.async_register_built_in_panel(
"map", "Map", "mdi:tooltip-account", require_admin=True
)
resp = await mock_http_client.get("/map")
assert resp.status == 200
assert len(events) == 1
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "get_panels"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["map"]["component_name"] == "map"
assert msg["result"]["map"]["url_path"] == "map"
assert msg["result"]["map"]["icon"] == "mdi:tooltip-account"
assert msg["result"]["map"]["title"] == "Map"
assert msg["result"]["map"]["require_admin"] is True
hass.components.frontend.async_remove_panel("map")
resp = await mock_http_client.get("/map")
assert resp.status == HTTP_NOT_FOUND
assert len(events) == 2
async def test_get_panels_non_admin(hass, hass_ws_client, hass_admin_user):
"""Test get_panels command."""
hass_admin_user.groups = []
await async_setup_component(hass, "frontend", {})
hass.components.frontend.async_register_built_in_panel(
"map", "Map", "mdi:tooltip-account", require_admin=True
)
hass.components.frontend.async_register_built_in_panel(
"history", "History", "mdi:history"
)
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "get_panels"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert "history" in msg["result"]
assert "map" not in msg["result"]
async def test_get_translations(hass, hass_ws_client):
"""Test get_translations command."""
await async_setup_component(hass, "frontend", {})
client = await hass_ws_client(hass)
with patch(
"homeassistant.components.frontend.async_get_translations",
side_effect=lambda hass, lang, category, integration, config_flow: {
"lang": lang
},
):
await client.send_json(
{
"id": 5,
"type": "frontend/get_translations",
"language": "nl",
"category": "lang",
}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"] == {"resources": {"lang": "nl"}}
async def test_auth_load(mock_http_client, mock_onboarded):
"""Test auth component loaded by default."""
resp = await mock_http_client.get("/auth/providers")
assert resp.status == 200
async def test_onboarding_load(mock_http_client):
"""Test onboarding component loaded by default."""
resp = await mock_http_client.get("/api/onboarding")
assert resp.status == 200
async def test_auth_authorize(mock_http_client):
"""Test the authorize endpoint works."""
resp = await mock_http_client.get(
"/auth/authorize?response_type=code&client_id=https://localhost/&"
"redirect_uri=https://localhost/&state=123%23456"
)
assert resp.status == 200
# No caching of auth page.
assert "cache-control" not in resp.headers
text = await resp.text()
# Test we can retrieve authorize.js
authorizejs = re.search(
r"(?P<app>\/frontend_latest\/authorize.[A-Za-z0-9]{8}.js)", text
)
assert authorizejs is not None, text
resp = await mock_http_client.get(authorizejs.groups(0)[0])
assert resp.status == 200
assert "public" in resp.headers.get("cache-control")
async def test_get_version(hass, hass_ws_client):
"""Test get_version command."""
frontend = await async_get_integration(hass, "frontend")
cur_version = next(
req.split("==", 1)[1]
for req in frontend.requirements
if req.startswith("home-assistant-frontend==")
)
await async_setup_component(hass, "frontend", {})
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "frontend/get_version"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"] == {"version": cur_version}
async def test_static_paths(hass, mock_http_client):
"""Test static paths."""
resp = await mock_http_client.get(
"/.well-known/change-password", allow_redirects=False
)
assert resp.status == 302
assert resp.headers["location"] == "/profile"
|
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from . import DATA_SABNZBD, SENSOR_TYPES, SIGNAL_SABNZBD_UPDATED
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the SABnzbd sensors."""
if discovery_info is None:
return
sab_api_data = hass.data[DATA_SABNZBD]
sensors = sab_api_data.sensors
client_name = sab_api_data.name
async_add_entities(
[SabnzbdSensor(sensor, sab_api_data, client_name) for sensor in sensors]
)
class SabnzbdSensor(Entity):
"""Representation of an SABnzbd sensor."""
def __init__(self, sensor_type, sabnzbd_api_data, client_name):
"""Initialize the sensor."""
self._client_name = client_name
self._field_name = SENSOR_TYPES[sensor_type][2]
self._name = SENSOR_TYPES[sensor_type][0]
self._sabnzbd_api = sabnzbd_api_data
self._state = None
self._type = sensor_type
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_SABNZBD_UPDATED, self.update_state
)
)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""Don't poll. Will be updated by dispatcher signal."""
return False
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update_state(self, args):
"""Get the latest data and updates the states."""
self._state = self._sabnzbd_api.get_queue_field(self._field_name)
if self._type == "speed":
self._state = round(float(self._state) / 1024, 1)
elif "size" in self._type:
self._state = round(float(self._state), 2)
self.schedule_update_ha_state()
|
import pandas as pd
import pytest
import pytz
from qstrader.simulation.event import SimulationEvent
@pytest.mark.parametrize(
"sim_event_params,compare_event_params,expected_result",
[
(
('2020-01-01 00:00:00', 'pre_market'),
('2020-01-01 00:00:00', 'pre_market'),
True,
),
(
('2020-01-01 00:00:00', 'pre_market'),
('2020-01-01 00:00:00', 'post_market'),
False,
),
(
('2020-01-01 00:00:00', 'pre_market'),
('2020-01-02 00:00:00', 'pre_market'),
False,
),
(
('2020-01-01 00:00:00', 'pre_market'),
('2020-01-02 00:00:00', 'post_market'),
False,
)
]
)
def test_sim_event_eq(
sim_event_params, compare_event_params, expected_result
):
"""
Checks that the SimulationEvent __eq__ correctly
compares SimulationEvent instances.
"""
sim_event = SimulationEvent(pd.Timestamp(sim_event_params[0], tz=pytz.UTC), sim_event_params[1])
compare_event = SimulationEvent(pd.Timestamp(compare_event_params[0], tz=pytz.UTC), compare_event_params[1])
assert expected_result == (sim_event == compare_event)
|
import logging
import threading
from pymodbus.client.sync import ModbusSerialClient, ModbusTcpClient, ModbusUdpClient
from pymodbus.transaction import ModbusRtuFramer
import voluptuous as vol
from homeassistant.components.cover import (
DEVICE_CLASSES_SCHEMA as COVER_DEVICE_CLASSES_SCHEMA,
)
from homeassistant.const import (
ATTR_STATE,
CONF_COVERS,
CONF_DELAY,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_TIMEOUT,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_UNIT,
ATTR_VALUE,
CALL_TYPE_COIL,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_CLIMATES,
CONF_CURRENT_TEMP,
CONF_CURRENT_TEMP_REGISTER_TYPE,
CONF_DATA_COUNT,
CONF_DATA_TYPE,
CONF_INPUT_TYPE,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_OFFSET,
CONF_PARITY,
CONF_PRECISION,
CONF_REGISTER,
CONF_SCALE,
CONF_STATE_CLOSED,
CONF_STATE_CLOSING,
CONF_STATE_OPEN,
CONF_STATE_OPENING,
CONF_STATUS_REGISTER,
CONF_STATUS_REGISTER_TYPE,
CONF_STEP,
CONF_STOPBITS,
CONF_TARGET_TEMP,
CONF_UNIT,
DATA_TYPE_CUSTOM,
DATA_TYPE_FLOAT,
DATA_TYPE_INT,
DATA_TYPE_UINT,
DEFAULT_HUB,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SLAVE,
DEFAULT_STRUCTURE_PREFIX,
DEFAULT_TEMP_UNIT,
MODBUS_DOMAIN as DOMAIN,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
)
_LOGGER = logging.getLogger(__name__)
BASE_SCHEMA = vol.Schema({vol.Optional(CONF_NAME, default=DEFAULT_HUB): cv.string})
CLIMATE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CURRENT_TEMP): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SLAVE): cv.positive_int,
vol.Required(CONF_TARGET_TEMP): cv.positive_int,
vol.Optional(CONF_DATA_COUNT, default=2): cv.positive_int,
vol.Optional(
CONF_CURRENT_TEMP_REGISTER_TYPE, default=CALL_TYPE_REGISTER_HOLDING
): vol.In([CALL_TYPE_REGISTER_HOLDING, CALL_TYPE_REGISTER_INPUT]),
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_FLOAT): vol.In(
[DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT, DATA_TYPE_CUSTOM]
),
vol.Optional(CONF_PRECISION, default=1): cv.positive_int,
vol.Optional(CONF_SCALE, default=1): vol.Coerce(float),
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): vol.All(
cv.time_period, lambda value: value.total_seconds()
),
vol.Optional(CONF_OFFSET, default=0): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP, default=35): cv.positive_int,
vol.Optional(CONF_MIN_TEMP, default=5): cv.positive_int,
vol.Optional(CONF_STEP, default=0.5): vol.Coerce(float),
vol.Optional(CONF_STRUCTURE, default=DEFAULT_STRUCTURE_PREFIX): cv.string,
vol.Optional(CONF_UNIT, default=DEFAULT_TEMP_UNIT): cv.string,
}
)
COVERS_SCHEMA = vol.All(
cv.has_at_least_one_key(CALL_TYPE_COIL, CONF_REGISTER),
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): vol.All(
cv.time_period, lambda value: value.total_seconds()
),
vol.Optional(CONF_DEVICE_CLASS): COVER_DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_SLAVE, default=DEFAULT_SLAVE): cv.positive_int,
vol.Optional(CONF_STATE_CLOSED, default=0): cv.positive_int,
vol.Optional(CONF_STATE_CLOSING, default=3): cv.positive_int,
vol.Optional(CONF_STATE_OPEN, default=1): cv.positive_int,
vol.Optional(CONF_STATE_OPENING, default=2): cv.positive_int,
vol.Optional(CONF_STATUS_REGISTER): cv.positive_int,
vol.Optional(
CONF_STATUS_REGISTER_TYPE,
default=CALL_TYPE_REGISTER_HOLDING,
): vol.In([CALL_TYPE_REGISTER_HOLDING, CALL_TYPE_REGISTER_INPUT]),
vol.Exclusive(CALL_TYPE_COIL, CONF_INPUT_TYPE): cv.positive_int,
vol.Exclusive(CONF_REGISTER, CONF_INPUT_TYPE): cv.positive_int,
}
),
)
SERIAL_SCHEMA = BASE_SCHEMA.extend(
{
vol.Required(CONF_BAUDRATE): cv.positive_int,
vol.Required(CONF_BYTESIZE): vol.Any(5, 6, 7, 8),
vol.Required(CONF_METHOD): vol.Any("rtu", "ascii"),
vol.Required(CONF_PORT): cv.string,
vol.Required(CONF_PARITY): vol.Any("E", "O", "N"),
vol.Required(CONF_STOPBITS): vol.Any(1, 2),
vol.Required(CONF_TYPE): "serial",
vol.Optional(CONF_TIMEOUT, default=3): cv.socket_timeout,
vol.Optional(CONF_CLIMATES): vol.All(cv.ensure_list, [CLIMATE_SCHEMA]),
vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]),
}
)
ETHERNET_SCHEMA = BASE_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_TYPE): vol.Any("tcp", "udp", "rtuovertcp"),
vol.Optional(CONF_TIMEOUT, default=3): cv.socket_timeout,
vol.Optional(CONF_DELAY, default=0): cv.positive_int,
vol.Optional(CONF_CLIMATES): vol.All(cv.ensure_list, [CLIMATE_SCHEMA]),
vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]),
}
)
SERVICE_WRITE_REGISTER_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(ATTR_UNIT): cv.positive_int,
vol.Required(ATTR_ADDRESS): cv.positive_int,
vol.Required(ATTR_VALUE): vol.Any(
cv.positive_int, vol.All(cv.ensure_list, [cv.positive_int])
),
}
)
SERVICE_WRITE_COIL_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(ATTR_UNIT): cv.positive_int,
vol.Required(ATTR_ADDRESS): cv.positive_int,
vol.Required(ATTR_STATE): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Any(SERIAL_SCHEMA, ETHERNET_SCHEMA),
],
),
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up Modbus component."""
hass.data[DOMAIN] = hub_collect = {}
for conf_hub in config[DOMAIN]:
hub_collect[conf_hub[CONF_NAME]] = ModbusHub(conf_hub)
# load platforms
for component, conf_key in (
("climate", CONF_CLIMATES),
("cover", CONF_COVERS),
):
if conf_key in conf_hub:
load_platform(hass, component, DOMAIN, conf_hub, config)
def stop_modbus(event):
"""Stop Modbus service."""
for client in hub_collect.values():
client.close()
def write_register(service):
"""Write Modbus registers."""
unit = int(float(service.data[ATTR_UNIT]))
address = int(float(service.data[ATTR_ADDRESS]))
value = service.data[ATTR_VALUE]
client_name = service.data[ATTR_HUB]
if isinstance(value, list):
hub_collect[client_name].write_registers(
unit, address, [int(float(i)) for i in value]
)
else:
hub_collect[client_name].write_register(unit, address, int(float(value)))
def write_coil(service):
"""Write Modbus coil."""
unit = service.data[ATTR_UNIT]
address = service.data[ATTR_ADDRESS]
state = service.data[ATTR_STATE]
client_name = service.data[ATTR_HUB]
hub_collect[client_name].write_coil(unit, address, state)
# do not wait for EVENT_HOMEASSISTANT_START, activate pymodbus now
for client in hub_collect.values():
client.setup()
# register function to gracefully stop modbus
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_modbus)
# Register services for modbus
hass.services.register(
DOMAIN,
SERVICE_WRITE_REGISTER,
write_register,
schema=SERVICE_WRITE_REGISTER_SCHEMA,
)
hass.services.register(
DOMAIN, SERVICE_WRITE_COIL, write_coil, schema=SERVICE_WRITE_COIL_SCHEMA
)
return True
class ModbusHub:
"""Thread safe wrapper class for pymodbus."""
def __init__(self, client_config):
"""Initialize the Modbus hub."""
# generic configuration
self._client = None
self._lock = threading.Lock()
self._config_name = client_config[CONF_NAME]
self._config_type = client_config[CONF_TYPE]
self._config_port = client_config[CONF_PORT]
self._config_timeout = client_config[CONF_TIMEOUT]
self._config_delay = 0
if self._config_type == "serial":
# serial configuration
self._config_method = client_config[CONF_METHOD]
self._config_baudrate = client_config[CONF_BAUDRATE]
self._config_stopbits = client_config[CONF_STOPBITS]
self._config_bytesize = client_config[CONF_BYTESIZE]
self._config_parity = client_config[CONF_PARITY]
else:
# network configuration
self._config_host = client_config[CONF_HOST]
self._config_delay = client_config[CONF_DELAY]
if self._config_delay > 0:
_LOGGER.warning(
"Parameter delay is accepted but not used in this version"
)
@property
def name(self):
"""Return the name of this hub."""
return self._config_name
def setup(self):
"""Set up pymodbus client."""
if self._config_type == "serial":
self._client = ModbusSerialClient(
method=self._config_method,
port=self._config_port,
baudrate=self._config_baudrate,
stopbits=self._config_stopbits,
bytesize=self._config_bytesize,
parity=self._config_parity,
timeout=self._config_timeout,
retry_on_empty=True,
)
elif self._config_type == "rtuovertcp":
self._client = ModbusTcpClient(
host=self._config_host,
port=self._config_port,
framer=ModbusRtuFramer,
timeout=self._config_timeout,
)
elif self._config_type == "tcp":
self._client = ModbusTcpClient(
host=self._config_host,
port=self._config_port,
timeout=self._config_timeout,
)
elif self._config_type == "udp":
self._client = ModbusUdpClient(
host=self._config_host,
port=self._config_port,
timeout=self._config_timeout,
)
else:
assert False
# Connect device
self.connect()
def close(self):
"""Disconnect client."""
with self._lock:
self._client.close()
def connect(self):
"""Connect client."""
with self._lock:
self._client.connect()
def read_coils(self, unit, address, count):
"""Read coils."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_coils(address, count, **kwargs)
def read_discrete_inputs(self, unit, address, count):
"""Read discrete inputs."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_discrete_inputs(address, count, **kwargs)
def read_input_registers(self, unit, address, count):
"""Read input registers."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_input_registers(address, count, **kwargs)
def read_holding_registers(self, unit, address, count):
"""Read holding registers."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_holding_registers(address, count, **kwargs)
def write_coil(self, unit, address, value):
"""Write coil."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
self._client.write_coil(address, value, **kwargs)
def write_register(self, unit, address, value):
"""Write register."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
self._client.write_register(address, value, **kwargs)
def write_registers(self, unit, address, values):
"""Write registers."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
self._client.write_registers(address, values, **kwargs)
|
from .constants import FIFF
from .tag import find_tag, has_tag
from .write import (write_int, start_block, end_block, write_float_matrix,
write_name_list)
from ..utils import logger
def _transpose_named_matrix(mat):
"""Transpose mat inplace (no copy)."""
mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow']
mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names']
mat['data'] = mat['data'].T
def _read_named_matrix(fid, node, matkind, indent=' ', transpose=False):
"""Read named matrix from the given node.
Parameters
----------
fid : file
The opened file descriptor.
node : dict
The node in the tree.
matkind : int
The type of matrix.
indent : str
    Prefix prepended to log messages.
transpose : bool
    If True, transpose the matrix. Default is False.
%(verbose)s
Returns
-------
mat : dict
The matrix data
"""
# Descend one level if necessary
if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
for k in range(node['nchild']):
if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
if has_tag(node['children'][k], matkind):
node = node['children'][k]
break
else:
logger.info(indent + 'Desired named matrix (kind = %d) not '
'available' % matkind)
return None
else:
if not has_tag(node, matkind):
logger.info(indent + 'Desired named matrix (kind = %d) not '
'available' % matkind)
return None
# Read everything we need
tag = find_tag(fid, node, matkind)
if tag is None:
raise ValueError('Matrix data missing')
else:
data = tag.data
nrow, ncol = data.shape
tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
if tag is not None and tag.data != nrow:
raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW '
'tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
if tag is not None and tag.data != ncol:
raise ValueError('Number of columns in matrix data and '
'FIFF_MNE_NCOL tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
row_names = tag.data.split(':') if tag is not None else []
tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
col_names = tag.data.split(':') if tag is not None else []
mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names,
data=data)
if transpose:
_transpose_named_matrix(mat)
return mat
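# Illustrative (hypothetical) usage: with an open FIF file descriptor `fid`
# and a tree node `node`, a named matrix of a given kind can be read and
# transposed in one call:
#   mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_FORWARD_SOLUTION,
#                            transpose=True)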
def write_named_matrix(fid, kind, mat):
"""Write named matrix from the given node.
Parameters
----------
fid : file
The opened file descriptor.
kind : int
    The kind of the matrix.
mat : dict
    The named matrix to write.
"""
# let's save ourselves from disaster
n_tot = mat['nrow'] * mat['ncol']
if mat['data'].size != n_tot:
ratio = n_tot / float(mat['data'].size)
if n_tot < mat['data'].size and ratio > 0:
ratio = 1 / ratio
raise ValueError('Cannot write matrix: row (%i) and column (%i) '
'total element (%i) mismatch with data size (%i), '
'appears to be off by a factor of %gx'
% (mat['nrow'], mat['ncol'], n_tot,
mat['data'].size, ratio))
start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow'])
write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol'])
if len(mat['row_names']) > 0:
# let's prevent unintentional stupidity
if len(mat['row_names']) != mat['nrow']:
raise ValueError('len(mat["row_names"]) != mat["nrow"]')
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names'])
if len(mat['col_names']) > 0:
# let's prevent unintentional stupidity
if len(mat['col_names']) != mat['ncol']:
raise ValueError('len(mat["col_names"]) != mat["ncol"]')
write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names'])
write_float_matrix(fid, kind, mat['data'])
end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
|
import asyncio
import collections
import hashlib
import logging
import time
import urllib.parse
import aiohttp
import async_timeout
from hangups import exceptions
logger = logging.getLogger(__name__)
CONNECT_TIMEOUT = 30
REQUEST_TIMEOUT = 30
MAX_RETRIES = 3
ORIGIN_URL = 'https://hangouts.google.com'
FetchResponse = collections.namedtuple('FetchResponse', ['code', 'body'])
class Session:
"""Session for making HTTP requests to Google.
Args:
cookies (dict): Cookies to authenticate requests with.
proxy (str): (optional) HTTP proxy URL to use for requests.
"""
def __init__(self, cookies, proxy=None):
self._proxy = proxy
# The server does not support quoting cookie values (see #498).
cookie_jar = aiohttp.CookieJar(quote_cookie=False)
timeout = aiohttp.ClientTimeout(connect=CONNECT_TIMEOUT)
self._session = aiohttp.ClientSession(
cookies=cookies, cookie_jar=cookie_jar, timeout=timeout
)
sapisid = cookies['SAPISID']
self._authorization_headers = _get_authorization_headers(sapisid)
async def fetch(self, method, url, params=None, headers=None, data=None):
"""Make an HTTP request.
Automatically uses configured HTTP proxy, and adds Google authorization
header and cookies.
Failures will be retried MAX_RETRIES times before raising NetworkError.
Args:
method (str): Request method.
url (str): Request URL.
params (dict): (optional) Request query string parameters.
headers (dict): (optional) Request headers.
data: (str): (optional) Request body data.
Returns:
FetchResponse: Response data.
Raises:
NetworkError: If the request fails.
"""
logger.debug('Sending request %s %s:\n%r', method, url, data)
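# Retry loop: `break` on success skips the for-else clause; if every attempt
# fails the else clause runs and the last error is raised as a NetworkError.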
for retry_num in range(MAX_RETRIES):
try:
async with self.fetch_raw(method, url, params=params,
headers=headers, data=data) as res:
async with async_timeout.timeout(REQUEST_TIMEOUT):
body = await res.read()
logger.debug('Received response %d %s:\n%r',
res.status, res.reason, body)
except asyncio.TimeoutError:
error_msg = 'Request timed out'
except aiohttp.ServerDisconnectedError as err:
error_msg = 'Server disconnected error: {}'.format(err)
except (aiohttp.ClientError, ValueError) as err:
error_msg = 'Request connection error: {}'.format(err)
else:
break
logger.info('Request attempt %d failed: %s', retry_num, error_msg)
else:
logger.info('Request failed after %d attempts', MAX_RETRIES)
raise exceptions.NetworkError(error_msg)
if res.status != 200:
logger.info('Request returned unexpected status: %d %s',
res.status, res.reason)
raise exceptions.NetworkError(
'Request returned unexpected status: {}: {}'
.format(res.status, res.reason)
)
return FetchResponse(res.status, body)
def fetch_raw(self, method, url, params=None, headers=None, data=None):
"""Make an HTTP request using aiohttp directly.
Automatically uses configured HTTP proxy, and adds Google authorization
header and cookies.
Args:
method (str): Request method.
url (str): Request URL.
params (dict): (optional) Request query string parameters.
headers (dict): (optional) Request headers.
data: (str): (optional) Request body data.
Returns:
aiohttp._RequestContextManager: ContextManager for a HTTP response.
Raises:
See ``aiohttp.ClientSession.request``.
"""
# Ensure we don't accidentally send the authorization header to a
# non-Google domain:
if not urllib.parse.urlparse(url).hostname.endswith('.google.com'):
raise Exception('expected google.com domain')
headers = headers or {}
headers.update(self._authorization_headers)
return self._session.request(
method, url, params=params, headers=headers, data=data,
proxy=self._proxy
)
async def close(self):
"""Close the underlying aiohttp.ClientSession."""
await self._session.close()
def _get_authorization_headers(sapisid_cookie):
"""Return authorization headers for API request."""
# It doesn't seem to matter what the url and time are as long as they are
# consistent.
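# The resulting header value has the form
# "SAPISIDHASH <time>_<sha1(time SAPISID origin)>", computed below from the
# SAPISID cookie and the Hangouts origin URL.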
time_sec = int(time.time())
auth_string = '{} {} {}'.format(time_sec, sapisid_cookie, ORIGIN_URL)
auth_hash = hashlib.sha1(auth_string.encode()).hexdigest()
sapisidhash = 'SAPISIDHASH {}_{}'.format(time_sec, auth_hash)
return {
'authorization': sapisidhash,
'origin': ORIGIN_URL,
'x-goog-authuser': '0',
}
|
import json
import logging
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.openstack import utils as os_utils
FLAGS = flags.FLAGS
STANDARD = 'standard'
DISK_TYPE = {
disk.STANDARD: STANDARD,
}
def CreateVolume(resource, name):
"""Creates a remote (Cinder) block volume."""
vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'create', name)
vol_cmd.flags['availability-zone'] = resource.zone
vol_cmd.flags['size'] = resource.disk_size
if FLAGS.openstack_volume_type:
vol_cmd.flags['type'] = FLAGS.openstack_volume_type
stdout, _, _ = vol_cmd.Issue()
vol_resp = json.loads(stdout)
return vol_resp
def CreateBootVolume(resource, name, image):
"""Creates a remote (Cinder) block volume with a boot image."""
vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'create', name)
vol_cmd.flags['availability-zone'] = resource.zone
vol_cmd.flags['image'] = image
vol_cmd.flags['size'] = (resource.disk_size or
GetImageMinDiskSize(resource, image))
stdout, _, _ = vol_cmd.Issue()
vol_resp = json.loads(stdout)
return vol_resp
def GetImageMinDiskSize(resource, image):
"""Returns minimum disk size required by the image."""
image_cmd = os_utils.OpenStackCLICommand(resource, 'image', 'show', image)
stdout, _, _ = image_cmd.Issue()
image_resp = json.loads(stdout)
volume_size = max((int(image_resp['min_disk']),
resource.disk_size,))
return volume_size
def DeleteVolume(resource, volume_id):
"""Deletes a remote (Cinder) block volume."""
vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'delete',
volume_id)
del vol_cmd.flags['format'] # volume delete does not support json output
vol_cmd.Issue()
@vm_util.Retry(poll_interval=5, max_retries=-1, timeout=300, log_errors=False,
retryable_exceptions=errors.Resource.RetryableCreationError)
def WaitForVolumeCreation(resource, volume_id):
"""Waits until volume is available"""
vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'show', volume_id)
stdout, stderr, _ = vol_cmd.Issue()
if stderr:
raise errors.Error(stderr)
resp = json.loads(stdout)
if resp['status'] != 'available':
msg = 'Volume is not ready. Retrying to check status.'
raise errors.Resource.RetryableCreationError(msg)
disk.RegisterDiskTypeMap(providers.OPENSTACK, DISK_TYPE)
class OpenStackDiskSpec(disk.BaseDiskSpec):
"""Object holding the information needed to create an OpenStackDisk.
Attributes:
disk_size: None or int. Size of the disk in GB.
volume_type: None or string. Volume type to be used to create a
block storage volume.
"""
CLOUD = providers.OPENSTACK
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(OpenStackDiskSpec, cls)._ApplyFlags(config_values, flag_values)
if (flag_values['openstack_volume_size'].present
and not flag_values['data_disk_size'].present):
config_values['disk_size'] = flag_values.openstack_volume_size
else:
config_values['disk_size'] = flag_values.data_disk_size
if flag_values['openstack_volume_type'].present:
config_values['volume_type'] = flag_values.openstack_volume_type
@classmethod
def _GetOptionDecoderConstructions(cls):
decoders = super(OpenStackDiskSpec, cls)._GetOptionDecoderConstructions()
decoders.update(
{
'volume_type': (option_decoders.StringDecoder,
{'default': None, 'none_ok': True},)
}
)
return decoders
class OpenStackDisk(disk.BaseDisk):
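# Remote (Cinder) block volume resource. _Create/_Delete manage the volume
# itself; Attach/Detach wire it to a VM via `openstack server add/remove
# volume` and record the resulting device path.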
def __init__(self, disk_spec, name, zone, image=None):
super(OpenStackDisk, self).__init__(disk_spec)
self.attached_vm_id = None
self.image = image
self.name = name
self.zone = zone
self.id = None
def _Create(self):
vol_resp = CreateVolume(self, self.name)
self.id = vol_resp['id']
WaitForVolumeCreation(self, self.id)
def _Delete(self):
if self.id is None:
logging.info('Volume %s was not created. Skipping deletion.', self.name)
return
DeleteVolume(self, self.id)
self._WaitForVolumeDeletion()
def _Exists(self):
if self.id is None:
return False
cmd = os_utils.OpenStackCLICommand(self, 'volume', 'show', self.id)
stdout, stderr, _ = cmd.Issue(suppress_warning=True)
if stdout and stdout.strip():
return stdout
return not stderr
def Attach(self, vm):
self._AttachVolume(vm)
self._WaitForVolumeAttachment(vm)
self.attached_vm_id = vm.id
def Detach(self):
self._DetachVolume()
self.attached_vm_id = None
self.device_path = None
def _AttachVolume(self, vm):
if self.id is None:
raise errors.Error('Cannot attach remote volume %s' % self.name)
if vm.id is None:
msg = 'Cannot attach remote volume %s to non-existing %s VM' % (self.name,
vm.name)
raise errors.Error(msg)
cmd = os_utils.OpenStackCLICommand(
self, 'server', 'add', 'volume', vm.id, self.id)
del cmd.flags['format']
_, stderr, _ = cmd.Issue()
if stderr:
raise errors.Error(stderr)
@vm_util.Retry(poll_interval=1, max_retries=-1, timeout=300, log_errors=False,
retryable_exceptions=errors.Resource.RetryableCreationError)
def _WaitForVolumeAttachment(self, vm):
if self.id is None:
return
cmd = os_utils.OpenStackCLICommand(self, 'volume', 'show', self.id)
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Error(stderr)
resp = json.loads(stdout)
attachments = resp['attachments']
self.device_path = self._GetDeviceFromAttachment(attachments)
msg = 'Remote volume %s has been attached to %s.' % (self.name, vm.name)
logging.info(msg)
def _GetDeviceFromAttachment(self, attachments):
device = None
for attachment in attachments:
if attachment['volume_id'] == self.id:
device = attachment['device']
if not device:
msg = '%s is not yet attached. Retrying to check status.' % self.name
raise errors.Resource.RetryableCreationError(msg)
return device
def _DetachVolume(self):
if self.id is None:
raise errors.Error('Cannot detach remote volume %s' % self.name)
if self.attached_vm_id is None:
raise errors.Error('Cannot detach remote volume from a non-existing VM.')
cmd = os_utils.OpenStackCLICommand(
self, 'server', 'remove', 'volume', self.attached_vm_id, self.id)
del cmd.flags['format']
_, stderr, _ = cmd.Issue()
if stderr:
raise errors.Error(stderr)
@vm_util.Retry(poll_interval=1, max_retries=-1, timeout=300, log_errors=False,
retryable_exceptions=errors.Resource.RetryableDeletionError)
def _WaitForVolumeDeletion(self):
if self.id is None:
return
cmd = os_utils.OpenStackCLICommand(self, 'volume', 'show', self.id)
stdout, stderr, _ = cmd.Issue(suppress_warning=True)
if stderr.strip():
return # Volume could not be found, inferred that has been deleted.
resp = json.loads(stdout)
if resp['status'] in ('building', 'available', 'in-use', 'deleting',):
msg = ('Volume %s has not yet been deleted. Retrying to check status.'
% self.id)
raise errors.Resource.RetryableDeletionError(msg)
|
from flexx import app, event, ui
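# Demo of where errors surface in a Flexx app: four buttons trigger a
# deliberate RuntimeError in, respectively, a JS action, a JS reaction, a
# Python action and a Python reaction (the last one via a custom JS event
# relayed to the PyComponent).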
class ErrorsPy(app.PyComponent):
def init(self):
self.js = ErrorsJS(self)
@event.action
def do_something_stupid(self):
self.raise_error()
def raise_error(self):
raise RuntimeError('Deliberate error')
@event.reaction('!js.b4_pointer_click')
def error_in_Py_reaction(self, *events):
self.raise_error()
class ErrorsJS(ui.Widget):
def init(self, pycomponent):
self.py = pycomponent
with ui.VBox():
self.b1 = ui.Button(text='Raise error in JS action')
self.b2 = ui.Button(text='Raise error in JS reaction')
self.b3 = ui.Button(text='Raise error in Python action')
self.b4 = ui.Button(text='Raise error in Python reaction')
ui.Widget(flex=1) # spacer
@event.action
def do_something_stupid(self):
self.raise_error(0)
def raise_error(self):
raise RuntimeError('Deliberate error')
# Handlers for four buttons
@event.reaction('b1.pointer_click')
def error_in_JS_action(self, *events):
self.do_something_stupid()
@event.reaction('b2.pointer_click')
def error_in_JS_reaction(self, *events):
self.raise_error()
@event.reaction('b3.pointer_click')
def error_in_Py_action(self, *events):
self.py.do_something_stupid()
@event.reaction('b4.pointer_click')
def error_in_Py_reaction(self, *events):
self.emit('b4_pointer_click')
if __name__ == '__main__':
m = app.launch(ErrorsPy, 'browser')
app.run()
|
import logging
import voluptuous as vol
from webexteamssdk import ApiError, WebexTeamsAPI, exceptions
from homeassistant.components.notify import (
ATTR_TITLE,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_TOKEN
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ROOM_ID = "room_id"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_TOKEN): cv.string, vol.Required(CONF_ROOM_ID): cv.string}
)
def get_service(hass, config, discovery_info=None):
"""Get the CiscoWebexTeams notification service."""
client = WebexTeamsAPI(access_token=config[CONF_TOKEN])
try:
# Validate the token & room_id
client.rooms.get(config[CONF_ROOM_ID])
except exceptions.ApiError as error:
_LOGGER.error(error)
return None
return CiscoWebexTeamsNotificationService(client, config[CONF_ROOM_ID])
class CiscoWebexTeamsNotificationService(BaseNotificationService):
"""The Cisco Webex Teams Notification Service."""
def __init__(self, client, room):
"""Initialize the service."""
self.room = room
self.client = client
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
title = ""
if kwargs.get(ATTR_TITLE) is not None:
title = f"{kwargs.get(ATTR_TITLE)}<br>"
try:
self.client.messages.create(roomId=self.room, html=f"{title}{message}")
except ApiError as api_error:
_LOGGER.error(
"Could not send CiscoWebexTeams notification. Error: %s", api_error
)
|
from __future__ import unicode_literals, division
import sys
import codecs
import argparse
# hack for python2/3 compatibility
from io import open
argparse.open = open
def create_parser(subparsers=None):
if subparsers:
parser = subparsers.add_parser('segment-char-ngrams',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="segment rare words into character n-grams")
else:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="segment rare words into character n-grams")
parser.add_argument(
'--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH',
help="Input file (default: standard input).")
parser.add_argument(
'--vocab', type=argparse.FileType('r'), metavar='PATH',
required=True,
help="Vocabulary file.")
parser.add_argument(
'--shortlist', type=int, metavar='INT', default=0,
help="do not segment INT most frequent words in vocabulary (default: '%(default)s')).")
parser.add_argument(
'-n', type=int, metavar='INT', default=2,
help="segment rare words into character n-grams of size INT (default: '%(default)s')).")
parser.add_argument(
'--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
metavar='PATH',
help="Output file (default: standard output)")
parser.add_argument(
'--separator', '-s', type=str, default='@@', metavar='STR',
help="Separator between non-final subword units (default: '%(default)s'))")
return parser
def segment_char_ngrams(args):
vocab = [line.split()[0] for line in args.vocab if len(line.split()) == 2]
vocab = dict((y,x) for (x,y) in enumerate(vocab))
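# `vocab` now maps each word to its frequency rank (0 = most frequent). A word
# is kept whole only if it is in the vocabulary and its rank does not exceed
# --shortlist; everything else is split into character n-grams joined by the
# separator, e.g. with n=2 and '@@' a rare word "hello" would be emitted
# roughly as "he@@ ll@@ o ".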
for line in args.input:
for word in line.split():
if word not in vocab or vocab[word] > args.shortlist:
i = 0
while i*args.n < len(word):
args.output.write(word[i*args.n:i*args.n+args.n])
i += 1
if i*args.n < len(word):
args.output.write(args.separator)
args.output.write(' ')
else:
args.output.write(word + ' ')
args.output.write('\n')
if __name__ == '__main__':
# python 2/3 compatibility
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
parser = create_parser()
args = parser.parse_args()
if sys.version_info < (3, 0):
args.separator = args.separator.decode('UTF-8')
# read/write files as UTF-8
args.vocab = codecs.open(args.vocab.name, encoding='utf-8')
if args.input.name != '<stdin>':
args.input = codecs.open(args.input.name, encoding='utf-8')
if args.output.name != '<stdout>':
args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
segment_char_ngrams(args)
|
from vine import transform
from .message import AsyncMessage
__all__ = ['AsyncQueue']
def list_first(rs):
"""Get the first item in a list, or None if list empty."""
return rs[0] if len(rs) == 1 else None
class AsyncQueue():
"""Async SQS Queue."""
def __init__(self, connection=None, url=None, message_class=AsyncMessage):
self.connection = connection
self.url = url
self.message_class = message_class
self.visibility_timeout = None
def _NA(self, *args, **kwargs):
raise NotImplementedError()
count_slow = dump = save_to_file = save_to_filename = save = \
save_to_s3 = load_from_s3 = load_from_file = load_from_filename = \
load = clear = _NA
def get_attributes(self, attributes='All', callback=None):
return self.connection.get_queue_attributes(
self, attributes, callback,
)
def set_attribute(self, attribute, value, callback=None):
return self.connection.set_queue_attribute(
self, attribute, value, callback,
)
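# vine.transform partially applies the filter: when the attribute response
# arrives, _coerce_field_value extracts the requested field and casts it
# (here to int) before the user callback sees the value.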
def get_timeout(self, callback=None, _attr='VisibilityTimeout'):
return self.get_attributes(
_attr, transform(
self._coerce_field_value, callback, _attr, int,
),
)
def _coerce_field_value(self, key, type, response):
return type(response[key])
def set_timeout(self, visibility_timeout, callback=None):
return self.set_attribute(
'VisibilityTimeout', visibility_timeout,
transform(
self._on_timeout_set, callback,
)
)
def _on_timeout_set(self, visibility_timeout):
if visibility_timeout:
self.visibility_timeout = visibility_timeout
return self.visibility_timeout
def add_permission(self, label, aws_account_id, action_name,
callback=None):
return self.connection.add_permission(
self, label, aws_account_id, action_name, callback,
)
def remove_permission(self, label, callback=None):
return self.connection.remove_permission(self, label, callback)
def read(self, visibility_timeout=None, wait_time_seconds=None,
callback=None):
return self.get_messages(
1, visibility_timeout,
wait_time_seconds=wait_time_seconds,
callback=transform(list_first, callback),
)
def write(self, message, delay_seconds=None, callback=None):
return self.connection.send_message(
self, message.get_body_encoded(), delay_seconds,
callback=transform(self._on_message_sent, callback, message),
)
def write_batch(self, messages, callback=None):
return self.connection.send_message_batch(
self, messages, callback=callback,
)
def _on_message_sent(self, orig_message, new_message):
orig_message.id = new_message.id
orig_message.md5 = new_message.md5
return new_message
def get_messages(self, num_messages=1, visibility_timeout=None,
attributes=None, wait_time_seconds=None, callback=None):
return self.connection.receive_message(
self, number_messages=num_messages,
visibility_timeout=visibility_timeout,
attributes=attributes,
wait_time_seconds=wait_time_seconds,
callback=callback,
)
def delete_message(self, message, callback=None):
return self.connection.delete_message(self, message, callback)
def delete_message_batch(self, messages, callback=None):
return self.connection.delete_message_batch(
self, messages, callback=callback,
)
def change_message_visibility_batch(self, messages, callback=None):
return self.connection.change_message_visibility_batch(
self, messages, callback=callback,
)
def delete(self, callback=None):
return self.connection.delete_queue(self, callback=callback)
def count(self, page_size=10, vtimeout=10, callback=None,
_attr='ApproximateNumberOfMessages'):
return self.get_attributes(
_attr, callback=transform(
self._coerce_field_value, callback, _attr, int,
),
)
|
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF', b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
def write_binary(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'wb') as open_file:
open_file.write(payload)
def read_string(driver_path, device_file):
with open(os.path.join(driver_path, device_file), 'r') as open_file:
return open_file.read().rstrip('\n')
def write_string(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'w') as open_file:
open_file.write(payload)
def find_devices(vid, pid):
driver_paths = glob.glob(os.path.join('/sys/bus/hid/drivers/razeraccessory', '*:{0:04X}:{1:04X}.*'.format(vid, pid)))
for driver_path in driver_paths:
device_type_path = os.path.join(driver_path, 'device_type')
if os.path.exists(device_type_path):
yield driver_path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-standard', action='store_true')
parser.add_argument('--skip-custom', action='store_true')
parser.add_argument('--skip-game-led', action='store_true')
parser.add_argument('--skip-macro-led', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
found_chroma = False
for index, driver_path in enumerate(find_devices(0x1532, 0x0C00), start=1):
found_chroma = True
print("Razer Firefly {0}\n".format(index))
print("Driver version: {0}".format(read_string(driver_path, 'version')))
print("Driver firmware version: {0}".format(read_string(driver_path, 'firmware_version')))
print("Device serial: {0}".format(read_string(driver_path, 'device_serial')))
print("Device type: {0}".format(read_string(driver_path, 'device_type')))
print("Device mode: {0}".format(read_string(driver_path, 'device_mode')))
# Set to static red so that we have something standard
write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
if not args.skip_standard:
print("Starting brightness test. Press enter to begin.")
input()
print("Max brightness...", end='')
write_string(driver_path, 'matrix_brightness', '255')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Half brightness...", end='')
write_string(driver_path, 'matrix_brightness', '128')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Zero brightness...", end='')
write_string(driver_path, 'matrix_brightness', '0')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
write_string(driver_path, 'matrix_brightness', '255')
print("Starting other colour effect tests. Press enter to begin.")
input()
print("Green Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
time.sleep(5)
print("Cyan Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
time.sleep(5)
print("Spectrum")
write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
time.sleep(10)
print("None")
write_binary(driver_path, 'matrix_effect_none', b'\x00')
time.sleep(5)
print("Wave Left")
write_string(driver_path, 'matrix_effect_wave', '1')
time.sleep(5)
print("Wave Right")
write_string(driver_path, 'matrix_effect_wave', '2')
time.sleep(5)
print("Breathing random")
write_binary(driver_path, 'matrix_effect_breath', b'\x00')
time.sleep(10)
print("Breathing red")
write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
time.sleep(10)
print("Breathing blue-green")
write_binary(driver_path, 'matrix_effect_breath', b'\x00\xFF\x00\x00\x00\xFF')
time.sleep(10)
if not args.skip_custom:
# Custom LEDs all rows
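# matrix_custom_frame rows are encoded as: row index, start column, stop
# column, then one RGB triplet per LED in that span. The Firefly exposes a
# single row of 15 LEDs (columns 0x00-0x0E).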
payload_all = b'\x00\x00\x0E'
for i in range(0, 15): # 15 colours 0x00-0x0E
payload_all += random.choice(COLOURS)
payload_m1_5 = b''
for led in (0x00, 0x0E):
led_byte = led.to_bytes(1, byteorder='big')
payload_m1_5 += b'\x00' + led_byte + led_byte + b'\xFF\xFF\xFF'
print("Custom LED matrix colours test. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_all)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
print("Custom LED matrix partial colours test. First and last led to white. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_m1_5)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
time.sleep(0.5)
print("Finished")
if not found_chroma:
print("No Fireflies found")
|
class Device(object):
"""
Razer Device (High level not dbus)
"""
def __init__(self, device_id, device_serial, device_dbus_object):
self._parent = None
self._id = device_id
self._serial = device_serial
self._dbus = device_dbus_object
# Register as parent
self._dbus.register_parent(self)
@property
def device_id(self):
"""
Device's USB ID String
:return: Device ID
:rtype: str
"""
return self._id
@property
def serial(self):
"""
Device's Serial String
:return: Serial
:rtype: str
"""
return self._serial
@property
def dbus(self):
"""
Device's DBus object
:return: DBus Object
:rtype: openrazer_daemon.hardware.device_base.__RazerDevice
"""
return self._dbus
def register_parent(self, parent):
"""
Register the parent as an observer to be optionally notified (sends to other devices)
:param parent: Observer
:type parent: object
"""
self._parent = parent
def notify_parent(self, msg):
"""
Notify observers with msg
:param msg: Tuple with first element a string
:type msg: tuple
"""
self._parent.notify(self, msg)
def notify_child(self, msg):
"""
Receive observer messages
:param msg: Tuple with first element a string
:type msg: tuple
"""
# Message from DBus object
self._dbus.notify(msg)
class DeviceCollection(object):
"""
Multimap of devices
Can be referenced by either ID or serial
"""
def __init__(self):
self._id_map = {}
self._serial_map = {}
def add(self, device_id, device_serial, device_dbus):
"""
Add device to collection
:param device_id: Device's USB ID
:type device_id: str
:param device_serial: Device's Serial String
:type device_serial: str
:param device_dbus: Device's DBus object
:type device_dbus: openrazer_daemon.hardware.device_base.__RazerDevice
"""
device_object = Device(device_id, device_serial, device_dbus)
device_object.register_parent(self)
self._id_map[device_id] = device_object
self._serial_map[device_serial] = device_object
def remove(self, key):
"""
Remove the object referenced by the given ID or serial from the collection
:param key: ID or serial
:type key: str
"""
self.__delitem__(key)
def get(self, item):
"""
Get device object by ID or serial
:param item: ID or serial
:type item: str
:return: Device object
:rtype: Device
:raises IndexError: If item not found
"""
return self.__getitem__(item)
def id_items(self):
"""
Get (id, Device) iterator
:return: Items method from the id map
:rtype: list of tuple of str, Device
"""
return self._id_map.items()
def serial_items(self):
"""
Get (serial, Device) iterator
:return: Items method from the serial map
:rtype: list of tuple of str, Device
"""
return self._serial_map.items()
def serials(self):
"""
Get list of serials
:return: Serial list
:rtype: list of str
"""
return list(self._serial_map.keys())
def __len__(self):
"""
Get length of collection
:return: Length
:rtype: int
"""
return len(self._id_map)
def __getitem__(self, item):
"""
Get device object by ID or serial
:param item: ID or serial
:type item: str
:return: Device object
:rtype: Device
:raises IndexError: If item not found
"""
if item in self._id_map:
return self._id_map[item]
elif item in self._serial_map:
return self._serial_map[item]
else:
raise IndexError()
def __delitem__(self, key):
"""
Remove the object referenced by the given ID or serial from the collection
:param key: ID or serial
:type key: str
"""
if key in self._id_map:
serial = self._id_map[key].serial
self._id_map.pop(key, None)
self._serial_map.pop(serial, None)
elif key in self._serial_map:
device_id = self._serial_map[key].device_id
self._id_map.pop(device_id, None)
self._serial_map.pop(key, None)
def __contains__(self, item):
"""
If ID or serial exists in datastructure
:param item: ID or serial
:type item: str
:return: True if ID or serial exists
:rtype: bool
"""
return item in self._id_map or item in self._serial_map
def __iter__(self):
"""
Get devices
:return: Devices
:rtype: list of Device
"""
return iter(self._id_map.values())
@property
def devices(self):
"""
Get device list
:return: List of devices
:rtype: list of Device
"""
return list(self._id_map.values())
def notify(self, active_child, msg):
"""
Send messages between children
:param active_child: Child sending the message
:type active_child: Device
:param msg: Message
:type msg: tuple
"""
for child in self._id_map.values():
if child is not active_child:
child.notify_child(msg)
|
from pydeconz.sensor import Thermostat
from homeassistant.components.climate import DOMAIN, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import ATTR_OFFSET, ATTR_VALVE, NEW_SENSOR
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
HVAC_MODES = {HVAC_MODE_AUTO: "auto", HVAC_MODE_HEAT: "heat", HVAC_MODE_OFF: "off"}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ climate devices.
Thermostats are based on the same device class as sensors in deCONZ.
"""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_climate(sensors):
"""Add climate devices from deCONZ."""
entities = []
for sensor in sensors:
if (
sensor.type in Thermostat.ZHATYPE
and sensor.uniqueid not in gateway.entities[DOMAIN]
and (
gateway.option_allow_clip_sensor
or not sensor.type.startswith("CLIP")
)
):
entities.append(DeconzThermostat(sensor, gateway))
if entities:
async_add_entities(entities)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_SENSOR), async_add_climate
)
)
async_add_climate(gateway.api.sensors.values())
class DeconzThermostat(DeconzDevice, ClimateEntity):
"""Representation of a deCONZ thermostat."""
TYPE = DOMAIN
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
for hass_hvac_mode, device_mode in HVAC_MODES.items():
if self._device.mode == device_mode:
return hass_hvac_mode
if self._device.state_on:
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> list:
"""Return the list of available hvac operation modes."""
return list(HVAC_MODES)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._device.temperature
@property
def target_temperature(self):
"""Return the target temperature."""
return self._device.heatsetpoint
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if ATTR_TEMPERATURE not in kwargs:
raise ValueError(f"Expected attribute {ATTR_TEMPERATURE}")
data = {"heatsetpoint": kwargs[ATTR_TEMPERATURE] * 100}
await self._device.async_set_config(data)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
if hvac_mode not in HVAC_MODES:
raise ValueError(f"Unsupported mode {hvac_mode}")
data = {"mode": HVAC_MODES[hvac_mode]}
await self._device.async_set_config(data)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the state attributes of the thermostat."""
attr = {}
if self._device.offset:
attr[ATTR_OFFSET] = self._device.offset
if self._device.valve is not None:
attr[ATTR_VALVE] = self._device.valve
return attr
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import unittest
from absl import flags
import mock
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import aws_nfs_service
from perfkitbenchmarker.providers.aws import aws_virtual_machine
from perfkitbenchmarker.providers.aws import util
from tests import pkb_common_test_case
import six
FLAGS = flags.FLAGS
_COMPONENT = 'test_component'
_SUBNET_ID = 'subnet1'
_SECURITY_GROUP_ID = 'group1'
_AWS_ZONE = 'us-east-1d'
_AWS_REGION = 'us-east-1'
_AWS_CMD_PREFIX = ['aws', '--output', 'json', '--region', 'us-east-1', 'efs']
_FILE_ID = 'FSID'
_MOUNT_ID = 'FSMT'
_BENCHMARK = 'fio'
_RUN_URI = 'fb810a9b'
_OWNER = 'joe'
_NFS_TOKEN = 'nfs-token-%s' % _RUN_URI
_TIER = 'generalPurpose'
_THROUGHPUT_MODE = 'bursting'
_PROVISIONED_THROUGHPUT = 100.0
AwsResponses = collections.namedtuple('Responses', 'create describe')
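# Canned AWS EFS JSON payloads: each pair holds the response to the initial
# create call and to the later describe poll once the resource is available.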
_FILER = AwsResponses({
'SizeInBytes': {
'Value': 0
},
'CreationToken': _NFS_TOKEN,
'CreationTime': 1513322422.0,
'PerformanceMode': 'generalPurpose',
'FileSystemId': _FILE_ID,
'NumberOfMountTargets': 0,
'LifeCycleState': 'creating',
'OwnerId': '835761027970'
}, {
'FileSystems': [{
'SizeInBytes': {
'Value': 6144
},
'CreationToken': _NFS_TOKEN,
'CreationTime': 1513322422.0,
'PerformanceMode': 'generalPurpose',
'FileSystemId': _FILE_ID,
'NumberOfMountTargets': 0,
'LifeCycleState': 'available',
'OwnerId': '835761027970'
}]
})
_MOUNT = AwsResponses({
'MountTargetId': _MOUNT_ID,
'NetworkInterfaceId': 'eni-9956273b',
'FileSystemId': _FILE_ID,
'LifeCycleState': 'creating',
'SubnetId': _SUBNET_ID,
'OwnerId': '835761027970',
'IpAddress': '10.0.0.182'
}, {
'MountTargets': [{
'MountTargetId': _MOUNT_ID,
'NetworkInterfaceId': 'eni-9956273b',
'FileSystemId': _FILE_ID,
'LifeCycleState': 'available',
'SubnetId': _SUBNET_ID,
'OwnerId': '835761027970',
'IpAddress': '10.0.0.182'
}]
})
class BaseTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(BaseTest, self).setUp()
self.issue_cmd = mock.Mock()
self.aws_network_spec = self._CreatePatched(aws_network, 'AwsNetwork')
mock_network = mock.Mock()
mock_network.subnet.id = 'subnet1'
mock_network.vpc.default_security_group_id = 'group1'
self.aws_network_spec.GetNetworkFromNetworkSpec.return_value = mock_network
def SetFlags(self, **kwargs):
FLAGS['aws_user_name'].parse('aws_user')
FLAGS['nfs_timeout_hard'].parse(True)
FLAGS['benchmarks'].parse([_BENCHMARK])
FLAGS['nfs_rsize'].parse(1048576)
FLAGS['nfs_wsize'].parse(1048576)
FLAGS['nfs_tier'].parse('generalPurpose')
FLAGS['nfs_timeout'].parse(60)
FLAGS['default_timeout'].parse(10)
FLAGS['owner'].parse(_OWNER)
FLAGS['nfs_retries'].parse(2)
FLAGS['run_uri'].parse(_RUN_URI)
FLAGS['nfs_version'].parse('4.1')
FLAGS['temp_dir'].parse('/non/existent/temp/dir')
FLAGS['aws_efs_token'].parse('')
FLAGS['aws_delete_file_system'].parse(True)
FLAGS['efs_throughput_mode'].parse(_THROUGHPUT_MODE)
FLAGS['efs_provisioned_throughput'].parse(_PROVISIONED_THROUGHPUT)
for key, value in six.iteritems(kwargs):
FLAGS[key].parse(value)
def _CreatePatched(self, module, method_name):
patcher = mock.patch.object(module, method_name)
mock_method = patcher.start()
self.addCleanup(patcher.stop)
return mock_method
def _CreateDiskSpec(self, fs_type):
return aws_disk.AwsDiskSpec(
_COMPONENT,
num_striped_disks=1,
disk_type=fs_type if fs_type == disk.NFS else disk.LOCAL,
mount_point='/scratch')
def _CreateMockNetwork(self):
mock_network = mock.Mock()
mock_network.subnet.id = _SUBNET_ID
mock_network.vpc.default_security_group_id = _SECURITY_GROUP_ID
return mock_network
def _CreateNfsService(self, nfs_tier=''):
self.SetFlags(nfs_tier=nfs_tier)
disk_spec = self._CreateDiskSpec(disk.NFS)
nfs = aws_nfs_service.AwsNfsService(disk_spec, _AWS_ZONE)
nfs.aws_commands = self.issue_cmd
nfs.networks = [self._CreateMockNetwork()]
return nfs
class AwsNfsServiceTest(BaseTest):
def _CreateFiler(self):
nfs = self._CreateNfsService()
self.issue_cmd.CreateFiler.return_value = _FILE_ID
nfs._CreateFiler()
return nfs
def _CreateMount(self):
nfs = self._CreateNfsService()
nfs.filer_id = _FILE_ID
self.issue_cmd.CreateMount.return_value = _MOUNT_ID
nfs._CreateMount()
return nfs
# create NFS resource
def testCreateNfsService(self):
nfs = self._CreateNfsService()
self.assertEqual(_AWS_REGION, nfs.region)
self.issue_cmd.assert_not_called()
def testInvalidNfsTier(self):
with self.assertRaises(errors.Config.InvalidValue):
self._CreateNfsService('INVALID_TIER')
def testValidNfsTier(self):
nfs = self._CreateNfsService('maxIO')
self.assertEqual('maxIO', nfs.nfs_tier)
def testNoNfsTier(self):
nfs = self._CreateNfsService()
self.assertEqual('generalPurpose', nfs.nfs_tier)
# tests for file system resource
def testCreateFiler(self):
nfs = self._CreateFiler()
self.assertEqual(_FILE_ID, nfs.filer_id)
self.issue_cmd.CreateFiler.assert_called_with(
_NFS_TOKEN, _TIER, _THROUGHPUT_MODE, _PROVISIONED_THROUGHPUT)
def testDeleteFiler(self):
nfs = self._CreateFiler()
nfs._DeleteFiler()
self.issue_cmd.DeleteFiler.assert_called_with(_FILE_ID)
def testDeleteFilerWithoutDeletingMountFirst(self):
nfs = self._CreateFiler()
nfs._CreateMount()
self.issue_cmd.reset_mock()
with self.assertRaises(errors.Resource.RetryableDeletionError):
nfs._DeleteFiler()
self.issue_cmd.assert_not_called()
# tests for mount resource
def testCreateMount(self):
nfs = self._CreateMount()
self.assertEqual(_MOUNT_ID, nfs.mount_id)
self.issue_cmd.CreateMount.assert_called_with(_FILE_ID, _SUBNET_ID,
_SECURITY_GROUP_ID)
def testCreateMountNoFiler(self):
nfs = self._CreateNfsService()
self.issue_cmd.reset_mock()
with self.assertRaises(errors.Resource.CreationError):
nfs._CreateMount()
self.issue_cmd.assert_not_called()
def testDeleteMount(self):
nfs = self._CreateMount()
self.issue_cmd.reset_mock()
nfs._DeleteMount()
self.issue_cmd.DeleteMount.assert_called_with(_MOUNT_ID)
def testFullLifeCycle(self):
# summation of the testCreate and testDelete calls
nfs = self._CreateNfsService()
self.issue_cmd.CreateFiler.return_value = _FILE_ID
self.issue_cmd.CreateMount.return_value = _MOUNT_ID
nfs.Create()
nfs.Delete()
self.issue_cmd.CreateFiler.assert_called_with(
_NFS_TOKEN, _TIER, _THROUGHPUT_MODE, _PROVISIONED_THROUGHPUT)
self.issue_cmd.AddTagsToFiler.assert_called_with(_FILE_ID)
self.issue_cmd.WaitUntilFilerAvailable.assert_called_with(_FILE_ID)
self.issue_cmd.CreateMount.assert_called_with(_FILE_ID, _SUBNET_ID,
_SECURITY_GROUP_ID)
self.issue_cmd.DeleteMount.assert_called_with(_MOUNT_ID)
self.issue_cmd.DeleteFiler.assert_called_with(_FILE_ID)
class AwsVirtualMachineTest(BaseTest):
def _CreateMockVm(self):
self._CreatePatched(aws_network, 'AwsNetwork')
self._CreatePatched(aws_network, 'AwsFirewall')
vm_spec = aws_virtual_machine.AwsVmSpec(
_COMPONENT, zone=_AWS_ZONE, machine_type='m2.2xlarge')
aws_machine = aws_virtual_machine.Rhel7BasedAwsVirtualMachine(vm_spec)
aws_machine.RemoteCommand = mock.Mock()
aws_machine.RemoteHostCommand = mock.Mock()
return aws_machine
def _SetBmSpec(self, nfs):
bm_spec = mock.Mock()
bm_spec.nfs_service = nfs
get_spec = self._CreatePatched(context, 'GetThreadBenchmarkSpec')
get_spec.return_value = bm_spec
def _CallCreateScratchDisk(self, fs_type):
nfs = self._CreateNfsService()
self.issue_cmd.CreateFiler.return_value = _FILE_ID
self.issue_cmd.CreateMount.return_value = _MOUNT_ID
nfs.Create()
self._SetBmSpec(nfs)
aws_machine = self._CreateMockVm()
aws_machine.CreateScratchDisk(self._CreateDiskSpec(fs_type))
return aws_machine
def testCreateNfsDisk(self):
mount_opt = ('hard,nfsvers=4.1,retrans=2,rsize=1048576,timeo=600,'
'wsize=1048576')
host = 'FSID.efs.us-east-1.amazonaws.com'
mount_cmd = ('sudo mkdir -p /scratch;'
'sudo mount -t nfs -o {mount_opt} {host}:/ /scratch && '
'sudo chown $USER:$USER /scratch;').format(
mount_opt=mount_opt, host=host)
fstab_cmd = ('echo "{host}:/ /scratch nfs {mount_opt}"'
' | sudo tee -a /etc/fstab').format(
mount_opt=mount_opt, host=host)
install_nfs = 'sudo yum install -y nfs-utils'
aws_machine = self._CallCreateScratchDisk(disk.NFS)
aws_machine.RemoteCommand.assert_called_with(install_nfs)
self.assertEqual(
[mock.call(mount_cmd), mock.call(fstab_cmd)],
aws_machine.RemoteHostCommand.call_args_list)
def testCreateLocalDisk(self):
# show that the non-NFS case formats the disk
format_cmd = (
'[[ -d /mnt ]] && sudo umount /mnt; '
'sudo mke2fs -F -E lazy_itable_init=0,discard -O ^has_journal '
'-t ext4 -b 4096 /dev/xvdb')
mount_cmd = ('sudo mkdir -p /scratch;'
'sudo mount -o discard /dev/xvdb /scratch && '
'sudo chown $USER:$USER /scratch;')
fstab_cmd = ('echo "/dev/xvdb /scratch ext4 defaults" | sudo tee -a '
'/etc/fstab')
aws_machine = self._CallCreateScratchDisk('ext4')
self.assertEqual(
[mock.call(format_cmd),
mock.call(mount_cmd),
mock.call(fstab_cmd)], aws_machine.RemoteHostCommand.call_args_list)
class AwsEfsCommandsTest(BaseTest):
def setUp(self):
super(AwsEfsCommandsTest, self).setUp()
self.SetFlags()
self.issue_cmd = self._CreatePatched(vm_util, 'IssueCommand')
self.aws = aws_nfs_service.AwsEfsCommands(_AWS_REGION)
def _SetResponse(self, json_value=None):
txt = json.dumps(json_value) if json_value else ''
self.issue_cmd.return_value = (txt, '', 0)
def assertCalled(self, *args):
cmd = ['aws', '--output', 'json', '--region', _AWS_REGION,
'efs'] + list(args)
self.issue_cmd.assert_called_with(cmd, raise_on_failure=False)
def testCreateFiler(self):
self._SetResponse(_FILER.create)
self.aws.CreateFiler(_NFS_TOKEN, _TIER, _THROUGHPUT_MODE,
_PROVISIONED_THROUGHPUT)
self.assertCalled('create-file-system', '--creation-token', _NFS_TOKEN,
'--performance-mode', _TIER, '--throughput-mode',
_THROUGHPUT_MODE)
def testAddTags(self):
self._SetResponse()
self.aws.AddTagsToFiler(_FILE_ID)
tags = util.MakeFormattedDefaultTags()
self.assertCalled('create-tags', '--file-system-id', _FILE_ID, '--tags',
*tags)
def testFilerAvailable(self):
self._SetResponse(_FILER.describe)
self.aws.WaitUntilFilerAvailable(_FILE_ID)
self.assertCalled('describe-file-systems', '--file-system-id', _FILE_ID)
def testMountAvailable(self):
self._SetResponse(_MOUNT.describe)
self.aws.IsMountAvailable(_MOUNT_ID)
self.assertCalled('describe-mount-targets', '--mount-target-id', _MOUNT_ID)
def testCreateMount(self):
self._SetResponse(_MOUNT.create)
self.aws.CreateMount(_FILE_ID, _SUBNET_ID, _SECURITY_GROUP_ID)
self.assertCalled('create-mount-target', '--file-system-id', _FILE_ID,
'--subnet-id', _SUBNET_ID, '--security-groups',
_SECURITY_GROUP_ID)
def testDeleteFiler(self):
self._SetResponse()
self.aws.DeleteFiler(_FILE_ID)
self.assertCalled('delete-file-system', '--file-system-id', _FILE_ID)
def testDeleteMount(self):
self._SetResponse()
self.aws.DeleteMount(_MOUNT_ID)
self.assertCalled('delete-mount-target', '--mount-target-id', _MOUNT_ID)
if __name__ == '__main__':
unittest.main()
|
import logging
import unittest
from unittest import mock
from homeassistant import setup
from homeassistant.components import litejet
import homeassistant.components.switch as switch
from tests.common import get_test_home_assistant
from tests.components.switch import common
_LOGGER = logging.getLogger(__name__)
ENTITY_SWITCH = "switch.mock_switch_1"
ENTITY_SWITCH_NUMBER = 1
ENTITY_OTHER_SWITCH = "switch.mock_switch_2"
ENTITY_OTHER_SWITCH_NUMBER = 2
class TestLiteJetSwitch(unittest.TestCase):
"""Test the litejet component."""
@mock.patch("homeassistant.components.litejet.LiteJet")
def setup_method(self, method, mock_pylitejet):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.start()
self.switch_pressed_callbacks = {}
self.switch_released_callbacks = {}
def get_switch_name(number):
return f"Mock Switch #{number}"
def on_switch_pressed(number, callback):
self.switch_pressed_callbacks[number] = callback
def on_switch_released(number, callback):
self.switch_released_callbacks[number] = callback
self.mock_lj = mock_pylitejet.return_value
self.mock_lj.loads.return_value = range(0)
self.mock_lj.button_switches.return_value = range(1, 3)
self.mock_lj.all_switches.return_value = range(1, 6)
self.mock_lj.scenes.return_value = range(0)
self.mock_lj.get_switch_name.side_effect = get_switch_name
self.mock_lj.on_switch_pressed.side_effect = on_switch_pressed
self.mock_lj.on_switch_released.side_effect = on_switch_released
config = {"litejet": {"port": "/dev/serial/by-id/mock-litejet"}}
if method == self.test_include_switches_False:
config["litejet"]["include_switches"] = False
elif method != self.test_include_switches_unspecified:
config["litejet"]["include_switches"] = True
assert setup.setup_component(self.hass, litejet.DOMAIN, config)
self.hass.block_till_done()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def switch(self):
"""Return the switch state."""
return self.hass.states.get(ENTITY_SWITCH)
def other_switch(self):
"""Return the other switch state."""
return self.hass.states.get(ENTITY_OTHER_SWITCH)
def test_include_switches_unspecified(self):
"""Test that switches are ignored by default."""
self.mock_lj.button_switches.assert_not_called()
self.mock_lj.all_switches.assert_not_called()
def test_include_switches_False(self):
"""Test that switches can be explicitly ignored."""
self.mock_lj.button_switches.assert_not_called()
self.mock_lj.all_switches.assert_not_called()
def test_on_off(self):
"""Test turning the switch on and off."""
assert self.switch().state == "off"
assert self.other_switch().state == "off"
assert not switch.is_on(self.hass, ENTITY_SWITCH)
common.turn_on(self.hass, ENTITY_SWITCH)
self.hass.block_till_done()
self.mock_lj.press_switch.assert_called_with(ENTITY_SWITCH_NUMBER)
common.turn_off(self.hass, ENTITY_SWITCH)
self.hass.block_till_done()
self.mock_lj.release_switch.assert_called_with(ENTITY_SWITCH_NUMBER)
def test_pressed_event(self):
"""Test handling an event from LiteJet."""
# Switch 1
_LOGGER.info(self.switch_pressed_callbacks[ENTITY_SWITCH_NUMBER])
self.switch_pressed_callbacks[ENTITY_SWITCH_NUMBER]()
self.hass.block_till_done()
assert switch.is_on(self.hass, ENTITY_SWITCH)
assert not switch.is_on(self.hass, ENTITY_OTHER_SWITCH)
assert self.switch().state == "on"
assert self.other_switch().state == "off"
# Switch 2
self.switch_pressed_callbacks[ENTITY_OTHER_SWITCH_NUMBER]()
self.hass.block_till_done()
assert switch.is_on(self.hass, ENTITY_OTHER_SWITCH)
assert switch.is_on(self.hass, ENTITY_SWITCH)
assert self.other_switch().state == "on"
assert self.switch().state == "on"
def test_released_event(self):
"""Test handling an event from LiteJet."""
# Initial state is on.
self.switch_pressed_callbacks[ENTITY_OTHER_SWITCH_NUMBER]()
self.hass.block_till_done()
assert switch.is_on(self.hass, ENTITY_OTHER_SWITCH)
# Event indicates it is off now.
self.switch_released_callbacks[ENTITY_OTHER_SWITCH_NUMBER]()
self.hass.block_till_done()
assert not switch.is_on(self.hass, ENTITY_OTHER_SWITCH)
assert not switch.is_on(self.hass, ENTITY_SWITCH)
assert self.other_switch().state == "off"
assert self.switch().state == "off"
|
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, Text, Boolean
from sqlalchemy_utils import JSONType
from lemur.database import db
from lemur.plugins.base import plugins
from sqlalchemy_utils import ArrowType
class Source(db.Model):
__tablename__ = "sources"
id = Column(Integer, primary_key=True)
label = Column(String(32), unique=True)
options = Column(JSONType)
description = Column(Text())
plugin_name = Column(String(32))
active = Column(Boolean, default=True)
last_run = Column(ArrowType)
endpoints = relationship("Endpoint", back_populates="source")
@property
def plugin(self):
return plugins.get(self.plugin_name)
def __repr__(self):
return "Source(label={label})".format(label=self.label)
|
import typing
import abc
from matchzoo.engine import base_metric
from matchzoo.engine import parse_metric
class BaseTask(abc.ABC):
"""Base Task, shouldn't be used directly."""
def __init__(self, loss=None, metrics=None):
"""
Base task constructor.
:param loss: By default the first loss in available losses.
:param metrics: Metrics used for evaluation; defaults to the first
    available metric.
"""
self._loss = loss
self._metrics = self._convert_metrics(metrics)
self._assure_loss()
self._assure_metrics()
def _convert_metrics(self, metrics):
if not metrics:
metrics = []
elif not isinstance(metrics, list):
metrics = [metrics]
return [
parse_metric.parse_metric(metric, self) for metric in metrics
]
def _assure_loss(self):
if not self._loss:
self._loss = self.list_available_losses()[0]
def _assure_metrics(self):
if not self._metrics:
first_available = self.list_available_metrics()[0]
self._metrics = self._convert_metrics(first_available)
@property
def loss(self):
""":return: Loss used in the task."""
return self._loss
@property
def metrics(self):
""":return: Metrics used in the task."""
return self._metrics
@metrics.setter
def metrics(
self,
new_metrics: typing.Union[
typing.List[str],
typing.List[base_metric.BaseMetric],
str,
base_metric.BaseMetric
]
):
self._metrics = self._convert_metrics(new_metrics)
@classmethod
@abc.abstractmethod
def list_available_losses(cls) -> list:
""":return: a list of available losses."""
@classmethod
@abc.abstractmethod
def list_available_metrics(cls) -> list:
""":return: a list of available metrics."""
@property
@abc.abstractmethod
def output_shape(self) -> tuple:
""":return: output shape of a single sample of the task."""
@property
@abc.abstractmethod
def output_dtype(self):
""":return: output data type for specific task."""
|
import unittest
import six
from mock import Mock, call
class TestListing(unittest.TestCase):
def setUp(self):
self.trashdir = Mock()
self.trashinfo_reader = Mock()
self.listing = Listing(self.trashdir, self.trashinfo_reader)
def test_it_should_read_all_trashinfo_from_home_dir(self):
self.listing.read_home_trashdir('/path/to/trash_dir')
self.trashdir.list_trashinfos.assert_called_with(
trashdir='/path/to/trash_dir',
list_to=self.trashinfo_reader)
class TestTrashDirReader(unittest.TestCase):
def test_should_list_all_trashinfo_found(self):
def files(path): yield 'file1'; yield 'file2'
os_listdir = Mock(side_effect=files)
trashdir = TrashDirReader(os_listdir)
out = Mock()
trashdir.list_trashinfos(trashdir='/path', list_to=out)
six.assertCountEqual(self,
[call(trashinfo='/path/file1'),
call(trashinfo='/path/file2')], out.mock_calls)
class TrashDirReader:
def __init__(self, os_listdir):
self.os_listdir = os_listdir
def list_trashinfos(self, trashdir, list_to):
import os
for entry in self.os_listdir(trashdir):
full_path = os.path.join(trashdir, entry)
list_to(trashinfo=full_path)
class Listing:
def __init__(self, trashdir, trashinfo_reader):
self.trashdir = trashdir
self.trashinfo_reader = trashinfo_reader
def read_home_trashdir(self, path):
self.trashdir.list_trashinfos(trashdir=path,
list_to=self.trashinfo_reader)
|
import os.path as op
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, Epochs, read_cov, pick_types, Annotations,
make_fixed_length_events)
from mne.io import read_raw_fif
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne.utils import (requires_sklearn, _click_ch_name, catch_logging,
_close_event)
from mne.viz.ica import _create_properties_layout, plot_ica_properties
from mne.viz.utils import _fake_click
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.2
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
def _get_raw(preload=False):
"""Get raw data."""
return read_raw_fif(raw_fname, preload=preload)
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
    return [0, 1, 2, 6, 7, 8, 12, 13, 14]  # take only a few channels
def _get_epochs():
"""Get epochs."""
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
with pytest.warns(RuntimeWarning, match='projection'):
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
return epochs
@requires_sklearn
def test_plot_ica_components():
"""Test plotting of ICA solutions."""
res = 8
fast_test = {"res": res, "contours": 0, "sensors": False}
raw = _get_raw()
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)
ica_picks = _get_picks(raw)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=ica_picks)
for components in [0, [0], [0, 1], [0, 1] * 2, None]:
ica.plot_components(components, image_interp='bilinear',
colorbar=True, **fast_test)
plt.close('all')
# test interactive mode (passing 'inst' arg)
with catch_logging() as log:
ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16,
verbose='debug', ch_type='grad')
log = log.getvalue()
assert 'grad data' in log
assert 'Interpolation mode local to mean' in log
fig = plt.gcf()
# test title click
# ----------------
lbl = fig.axes[1].get_label()
ica_idx = int(lbl[-3:])
titles = [ax.title for ax in fig.axes]
title_pos_midpoint = (titles[1].get_window_extent().extents
.reshape((2, 2)).mean(axis=0))
# first click adds to exclude
_fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
assert ica_idx in ica.exclude
# clicking again removes from exclude
_fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
assert ica_idx not in ica.exclude
# test topo click
# ---------------
_fake_click(fig, fig.axes[1], (0., 0.), xform='data')
c_fig = plt.gcf()
labels = [ax.get_label() for ax in c_fig.axes]
for label in ['topomap', 'image', 'erp', 'spectrum', 'variance']:
assert label in labels
topomap_ax = c_fig.axes[labels.index('topomap')]
title = topomap_ax.get_title()
assert (lbl == title)
ica.info = None
with pytest.raises(RuntimeError, match='fit the ICA'):
ica.plot_components(1, ch_type='mag')
@pytest.mark.slowtest
@requires_sklearn
def test_plot_ica_properties():
"""Test plotting of ICA properties."""
raw = _get_raw(preload=True).crop(0, 5)
raw.add_proj([], remove_existing=True)
events = make_fixed_length_events(raw)
picks = _get_picks(raw)[:6]
pick_names = [raw.ch_names[k] for k in picks]
raw.pick_channels(pick_names)
reject = dict(grad=4000e-13, mag=4e-12)
epochs = Epochs(raw, events[:3], event_id, tmin, tmax,
baseline=(None, 0), preload=True)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, max_iter=1,
random_state=0)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw)
# test _create_properties_layout
fig, ax = _create_properties_layout()
assert_equal(len(ax), 5)
with pytest.raises(ValueError, match='specify both fig and figsize'):
_create_properties_layout(figsize=(2, 2), fig=fig)
topoargs = dict(topomap_args={'res': 4, 'contours': 0, "sensors": False})
with catch_logging() as log:
ica.plot_properties(raw, picks=0, verbose='debug', **topoargs)
log = log.getvalue()
assert raw.ch_names[0] == 'MEG 0113'
assert 'Interpolation mode local to mean' in log, log
ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs)
ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5},
topomap_args={'res': 4, 'colorbar': True},
psd_args={'fmax': 65.}, plot_std=False,
figsize=[4.5, 4.5], reject=reject)
plt.close('all')
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(epochs, dB=list('abc'))
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(ica)
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties([0.2])
with pytest.raises(TypeError, match='must be an instance'):
plot_ica_properties(epochs, epochs)
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(epochs, psd_args='not dict')
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(epochs, plot_std=[])
fig, ax = plt.subplots(2, 3)
ax = ax.ravel()[:-1]
ica.plot_properties(epochs, picks=1, axes=ax, **topoargs)
pytest.raises(TypeError, plot_ica_properties, epochs, ica, picks=[0, 1],
axes=ax)
pytest.raises(ValueError, ica.plot_properties, epochs, axes='not axes')
plt.close('all')
# Test merging grads.
pick_names = raw.ch_names[:15:2] + raw.ch_names[1:15:2]
raw = _get_raw(preload=True).pick_channels(pick_names).crop(0, 5)
raw.info.normalize_proj()
ica = ICA(random_state=0, max_iter=1)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
ica.plot_properties(raw)
plt.close('all')
# Test handling of zeros
ica = ICA(random_state=0, max_iter=1)
epochs.pick_channels(pick_names)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(epochs)
epochs._data[0] = 0
with pytest.warns(None): # Usually UserWarning: Infinite value .* for epo
ica.plot_properties(epochs, **topoargs)
plt.close('all')
# Test Raw with annotations
annot = Annotations(onset=[1], duration=[1], description=['BAD'])
raw_annot = _get_raw(preload=True).set_annotations(annot).crop(0, 8)
raw_annot.pick(np.arange(10))
raw_annot.del_proj()
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw_annot)
# drop bad data segments
fig = ica.plot_properties(raw_annot, picks=[0, 1], **topoargs)
assert_equal(len(fig), 2)
# don't drop
ica.plot_properties(raw_annot, reject_by_annotation=False, **topoargs)
@requires_sklearn
def test_plot_ica_sources():
"""Test plotting of ICA panel."""
raw = read_raw_fif(raw_fname).crop(0, 1).load_data()
picks = _get_picks(raw)
epochs = _get_epochs()
raw.pick_channels([raw.ch_names[k] for k in picks])
ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
ica = ICA(n_components=2)
ica.fit(raw, picks=ica_picks)
ica.exclude = [1]
fig = ica.plot_sources(raw)
assert len(plt.get_fignums()) == 1
# change which component is in ICA.exclude (click data trace to remove
# current one; click name to add other one)
fig.canvas.draw()
x = fig.mne.traces[1].get_xdata()[5]
y = fig.mne.traces[1].get_ydata()[5]
_fake_click(fig, fig.mne.ax_main, (x, y), xform='data') # exclude = []
_click_ch_name(fig, ch_index=0, button=1) # exclude = [0]
fig.canvas.key_press_event(fig.mne.close_key)
_close_event(fig)
assert len(plt.get_fignums()) == 0
assert_array_equal(ica.exclude, [0])
plt.close('all')
# dtype can change int->np.int64 after load, test it explicitly
ica.n_components_ = np.int64(ica.n_components_)
# test clicks on y-label (need >2 secs for plot_properties() to work)
long_raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
fig = ica.plot_sources(long_raw)
assert len(plt.get_fignums()) == 1
fig.canvas.draw()
_fake_click(fig, fig.mne.ax_main, (-0.1, 0), xform='data', button=3)
assert len(fig.mne.child_figs) == 1
assert len(plt.get_fignums()) == 2
# close child fig directly (workaround for mpl issue #18609)
fig.mne.child_figs[0].canvas.key_press_event('escape')
assert len(plt.get_fignums()) == 1
fig.canvas.key_press_event(fig.mne.close_key)
assert len(plt.get_fignums()) == 0
del long_raw
# test with annotations
orig_annot = raw.annotations
raw.set_annotations(Annotations([0.2], [0.1], 'Test'))
fig = ica.plot_sources(raw)
assert len(fig.mne.ax_main.collections) == 1
assert len(fig.mne.ax_hscroll.collections) == 1
raw.set_annotations(orig_annot)
# test error handling
raw.info['bads'] = ['MEG 0113']
with pytest.raises(RuntimeError, match="Raw doesn't match fitted data"):
ica.plot_sources(inst=raw)
epochs.info['bads'] = ['MEG 0113']
with pytest.raises(RuntimeError, match="Epochs don't match fitted data"):
ica.plot_sources(inst=epochs)
epochs.info['bads'] = []
# test w/ epochs and evokeds
ica.plot_sources(epochs)
ica.plot_sources(epochs.average())
evoked = epochs.average()
fig = ica.plot_sources(evoked)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded
ica.exclude = [0]
ica.plot_sources(evoked)
ica.labels_ = dict(eog=[0])
ica.labels_['eog/0/crazy-channel'] = [0]
ica.plot_sources(evoked) # now with labels
with pytest.raises(ValueError, match='must be of Raw or Epochs type'):
ica.plot_sources('meeow')
@pytest.mark.slowtest
@requires_sklearn
def test_plot_ica_overlay():
"""Test plotting of ICA cleaning."""
raw = _get_raw(preload=True)
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, random_state=0)
# can't use info.normalize_proj here because of how and when ICA and Epochs
# objects do picking of Raw data
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=picks)
# don't test raw, needs preload ...
with pytest.warns(RuntimeWarning, match='projection'):
ecg_epochs = create_ecg_epochs(raw, picks=picks)
ica.plot_overlay(ecg_epochs.average())
with pytest.warns(RuntimeWarning, match='projection'):
eog_epochs = create_eog_epochs(raw, picks=picks)
ica.plot_overlay(eog_epochs.average(), n_pca_components=2)
pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
pytest.raises(TypeError, ica.plot_overlay, raw, exclude=2)
ica.plot_overlay(raw)
plt.close('all')
# smoke test for CTF
raw = read_raw_fif(raw_ctf_fname)
raw.apply_gradient_compensation(3)
picks = pick_types(raw.info, meg=True, ref_meg=False)
ica = ICA(n_components=2, )
ica.fit(raw, picks=picks)
with pytest.warns(RuntimeWarning, match='longer than'):
ecg_epochs = create_ecg_epochs(raw)
ica.plot_overlay(ecg_epochs.average())
def _get_geometry(fig):
try:
return fig.axes[0].get_subplotspec().get_geometry() # pragma: no cover
except AttributeError: # MPL < 3.4 (probably)
return fig.axes[0].get_geometry() # pragma: no cover
@requires_sklearn
def test_plot_ica_scores():
"""Test plotting of ICA scores."""
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=picks)
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], figsize=(6.4, 2.7))
ica.plot_scores([[0.3, 0.2], [0.3, 0.2]], axhline=[0.1, -0.1])
# check labels
ica.labels_ = dict()
ica.labels_['eog'] = 0
ica.labels_['ecg'] = 1
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='eog')
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='ecg')
ica.labels_['eog/0/foo'] = 0
ica.labels_['ecg/1/bar'] = 0
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='foo')
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='eog')
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='ecg')
# check setting number of columns
fig = ica.plot_scores([[0.3, 0.2], [0.3, 0.2], [0.3, 0.2]],
axhline=[0.1, -0.1])
assert 2 == _get_geometry(fig)[1]
fig = ica.plot_scores([[0.3, 0.2], [0.3, 0.2]], axhline=[0.1, -0.1],
n_cols=1)
assert 1 == _get_geometry(fig)[1]
# only use 1 column (even though 2 were requested)
fig = ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], n_cols=2)
assert 1 == _get_geometry(fig)[1]
with pytest.raises(ValueError, match='Need as many'):
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1],
labels=['one', 'one-too-many'])
with pytest.raises(ValueError, match='The length of'):
ica.plot_scores([0.2])
@requires_sklearn
def test_plot_instance_components():
"""Test plotting of components as instances of raw and epochs."""
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=picks)
ica.exclude = [0]
fig = ica.plot_sources(raw, title='Components')
keys = ('home', 'home', 'end', 'down', 'up', 'right', 'left', '-', '+',
'=', 'd', 'd', 'pageup', 'pagedown', 'z', 'z', 's', 's', 'f11',
'b')
for key in keys:
fig.canvas.key_press_event(key)
ax = fig.mne.ax_main
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]],
'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
plt.close('all')
epochs = _get_epochs()
fig = ica.plot_sources(epochs, title='Components')
for key in keys:
fig.canvas.key_press_event(key)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
|
from . import core as html5
def unescape(val, maxLength = 0):
"""
Unquotes several HTML-quoted characters in a string.
:param val: The value to be unescaped.
:type val: str
:param maxLength: Cut-off after maxLength characters.
	A value of 0 (the default) means "unlimited".
:type maxLength: int
:returns: The unquoted string.
:rtype: str
"""
val = val \
.replace("<", "<") \
.replace(">", ">") \
.replace(""", "\"") \
.replace("'", "'")
if maxLength > 0:
return val[0:maxLength]
return val
def doesEventHitWidgetOrParents(event, widget):
"""
Test if event 'event' hits widget 'widget' (or *any* of its parents)
"""
while widget:
if event.target == widget.element:
return widget
widget = widget.parent()
return None
def doesEventHitWidgetOrChildren(event, widget):
"""
Test if event 'event' hits widget 'widget' (or *any* of its children)
"""
if event.target == widget.element:
return widget
for child in widget.children():
if doesEventHitWidgetOrChildren(event, child):
return child
return None
def textToHtml(node, text):
"""
Generates html nodes from text by splitting text into content and into
line breaks html5.Br.
:param node: The node where the nodes are appended to.
:param text: The text to be inserted.
"""
for (i, part) in enumerate(text.split("\n")):
if i > 0:
node.appendChild(html5.Br())
node.appendChild(html5.TextNode(part))
def parseInt(s, ret = 0):
"""
Parses a value as int
"""
if not isinstance(s, str):
return int(s)
elif s:
if s[0] in "+-":
ts = s[1:]
else:
ts = s
if ts and all([_ in "0123456789" for _ in ts]):
return int(s)
return ret
def parseFloat(s, ret = 0.0):
"""
Parses a value as float.
"""
if not isinstance(s, str):
return float(s)
elif s:
if s[0] in "+-":
ts = s[1:]
else:
ts = s
if ts and ts.count(".") <= 1 and all([_ in ".0123456789" for _ in ts]):
return float(s)
return ret
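# Illustrative behaviour of the lenient parsers above; the values follow directly from
# the code and are not part of any documented API:
#
#     >>> parseInt("42"), parseInt("-7"), parseInt("4.2")
#     (42, -7, 0)
#     >>> parseFloat("4.2"), parseFloat("1.2.3", ret=-1.0)
#     (4.2, -1.0)
#
# Invalid strings fall back to the ``ret`` default instead of raising, which is why
# both helpers accept an explicit fallback argument.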
|
import os.path as op
import numpy as np
import pytest
from mne import (read_forward_solution, VolSourceEstimate, SourceEstimate,
VolVectorSourceEstimate, compute_source_morph)
from mne.datasets import testing
from mne.utils import (requires_dipy, requires_nibabel, requires_version,
catch_logging)
from mne.viz import plot_volume_source_estimates
from mne.viz.utils import _fake_click
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
fwd_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@requires_dipy()
@requires_nibabel()
@requires_version('nilearn', '0.4')
@pytest.mark.parametrize(
'mode, stype, init_t, want_t, init_p, want_p, bg_img', [
('glass_brain', 's', None, 2, None, (-30.9, 18.4, 56.7), None),
('stat_map', 'vec', 1, 1, None, (15.7, 16.0, -6.3), None),
('glass_brain', 'vec', None, 1, (10, -10, 20), (6.6, -9., 19.9), None),
('stat_map', 's', 1, 1, (-10, 5, 10), (-12.3, 2.0, 7.7), 'brain.mgz')])
def test_plot_volume_source_estimates(mode, stype, init_t, want_t,
init_p, want_p, bg_img):
"""Test interactive plotting of volume source estimates."""
forward = read_forward_solution(fwd_fname)
sample_src = forward['src']
if init_p is not None:
init_p = np.array(init_p) / 1000.
vertices = [s['vertno'] for s in sample_src]
n_verts = sum(len(v) for v in vertices)
n_time = 2
data = np.random.RandomState(0).rand(n_verts, n_time)
if stype == 'vec':
stc = VolVectorSourceEstimate(
np.tile(data[:, np.newaxis], (1, 3, 1)), vertices, 1, 1)
else:
assert stype == 's'
stc = VolSourceEstimate(data, vertices, 1, 1)
with pytest.warns(None): # sometimes get scalars/index warning
with catch_logging() as log:
fig = stc.plot(
sample_src, subject='sample', subjects_dir=subjects_dir,
mode=mode, initial_time=init_t, initial_pos=init_p,
bg_img=bg_img, verbose=True)
log = log.getvalue()
want_str = 't = %0.3f s' % want_t
assert want_str in log, (want_str, init_t)
want_str = '(%0.1f, %0.1f, %0.1f) mm' % want_p
assert want_str in log, (want_str, init_p)
for ax_idx in [0, 2, 3, 4]:
_fake_click(fig, fig.axes[ax_idx], (0.3, 0.5))
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('shift+right')
if bg_img is not None:
with pytest.raises(FileNotFoundError, match='MRI file .* not found'):
stc.plot(sample_src, subject='sample', subjects_dir=subjects_dir,
mode='stat_map', bg_img='junk.mgz')
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@requires_dipy()
@requires_nibabel()
@requires_version('nilearn', '0.4')
def test_plot_volume_source_estimates_morph():
"""Test interactive plotting of volume source estimates with morph."""
forward = read_forward_solution(fwd_fname)
sample_src = forward['src']
vertices = [s['vertno'] for s in sample_src]
n_verts = sum(len(v) for v in vertices)
n_time = 2
data = np.random.RandomState(0).rand(n_verts, n_time)
stc = VolSourceEstimate(data, vertices, 1, 1)
sample_src[0]['subject_his_id'] = 'sample' # old src
morph = compute_source_morph(sample_src, 'sample', 'fsaverage', zooms=5,
subjects_dir=subjects_dir)
initial_pos = (-0.05, -0.01, -0.006)
with pytest.warns(None): # sometimes get scalars/index warning
with catch_logging() as log:
stc.plot(morph, subjects_dir=subjects_dir, mode='glass_brain',
initial_pos=initial_pos, verbose=True)
log = log.getvalue()
assert 't = 1.000 s' in log
assert '(-52.0, -8.0, -7.0) mm' in log
with pytest.raises(ValueError, match='Allowed values are'):
stc.plot(sample_src, 'sample', subjects_dir, mode='abcd')
vertices.append([])
surface_stc = SourceEstimate(data, vertices, 1, 1)
with pytest.raises(TypeError, match='an instance of VolSourceEstimate'):
plot_volume_source_estimates(surface_stc, sample_src, 'sample',
subjects_dir)
with pytest.raises(ValueError, match='Negative colormap limits'):
stc.plot(sample_src, 'sample', subjects_dir,
clim=dict(lims=[-1, 2, 3], kind='value'))
|
import os
import shutil
from ..core import driver
from ..core import exceptions
from ..core import lru
class Storage(driver.Base):
supports_bytes_range = True
def __init__(self, path=None, config=None):
self._root_path = path or './tmp'
def _init_path(self, path=None, create=False):
path = os.path.join(self._root_path, path) if path else self._root_path
if create is True:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
return path
@lru.get
def get_content(self, path):
path = self._init_path(path)
try:
with open(path, mode='rb') as f:
d = f.read()
except Exception:
raise exceptions.FileNotFoundError('%s is not there' % path)
return d
@lru.set
def put_content(self, path, content):
path = self._init_path(path, create=True)
with open(path, mode='wb') as f:
f.write(content)
return path
def stream_read(self, path, bytes_range=None):
path = self._init_path(path)
nb_bytes = 0
total_size = 0
try:
with open(path, mode='rb') as f:
if bytes_range:
f.seek(bytes_range[0])
total_size = bytes_range[1] - bytes_range[0] + 1
while True:
buf = None
if bytes_range:
# Bytes Range is enabled
buf_size = self.buffer_size
if nb_bytes + buf_size > total_size:
# We make sure we don't read out of the range
buf_size = total_size - nb_bytes
if buf_size > 0:
buf = f.read(buf_size)
nb_bytes += len(buf)
else:
# We're at the end of the range
buf = ''
else:
buf = f.read(self.buffer_size)
if not buf:
break
yield buf
except IOError:
raise exceptions.FileNotFoundError('%s is not there' % path)
def stream_write(self, path, fp):
# Size is mandatory
path = self._init_path(path, create=True)
with open(path, mode='wb') as f:
try:
while True:
buf = fp.read(self.buffer_size)
if not buf:
break
f.write(buf)
except IOError:
pass
def list_directory(self, path=None):
prefix = ''
if path:
prefix = '%s/' % path
path = self._init_path(path)
exists = False
try:
for d in os.listdir(path):
exists = True
yield prefix + d
except Exception:
pass
if not exists:
raise exceptions.FileNotFoundError('%s is not there' % path)
def exists(self, path):
path = self._init_path(path)
return os.path.exists(path)
@lru.remove
def remove(self, path):
path = self._init_path(path)
if os.path.isdir(path):
shutil.rmtree(path)
return
try:
os.remove(path)
except OSError:
raise exceptions.FileNotFoundError('%s is not there' % path)
def get_size(self, path):
path = self._init_path(path)
try:
return os.path.getsize(path)
except OSError:
raise exceptions.FileNotFoundError('%s is not there' % path)
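# A minimal usage sketch for the driver above (the registry path and keys are
# hypothetical; behaviour follows from the methods defined in this class):
#
#     >>> store = Storage(path='/tmp/registry')
#     >>> _ = store.put_content('images/abc/json', b'{"id": "abc"}')
#     >>> store.get_content('images/abc/json')
#     b'{"id": "abc"}'
#     >>> b''.join(store.stream_read('images/abc/json'))
#     b'{"id": "abc"}'
#
# Missing paths raise this package's FileNotFoundError, and stream_read honours an
# optional inclusive (start, end) bytes_range when supplied.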
|
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.helpers.device_registry import EVENT_DEVICE_REGISTRY_UPDATED
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ATTR_DISCOVERY_HASH, device_trigger
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
AUTOMATION_TYPE_TRIGGER = "trigger"
AUTOMATION_TYPES = [AUTOMATION_TYPE_TRIGGER]
AUTOMATION_TYPES_SCHEMA = vol.In(AUTOMATION_TYPES)
CONF_AUTOMATION_TYPE = "automation_type"
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{vol.Required(CONF_AUTOMATION_TYPE): AUTOMATION_TYPES_SCHEMA},
extra=vol.ALLOW_EXTRA,
)
async def async_setup_entry(hass, config_entry):
"""Set up MQTT device automation dynamically through MQTT discovery."""
async def async_device_removed(event):
"""Handle the removal of a device."""
if event.data["action"] != "remove":
return
await device_trigger.async_device_removed(hass, event.data["device_id"])
async def async_discover(discovery_payload):
"""Discover and add an MQTT device automation."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
if config[CONF_AUTOMATION_TYPE] == AUTOMATION_TYPE_TRIGGER:
await device_trigger.async_setup_trigger(
hass, config, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format("device_automation", "mqtt"), async_discover
)
hass.bus.async_listen(EVENT_DEVICE_REGISTRY_UPDATED, async_device_removed)
|
import unittest
from kalliope.core.Models import Neuron, Signal, Synapse, Brain
from kalliope.signals.mqtt_subscriber import Mqtt_subscriber
from kalliope.signals.mqtt_subscriber.models import Broker, Topic
class TestMqtt_subscriber(unittest.TestCase):
def test_check_mqtt_dict(self):
valid_dict_of_parameters = {
"topic": "my_topic",
"broker_ip": "192.168.0.1"
}
invalid_dict_of_parameters = {
"topic": "my_topic"
}
self.assertTrue(Mqtt_subscriber.check_parameters(valid_dict_of_parameters))
self.assertFalse(Mqtt_subscriber.check_parameters(invalid_dict_of_parameters))
def test_get_list_synapse_with_mqtt_subscriber(self):
        # test with one mqtt signal
neuron = Neuron(name='say', parameters={'message': ['test message']})
signal1 = Signal(name="mqtt_subscriber", parameters={"topic": "test", "broker_ip": "192.168.0.1"})
synapse1 = Synapse(name="synapse1", neurons=[neuron], signals=[signal1])
synapses = [synapse1]
brain = Brain()
brain.synapses = synapses
expected_result = synapses
mq = Mqtt_subscriber()
mq.brain = brain
generator = mq.get_list_synapse()
self.assertEqual(expected_result, list(generator))
        # test with two synapses
neuron = Neuron(name='say', parameters={'message': ['test message']})
signal1 = Signal(name="order", parameters="test_order")
signal2 = Signal(name="mqtt_subscriber", parameters={"topic": "test", "broker_ip": "192.168.0.1"})
synapse1 = Synapse(name="synapse1", neurons=[neuron], signals=[signal1])
synapse2 = Synapse(name="synapse2", neurons=[neuron], signals=[signal1, signal2])
synapses = [synapse1, synapse2]
brain = Brain()
brain.synapses = synapses
expected_result = [synapse2]
mq = Mqtt_subscriber()
mq.brain = brain
generator = mq.get_list_synapse()
self.assertEqual(expected_result, list(generator))
def test_get_list_broker_to_instantiate(self):
# ----------------
# only one synapse
# ----------------
neuron = Neuron(name='say', parameters={'message': ['test message']})
signal1 = Signal(name="mqtt_subscriber", parameters={"topic": "topic1", "broker_ip": "192.168.0.1"})
synapse1 = Synapse(name="synapse1", neurons=[neuron], signals=[signal1])
brain = Brain()
brain.synapses = [synapse1]
list_synapse_with_mqtt_subscriber = [synapse1]
expected_broker = Broker()
expected_broker.broker_ip = "192.168.0.1"
expected_broker.topics = list()
expected_topic = Topic()
expected_topic.name = "topic1"
# add the current synapse to the topic
expected_topic.synapses = list()
expected_topic.synapses.append(synapse1)
expected_broker.topics.append(expected_topic)
        expected_returned_list = [expected_broker]
mq = Mqtt_subscriber()
mq.brain = brain
        self.assertListEqual(expected_returned_list,
                             mq.get_list_broker_to_instantiate(list_synapse_with_mqtt_subscriber))
# ----------------
        # one synapse, two different brokers
# ----------------
neuron = Neuron(name='say', parameters={'message': ['test message']})
signal1 = Signal(name="mqtt_subscriber", parameters={"topic": "topic1",
"broker_ip": "192.168.0.1",
"is_json": False})
signal2 = Signal(name="mqtt_subscriber", parameters={"topic": "topic2",
"broker_ip": "172.16.0.1",
"is_json": False})
synapse1 = Synapse(name="synapse1", neurons=[neuron], signals=[signal1, signal2])
brain = Brain()
brain.synapses = [synapse1]
list_synapse_with_mqtt_subscriber = [synapse1]
expected_broker1 = Broker()
expected_broker1.broker_ip = "192.168.0.1"
expected_broker1.topics = list()
expected_topic = Topic()
expected_topic.name = "topic1"
# add the current synapse to the topic
expected_topic.synapses = list()
expected_topic.synapses.append(synapse1)
expected_broker1.topics.append(expected_topic)
expected_broker2 = Broker()
expected_broker2.broker_ip = "172.16.0.1"
expected_broker2.topics = list()
expected_topic = Topic()
expected_topic.name = "topic2"
# add the current synapse to the topic
expected_topic.synapses = list()
expected_topic.synapses.append(synapse1)
expected_broker2.topics.append(expected_topic)
        expected_returned_list = [expected_broker1, expected_broker2]
mq = Mqtt_subscriber()
mq.brain = brain
        self.assertEqual(expected_returned_list, mq.get_list_broker_to_instantiate(list_synapse_with_mqtt_subscriber))
# ----------------
        # two synapses, same broker, different topics
# ----------------
# synapse 1
neuron1 = Neuron(name='say', parameters={'message': ['test message']})
signal1 = Signal(name="mqtt_subscriber", parameters={"topic": "topic1", "broker_ip": "192.168.0.1"})
synapse1 = Synapse(name="synapse1", neurons=[neuron1], signals=[signal1])
# synapse 2
neuron2 = Neuron(name='say', parameters={'message': ['test message']})
signal2 = Signal(name="mqtt_subscriber", parameters={"topic": "topic2", "broker_ip": "192.168.0.1"})
synapse2 = Synapse(name="synapse2", neurons=[neuron2], signals=[signal2])
brain = Brain()
brain.synapses = [synapse1, synapse2]
list_synapse_with_mqtt_subscriber = [synapse1, synapse2]
expected_broker1 = Broker()
expected_broker1.broker_ip = "192.168.0.1"
expected_broker1.topics = list()
expected_topic1 = Topic()
expected_topic1.name = "topic1"
expected_topic2 = Topic()
expected_topic2.name = "topic2"
# add the current synapse to the topic
expected_topic1.synapses = [synapse1]
expected_topic2.synapses = [synapse2]
        # add both topics to the broker
expected_broker1.topics.append(expected_topic1)
expected_broker1.topics.append(expected_topic2)
        expected_returned_list = [expected_broker1]
mq = Mqtt_subscriber()
mq.brain = brain
        self.assertEqual(expected_returned_list, mq.get_list_broker_to_instantiate(list_synapse_with_mqtt_subscriber))
# ----------------
        # two synapses, same broker, same topic
# ----------------
# synapse 1
neuron1 = Neuron(name='say', parameters={'message': ['test message']})
signal1 = Signal(name="mqtt_subscriber", parameters={"topic": "topic1", "broker_ip": "192.168.0.1"})
synapse1 = Synapse(name="synapse1", neurons=[neuron1], signals=[signal1])
# synapse 2
neuron2 = Neuron(name='say', parameters={'message': ['test message']})
signal2 = Signal(name="mqtt_subscriber", parameters={"topic": "topic1", "broker_ip": "192.168.0.1"})
synapse2 = Synapse(name="synapse2", neurons=[neuron2], signals=[signal2])
brain = Brain()
brain.synapses = [synapse1, synapse2]
list_synapse_with_mqtt_subscriber = [synapse1, synapse2]
expected_broker1 = Broker()
expected_broker1.broker_ip = "192.168.0.1"
expected_broker1.topics = list()
expected_topic1 = Topic()
expected_topic1.name = "topic1"
# add both synapses to the topic
expected_topic1.synapses = [synapse1, synapse2]
# add the topic to the broker
expected_broker1.topics.append(expected_topic1)
        expected_returned_list = [expected_broker1]
mq = Mqtt_subscriber()
mq.brain = brain
        self.assertEqual(expected_returned_list, mq.get_list_broker_to_instantiate(list_synapse_with_mqtt_subscriber))
if __name__ == '__main__':
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(TestMqtt_subscriber("test_get_list_broker_to_instantiate"))
# runner = unittest.TextTestRunner()
# runner.run(suite)
|
from pyopenuv import Client
from pyopenuv.errors import OpenUvError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY,
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import DOMAIN # pylint: disable=unused-import
CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Inclusive(CONF_LATITUDE, "coords"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coords"): cv.longitude,
vol.Optional(CONF_ELEVATION): vol.Coerce(float),
}
)
class OpenUvFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an OpenUV config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=CONFIG_SCHEMA,
errors=errors if errors else {},
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return await self._show_form()
if user_input.get(CONF_LATITUDE):
identifier = f"{user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}"
else:
identifier = "Default Coordinates"
await self.async_set_unique_id(identifier)
self._abort_if_unique_id_configured()
websession = aiohttp_client.async_get_clientsession(self.hass)
client = Client(user_input[CONF_API_KEY], 0, 0, websession)
try:
await client.uv_index()
except OpenUvError:
return await self._show_form({CONF_API_KEY: "invalid_api_key"})
return self.async_create_entry(title=identifier, data=user_input)
|
import ntpath
WINDOWS_FIO_DIR = 'fio-3.1-x86'
FIO_ZIP = WINDOWS_FIO_DIR + '.zip'
FIO_URL = 'https://bluestop.org/files/fio/releases/' + FIO_ZIP
def GetFioExec(vm):
return ntpath.join(vm.temp_dir,
'{fio_dir}\\fio.exe --thread'.format(
fio_dir=WINDOWS_FIO_DIR))
def GetRemoteJobFilePath(vm):
return '{path}\\fio.job'.format(path=vm.temp_dir)
def Install(vm):
zip_path = ntpath.join(vm.temp_dir, FIO_ZIP)
vm.DownloadFile(FIO_URL, zip_path)
vm.UnzipFile(zip_path, vm.temp_dir)
|
import unittest
import pandas as pd
import numpy as np
from pgmpy.models import SEMGraph, SEM
from pgmpy.estimators import SEMEstimator, IVEstimator
class TestSEMEstimator(unittest.TestCase):
def setUp(self):
self.custom = SEMGraph(
ebunch=[("a", "b"), ("b", "c")], latents=[], err_corr=[], err_var={}
)
a = np.random.randn(10 ** 3)
b = a + np.random.normal(loc=0, scale=0.1, size=10 ** 3)
c = b + np.random.normal(loc=0, scale=0.2, size=10 ** 3)
self.custom_data = pd.DataFrame({"a": a, "b": b, "c": c})
self.custom_data -= self.custom_data.mean(axis=0)
self.custom_lisrel = self.custom.to_lisrel()
self.demo = SEMGraph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y3", "y7"),
("y4", "y8"),
("y2", "y4"),
("y6", "y8"),
],
err_var={},
)
self.demo_lisrel = self.demo.to_lisrel()
self.demo_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/democracy1989a.csv",
index_col=0,
header=0,
)
self.union = SEMGraph(
ebunch=[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
],
latents=[],
err_corr=[("yrsmill", "age")],
err_var={},
)
self.union_lisrel = self.union.to_lisrel()
self.union_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/union1989b.csv", index_col=0, header=0
)
def test_get_init_values(self):
demo_estimator = SEMEstimator(self.demo)
for method in ["random", "std"]:
B_init, zeta_init = demo_estimator.get_init_values(
data=self.demo_data, method=method
)
demo_lisrel = self.demo.to_lisrel()
m = len(demo_lisrel.eta)
self.assertEqual(B_init.shape, (m, m))
self.assertEqual(zeta_init.shape, (m, m))
union_estimator = SEMEstimator(self.union)
B_init, zeta_init = union_estimator.get_init_values(
data=self.union_data, method=method
)
union_lisrel = self.union.to_lisrel()
m = len(union_lisrel.eta)
self.assertEqual(B_init.shape, (m, m))
self.assertEqual(zeta_init.shape, (m, m))
@unittest.skip
def test_demo_estimator_random_init(self):
estimator = SEMEstimator(self.demo)
summary = estimator.fit(self.demo_data, method="ml")
@unittest.skip
def test_union_estimator_random_init(self):
estimator = SEMEstimator(self.union_lisrel)
summary = estimator.fit(
self.union_data, method="ml", opt="adam", max_iter=10 ** 6, exit_delta=1e-1
)
@unittest.skip
def test_custom_estimator_random_init(self):
estimator = SEMEstimator(self.custom_lisrel)
summary = estimator.fit(
self.custom_data, method="ml", max_iter=10 ** 6, opt="adam"
)
summary = estimator.fit(
self.custom_data, method="uls", max_iter=10 ** 6, opt="adam"
)
summary = estimator.fit(
self.custom_data,
method="gls",
max_iter=10 ** 6,
opt="adam",
W=np.ones((3, 3)),
)
@unittest.skip
def test_union_estimator_std_init(self):
estimator = SEMEstimator(self.union_lisrel)
summary = estimator.fit(
self.union_data,
method="ml",
opt="adam",
init_values="std",
max_iter=10 ** 6,
exit_delta=1e-1,
)
@unittest.skip
def test_custom_estimator_std_init(self):
estimator = SEMEstimator(self.custom_lisrel)
summary = estimator.fit(
self.custom_data,
method="ml",
init_values="std",
max_iter=10 ** 6,
opt="adam",
)
class TestIVEstimator(unittest.TestCase):
def setUp(self):
self.model = SEM.from_graph(
ebunch=[
("Z1", "X", 1.0),
("Z2", "X", 1.0),
("Z2", "W", 1.0),
("W", "U", 1.0),
("U", "X", 1.0),
("U", "Y", 1.0),
("X", "Y", 1.0),
],
latents=["U"],
err_var={"Z1": 1, "Z2": 1, "W": 1, "X": 1, "U": 1, "Y": 1},
)
self.generated_data = self.model.to_lisrel().generate_samples(100000)
def test_fit(self):
estimator = IVEstimator(self.model)
param, summary = estimator.fit(X="X", Y="Y", data=self.generated_data)
        self.assertTrue(abs(param - 1) < 0.01)
|
from homeassistant.helpers.entity import Entity
class IHCDevice(Entity):
"""Base class for all IHC devices.
    All IHC devices have an associated IHC resource. IHCDevice handles the
registration of the IHC controller callback when the IHC resource changes.
Derived classes must implement the on_ihc_change method
"""
def __init__(
self, ihc_controller, name, ihc_id: int, info: bool, product=None
) -> None:
"""Initialize IHC attributes."""
self.ihc_controller = ihc_controller
self._name = name
self.ihc_id = ihc_id
self.info = info
if product:
self.ihc_name = product["name"]
self.ihc_note = product["note"]
self.ihc_position = product["position"]
else:
self.ihc_name = ""
self.ihc_note = ""
self.ihc_position = ""
async def async_added_to_hass(self):
"""Add callback for IHC changes."""
self.ihc_controller.add_notify_event(self.ihc_id, self.on_ihc_change, True)
@property
def should_poll(self) -> bool:
"""No polling needed for IHC devices."""
return False
@property
def name(self):
"""Return the device name."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if not self.info:
return {}
return {
"ihc_id": self.ihc_id,
"ihc_name": self.ihc_name,
"ihc_note": self.ihc_note,
"ihc_position": self.ihc_position,
}
def on_ihc_change(self, ihc_id, value):
"""Handle IHC resource change.
Derived classes must overwrite this to do device specific stuff.
"""
raise NotImplementedError
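# A minimal sketch of a derived entity (illustrative only; the sensor class name and
# its state handling are assumptions, not taken from this integration):
#
#     class IhcIlluminanceSensor(IHCDevice):
#         def on_ihc_change(self, ihc_id, value):
#             self._state = value
#             self.schedule_update_ha_state()
#
# The base class only registers the resource callback in async_added_to_hass; turning
# the raw resource value into an entity state is left entirely to the subclass.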
|
import os.path as op
import pytest
import numpy as np
from numpy.fft import rfft, rfftfreq
from mne import create_info
from mne.datasets import testing
from mne.io import RawArray, read_raw_fif
from mne.io.pick import _pick_data_channels
from mne.preprocessing import oversampled_temporal_projection
from mne.utils import run_tests_if_main, catch_logging
data_path = testing.data_path(download=False)
erm_fname = op.join(data_path, 'SSS', 'test_move_anon_erm_raw.fif')
triux_fname = op.join(data_path, 'SSS', 'TRIUX', 'triux_bmlhus_erm_raw.fif')
skip_fname = op.join(data_path, 'misc', 'intervalrecording_raw.fif')
def test_otp_array():
"""Test the OTP algorithm on artificial data."""
n_channels, n_time, sfreq = 10, 2000, 1000.
signal_f = 2.
rng = np.random.RandomState(0)
data = rng.randn(n_channels, n_time)
raw = RawArray(data, create_info(n_channels, sfreq, 'eeg'))
raw.info['bads'] = [raw.ch_names[0]]
signal = np.sin(2 * np.pi * signal_f * raw.times)
raw._data += signal
# Check SNR
def snr(data):
"""Check SNR according to the simulation model."""
data_fft = rfft(data)
freqs = rfftfreq(n_time, 1. / 1000.)
sidx = np.where(freqs == signal_f)[0][0]
oidx = list(range(sidx)) + list(range(sidx + 1, len(freqs)))
snr = ((data_fft[:, sidx] * data_fft[:, sidx].conj()).real.sum() /
(data_fft[:, oidx] * data_fft[:, oidx].conj()).real.sum())
return snr
orig_snr = snr(raw[:][0])
with catch_logging() as log:
raw_otp = oversampled_temporal_projection(
raw, duration=2., verbose=True)
otp_2_snr = snr(raw_otp[:][0])
assert otp_2_snr > 3 + orig_snr
assert '1 data chunk' in log.getvalue()
with catch_logging() as log:
raw_otp = oversampled_temporal_projection(
raw, duration=1.2, verbose=True)
otp_1p5_snr = snr(raw_otp[:][0])
assert otp_1p5_snr > 3 + orig_snr
assert '2 data chunks' in log.getvalue()
with catch_logging() as log:
raw_otp = oversampled_temporal_projection(
raw, duration=1., verbose=True)
otp_1_snr = snr(raw_otp[:][0])
assert otp_1_snr > 2 + orig_snr
assert '3 data chunks' in log.getvalue()
# Pure-noise test
raw._data -= signal
raw_otp = oversampled_temporal_projection(raw, 2.)
reduction = (np.linalg.norm(raw[:][0], axis=-1) /
np.linalg.norm(raw_otp[:][0], axis=-1))
assert reduction.min() > 9.
# Degenerate conditions
raw = RawArray(np.zeros((200, 1000)), create_info(200, sfreq, 'eeg'))
with pytest.raises(ValueError): # too short
oversampled_temporal_projection(raw, duration=198. / sfreq)
with pytest.raises(ValueError): # duration invalid
oversampled_temporal_projection(
raw, duration=raw.times[-1] + 2. / raw.info['sfreq'], verbose=True)
raw._data[0, 0] = np.inf
    with pytest.raises(RuntimeError):  # non-finite data
oversampled_temporal_projection(raw, duration=1.)
@testing.requires_testing_data
def test_otp_real():
"""Test OTP on real data."""
for fname in (erm_fname, triux_fname):
raw = read_raw_fif(fname, allow_maxshield='yes').crop(0, 1)
raw.load_data().pick_channels(raw.ch_names[:10])
raw_otp = oversampled_temporal_projection(raw, 1.)
picks = _pick_data_channels(raw.info)
reduction = (np.linalg.norm(raw[picks][0], axis=-1) /
np.linalg.norm(raw_otp[picks][0], axis=-1))
assert reduction.min() > 1
# Handling of acquisition skips
raw = read_raw_fif(skip_fname, preload=True)
raw.pick_channels(raw.ch_names[:10])
raw_otp = oversampled_temporal_projection(raw, duration=1.)
run_tests_if_main()
|
from flask import current_app
from marshmallow import fields, validates_schema, pre_load
from marshmallow import validate
from marshmallow.exceptions import ValidationError
from lemur.schemas import (
PluginInputSchema,
PluginOutputSchema,
ExtensionSchema,
AssociatedAuthoritySchema,
AssociatedRoleSchema,
)
from lemur.users.schemas import UserNestedOutputSchema
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.common import validators, missing
from lemur.common.fields import ArrowDateTime
from lemur.constants import CERTIFICATE_KEY_TYPES
class AuthorityInputSchema(LemurInputSchema):
name = fields.String(required=True)
owner = fields.Email(required=True)
description = fields.String()
common_name = fields.String(required=True, validate=validators.common_name)
validity_start = ArrowDateTime()
validity_end = ArrowDateTime()
validity_years = fields.Integer()
# certificate body fields
organizational_unit = fields.String(
missing=lambda: current_app.config.get("LEMUR_DEFAULT_ORGANIZATIONAL_UNIT")
)
organization = fields.String(
missing=lambda: current_app.config.get("LEMUR_DEFAULT_ORGANIZATION")
)
location = fields.String()
country = fields.String(
missing=lambda: current_app.config.get("LEMUR_DEFAULT_COUNTRY")
)
state = fields.String(missing=lambda: current_app.config.get("LEMUR_DEFAULT_STATE"))
# Creating a String field instead of Email to allow empty value
email = fields.String()
plugin = fields.Nested(PluginInputSchema)
# signing related options
type = fields.String(validate=validate.OneOf(["root", "subca"]), missing="root")
parent = fields.Nested(AssociatedAuthoritySchema)
signing_algorithm = fields.String(
validate=validate.OneOf(["sha256WithRSA", "sha1WithRSA",
"sha256WithECDSA", "SHA384withECDSA", "SHA512withECDSA"]),
missing="sha256WithRSA",
)
key_type = fields.String(
validate=validate.OneOf(CERTIFICATE_KEY_TYPES), missing="RSA2048"
)
key_name = fields.String()
sensitivity = fields.String(
validate=validate.OneOf(["medium", "high"]), missing="medium"
)
serial_number = fields.Integer()
first_serial = fields.Integer(missing=1)
extensions = fields.Nested(ExtensionSchema)
roles = fields.Nested(AssociatedRoleSchema(many=True))
@validates_schema
def validate_dates(self, data):
validators.dates(data)
@validates_schema
def validate_subca(self, data):
if data["type"] == "subca":
if not data.get("parent"):
raise ValidationError(
"If generating a subca, parent 'authority' must be specified."
)
@pre_load
def ensure_dates(self, data):
return missing.convert_validity_years(data)
class AuthorityUpdateSchema(LemurInputSchema):
owner = fields.Email(required=True)
description = fields.String()
active = fields.Boolean(missing=True)
roles = fields.Nested(AssociatedRoleSchema(many=True))
class RootAuthorityCertificateOutputSchema(LemurOutputSchema):
__envelope__ = False
id = fields.Integer()
active = fields.Boolean()
bits = fields.Integer()
body = fields.String()
chain = fields.String()
description = fields.String()
name = fields.String()
cn = fields.String()
not_after = fields.DateTime()
not_before = fields.DateTime()
owner = fields.Email()
status = fields.Boolean()
user = fields.Nested(UserNestedOutputSchema)
class AuthorityOutputSchema(LemurOutputSchema):
id = fields.Integer()
description = fields.String()
name = fields.String()
owner = fields.Email()
plugin = fields.Nested(PluginOutputSchema)
active = fields.Boolean()
options = fields.Dict()
roles = fields.List(fields.Nested(AssociatedRoleSchema))
max_issuance_days = fields.Integer()
default_validity_days = fields.Integer()
authority_certificate = fields.Nested(RootAuthorityCertificateOutputSchema)
class AuthorityNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
id = fields.Integer()
description = fields.String()
name = fields.String()
owner = fields.Email()
plugin = fields.Nested(PluginOutputSchema)
active = fields.Boolean()
authority_certificate = fields.Nested(RootAuthorityCertificateOutputSchema, only=["not_after", "not_before"])
is_cab_compliant = fields.Boolean()
max_issuance_days = fields.Integer()
default_validity_days = fields.Integer()
authority_update_schema = AuthorityUpdateSchema()
authority_input_schema = AuthorityInputSchema()
authority_output_schema = AuthorityOutputSchema()
authorities_output_schema = AuthorityOutputSchema(many=True)
|
from datetime import timedelta
import logging
import os
from sense_hat import SenseHat
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DISPLAY_OPTIONS,
CONF_NAME,
PERCENTAGE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "sensehat"
CONF_IS_HAT_ATTACHED = "is_hat_attached"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
SENSOR_TYPES = {
"temperature": ["temperature", TEMP_CELSIUS],
"humidity": ["humidity", PERCENTAGE],
"pressure": ["pressure", "mb"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DISPLAY_OPTIONS, default=list(SENSOR_TYPES)): [
vol.In(SENSOR_TYPES)
],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_IS_HAT_ATTACHED, default=True): cv.boolean,
}
)
def get_cpu_temp():
"""Get CPU temperature."""
res = os.popen("vcgencmd measure_temp").readline()
t_cpu = float(res.replace("temp=", "").replace("'C\n", ""))
return t_cpu
def get_average(temp_base):
"""Use moving average to get better readings."""
if not hasattr(get_average, "temp"):
get_average.temp = [temp_base, temp_base, temp_base]
get_average.temp[2] = get_average.temp[1]
get_average.temp[1] = get_average.temp[0]
get_average.temp[0] = temp_base
temp_avg = (get_average.temp[0] + get_average.temp[1] + get_average.temp[2]) / 3
return temp_avg
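# Worked example of the three-sample moving average above (values are illustrative):
# successive calls get_average(20.0), get_average(23.0), get_average(26.0) keep the
# window [26.0, 23.0, 20.0] on the function attribute and return 20.0, 21.0 and 23.0.
# Note the window lives on the function object itself, so it is shared by all callers.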
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Sense HAT sensor platform."""
data = SenseHatData(config.get(CONF_IS_HAT_ATTACHED))
dev = []
for variable in config[CONF_DISPLAY_OPTIONS]:
dev.append(SenseHatSensor(data, variable))
add_entities(dev, True)
class SenseHatSensor(Entity):
"""Representation of a Sense HAT sensor."""
def __init__(self, data, sensor_types):
"""Initialize the sensor."""
self.data = data
self._name = SENSOR_TYPES[sensor_types][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_types][1]
self.type = sensor_types
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
if not self.data.humidity:
_LOGGER.error("Don't receive data")
return
if self.type == "temperature":
self._state = self.data.temperature
if self.type == "humidity":
self._state = self.data.humidity
if self.type == "pressure":
self._state = self.data.pressure
class SenseHatData:
"""Get the latest data and update."""
def __init__(self, is_hat_attached):
"""Initialize the data object."""
self.temperature = None
self.humidity = None
self.pressure = None
self.is_hat_attached = is_hat_attached
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Sense HAT."""
sense = SenseHat()
temp_from_h = sense.get_temperature_from_humidity()
temp_from_p = sense.get_temperature_from_pressure()
t_total = (temp_from_h + temp_from_p) / 2
if self.is_hat_attached:
t_cpu = get_cpu_temp()
t_correct = t_total - ((t_cpu - t_total) / 1.5)
t_correct = get_average(t_correct)
else:
t_correct = get_average(t_total)
self.temperature = t_correct
self.humidity = sense.get_humidity()
self.pressure = sense.get_pressure()
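# Worked example of the CPU-heat correction in update() (numbers are illustrative):
# with a sensor average t_total = 30.0 C and a CPU reading t_cpu = 50.0 C, the
# corrected value is 30.0 - (50.0 - 30.0) / 1.5, about 16.7 C, before it is smoothed
# by get_average(); when the HAT is mounted away from the Pi (is_hat_attached=False)
# the raw sensor average is smoothed directly.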
|
import re
from contextlib import contextmanager
from plumbum.commands import CommandNotFound, shquote, ConcreteCommand
from plumbum.lib import _setdoc, ProcInfo, six
from plumbum.machines.local import LocalPath
from tempfile import NamedTemporaryFile
from plumbum.machines.base import BaseMachine
from plumbum.machines.env import BaseEnv
from plumbum.path.remote import RemotePath, RemoteWorkdir, StatRes
class RemoteEnv(BaseEnv):
"""The remote machine's environment; exposes a dict-like interface"""
__slots__ = ["_orig", "remote"]
def __init__(self, remote):
self.remote = remote
session = remote._session
# GNU env has a -0 argument; use it if present. Otherwise,
# fall back to calling printenv on each (possible) variable
# from plain env.
env0 = session.run("env -0; echo")
if env0[0] == 0 and not env0[2].rstrip():
self._curr = dict(
line.split('=', 1) for line in env0[1].split('\x00')
if '=' in line)
else:
lines = session.run("env; echo")[1].splitlines()
split = (line.split('=', 1) for line in lines)
keys = (line[0] for line in split if len(line) > 1)
runs = ((key, session.run('printenv "%s"; echo' % key))
for key in keys)
self._curr = dict(
(key, run[1].rstrip('\n')) for (key, run) in runs
if run[0] == 0 and run[1].rstrip('\n') and not run[2])
self._orig = self._curr.copy()
BaseEnv.__init__(self, self.remote.path, ":")
@_setdoc(BaseEnv)
def __delitem__(self, name):
BaseEnv.__delitem__(self, name)
self.remote._session.run("unset %s" % (name, ))
@_setdoc(BaseEnv)
def __setitem__(self, name, value):
BaseEnv.__setitem__(self, name, value)
self.remote._session.run("export %s=%s" % (name, shquote(value)))
@_setdoc(BaseEnv)
def pop(self, name, *default):
BaseEnv.pop(self, name, *default)
self.remote._session.run("unset %s" % (name, ))
@_setdoc(BaseEnv)
def update(self, *args, **kwargs):
BaseEnv.update(self, *args, **kwargs)
self.remote._session.run("export " + " ".join(
"%s=%s" % (k, shquote(v)) for k, v in self.getdict().items()))
def expand(self, expr):
"""Expands any environment variables and home shortcuts found in ``expr``
(like ``os.path.expanduser`` combined with ``os.path.expandvars``)
:param expr: An expression containing environment variables (as ``$FOO``) or
home shortcuts (as ``~/.bashrc``)
:returns: The expanded string"""
return self.remote.expand(expr)
def expanduser(self, expr):
"""Expand home shortcuts (e.g., ``~/foo/bar`` or ``~john/foo/bar``)
:param expr: An expression containing home shortcuts
:returns: The expanded string"""
return self.remote.expanduser(expr)
# def clear(self):
# BaseEnv.clear(self, *args, **kwargs)
# self.remote._session.run("export %s" % " ".join("%s=%s" % (k, v) for k, v in self.getdict()))
def getdelta(self):
"""Returns the difference between the this environment and the original environment of
the remote machine"""
self._curr["PATH"] = self.path.join()
delta = {}
for k, v in self._curr.items():
if k not in self._orig:
delta[k] = str(v)
for k, v in self._orig.items():
if k not in self._curr:
delta[k] = ""
else:
if v != self._curr[k]:
delta[k] = self._curr[k]
return delta
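    # Illustrative behaviour of getdelta() (variable names are hypothetical): after
    #     rem.env["FOO"] = "bar"    # newly added  -> delta["FOO"] == "bar"
    #     del rem.env["TERM"]       # removed      -> delta["TERM"] == ""
    # the delta also gains a "PATH" entry whenever the joined path differs from the
    # machine's original PATH.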
class RemoteCommand(ConcreteCommand):
__slots__ = ["remote", "executable"]
QUOTE_LEVEL = 1
def __init__(self, remote, executable, encoding="auto"):
self.remote = remote
ConcreteCommand.__init__(
self, executable, remote.custom_encoding
if encoding == "auto" else encoding)
@property
def machine(self):
return self.remote
def __repr__(self):
return "RemoteCommand(%r, %r)" % (self.remote, self.executable)
def popen(self, args=(), **kwargs):
return self.remote.popen(self[args], **kwargs)
def nohup(self, cwd='.', stdout='nohup.out', stderr=None, append=True):
"""Runs a command detached."""
return self.machine.daemonic_popen(self, cwd, stdout, stderr, append)
class ClosedRemoteMachine(Exception):
pass
class ClosedRemote(object):
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
self._obj = obj
def close(self):
pass
def __getattr__(self, name):
raise ClosedRemoteMachine("%r has been closed" % (self._obj, ))
class BaseRemoteMachine(BaseMachine):
"""Represents a *remote machine*; serves as an entry point to everything related to that
remote machine, such as working directory and environment manipulation, command creation,
etc.
Attributes:
* ``cwd`` - the remote working directory
* ``env`` - the remote environment
* ``custom_encoding`` - the remote machine's default encoding (assumed to be UTF8)
* ``connect_timeout`` - the connection timeout
    There is also a ``_cwd`` attribute that caches the working directory once ``cwd`` has been accessed (it is deleted when the cwd is changed).
"""
# allow inheritors to override the RemoteCommand class
RemoteCommand = RemoteCommand
@property
def cwd(self):
if not hasattr(self, '_cwd'):
self._cwd = RemoteWorkdir(self)
return self._cwd
def __init__(self, encoding="utf8", connect_timeout=10, new_session=False):
self.custom_encoding = encoding
self.connect_timeout = connect_timeout
self._session = self.session(new_session=new_session)
self.uname = self._get_uname()
self.env = RemoteEnv(self)
self._python = None
def _get_uname(self):
rc, out, _ = self._session.run("uname", retcode=None)
if rc == 0:
return out.strip()
else:
rc, out, _ = self._session.run(
"python -c 'import platform;print(platform.uname()[0])'",
retcode=None)
if rc == 0:
return out.strip()
else:
# all POSIX systems should have uname. make an educated guess it's Windows
return "Windows"
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self)
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def close(self):
"""closes the connection to the remote machine; all paths and programs will
become defunct"""
self._session.close()
self._session = ClosedRemote(self)
def path(self, *parts):
"""A factory for :class:`RemotePaths <plumbum.path.remote.RemotePath>`.
Usage: ``p = rem.path("/usr", "lib", "python2.7")``
"""
parts2 = [str(self.cwd)]
for p in parts:
if isinstance(p, LocalPath):
raise TypeError("Cannot construct RemotePath from %r" % (p, ))
parts2.append(self.expanduser(str(p)))
return RemotePath(self, *parts2)
def which(self, progname):
"""Looks up a program in the ``PATH``. If the program is not found, raises
:class:`CommandNotFound <plumbum.commands.CommandNotFound>`
:param progname: The program's name. Note that if underscores (``_``) are present
in the name, and the exact name is not found, they will be replaced
in turn by hyphens (``-``) then periods (``.``), and the name will
be looked up again for each alternative
        :returns: A :class:`RemotePath <plumbum.path.remote.RemotePath>`
"""
alternatives = [progname]
if "_" in progname:
alternatives.append(progname.replace("_", "-"))
alternatives.append(progname.replace("_", "."))
for name in alternatives:
for p in self.env.path:
fn = p / name
if fn.access("x") and not fn.is_dir():
return fn
raise CommandNotFound(progname, self.env.path)
def __getitem__(self, cmd):
"""Returns a `Command` object representing the given program. ``cmd`` can be a string or
a :class:`RemotePath <plumbum.path.remote.RemotePath>`; if it is a path, a command
representing this path will be returned; otherwise, the program name will be looked up in
the system's ``PATH`` (using ``which``). Usage::
r_ls = rem["ls"]
"""
if isinstance(cmd, RemotePath):
if cmd.remote is self:
return self.RemoteCommand(self, cmd)
else:
raise TypeError(
"Given path does not belong to this remote machine: %r" %
(cmd, ))
elif not isinstance(cmd, LocalPath):
if "/" in cmd or "\\" in cmd:
return self.RemoteCommand(self, self.path(cmd))
else:
return self.RemoteCommand(self, self.which(cmd))
else:
raise TypeError("cmd must not be a LocalPath: %r" % (cmd, ))
@property
def python(self):
"""A command that represents the default remote python interpreter"""
if not self._python:
self._python = self["python"]
return self._python
def session(self, isatty=False, new_session=False):
"""Creates a new :class:`ShellSession <plumbum.session.ShellSession>` object; this invokes the user's
shell on the remote machine and executes commands on it over stdin/stdout/stderr"""
raise NotImplementedError()
def download(self, src, dst):
"""Downloads a remote file/directory (``src``) to a local destination (``dst``).
``src`` must be a string or a :class:`RemotePath <plumbum.path.remote.RemotePath>`
pointing to this remote machine, and ``dst`` must be a string or a
:class:`LocalPath <plumbum.machines.local.LocalPath>`"""
raise NotImplementedError()
def upload(self, src, dst):
"""Uploads a local file/directory (``src``) to a remote destination (``dst``).
``src`` must be a string or a :class:`LocalPath <plumbum.machines.local.LocalPath>`,
and ``dst`` must be a string or a :class:`RemotePath <plumbum.path.remote.RemotePath>`
pointing to this remote machine"""
raise NotImplementedError()
def popen(self, args, **kwargs):
"""Spawns the given command on the remote machine, returning a ``Popen``-like object;
do not use this method directly, unless you need "low-level" control on the remote
process"""
raise NotImplementedError()
def list_processes(self):
"""
Returns information about all running processes (on POSIX systems: using ``ps``)
.. versionadded:: 1.3
"""
ps = self["ps"]
lines = ps("-e", "-o", "pid,uid,stat,args").splitlines()
lines.pop(0) # header
for line in lines:
parts = line.strip().split()
yield ProcInfo(
int(parts[0]), int(parts[1]), parts[2], " ".join(parts[3:]))
def pgrep(self, pattern):
"""
Process grep: return information about all processes whose command-line args match the given regex pattern
"""
pat = re.compile(pattern)
for procinfo in self.list_processes():
if pat.search(procinfo.args):
yield procinfo
@contextmanager
def tempdir(self):
"""A context manager that creates a remote temporary directory, which is removed when
the context exits"""
_, out, _ = self._session.run("mktemp -d tmp.XXXXXXXXXX")
dir = self.path(out.strip()) # @ReservedAssignment
try:
yield dir
finally:
dir.delete()
#
# Path implementation
#
def _path_listdir(self, fn):
files = self._session.run("ls -a %s" % (shquote(fn), ))[1].splitlines()
files.remove(".")
files.remove("..")
return files
def _path_glob(self, fn, pattern):
        # shquote does not work here due to the way bash loops use space as a separator
pattern = pattern.replace(" ", r"\ ")
fn = fn.replace(" ", r"\ ")
matches = self._session.run(
r'for fn in {0}/{1}; do echo $fn; done'.format(
fn, pattern))[1].splitlines()
if len(matches) == 1 and not self._path_stat(matches[0]):
return [] # pattern expansion failed
return matches
def _path_getuid(self, fn):
stat_cmd = "stat -c '%u,%U' " if self.uname not in (
'Darwin', 'FreeBSD') else "stat -f '%u,%Su' "
return self._session.run(stat_cmd + shquote(fn))[1].strip().split(",")
def _path_getgid(self, fn):
stat_cmd = "stat -c '%g,%G' " if self.uname not in (
'Darwin', 'FreeBSD') else "stat -f '%g,%Sg' "
return self._session.run(stat_cmd + shquote(fn))[1].strip().split(",")
def _path_stat(self, fn):
if self.uname not in ('Darwin', 'FreeBSD'):
stat_cmd = "stat -c '%F,%f,%i,%d,%h,%u,%g,%s,%X,%Y,%Z' "
else:
stat_cmd = "stat -f '%HT,%Xp,%i,%d,%l,%u,%g,%z,%a,%m,%c' "
rc, out, _ = self._session.run(stat_cmd + shquote(fn), retcode=None)
if rc != 0:
return None
statres = out.strip().split(",")
text_mode = statres.pop(0).lower()
res = StatRes((int(statres[0], 16), ) + tuple(
int(sr) for sr in statres[1:]))
res.text_mode = text_mode
return res
def _path_delete(self, fn):
self._session.run("rm -rf %s" % (shquote(fn), ))
def _path_move(self, src, dst):
self._session.run("mv %s %s" % (shquote(src), shquote(dst)))
def _path_copy(self, src, dst):
self._session.run("cp -r %s %s" % (shquote(src), shquote(dst)))
def _path_mkdir(self, fn, mode=None, minus_p=True):
p_str = "-p " if minus_p else ""
cmd = "mkdir %s%s" % (p_str, shquote(fn))
self._session.run(cmd)
def _path_chmod(self, mode, fn):
self._session.run("chmod %o %s" % (mode, shquote(fn)))
def _path_touch(self, path):
self._session.run("touch {path}".format(path=path))
def _path_chown(self, fn, owner, group, recursive):
args = ["chown"]
if recursive:
args.append("-R")
if owner is not None and group is not None:
args.append("%s:%s" % (owner, group))
elif owner is not None:
args.append(str(owner))
elif group is not None:
args.append(":%s" % (group, ))
args.append(shquote(fn))
self._session.run(" ".join(args))
def _path_read(self, fn):
data = self["cat"](fn)
if self.custom_encoding and isinstance(data, six.unicode_type):
data = data.encode(self.custom_encoding)
return data
def _path_write(self, fn, data):
if self.custom_encoding and isinstance(data, six.unicode_type):
data = data.encode(self.custom_encoding)
with NamedTemporaryFile() as f:
f.write(data)
f.flush()
f.seek(0)
self.upload(f.name, fn)
def _path_link(self, src, dst, symlink):
self._session.run(
"ln %s %s %s" % ("-s"
if symlink else "", shquote(src), shquote(dst)))
@_setdoc(BaseEnv)
def expand(self, expr):
return self._session.run("echo %s" % (expr, ))[1].strip()
@_setdoc(BaseEnv)
def expanduser(self, expr):
if not any(part.startswith("~") for part in expr.split("/")):
return expr
# we escape all $ signs to avoid expanding env-vars
return self._session.run(
"echo %s" % (expr.replace("$", "\\$"), ))[1].strip()
|
import numpy as np
from ... import pick_types
from ...io import BaseRaw
from ...utils import _validate_type, verbose
from ..nirs import _channel_frequencies, _check_channels_ordered
from ...filter import filter_data
@verbose
def scalp_coupling_index(raw, l_freq=0.7, h_freq=1.5,
l_trans_bandwidth=0.3, h_trans_bandwidth=0.3,
verbose=False):
r"""Calculate scalp coupling index.
This function calculates the scalp coupling index
:footcite:`pollonini2014auditory`. This is a measure of the quality of the
connection between the optode and the scalp.
Parameters
----------
raw : instance of Raw
The raw data.
%(l_freq)s
%(h_freq)s
%(l_trans_bandwidth)s
%(h_trans_bandwidth)s
%(verbose)s
Returns
-------
sci : array of float
Array containing scalp coupling index for each channel.
References
----------
.. footbibliography::
"""
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
if not len(pick_types(raw.info, fnirs='fnirs_od')):
raise RuntimeError('Scalp coupling index '
'should be run on optical density data.')
freqs = np.unique(_channel_frequencies(raw))
picks = _check_channels_ordered(raw, freqs)
filtered_data = filter_data(raw._data, raw.info['sfreq'], l_freq, h_freq,
picks=picks, verbose=verbose,
l_trans_bandwidth=l_trans_bandwidth,
h_trans_bandwidth=h_trans_bandwidth)
sci = np.zeros(picks.shape)
for ii in picks[::2]:
c = np.corrcoef(filtered_data[ii], filtered_data[ii + 1])[0][1]
sci[ii] = c
sci[ii + 1] = c
return sci
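# Hedged usage sketch; the directory name and the 0.5 threshold are placeholders, and
# read_raw_nirx / optical_density are the MNE-Python helpers normally used to obtain
# the optical-density input this function requires.
#
#     import mne
#     raw = mne.io.read_raw_nirx("nirx_recording_dir", preload=True)
#     raw_od = mne.preprocessing.nirs.optical_density(raw)
#     sci = scalp_coupling_index(raw_od)
#     raw_od.info["bads"] = [raw_od.ch_names[i] for i in np.where(sci < 0.5)[0]]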
|
import asyncio
from typing import Any, Dict, Optional
from urllib.parse import urlparse
import aiohue
from aiohue.discovery import discover_nupnp, normalize_bridge_id
import async_timeout
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.components import ssdp
from homeassistant.const import CONF_HOST, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .bridge import authenticate_bridge
from .const import ( # pylint: disable=unused-import
CONF_ALLOW_HUE_GROUPS,
CONF_ALLOW_UNREACHABLE,
DOMAIN,
LOGGER,
)
from .errors import AuthenticationRequired, CannotConnect
HUE_MANUFACTURERURL = "http://www.philips.com"
HUE_IGNORED_BRIDGE_NAMES = ["Home Assistant Bridge", "Espalexa"]
HUE_MANUAL_BRIDGE_ID = "manual"
class HueFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Hue config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return HueOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Hue flow."""
self.bridge: Optional[aiohue.Bridge] = None
self.discovered_bridges: Optional[Dict[str, aiohue.Bridge]] = None
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
# This is for backwards compatibility.
return await self.async_step_init(user_input)
@core.callback
def _async_get_bridge(self, host: str, bridge_id: Optional[str] = None):
"""Return a bridge object."""
if bridge_id is not None:
bridge_id = normalize_bridge_id(bridge_id)
return aiohue.Bridge(
host,
websession=aiohttp_client.async_get_clientsession(self.hass),
bridge_id=bridge_id,
)
async def async_step_init(self, user_input=None):
"""Handle a flow start."""
# Check if user chooses manual entry
if user_input is not None and user_input["id"] == HUE_MANUAL_BRIDGE_ID:
return await self.async_step_manual()
if (
user_input is not None
and self.discovered_bridges is not None
and user_input["id"] in self.discovered_bridges
):
self.bridge = self.discovered_bridges[user_input["id"]]
await self.async_set_unique_id(self.bridge.id, raise_on_progress=False)
return await self.async_step_link()
# Find / discover bridges
try:
with async_timeout.timeout(5):
bridges = await discover_nupnp(
websession=aiohttp_client.async_get_clientsession(self.hass)
)
except asyncio.TimeoutError:
return self.async_abort(reason="discover_timeout")
if bridges:
# Find already configured hosts
already_configured = self._async_current_ids(False)
bridges = [
bridge for bridge in bridges if bridge.id not in already_configured
]
self.discovered_bridges = {bridge.id: bridge for bridge in bridges}
if not self.discovered_bridges:
return await self.async_step_manual()
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required("id"): vol.In(
{
**{bridge.id: bridge.host for bridge in bridges},
HUE_MANUAL_BRIDGE_ID: "Manually add a Hue Bridge",
}
)
}
),
)
async def async_step_manual(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle manual bridge setup."""
if user_input is None:
return self.async_show_form(
step_id="manual",
data_schema=vol.Schema({vol.Required(CONF_HOST): str}),
)
if any(
user_input["host"] == entry.data.get("host")
for entry in self._async_current_entries()
):
return self.async_abort(reason="already_configured")
self.bridge = self._async_get_bridge(user_input[CONF_HOST])
return await self.async_step_link()
async def async_step_link(self, user_input=None):
"""Attempt to link with the Hue bridge.
Given a configured host, will ask the user to press the link button
to connect to the bridge.
"""
if user_input is None:
return self.async_show_form(step_id="link")
bridge = self.bridge
assert bridge is not None
errors = {}
try:
await authenticate_bridge(self.hass, bridge)
except AuthenticationRequired:
errors["base"] = "register_failed"
except CannotConnect:
LOGGER.error("Error connecting to the Hue bridge at %s", bridge.host)
return self.async_abort(reason="cannot_connect")
except Exception: # pylint: disable=broad-except
LOGGER.exception(
"Unknown error connecting with Hue bridge at %s", bridge.host
)
errors["base"] = "linking"
if errors:
return self.async_show_form(step_id="link", errors=errors)
# Can happen if we come from import or manual entry
if self.unique_id is None:
await self.async_set_unique_id(
normalize_bridge_id(bridge.id), raise_on_progress=False
)
return self.async_create_entry(
title=bridge.config.name,
data={CONF_HOST: bridge.host, CONF_USERNAME: bridge.username},
)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered Hue bridge.
This flow is triggered by the SSDP component. It will check if the
host is already configured and delegate to the import step if not.
"""
# Filter out non-Hue bridges #1
if discovery_info.get(ssdp.ATTR_UPNP_MANUFACTURER_URL) != HUE_MANUFACTURERURL:
return self.async_abort(reason="not_hue_bridge")
# Filter out non-Hue bridges #2
if any(
name in discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "")
for name in HUE_IGNORED_BRIDGE_NAMES
):
return self.async_abort(reason="not_hue_bridge")
if (
ssdp.ATTR_SSDP_LOCATION not in discovery_info
or ssdp.ATTR_UPNP_SERIAL not in discovery_info
):
return self.async_abort(reason="not_hue_bridge")
host = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname
bridge = self._async_get_bridge(host, discovery_info[ssdp.ATTR_UPNP_SERIAL])
await self.async_set_unique_id(bridge.id)
self._abort_if_unique_id_configured(
updates={CONF_HOST: bridge.host}, reload_on_update=False
)
self.bridge = bridge
return await self.async_step_link()
async def async_step_import(self, import_info):
"""Import a new bridge as a config entry.
This flow is triggered by `async_setup` for both configured and
discovered bridges. Triggered for any bridge that does not have a
config entry yet (based on host).
This flow is also triggered by `async_step_discovery`.
"""
# Check if host exists, abort if so.
if any(
import_info["host"] == entry.data.get("host")
for entry in self._async_current_entries()
):
return self.async_abort(reason="already_configured")
self.bridge = self._async_get_bridge(import_info["host"])
return await self.async_step_link()
class HueOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Hue options."""
def __init__(self, config_entry):
"""Initialize Hue options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Manage Hue options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALLOW_HUE_GROUPS,
default=self.config_entry.options.get(
CONF_ALLOW_HUE_GROUPS, False
),
): bool,
vol.Optional(
CONF_ALLOW_UNREACHABLE,
default=self.config_entry.options.get(
CONF_ALLOW_UNREACHABLE, False
),
): bool,
}
),
)
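# Hedged illustration of the manual-entry schema built in async_step_manual above; it
# only checks that a "host" string is present, and the address below is a placeholder.
if __name__ == "__main__":
    schema = vol.Schema({vol.Required(CONF_HOST): str})
    print(schema({CONF_HOST: "192.168.1.2"}))  # passes validation unchanged
    try:
        schema({})
    except vol.Invalid as err:  # required key missing
        print(err)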
|
import os
import glob
import diamond.collector
class KSMCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(KSMCollector, self).get_default_config_help()
config_help.update({
'ksm_path': "location where KSM kernel data can be found",
})
return config_help
def get_default_config(self):
"""
Return default config.
path: Graphite path output
ksm_path: location where KSM kernel data can be found
"""
config = super(KSMCollector, self).get_default_config()
config.update({
'path': 'ksm',
'ksm_path': '/sys/kernel/mm/ksm'})
return config
def collect(self):
for item in glob.glob(os.path.join(self.config['ksm_path'], "*")):
if os.access(item, os.R_OK):
filehandle = open(item)
try:
self.publish(os.path.basename(item),
float(filehandle.readline().rstrip()))
except ValueError:
pass
filehandle.close()
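# Hedged standalone sketch of the same read loop, runnable outside diamond; it assumes
# the default /sys/kernel/mm/ksm layout and prints instead of publishing metrics.
if __name__ == "__main__":
    for item in glob.glob("/sys/kernel/mm/ksm/*"):
        if os.access(item, os.R_OK):
            with open(item) as handle:
                try:
                    print("ksm.%s" % os.path.basename(item),
                          float(handle.readline().rstrip()))
                except ValueError:
                    pass  # skip entries that are not plain numbers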
|
import os
import re
import pytest
from nikola import __main__ as nikola
def test_simple_config(simple_config, metadata_option):
"""Check whether configuration-files without ineritance are interpreted correctly."""
assert simple_config[metadata_option]["ID"] == "conf"
def test_inherited_config(simple_config, metadata_option, complex_config):
"""Check whether configuration-files with ineritance are interpreted correctly."""
check_base_equality(simple_config, metadata_option, complex_config)
assert complex_config[metadata_option]["ID"] == "prod"
def test_config_with_illegal_filename(
simple_config, metadata_option, complex_filename_config
):
"""Check whether files with illegal module-name characters can be set as config-files, too."""
check_base_equality(simple_config, metadata_option, complex_filename_config)
assert complex_filename_config[metadata_option]["ID"] == "illegal"
@pytest.fixture(scope="module")
def simple_config(data_dir):
nikola.main(["--conf=" + os.path.join(data_dir, "conf.py")])
return nikola.config
@pytest.fixture(scope="module")
def data_dir(test_dir):
return os.path.join(test_dir, "data", "test_config")
@pytest.fixture
def metadata_option():
return "ADDITIONAL_METADATA"
@pytest.fixture(scope="module")
def complex_config(data_dir):
nikola.main(["--conf=" + os.path.join(data_dir, "prod.py")])
return nikola.config
@pytest.fixture(scope="module")
def complex_filename_config(data_dir):
config_path = os.path.join(
data_dir, "config.with+illegal(module)name.characters.py"
)
nikola.main(["--conf=" + config_path])
return nikola.config
def check_base_equality(base_config, metadata_option, config):
"""Check whether the specified `config` equals the base config."""
for option in base_config.keys():
if re.match("^[A-Z]+(_[A-Z]+)*$", option) and option != metadata_option:
assert base_config[option] == config[option]
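# Hedged sketch of the inheritance these tests rely on; the real files live under the
# test data directory and these contents are assumptions, not the actual fixtures.
#
#     # conf.py
#     ADDITIONAL_METADATA = {"ID": "conf"}
#
#     # prod.py -- inherits every option from conf.py, then overrides one of them
#     from conf import *  # noqa: F401,F403
#     ADDITIONAL_METADATA = {"ID": "prod"}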
|
from asyncio import run_coroutine_threadsafe
from pysmappee import api
from homeassistant import config_entries, core
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
class ConfigEntrySmappeeApi(api.SmappeeApi):
"""Provide Smappee authentication tied to an OAuth2 based config entry."""
def __init__(
self,
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
):
"""Initialize Smappee Auth."""
self.hass = hass
self.config_entry = config_entry
self.session = config_entry_oauth2_flow.OAuth2Session(
hass, config_entry, implementation
)
platform_to_farm = {
"PRODUCTION": 1,
"ACCEPTANCE": 2,
"DEVELOPMENT": 3,
}
super().__init__(
None,
None,
token=self.session.token,
farm=platform_to_farm[hass.data[DOMAIN][CONF_PLATFORM]],
)
def refresh_tokens(self) -> dict:
"""Refresh and return new Smappee tokens using Home Assistant OAuth2 session."""
run_coroutine_threadsafe(
self.session.async_ensure_token_valid(), self.hass.loop
).result()
return self.session.token
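# Hedged standalone illustration of the sync-to-async bridge used in refresh_tokens():
# a coroutine is scheduled onto an event loop running in another thread while the
# calling thread blocks on the result; the coroutine here is a stand-in, not Smappee code.
if __name__ == "__main__":
    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def fetch_token():
        return {"access_token": "example"}

    print(run_coroutine_threadsafe(fetch_token(), loop).result())
    loop.call_soon_threadsafe(loop.stop)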
|
import logging
import requests
from starlingbank import StarlingAccount
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
BALANCE_TYPES = ["cleared_balance", "effective_balance"]
CONF_ACCOUNTS = "accounts"
CONF_BALANCE_TYPES = "balance_types"
CONF_SANDBOX = "sandbox"
DEFAULT_SANDBOX = False
DEFAULT_ACCOUNT_NAME = "Starling"
ICON = "mdi:currency-gbp"
ACCOUNT_SCHEMA = vol.Schema(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_BALANCE_TYPES, default=BALANCE_TYPES): vol.All(
cv.ensure_list, [vol.In(BALANCE_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_ACCOUNT_NAME): cv.string,
vol.Optional(CONF_SANDBOX, default=DEFAULT_SANDBOX): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_ACCOUNTS): vol.Schema([ACCOUNT_SCHEMA])}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Sterling Bank sensor platform."""
sensors = []
for account in config[CONF_ACCOUNTS]:
try:
starling_account = StarlingAccount(
account[CONF_ACCESS_TOKEN], sandbox=account[CONF_SANDBOX]
)
for balance_type in account[CONF_BALANCE_TYPES]:
sensors.append(
StarlingBalanceSensor(
starling_account, account[CONF_NAME], balance_type
)
)
except requests.exceptions.HTTPError as error:
_LOGGER.error(
"Unable to set up Starling account '%s': %s", account[CONF_NAME], error
)
add_devices(sensors, True)
class StarlingBalanceSensor(Entity):
"""Representation of a Starling balance sensor."""
def __init__(self, starling_account, account_name, balance_data_type):
"""Initialize the sensor."""
self._starling_account = starling_account
self._balance_data_type = balance_data_type
self._state = None
self._account_name = account_name
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(
self._account_name, self._balance_data_type.replace("_", " ").capitalize()
)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._starling_account.currency
@property
def icon(self):
"""Return the entity icon."""
return ICON
def update(self):
"""Fetch new state data for the sensor."""
self._starling_account.update_balance_data()
if self._balance_data_type == "cleared_balance":
self._state = self._starling_account.cleared_balance / 100
elif self._balance_data_type == "effective_balance":
self._state = self._starling_account.effective_balance / 100
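# Hedged illustration of what ACCOUNT_SCHEMA accepts; the token is a placeholder and the
# omitted options fall back to the defaults declared above (both balance types, the
# "Starling" account name and sandbox=False).
if __name__ == "__main__":
    print(ACCOUNT_SCHEMA({CONF_ACCESS_TOKEN: "YOUR-ACCESS-TOKEN"}))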
|
import unittest
import numpy as np
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import CUBKeypointDataset
from chainercv.utils import assert_is_bbox
from chainercv.utils import assert_is_point_dataset
@testing.parameterize(*testing.product({
'return_bbox': [True, False],
'return_prob_map': [True, False]}
))
class TestCUBKeypointDataset(unittest.TestCase):
def setUp(self):
self.dataset = CUBKeypointDataset(return_bbox=self.return_bbox,
return_prob_map=self.return_prob_map)
@attr.slow
def test_cub_point_dataset(self):
assert_is_point_dataset(
self.dataset, n_point=15, n_example=10)
idx = np.random.choice(np.arange(10))
if self.return_bbox:
if self.return_prob_map:
bbox = self.dataset[idx][-2]
else:
bbox = self.dataset[idx][-1]
assert_is_bbox(bbox)
if self.return_prob_map:
img = self.dataset[idx][0]
prob_map = self.dataset[idx][-1]
self.assertEqual(prob_map.dtype, np.float32)
self.assertEqual(prob_map.shape, img.shape[1:])
self.assertTrue(np.min(prob_map) >= 0)
self.assertTrue(np.max(prob_map) <= 1)
testing.run_module(__name__, __file__)
|
from django.db import transaction
from weblate.machinery.base import get_machinery_language
from weblate.memory.models import Memory
from weblate.utils.celery import app
from weblate.utils.state import STATE_TRANSLATED
@app.task(trail=False)
def import_memory(project_id):
from weblate.trans.models import Project, Unit
project = Project.objects.get(pk=project_id)
for component in project.component_set.iterator():
with transaction.atomic():
units = Unit.objects.filter(
translation__component=component, state__gte=STATE_TRANSLATED
)
if not component.intermediate:
units = units.exclude(
translation__language_id=component.source_language_id
)
for unit in units.prefetch_related("translation", "translation__language"):
update_memory(None, unit, component, project)
@app.task(trail=False)
def handle_unit_translation_change(unit_id, user_id=None):
from weblate.auth.models import User
from weblate.trans.models import Unit
user = None if user_id is None else User.objects.get(pk=user_id)
unit = Unit.objects.get(pk=unit_id)
update_memory(user, unit)
def update_memory(user, unit, component=None, project=None):
component = component or unit.translation.component
project = project or component.project
params = {
"source_language": get_machinery_language(component.source_language),
"target_language": get_machinery_language(unit.translation.language),
"source": unit.source,
"target": unit.target,
"origin": component.full_slug,
}
add_project = True
add_shared = project.contribute_shared_tm
add_user = user is not None
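    # A unit can therefore end up in as many as three scopes: per-project, shared (when
    # the project contributes to the shared translation memory) and per-user; each flag
    # below is cleared once an identical non-file entry already exists in that scope.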
# Check matching entries in memory
for matching in Memory.objects.filter(from_file=False, **params):
if (
matching.user_id is None
and matching.project_id == project.id
and not matching.shared
):
add_project = False
elif (
add_shared
and matching.user_id is None
and matching.project_id is None
and matching.shared
):
add_shared = False
elif (
add_user
and matching.user_id == user.id
and matching.project_id is None
and not matching.shared
):
add_user = False
if add_project:
Memory.objects.create(
user=None, project=project, from_file=False, shared=False, **params
)
if add_shared:
Memory.objects.create(
user=None, project=None, from_file=False, shared=True, **params
)
if add_user:
Memory.objects.create(
user=user, project=None, from_file=False, shared=False, **params
)
|
import os
import string
import subprocess
from random import SystemRandom
from urllib.parse import urlparse
from django.conf import settings
from weblate.trans.util import get_clean_env
from weblate.utils.data import data_dir
from weblate.utils.errors import report_error
from weblate.vcs.ssh import SSH_WRAPPER, add_host_key
CACHEDIR = """Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by Weblate
# For information about cache directory tags, see:
# https://bford.info/cachedir/spec.html
"""
class BackupError(Exception):
pass
def make_password(length=50):
generator = SystemRandom()
chars = string.ascii_letters + string.digits + "!@#$%^&*()"
return "".join(generator.choice(chars) for i in range(length))
def tag_cache_dirs():
"""Create CACHEDIR.TAG in our cache dirs to exlude from backups."""
dirs = [
# Fontconfig cache
data_dir("cache", "fonts"),
# Static files (default is inside data)
settings.STATIC_ROOT,
]
# Django file based caches
for cache in settings.CACHES.values():
if cache["BACKEND"] == "django.core.cache.backends.filebased.FileBasedCache":
dirs.append(cache["LOCATION"])
# Create CACHEDIR.TAG in each cache dir
for name in dirs:
tagfile = os.path.join(name, "CACHEDIR.TAG")
if os.path.exists(name) and not os.path.exists(tagfile):
with open(tagfile, "w") as handle:
handle.write(CACHEDIR)
def borg(cmd, env=None):
"""Wrapper to execute borgbackup."""
SSH_WRAPPER.create()
try:
return subprocess.check_output(
["borg", "--rsh", SSH_WRAPPER.filename] + cmd,
stderr=subprocess.STDOUT,
env=get_clean_env(env),
universal_newlines=True,
)
except OSError as error:
report_error()
raise BackupError(f"Could not execute borg program: {error}")
except subprocess.CalledProcessError as error:
report_error(extra_data={"stdout": error.stdout})
raise BackupError(error.stdout)
def initialize(location, passphrase):
"""Initialize repository."""
parsed = urlparse(location)
if parsed.hostname:
add_host_key(None, parsed.hostname, parsed.port)
return borg(
["init", "--encryption", "repokey-blake2", location],
{"BORG_NEW_PASSPHRASE": passphrase},
)
def get_paper_key(location):
"""Get paper key for recovery."""
return borg(["key", "export", "--paper", location])
def backup(location, passphrase):
"""Perform DATA_DIR backup."""
tag_cache_dirs()
return borg(
[
"create",
"--verbose",
"--list",
"--filter",
"AME",
"--stats",
"--exclude-caches",
"--exclude",
"*/.config/borg",
"--compression",
"auto,zstd",
f"{location}::{{now}}",
settings.DATA_DIR,
],
{"BORG_PASSPHRASE": passphrase},
)
def prune(location, passphrase):
"""Prune past backups."""
return borg(
[
"prune",
"--list",
"--keep-daily",
"14",
"--keep-weekly",
"8",
"--keep-monthly",
"6",
location,
],
{"BORG_PASSPHRASE": passphrase},
)
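# Hedged usage sketch; the repository location and passphrase are placeholders and all
# three helpers are defined above (initialize once, then back up and prune regularly).
#
#     location = "ssh://borg@backup.example.com/./weblate"
#     initialize(location, passphrase="long-random-passphrase")
#     backup(location, passphrase="long-random-passphrase")
#     prune(location, passphrase="long-random-passphrase")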
|