from __future__ import print_function
from __future__ import unicode_literals
import re
import inspect
import os
import shutil
from elephas import spark_model, ml_model, hyperparam
from elephas.parameter import client, server
from elephas.utils import functional_utils, rdd_utils, serialization
from elephas.ml import adapter as ml_adapter
from elephas.mllib import adapter as mllib_adapter
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('utf8')
EXCLUDE = {
'SocketClient',
'SocketServer'
}
PAGES = [
{
'page': 'models/spark-model.md',
'classes': [
spark_model.SparkModel
],
'functions': [
spark_model.load_spark_model
],
},
{
'page': 'models/spark-mllib-model.md',
'classes': [
spark_model.SparkMLlibModel
],
'functions': [
spark_model.load_spark_model
],
},
{
'page': 'models/spark-ml-model.md',
'classes': [
ml_model.ElephasEstimator,
ml_model.ElephasTransformer
],
'functions': [
ml_model.load_ml_transformer,
ml_model.load_ml_estimator
],
},
{
'page': 'models/hyper-param-model.md',
'classes': [
hyperparam.HyperParamModel
]
},
{
'page': 'parameter/client.md',
'classes': [
client.BaseParameterClient,
client.HttpClient
]
},
{
'page': 'parameter/server.md',
'classes': [
server.BaseParameterServer,
server.HttpServer
]
},
{
'page': 'utils/functional_utils.md',
'all_module_functions': [functional_utils],
},
{
'page': 'utils/rdd_utils.md',
'all_module_functions': [rdd_utils],
},
{
'page': 'utils/serialization_utils.md',
'all_module_functions': [serialization],
},
{
'page': 'adapters/spark-ml.md',
'all_module_functions': [ml_adapter],
},
{
'page': 'adapters/spark-mllib.md',
'all_module_functions': [mllib_adapter],
},
]
ROOT = 'http://maxpumperla.com/elephas'
def get_function_signature(function, method=True):
wrapped = getattr(function, '_original_function', None)
if wrapped is None:
signature = inspect.getargspec(function)
else:
signature = inspect.getargspec(wrapped)
defaults = signature.defaults
if method:
args = signature.args[1:]
else:
args = signature.args
if defaults:
kwargs = zip(args[-len(defaults):], defaults)
args = args[:-len(defaults)]
else:
kwargs = []
st = '%s.%s(' % (clean_module_name(function.__module__), function.__name__)
for a in args:
st += str(a) + ', '
for a, v in kwargs:
if isinstance(v, str):
v = '\'' + v + '\''
st += str(a) + '=' + str(v) + ', '
if kwargs or args:
signature = st[:-2] + ')'
else:
signature = st + ')'
return signature
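# Illustrative example (a hypothetical function, an assumption rather than actual
# elephas code): for a method defined in elephas.spark_model as
#     def fit(self, rdd, epochs=10, verbose='auto'): ...
# get_function_signature(fit) with the default method=True returns the string
#     "elephas.spark_model.fit(rdd, epochs=10, verbose='auto')"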
def get_class_signature(cls):
try:
class_signature = get_function_signature(cls.__init__)
class_signature = class_signature.replace('__init__', cls.__name__)
except (TypeError, AttributeError):
# in case the class inherits from object and does not
# define __init__
class_signature = "{clean_module_name}.{cls_name}()".format(
clean_module_name=clean_module_name(cls.__module__),
cls_name=cls.__name__
)
return class_signature
def clean_module_name(name):
assert name[:8] == 'elephas.', 'Invalid module name: %s' % name
return name
def class_to_docs_link(cls):
module_name = clean_module_name(cls.__module__)
    module_name = module_name[len('elephas'):]
link = ROOT + module_name.replace('.', '/') + '#' + cls.__name__.lower()
return link
def class_to_source_link(cls):
module_name = clean_module_name(cls.__module__)
path = module_name.replace('.', '/')
path += '.py'
line = inspect.getsourcelines(cls)[-1]
link = ('https://github.com/maxpumperla/'
'elephas/blob/master/' + path + '#L' + str(line))
return '[[source]](' + link + ')'
def code_snippet(snippet):
result = '```python\n'
result += snippet + '\n'
result += '```\n'
return result
def count_leading_spaces(s):
ws = re.search(r'\S', s)
if ws:
return ws.start()
else:
return 0
def process_list_block(docstring, starting_point, leading_spaces, marker):
ending_point = docstring.find('\n\n', starting_point)
block = docstring[starting_point:(None if ending_point == -1 else
ending_point - 1)]
# Place marker for later reinjection.
docstring = docstring.replace(block, marker)
lines = block.split('\n')
# Remove the computed number of leading white spaces from each line.
lines = [re.sub('^' + ' ' * leading_spaces, '', line) for line in lines]
# Usually lines have at least 4 additional leading spaces.
# These have to be removed, but first the list roots have to be detected.
top_level_regex = r'^ ([^\s\\\(]+):(.*)'
top_level_replacement = r'- __\1__:\2'
lines = [re.sub(top_level_regex, top_level_replacement, line) for line in lines]
# All the other lines get simply the 4 leading space (if present) removed
lines = [re.sub(r'^ ', '', line) for line in lines]
# Fix text lines after lists
indent = 0
text_block = False
for i in range(len(lines)):
line = lines[i]
spaces = re.search(r'\S', line)
if spaces:
# If it is a list element
if line[spaces.start()] == '-':
indent = spaces.start() + 1
if text_block:
text_block = False
lines[i] = '\n' + line
elif spaces.start() < indent:
text_block = True
indent = spaces.start()
lines[i] = '\n' + line
else:
text_block = False
indent = 0
block = '\n'.join(lines)
return docstring, block
def process_docstring(docstring):
# First, extract code blocks and process them.
code_blocks = []
if '```' in docstring:
tmp = docstring[:]
while '```' in tmp:
tmp = tmp[tmp.find('```'):]
index = tmp[3:].find('```') + 6
snippet = tmp[:index]
# Place marker in docstring for later reinjection.
docstring = docstring.replace(
snippet, '$CODE_BLOCK_%d' % len(code_blocks))
snippet_lines = snippet.split('\n')
# Remove leading spaces.
num_leading_spaces = snippet_lines[-1].find('`')
snippet_lines = ([snippet_lines[0]] +
[line[num_leading_spaces:]
for line in snippet_lines[1:]])
# Most code snippets have 3 or 4 more leading spaces
# on inner lines, but not all. Remove them.
inner_lines = snippet_lines[1:-1]
leading_spaces = None
for line in inner_lines:
if not line or line[0] == '\n':
continue
spaces = count_leading_spaces(line)
if leading_spaces is None:
leading_spaces = spaces
if spaces < leading_spaces:
leading_spaces = spaces
if leading_spaces:
snippet_lines = ([snippet_lines[0]] +
[line[leading_spaces:]
for line in snippet_lines[1:-1]] +
[snippet_lines[-1]])
snippet = '\n'.join(snippet_lines)
code_blocks.append(snippet)
tmp = tmp[index:]
# Format docstring lists.
section_regex = r'\n( +)# (.*)\n'
section_idx = re.search(section_regex, docstring)
shift = 0
sections = {}
while section_idx and section_idx.group(2):
anchor = section_idx.group(2)
leading_spaces = len(section_idx.group(1))
shift += section_idx.end()
marker = '$' + anchor.replace(' ', '_') + '$'
docstring, content = process_list_block(docstring,
shift,
leading_spaces,
marker)
sections[marker] = content
section_idx = re.search(section_regex, docstring[shift:])
# Format docstring section titles.
docstring = re.sub(r'\n(\s+)# (.*)\n',
r'\n\1__\2__\n\n',
docstring)
# Strip all remaining leading spaces.
lines = docstring.split('\n')
docstring = '\n'.join([line.lstrip(' ') for line in lines])
# Reinject list blocks.
for marker, content in sections.items():
docstring = docstring.replace(marker, content)
# Reinject code blocks.
for i, code_block in enumerate(code_blocks):
docstring = docstring.replace(
'$CODE_BLOCK_%d' % i, code_block)
return docstring
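# Illustrative example of the transformation above (the docstring content is an
# assumed sample, not taken from elephas): a docstring section written as
#
#     # Arguments
#         rdd: the training data as an RDD.
#
# comes out of process_docstring() roughly as
#
#     __Arguments__
#
#     - __rdd__: the training data as an RDD.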
print('Cleaning up existing sources directory.')
if os.path.exists('sources'):
shutil.rmtree('sources')
print('Populating sources directory with templates.')
for subdir, dirs, fnames in os.walk('templates'):
for fname in fnames:
new_subdir = subdir.replace('templates', 'sources')
if not os.path.exists(new_subdir):
os.makedirs(new_subdir)
if fname[-3:] == '.md':
fpath = os.path.join(subdir, fname)
new_fpath = fpath.replace('templates', 'sources')
shutil.copy(fpath, new_fpath)
def read_file(path):
with open(path) as f:
return f.read()
def collect_class_methods(cls, methods):
if isinstance(methods, (list, tuple)):
return [getattr(cls, m) if isinstance(m, str) else m for m in methods]
methods = []
for _, method in inspect.getmembers(cls, predicate=inspect.isroutine):
if method.__name__[0] == '_' or method.__name__ in EXCLUDE:
continue
methods.append(method)
return methods
def render_function(function, method=True):
subblocks = []
signature = get_function_signature(function, method=method)
if method:
signature = signature.replace(
clean_module_name(function.__module__) + '.', '')
subblocks.append('### ' + function.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = function.__doc__
if docstring:
subblocks.append(process_docstring(docstring))
return '\n\n'.join(subblocks)
def read_page_data(page_data, type):
assert type in ['classes', 'functions', 'methods']
data = page_data.get(type, [])
for module in page_data.get('all_module_{}'.format(type), []):
module_data = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if (inspect.isclass(module_member) and type == 'classes' or
inspect.isfunction(module_member) and type == 'functions'):
instance = module_member
if module.__name__ in instance.__module__:
if instance not in module_data:
module_data.append(instance)
module_data.sort(key=lambda x: id(x))
data += module_data
return data
if __name__ == '__main__':
readme = read_file('../README.md')
index = read_file('templates/index.md')
index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
with open('sources/index.md', 'w') as f:
f.write(index)
print('Generating Elephas docs')
for page_data in PAGES:
classes = read_page_data(page_data, 'classes')
blocks = []
for element in classes:
if not isinstance(element, (list, tuple)):
element = (element, [])
cls = element[0]
subblocks = []
signature = get_class_signature(cls)
subblocks.append('<span style="float:right;">' +
class_to_source_link(cls) + '</span>')
if element[1]:
subblocks.append('## ' + cls.__name__ + ' class\n')
else:
subblocks.append('### ' + cls.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_docstring(docstring))
methods = collect_class_methods(cls, element[1])
if methods:
subblocks.append('\n---')
subblocks.append('## ' + cls.__name__ + ' methods\n')
subblocks.append('\n---\n'.join(
[render_function(method, method=True) for method in methods]))
blocks.append('\n'.join(subblocks))
methods = read_page_data(page_data, 'methods')
for method in methods:
blocks.append(render_function(method, method=True))
functions = read_page_data(page_data, 'functions')
for function in functions:
blocks.append(render_function(function, method=False))
if not blocks:
raise RuntimeError('Found no content for page ' +
page_data['page'])
mkdown = '\n----\n\n'.join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data['page']
path = os.path.join('sources', page_name)
if os.path.exists(path):
template = read_file(path)
assert '{{autogenerated}}' in template, ('Template found for ' + path +
' but missing {{autogenerated}}'
' tag.')
mkdown = template.replace('{{autogenerated}}', mkdown)
print('...inserting autogenerated content into template:', path)
else:
print('...creating new page with autogenerated content:', path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(path, 'w') as f:
f.write(mkdown)
|
import numpy as np
import six
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
def assert_is_label_dataset(dataset, n_class, n_example=None, color=True):
"""Checks if a dataset satisfies the label dataset API.
This function checks if a given dataset satisfies the label dataset
API or not.
    If the dataset does not satisfy the API, this function raises an
:class:`AssertionError`.
Args:
dataset: A dataset to be checked.
n_class (int): The number of classes.
n_example (int): The number of examples to be checked.
If this argument is specified, this function picks
            examples randomly and checks them. Otherwise,
this function checks all examples.
color (bool): A boolean that determines the expected channel size.
If it is :obj:`True`, the number of channels
should be :obj:`3`. Otherwise, it should be :obj:`1`.
The default value is :obj:`True`.
"""
assert len(dataset) > 0, 'The length of dataset must be greater than zero.'
if n_example:
for _ in six.moves.range(n_example):
i = np.random.randint(0, len(dataset))
_check_example(dataset[i], n_class, color)
else:
for i in six.moves.range(len(dataset)):
_check_example(dataset[i], n_class, color)
def _check_example(example, n_class, color):
assert len(example) >= 2, \
        'Each example must have at least two elements: ' \
'img and label.'
img, label = example[:2]
assert_is_image(img, color=color)
assert isinstance(label, np.int32), \
'label must be a numpy.int32.'
assert label.ndim == 0, 'The ndim of label must be 0'
assert label >= 0 and label < n_class, \
'The value of label must be in [0, n_class - 1].'
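# A minimal usage sketch (not part of chainercv): _ToyLabelDataset is an assumed,
# in-memory dataset satisfying the label dataset API checked above.
class _ToyLabelDataset(object):

    def __init__(self, n=5, n_class=3):
        self._imgs = np.random.uniform(size=(n, 3, 8, 8)).astype(np.float32)
        self._labels = np.random.randint(0, n_class, size=n).astype(np.int32)

    def __len__(self):
        return len(self._labels)

    def __getitem__(self, i):
        # Each example is an (img, label) pair with a float32 CHW image
        # and a numpy.int32 scalar label.
        return self._imgs[i], self._labels[i]


# assert_is_label_dataset(_ToyLabelDataset(), n_class=3, color=True)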
|
import builtins
import warnings
from django.test import TestCase
from zinnia import markups
from zinnia.markups import html_format
from zinnia.markups import markdown
from zinnia.markups import restructuredtext
from zinnia.markups import textile
from zinnia.tests.utils import skip_if_lib_not_available
class MarkupsTestCase(TestCase):
text = 'Hello *World* !'
@skip_if_lib_not_available('textile')
def test_textile(self):
self.assertHTMLEqual(
textile(self.text).strip(),
'<p>Hello <strong>World</strong> !</p>'
)
@skip_if_lib_not_available('markdown')
def test_markdown(self):
self.assertHTMLEqual(
markdown(self.text).strip(),
'<p>Hello <em>World</em> !</p>'
)
@skip_if_lib_not_available('markdown')
def test_markdown_extensions(self):
text = '[TOC]\n\n# Header 1\n\n## Header 2'
self.assertHTMLEqual(
markdown(text).strip(),
'<p>[TOC]</p>\n<h1>Header 1</h1>'
'\n<h2>Header 2</h2>'
)
self.assertHTMLEqual(
markdown(text, extensions=['markdown.extensions.toc']).strip(),
'<div class="toc">\n<ul>\n<li><a href="#header-1">'
'Header 1</a><ul>\n<li><a href="#header-2">'
'Header 2</a></li>\n</ul>\n</li>\n</ul>\n</div>'
'\n<h1 id="header-1">Header 1</h1>\n'
'<h2 id="header-2">Header 2</h2>'
)
from markdown.extensions.toc import TocExtension
tocext = TocExtension(marker='--TOC--', permalink='PL')
self.assertHTMLEqual(
markdown(text, extensions=[tocext]).strip(),
'<p>[TOC]</p>\n<h1 id="header-1">Header 1'
'<a class="headerlink" href="#header-1" '
'title="Permanent link">PL</a></h1>\n'
'<h2 id="header-2">Header 2'
'<a class="headerlink" href="#header-2" '
'title="Permanent link">PL</a></h2>'
)
@skip_if_lib_not_available('docutils')
def test_restructuredtext(self):
self.assertHTMLEqual(
restructuredtext(self.text).strip(),
'<p>Hello <em>World</em> !</p>'
)
@skip_if_lib_not_available('docutils')
def test_restructuredtext_settings_override(self):
        text = 'My email is toto@example.com'
self.assertHTMLEqual(
restructuredtext(text).strip(),
'<p>My email is <a class="reference external" '
'href="mailto:toto@example.com">'
'toto@example.com</a></p>'
)
self.assertHTMLEqual(
restructuredtext(text, {'cloak_email_addresses': True}).strip(),
'<p>My email is <a class="reference external" '
'href="mailto:toto%40example.com">'
'toto<span>@</span>example<span>.</span>com</a></p>'
)
class MarkupFailImportTestCase(TestCase):
exclude_list = ['textile', 'markdown', 'docutils']
def setUp(self):
self.original_import = builtins.__import__
builtins.__import__ = self.import_hook
def tearDown(self):
builtins.__import__ = self.original_import
def import_hook(self, name, *args, **kwargs):
if name in self.exclude_list:
raise ImportError('%s module has been disabled' % name)
else:
            return self.original_import(name, *args, **kwargs)
def test_textile(self):
with warnings.catch_warnings(record=True) as w:
result = textile('My *text*')
self.tearDown()
self.assertEqual(result, 'My *text*')
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
"The Python textile library isn't installed.")
def test_markdown(self):
with warnings.catch_warnings(record=True) as w:
result = markdown('My *text*')
self.tearDown()
self.assertEqual(result, 'My *text*')
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
"The Python markdown library isn't installed.")
def test_restructuredtext(self):
with warnings.catch_warnings(record=True) as w:
result = restructuredtext('My *text*')
self.tearDown()
self.assertEqual(result, 'My *text*')
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
"The Python docutils library isn't installed.")
class HtmlFormatTestCase(TestCase):
def setUp(self):
self.original_rendering = markups.MARKUP_LANGUAGE
def tearDown(self):
markups.MARKUP_LANGUAGE = self.original_rendering
def test_html_format_default(self):
markups.MARKUP_LANGUAGE = None
self.assertHTMLEqual(html_format(''), '')
self.assertHTMLEqual(html_format('Content'), '<p>Content</p>')
self.assertEqual(html_format('Content</p>'), 'Content</p>')
self.assertHTMLEqual(
html_format('Hello\nworld!'),
'<p>Hello<br />world!</p>'
)
@skip_if_lib_not_available('textile')
    def test_html_content_textile(self):
markups.MARKUP_LANGUAGE = 'textile'
value = 'Hello world !\n\n' \
'this is my content :\n\n' \
'* Item 1\n* Item 2'
self.assertHTMLEqual(
html_format(value),
'\t<p>Hello world !</p>\n\n\t'
'<p>this is my content :</p>\n\n\t'
'<ul>\n\t\t<li>Item 1</li>\n\t\t'
'<li>Item 2</li>\n\t</ul>'
)
@skip_if_lib_not_available('markdown')
def test_html_content_markdown(self):
markups.MARKUP_LANGUAGE = 'markdown'
value = 'Hello world !\n\n' \
'this is my content :\n\n' \
'* Item 1\n* Item 2'
self.assertHTMLEqual(
html_format(value),
'<p>Hello world !</p>\n'
'<p>this is my content :</p>'
'\n<ul>\n<li>Item 1</li>\n'
'<li>Item 2</li>\n</ul>'
)
@skip_if_lib_not_available('docutils')
def test_html_content_restructuredtext(self):
markups.MARKUP_LANGUAGE = 'restructuredtext'
value = 'Hello world !\n\n' \
'this is my content :\n\n' \
'* Item 1\n* Item 2'
self.assertHTMLEqual(
html_format(value),
'<p>Hello world !</p>\n'
'<p>this is my content :</p>'
'\n<ul class="simple">\n<li>Item 1</li>\n'
'<li>Item 2</li>\n</ul>\n'
)
|
import logging
from subprocess import CalledProcessError
import gensim
import os
import unittest
from gensim import corpora
from gensim.test.utils import datapath
class TestDtmModel(unittest.TestCase):
def setUp(self):
self.time_slices = [3, 7]
self.corpus = corpora.mmcorpus.MmCorpus(datapath('dtm_test.mm'))
self.id2word = corpora.Dictionary.load(datapath('dtm_test.dict'))
        # First you need to set up the environment variable $DTM_PATH pointing to the DTM executable file
self.dtm_path = os.environ.get('DTM_PATH', None)
if not self.dtm_path:
self.skipTest("$DTM_PATH is not properly set up.")
def testDtm(self):
if self.dtm_path is not None:
model = gensim.models.wrappers.DtmModel(
self.dtm_path, self.corpus, self.time_slices, num_topics=2,
id2word=self.id2word, model='dtm', initialize_lda=True,
rng_seed=1
)
topics = model.show_topics(num_topics=2, times=2, num_words=10)
self.assertEqual(len(topics), 4)
one_topic = model.show_topic(topicid=1, time=1, topn=10)
self.assertEqual(len(one_topic), 10)
self.assertEqual(one_topic[0][1], u'idexx')
def testDim(self):
if self.dtm_path is not None:
model = gensim.models.wrappers.DtmModel(
self.dtm_path, self.corpus, self.time_slices, num_topics=2,
id2word=self.id2word, model='fixed', initialize_lda=True,
rng_seed=1
)
topics = model.show_topics(num_topics=2, times=2, num_words=10)
self.assertEqual(len(topics), 4)
one_topic = model.show_topic(topicid=1, time=1, topn=10)
self.assertEqual(len(one_topic), 10)
self.assertEqual(one_topic[0][1], u'skills')
# In stderr expect "Error opening file /tmp/a65419_train_out/initial-lda-ss.dat. Failing."
def testCalledProcessError(self):
if self.dtm_path is not None:
with self.assertRaises(CalledProcessError):
gensim.models.wrappers.DtmModel(
self.dtm_path, self.corpus, self.time_slices, num_topics=2,
id2word=self.id2word, model='dtm', initialize_lda=False,
rng_seed=1
)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
import pytest
from qutebrowser import qutebrowser
@pytest.fixture
def parser():
return qutebrowser.get_argparser()
class TestDebugFlag:
def test_valid(self, parser):
args = parser.parse_args(['--debug-flag', 'chromium',
'--debug-flag', 'stack'])
assert args.debug_flags == ['chromium', 'stack']
def test_invalid(self, parser, capsys):
with pytest.raises(SystemExit):
parser.parse_args(['--debug-flag', 'invalid'])
_out, err = capsys.readouterr()
assert 'Invalid debug flag - valid flags:' in err
class TestLogFilter:
def test_valid(self, parser):
args = parser.parse_args(['--logfilter', 'misc'])
assert args.logfilter == 'misc'
def test_invalid(self, parser, capsys):
with pytest.raises(SystemExit):
parser.parse_args(['--logfilter', 'invalid'])
_out, err = capsys.readouterr()
print(err)
assert 'Invalid log category invalid - valid categories' in err
|
import datetime
import json
from acme import challenges
from acme.messages import errors, STATUS_VALID, ERROR_CODES
from flask import current_app
from lemur.authorizations import service as authorization_service
from lemur.exceptions import LemurException, InvalidConfiguration
from lemur.plugins.base import plugins
from lemur.destinations import service as destination_service
from lemur.plugins.lemur_acme.acme_handlers import AcmeHandler, AcmeDnsHandler
class AcmeChallengeMissmatchError(LemurException):
pass
class AcmeChallenge(object):
"""
    This is the base class that all ACME challenges need to extend, allowing for future extensibility
"""
def create_certificate(self, csr, issuer_options):
"""
Create the new certificate, using the provided CSR and issuer_options.
        Right now this is basically a copy of the create_certificate methods in the AcmeHandlers; it should be
        cleaned up to make use of the deploy and cleanup methods
:param csr:
:param issuer_options:
:return:
"""
pass
def deploy(self, challenge, acme_client, validation_target):
"""
        Here the challenge validation is fetched and deployed somewhere it can be validated by the provider
:param self:
:param challenge: the challenge object, must match for the challenge implementation
:param acme_client: an already bootstrapped acme_client, to avoid passing all issuer_options and so on
:param validation_target: an identifier for the validation target, e.g. the name of a DNS provider
"""
raise NotImplementedError
def cleanup(self, challenge, acme_client, validation_target):
"""
        Ideally, the challenge should be cleaned up after the validation is done
:param challenge: Needed to identify the challenge to be removed
:param acme_client: an already bootstrapped acme_client, to avoid passing all issuer_options and so on
:param validation_target: Needed to remove the validation
"""
raise NotImplementedError
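# A minimal sketch (an assumption, not part of Lemur) showing the interface a
# concrete challenge implementation is expected to provide on top of AcmeChallenge.
class NoopChallenge(AcmeChallenge):
    challengeType = None

    def create_certificate(self, csr, issuer_options):
        # A real implementation would order and return the certificate here.
        return None, None, None

    def deploy(self, challenge, acme_client, validation_target):
        # A real implementation would publish the challenge response at the target.
        pass

    def cleanup(self, challenge, acme_client, validation_target):
        # A real implementation would remove the published challenge response.
        pass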
class AcmeHttpChallenge(AcmeChallenge):
challengeType = challenges.HTTP01
def create_certificate(self, csr, issuer_options):
"""
Creates an ACME certificate using the HTTP-01 challenge.
:param csr:
:param issuer_options:
:return: :raise Exception:
"""
self.acme = AcmeHandler()
authority = issuer_options.get("authority")
acme_client, registration = self.acme.setup_acme_client(authority)
orderr = acme_client.new_order(csr)
chall = []
deployed_challenges = []
all_pre_validated = True
for authz in orderr.authorizations:
# Choosing challenge.
# check if authorizations is already in a valid state
if authz.body.status != STATUS_VALID:
all_pre_validated = False
# authz.body.challenges is a set of ChallengeBody objects.
for i in authz.body.challenges:
# Find the supported challenge.
if isinstance(i.chall, challenges.HTTP01):
chall.append(i)
else:
current_app.logger.info("{} already validated, skipping".format(authz.body.identifier.value))
if len(chall) == 0 and not all_pre_validated:
raise Exception('HTTP-01 challenge was not offered by the CA server at {}'.format(orderr.uri))
elif not all_pre_validated:
validation_target = None
for option in json.loads(issuer_options["authority"].options):
if option["name"] == "tokenDestination":
validation_target = option["value"]
if validation_target is None:
                raise Exception('No token_destination configured for this authority. Can\'t complete the HTTP-01 challenge')
for challenge in chall:
try:
response = self.deploy(challenge, acme_client, validation_target)
deployed_challenges.append(challenge.chall.path)
acme_client.answer_challenge(challenge, response)
except Exception as e:
current_app.logger.error(e)
raise Exception('Failure while trying to deploy token to configure destination. See logs for more information')
current_app.logger.info("Uploaded HTTP-01 challenge tokens, trying to poll and finalize the order")
try:
finalized_orderr = acme_client.poll_and_finalize(orderr,
datetime.datetime.now() + datetime.timedelta(seconds=90))
except errors.ValidationError as validationError:
for authz in validationError.failed_authzrs:
for chall in authz.body.challenges:
if chall.error:
current_app.logger.error(
"ValidationError occured of type {}, with message {}".format(chall.error.typ,
ERROR_CODES[chall.error.code]))
            raise Exception('Validation error occurred, can\'t complete challenges. See logs for more information.')
pem_certificate, pem_certificate_chain = self.acme.extract_cert_and_chain(finalized_orderr.fullchain_pem)
if len(deployed_challenges) != 0:
for token_path in deployed_challenges:
self.cleanup(token_path, validation_target)
# validation is a random string, we use it as external id, to make it possible to implement revoke_certificate
return pem_certificate, pem_certificate_chain, None
def deploy(self, challenge, acme_client, validation_target):
if not isinstance(challenge.chall, challenges.HTTP01):
raise AcmeChallengeMissmatchError(
'The provided challenge is not of type HTTP01, but instead of type {}'.format(
challenge.__class__.__name__))
destination = destination_service.get(validation_target)
if destination is None:
raise Exception(
                'Couldn\'t find the destination with name {}. Can\'t complete the HTTP-01 challenge'.format(validation_target))
destination_plugin = plugins.get(destination.plugin_name)
response, validation = challenge.response_and_validation(acme_client.net.key)
destination_plugin.upload_acme_token(challenge.chall.path, validation, destination.options)
current_app.logger.info("Uploaded HTTP-01 challenge token.")
return response
def cleanup(self, token_path, validation_target):
destination = destination_service.get(validation_target)
if destination is None:
current_app.logger.info(
                'Couldn\'t find the destination with name {}, won\'t clean up the challenge'.format(validation_target))
            return
destination_plugin = plugins.get(destination.plugin_name)
destination_plugin.delete_acme_token(token_path, destination.options)
current_app.logger.info("Cleaned up HTTP-01 challenge token.")
class AcmeDnsChallenge(AcmeChallenge):
challengeType = challenges.DNS01
def create_certificate(self, csr, issuer_options):
"""
Creates an ACME certificate.
:param csr:
:param issuer_options:
:return: :raise Exception:
"""
self.acme = AcmeDnsHandler()
authority = issuer_options.get("authority")
create_immediately = issuer_options.get("create_immediately", False)
acme_client, registration = self.acme.setup_acme_client(authority)
dns_provider = issuer_options.get("dns_provider", {})
if dns_provider:
dns_provider_options = dns_provider.options
credentials = json.loads(dns_provider.credentials)
current_app.logger.debug(
"Using DNS provider: {0}".format(dns_provider.provider_type)
)
dns_provider_plugin = __import__(
dns_provider.provider_type, globals(), locals(), [], 1
)
account_number = credentials.get("account_id")
provider_type = dns_provider.provider_type
if provider_type == "route53" and not account_number:
error = "Route53 DNS Provider {} does not have an account number configured.".format(
dns_provider.name
)
current_app.logger.error(error)
raise InvalidConfiguration(error)
else:
dns_provider = {}
dns_provider_options = None
account_number = None
provider_type = None
domains = self.acme.get_domains(issuer_options)
if not create_immediately:
# Create pending authorizations that we'll need to do the creation
dns_authorization = authorization_service.create(
account_number, domains, provider_type
)
# Return id of the DNS Authorization
return None, None, dns_authorization.id
authorizations = self.acme.get_authorizations(
acme_client,
account_number,
domains,
dns_provider_plugin,
dns_provider_options,
)
self.acme.finalize_authorizations(
acme_client,
account_number,
dns_provider_plugin,
authorizations,
dns_provider_options,
)
pem_certificate, pem_certificate_chain = self.acme.request_certificate(
acme_client, authorizations, csr
)
# TODO add external ID (if possible)
return pem_certificate, pem_certificate_chain, None
def deploy(self, challenge, acme_client, validation_target):
pass
def cleanup(self, authorizations, acme_client, validation_target):
"""
Best effort attempt to delete DNS challenges that may not have been deleted previously. This is usually called
on an exception
:param authorizations: all the authorizations to be cleaned up
:param acme_client: an already bootstrapped acme_client, to avoid passing all issuer_options and so on
:param validation_target: Unused right now
:return:
"""
acme = AcmeDnsHandler()
acme.cleanup_dns_challenges(acme_client, authorizations)
|
from unittest import TestLoader, TestSuite
from .test_actions import ActionTestCase
from .test_api import APITestCase
from .test_cache import CacheTestCase
from .test_helper import HelperTestCase
def get_suite() -> TestSuite:
""""Returns a suite of all automated tests."""
all_tests = TestSuite()
all_tests.addTest(TestLoader().loadTestsFromTestCase(ActionTestCase))
all_tests.addTest(TestLoader().loadTestsFromTestCase(APITestCase))
all_tests.addTest(TestLoader().loadTestsFromTestCase(CacheTestCase))
all_tests.addTest(TestLoader().loadTestsFromTestCase(HelperTestCase))
return all_tests
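# Hedged usage sketch (not part of the original module): the combined suite can be
# run with the standard library runner, e.g.
#
#     import unittest
#     unittest.TextTestRunner(verbosity=2).run(get_suite())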
|
import itertools
import json
import logging
import re
import shutil
import datetime
from concurrent.futures import ProcessPoolExecutor as Pool, as_completed
from pathlib import Path
from urllib.request import urlopen
from urllib.parse import urljoin
logger = logging.getLogger()
PARALLEL_DOWNLOADS = 6
GITHUB_PACKAGE_URL = "https://github.com/lxml/lxml-wheels"
APPVEYOR_PACKAGE_URL = "https://ci.appveyor.com/api/projects/scoder/lxml"
APPVEYOR_BUILDJOBS_URL = "https://ci.appveyor.com/api/buildjobs"
def find_github_files(version, base_package_url=GITHUB_PACKAGE_URL):
url = f"{base_package_url}/releases/tag/lxml-{version}"
with urlopen(url) as p:
page = p.read().decode()
for wheel_url, _ in itertools.groupby(sorted(re.findall(r'href="([^"]+\.whl)"', page))):
yield urljoin(base_package_url, wheel_url)
def find_appveyor_files(version, base_package_url=APPVEYOR_PACKAGE_URL, base_job_url=APPVEYOR_BUILDJOBS_URL):
url = f"{base_package_url}/history?recordsNumber=20"
with urlopen(url) as p:
builds = json.load(p)["builds"]
tag = f"lxml-{version}"
for build in builds:
if build['isTag'] and build['tag'] == tag:
build_id = build['buildId']
break
else:
logger.warning(f"No appveyor build found for tag '{tag}'")
return
build_url = f"{base_package_url}/builds/{build_id}"
with urlopen(build_url) as p:
jobs = json.load(p)["build"]["jobs"]
for job in jobs:
artifacts_url = f"{base_job_url}/{job['jobId']}/artifacts/"
with urlopen(artifacts_url) as p:
for artifact in json.load(p):
yield urljoin(artifacts_url, artifact['fileName'])
def download1(wheel_url, dest_dir):
wheel_name = wheel_url.rsplit("/", 1)[1]
logger.info(f"Downloading {wheel_url} ...")
with urlopen(wheel_url) as w:
file_path = dest_dir / wheel_name
if (file_path.exists()
and "Content-Length" in w.headers
and file_path.stat().st_size == int(w.headers["Content-Length"])):
logger.info(f"Already have {wheel_name}")
else:
try:
with open(file_path, "wb") as f:
shutil.copyfileobj(w, f)
except:
if file_path.exists():
file_path.unlink()
raise
else:
logger.info(f"Finished downloading {wheel_name}")
return wheel_name
def download(urls, dest_dir, jobs=PARALLEL_DOWNLOADS):
with Pool(max_workers=jobs) as pool:
futures = [pool.submit(download1, url, dest_dir) for url in urls]
try:
for future in as_completed(futures):
wheel_name = future.result()
yield wheel_name
except KeyboardInterrupt:
for future in futures:
future.cancel()
raise
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
from itertools import cycle, islice
num_active = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while num_active:
try:
for next in nexts:
yield next()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = cycle(islice(nexts, num_active))
def main(*args):
if not args:
print("Please pass the version to download")
return
version = args[0]
dest_dir = Path("dist") / version
if not dest_dir.is_dir():
dest_dir.mkdir()
start_time = datetime.datetime.now().replace(microsecond=0)
urls = roundrobin(
find_github_files(version),
find_appveyor_files(version),
)
count = sum(1 for _ in enumerate(download(urls, dest_dir)))
duration = datetime.datetime.now().replace(microsecond=0) - start_time
logger.info(f"Downloaded {count} files in {duration}.")
if __name__ == "__main__":
import sys
logging.basicConfig(
stream=sys.stderr,
level=logging.INFO,
format="%(asctime)-15s %(message)s",
)
main(*sys.argv[1:])
|
import asyncio
from datetime import timedelta
import logging
import eliqonline
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_NAME, POWER_WATT
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_CHANNEL_ID = "channel_id"
DEFAULT_NAME = "ELIQ Online"
ICON = "mdi:gauge"
SCAN_INTERVAL = timedelta(seconds=60)
UNIT_OF_MEASUREMENT = POWER_WATT
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Required(CONF_CHANNEL_ID): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ELIQ Online sensor."""
access_token = config.get(CONF_ACCESS_TOKEN)
name = config.get(CONF_NAME, DEFAULT_NAME)
channel_id = config.get(CONF_CHANNEL_ID)
session = async_get_clientsession(hass)
api = eliqonline.API(session=session, access_token=access_token)
try:
_LOGGER.debug("Probing for access to ELIQ Online API")
await api.get_data_now(channelid=channel_id)
except OSError as error:
_LOGGER.error("Could not access the ELIQ Online API: %s", error)
return False
async_add_entities([EliqSensor(api, channel_id, name)], True)
class EliqSensor(Entity):
"""Implementation of an ELIQ Online sensor."""
def __init__(self, api, channel_id, name):
"""Initialize the sensor."""
self._name = name
self._state = None
self._api = api
self._channel_id = channel_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return icon."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return UNIT_OF_MEASUREMENT
@property
def state(self):
"""Return the state of the device."""
return self._state
async def async_update(self):
"""Get the latest data."""
try:
response = await self._api.get_data_now(channelid=self._channel_id)
self._state = int(response["power"])
_LOGGER.debug("Updated power from server %d W", self._state)
except KeyError:
_LOGGER.warning("Invalid response from ELIQ Online API")
except (OSError, asyncio.TimeoutError) as error:
_LOGGER.warning("Could not connect to the ELIQ Online API: %s", error)
|
import platform
from stash.tests.stashtest import StashTestCase
class VersionTests(StashTestCase):
"""Tests for the 'version' command."""
def test_keys(self):
"""ensure keys like 'core.py' are in the output of 'version'"""
output = self.run_command("version", exitcode=0)
self.assertIn("StaSh", output)
self.assertIn("Python", output)
self.assertIn("UI", output)
self.assertIn("root", output)
self.assertIn("core.py", output)
# skip iOS version because we run the tests on linux (i think)
self.assertIn("Platform", output)
self.assertIn("SELFUPDATE_TARGET", output)
self.assertIn("BIN_PATH", output)
self.assertIn("PYTHONPATH", output)
self.assertIn("Loaded libraries", output)
def test_correct_py_version(self):
"""test that the correct python version will be reported."""
output = self.run_command("version", exitcode=0)
self.assertIn(platform.python_version(), output)
def test_correct_stash_version(self):
"""test that the correct stash version will be reported."""
output = self.run_command("version", exitcode=0)
self.assertIn(self.stash.__version__, output)
|
import pytest
from kombu import Connection
from kombu.transport import zookeeper
pytest.importorskip('kazoo')
class test_Channel:
def setup(self):
self.connection = self.create_connection()
self.channel = self.connection.default_channel
def create_connection(self, **kwargs):
return Connection(transport=zookeeper.Transport, **kwargs)
def teardown(self):
self.connection.close()
def test_put_puts_bytes_to_queue(self):
class AssertQueue:
def put(self, value, priority):
assert isinstance(value, bytes)
self.channel._queues['foo'] = AssertQueue()
self.channel._put(queue='foo', message='bar')
@pytest.mark.parametrize('input,expected', (
('', '/'),
('/root', '/root'),
('/root/', '/root'),
))
def test_virtual_host_normalization(self, input, expected):
with self.create_connection(virtual_host=input) as conn:
assert conn.default_channel._vhost == expected
|
import logging
import voluptuous as vol
from homeassistant.components import camera, mqtt
from homeassistant.components.camera import Camera
from homeassistant.const import CONF_DEVICE, CONF_NAME, CONF_UNIQUE_ID
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_QOS,
DOMAIN,
PLATFORMS,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_TOPIC = "topic"
DEFAULT_NAME = "MQTT Camera"
PLATFORM_SCHEMA = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT camera through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT camera dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT camera."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(camera.DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
config, async_add_entities, config_entry=None, discovery_data=None
):
"""Set up the MQTT Camera."""
async_add_entities([MqttCamera(config, config_entry, discovery_data)])
class MqttCamera(
MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, Camera
):
"""representation of a MQTT camera."""
def __init__(self, config, config_entry, discovery_data):
"""Initialize the MQTT Camera."""
self._config = config
self._unique_id = config.get(CONF_UNIQUE_ID)
self._sub_state = None
self._last_image = None
device_config = config.get(CONF_DEVICE)
Camera.__init__(self)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._config = config
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Handle new MQTT messages."""
self._last_image = msg.payload
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
"encoding": None,
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
async def async_camera_image(self):
"""Return image response."""
return self._last_image
@property
def name(self):
"""Return the name of this camera."""
return self._config[CONF_NAME]
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
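# Illustrative configuration.yaml entry for this platform (an assumption based on
# the PLATFORM_SCHEMA above, with placeholder topic and name):
#
#   camera:
#     - platform: mqtt
#       topic: cameras/front_door/image
#       name: My MQTT camera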
|
import csv
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
import homeassistant.util.color as color_util
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "light"
SCAN_INTERVAL = timedelta(seconds=30)
DATA_PROFILES = "light_profiles"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Bitfield of features supported by the light entity
SUPPORT_BRIGHTNESS = 1
SUPPORT_COLOR_TEMP = 2
SUPPORT_EFFECT = 4
SUPPORT_FLASH = 8
SUPPORT_COLOR = 16
SUPPORT_TRANSITION = 32
SUPPORT_WHITE_VALUE = 128
# Float that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_XY_COLOR = "xy_color"
ATTR_HS_COLOR = "hs_color"
ATTR_COLOR_TEMP = "color_temp"
ATTR_KELVIN = "kelvin"
ATTR_MIN_MIREDS = "min_mireds"
ATTR_MAX_MIREDS = "max_mireds"
ATTR_COLOR_NAME = "color_name"
ATTR_WHITE_VALUE = "white_value"
# Brightness of the light, 0..255 or percentage
ATTR_BRIGHTNESS = "brightness"
ATTR_BRIGHTNESS_PCT = "brightness_pct"
ATTR_BRIGHTNESS_STEP = "brightness_step"
ATTR_BRIGHTNESS_STEP_PCT = "brightness_step_pct"
# String representing a profile (built-in ones or external defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# List of possible effects
ATTR_EFFECT_LIST = "effect_list"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
COLOR_GROUP = "Color descriptors"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Service call validation schemas
VALID_TRANSITION = vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255))
VALID_BRIGHTNESS_PCT = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
VALID_BRIGHTNESS_STEP = vol.All(vol.Coerce(int), vol.Clamp(min=-255, max=255))
VALID_BRIGHTNESS_STEP_PCT = vol.All(vol.Coerce(float), vol.Clamp(min=-100, max=100))
VALID_FLASH = vol.In([FLASH_SHORT, FLASH_LONG])
LIGHT_TURN_ON_SCHEMA = {
vol.Exclusive(ATTR_PROFILE, COLOR_GROUP): cv.string,
ATTR_TRANSITION: VALID_TRANSITION,
vol.Exclusive(ATTR_BRIGHTNESS, ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
vol.Exclusive(ATTR_BRIGHTNESS_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_BRIGHTNESS_STEP, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP,
vol.Exclusive(ATTR_BRIGHTNESS_STEP_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.small_float, cv.small_float)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
vol.Coerce(tuple),
),
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): cv.positive_int,
ATTR_WHITE_VALUE: vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
ATTR_FLASH: VALID_FLASH,
ATTR_EFFECT: cv.string,
}
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass, entity_id):
"""Return if the lights are on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
def preprocess_turn_on_alternatives(hass, params):
"""Process extra data for turn light on request.
Async friendly.
"""
# Bail out, we process this later.
if ATTR_BRIGHTNESS_STEP in params or ATTR_BRIGHTNESS_STEP_PCT in params:
return
if ATTR_PROFILE in params:
hass.data[DATA_PROFILES].apply_profile(params.pop(ATTR_PROFILE), params)
color_name = params.pop(ATTR_COLOR_NAME, None)
if color_name is not None:
try:
params[ATTR_RGB_COLOR] = color_util.color_name_to_rgb(color_name)
except ValueError:
_LOGGER.warning("Got unknown color %s, falling back to white", color_name)
params[ATTR_RGB_COLOR] = (255, 255, 255)
kelvin = params.pop(ATTR_KELVIN, None)
if kelvin is not None:
mired = color_util.color_temperature_kelvin_to_mired(kelvin)
params[ATTR_COLOR_TEMP] = int(mired)
brightness_pct = params.pop(ATTR_BRIGHTNESS_PCT, None)
if brightness_pct is not None:
params[ATTR_BRIGHTNESS] = round(255 * brightness_pct / 100)
xy_color = params.pop(ATTR_XY_COLOR, None)
if xy_color is not None:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
rgb_color = params.pop(ATTR_RGB_COLOR, None)
if rgb_color is not None:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
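# Illustrative example (an assumption, not from the Home Assistant docs): given
# params = {"brightness_pct": 50, "kelvin": 4000}, the helper above rewrites it to
# {"brightness": 128, "color_temp": 250}, since round(255 * 50 / 100) == 128 and
# 1,000,000 / 4000 == 250 mireds.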
def filter_turn_off_params(params):
"""Filter out params not used in turn off."""
return {k: v for k, v in params.items() if k in (ATTR_TRANSITION, ATTR_FLASH)}
async def async_setup(hass, config):
"""Expose light control via state machine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
profiles = hass.data[DATA_PROFILES] = Profiles(hass)
await profiles.async_initialize()
def preprocess_data(data):
"""Preprocess the service data."""
base = {
entity_field: data.pop(entity_field)
for entity_field in cv.ENTITY_SERVICE_FIELDS
if entity_field in data
}
preprocess_turn_on_alternatives(hass, data)
base["params"] = data
return base
async def async_handle_light_on_service(light, call):
"""Handle turning a light on.
If brightness is set to 0, this service will turn the light off.
"""
params = call.data["params"]
if not params:
profiles.apply_default(light.entity_id, params)
# Only process params once we processed brightness step
if params and (
ATTR_BRIGHTNESS_STEP in params or ATTR_BRIGHTNESS_STEP_PCT in params
):
brightness = light.brightness if light.is_on else 0
if ATTR_BRIGHTNESS_STEP in params:
brightness += params.pop(ATTR_BRIGHTNESS_STEP)
else:
brightness += round(params.pop(ATTR_BRIGHTNESS_STEP_PCT) / 100 * 255)
params[ATTR_BRIGHTNESS] = max(0, min(255, brightness))
preprocess_turn_on_alternatives(hass, params)
# Zero brightness: Light will be turned off
if params.get(ATTR_BRIGHTNESS) == 0:
await light.async_turn_off(**filter_turn_off_params(params))
else:
await light.async_turn_on(**params)
async def async_handle_toggle_service(light, call):
"""Handle toggling a light."""
if light.is_on:
off_params = filter_turn_off_params(call.data)
await light.async_turn_off(**off_params)
else:
await async_handle_light_on_service(light, call)
# Listen for light on and light off service calls.
component.async_register_entity_service(
SERVICE_TURN_ON,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_ON_SCHEMA), preprocess_data),
async_handle_light_on_service,
)
component.async_register_entity_service(
SERVICE_TURN_OFF,
{ATTR_TRANSITION: VALID_TRANSITION, ATTR_FLASH: VALID_FLASH},
"async_turn_off",
)
component.async_register_entity_service(
SERVICE_TOGGLE,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_ON_SCHEMA), preprocess_data),
async_handle_toggle_service,
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class Profiles:
"""Representation of available color profiles."""
SCHEMA = vol.Schema(
vol.Any(
vol.ExactSequence((str, cv.small_float, cv.small_float, cv.byte)),
vol.ExactSequence(
(str, cv.small_float, cv.small_float, cv.byte, cv.positive_int)
),
)
)
def __init__(self, hass):
"""Initialize profiles."""
self.hass = hass
self.data = None
def _load_profile_data(self):
"""Load built-in profiles and custom profiles."""
profile_paths = [
os.path.join(os.path.dirname(__file__), LIGHT_PROFILES_FILE),
self.hass.config.path(LIGHT_PROFILES_FILE),
]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path) as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
try:
for rec in reader:
(
profile,
color_x,
color_y,
brightness,
*transition,
) = Profiles.SCHEMA(rec)
transition = transition[0] if transition else 0
profiles[profile] = color_util.color_xy_to_hs(
color_x, color_y
) + (
brightness,
transition,
)
except vol.MultipleInvalid as ex:
_LOGGER.error(
"Error parsing light profile from %s: %s", profile_path, ex
)
continue
return profiles
async def async_initialize(self):
"""Load and cache profiles."""
self.data = await self.hass.async_add_executor_job(self._load_profile_data)
@callback
def apply_default(self, entity_id, params):
"""Return the default turn-on profile for the given light."""
name = f"{entity_id}.default"
if name in self.data:
self.apply_profile(name, params)
return
name = "group.all_lights.default"
if name in self.data:
self.apply_profile(name, params)
@callback
def apply_profile(self, name, params):
"""Apply a profile."""
profile = self.data.get(name)
if profile is None:
return
params.setdefault(ATTR_HS_COLOR, profile[:2])
params.setdefault(ATTR_BRIGHTNESS, profile[2])
params.setdefault(ATTR_TRANSITION, profile[3])
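# Illustrative light_profiles.csv rows accepted by Profiles.SCHEMA above (an assumed
# example of the "name, x, y, brightness[, transition]" layout, not the shipped file):
#
#     relax,0.5119,0.4147,144
#     energize,0.368,0.3686,203,2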
class LightEntity(ToggleEntity):
"""Representation of a light."""
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return None
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return None
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return None
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return 500
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return None
@property
def effect_list(self):
"""Return the list of supported effects."""
return None
@property
def effect(self):
"""Return the current effect."""
return None
@property
def capability_attributes(self):
"""Return capability attributes."""
data = {}
supported_features = self.supported_features
if supported_features & SUPPORT_COLOR_TEMP:
data[ATTR_MIN_MIREDS] = self.min_mireds
data[ATTR_MAX_MIREDS] = self.max_mireds
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT_LIST] = self.effect_list
return data
@property
def state_attributes(self):
"""Return state attributes."""
if not self.is_on:
return None
data = {}
supported_features = self.supported_features
if supported_features & SUPPORT_BRIGHTNESS:
data[ATTR_BRIGHTNESS] = self.brightness
if supported_features & SUPPORT_COLOR_TEMP:
data[ATTR_COLOR_TEMP] = self.color_temp
if supported_features & SUPPORT_COLOR and self.hs_color:
hs_color = self.hs_color
data[ATTR_HS_COLOR] = (round(hs_color[0], 3), round(hs_color[1], 3))
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
if supported_features & SUPPORT_WHITE_VALUE:
data[ATTR_WHITE_VALUE] = self.white_value
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT] = self.effect
return {key: val for key, val in data.items() if val is not None}
@property
def supported_features(self):
"""Flag supported features."""
return 0
class Light(LightEntity):
"""Representation of a light (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"Light is deprecated, modify %s to extend LightEntity",
cls.__name__,
)
|
from homeassistant.components.fail2ban.sensor import (
STATE_ALL_BANS,
STATE_CURRENT_BANS,
BanLogParser,
BanSensor,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, mock_open, patch
from tests.common import assert_setup_component
def fake_log(log_key):
"""Return a fake fail2ban log."""
fake_log_dict = {
"single_ban": (
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 111.111.111.111"
),
"ipv6_ban": (
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 2607:f0d0:1002:51::4"
),
"multi_ban": (
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 111.111.111.111\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 222.222.222.222"
),
"multi_jail": (
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 111.111.111.111\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_two] Ban 222.222.222.222"
),
"unban_all": (
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 111.111.111.111\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Unban 111.111.111.111\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 222.222.222.222\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Unban 222.222.222.222"
),
"unban_one": (
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 111.111.111.111\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Ban 222.222.222.222\n"
"2017-01-01 12:23:35 fail2ban.actions [111]: "
"NOTICE [jail_one] Unban 111.111.111.111"
),
}
return fake_log_dict[log_key]
@patch("os.path.isfile", Mock(return_value=True))
async def test_setup(hass):
"""Test that sensor can be setup."""
config = {"sensor": {"platform": "fail2ban", "jails": ["jail_one"]}}
mock_fh = mock_open()
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert_setup_component(1, "sensor")
@patch("os.path.isfile", Mock(return_value=True))
async def test_multi_jails(hass):
"""Test that multiple jails can be set up as sensors.."""
config = {"sensor": {"platform": "fail2ban", "jails": ["jail_one", "jail_two"]}}
mock_fh = mock_open()
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert_setup_component(2, "sensor")
async def test_single_ban(hass):
"""Test that log is parsed correctly for single ban."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor = BanSensor("fail2ban", "jail_one", log_parser)
assert sensor.name == "fail2ban jail_one"
mock_fh = mock_open(read_data=fake_log("single_ban"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor.update()
assert sensor.state == "111.111.111.111"
assert sensor.state_attributes[STATE_CURRENT_BANS] == ["111.111.111.111"]
assert sensor.state_attributes[STATE_ALL_BANS] == ["111.111.111.111"]
async def test_ipv6_ban(hass):
"""Test that log is parsed correctly for IPV6 bans."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor = BanSensor("fail2ban", "jail_one", log_parser)
assert sensor.name == "fail2ban jail_one"
mock_fh = mock_open(read_data=fake_log("ipv6_ban"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor.update()
assert sensor.state == "2607:f0d0:1002:51::4"
assert sensor.state_attributes[STATE_CURRENT_BANS] == ["2607:f0d0:1002:51::4"]
assert sensor.state_attributes[STATE_ALL_BANS] == ["2607:f0d0:1002:51::4"]
async def test_multiple_ban(hass):
"""Test that log is parsed correctly for multiple ban."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor = BanSensor("fail2ban", "jail_one", log_parser)
assert sensor.name == "fail2ban jail_one"
mock_fh = mock_open(read_data=fake_log("multi_ban"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor.update()
assert sensor.state == "222.222.222.222"
assert sensor.state_attributes[STATE_CURRENT_BANS] == [
"111.111.111.111",
"222.222.222.222",
]
assert sensor.state_attributes[STATE_ALL_BANS] == [
"111.111.111.111",
"222.222.222.222",
]
async def test_unban_all(hass):
"""Test that log is parsed correctly when unbanning."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor = BanSensor("fail2ban", "jail_one", log_parser)
assert sensor.name == "fail2ban jail_one"
mock_fh = mock_open(read_data=fake_log("unban_all"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor.update()
assert sensor.state == "None"
assert sensor.state_attributes[STATE_CURRENT_BANS] == []
assert sensor.state_attributes[STATE_ALL_BANS] == [
"111.111.111.111",
"222.222.222.222",
]
async def test_unban_one(hass):
"""Test that log is parsed correctly when unbanning one ip."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor = BanSensor("fail2ban", "jail_one", log_parser)
assert sensor.name == "fail2ban jail_one"
mock_fh = mock_open(read_data=fake_log("unban_one"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor.update()
assert sensor.state == "222.222.222.222"
assert sensor.state_attributes[STATE_CURRENT_BANS] == ["222.222.222.222"]
assert sensor.state_attributes[STATE_ALL_BANS] == [
"111.111.111.111",
"222.222.222.222",
]
async def test_multi_jail(hass):
"""Test that log is parsed correctly when using multiple jails."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor1 = BanSensor("fail2ban", "jail_one", log_parser)
sensor2 = BanSensor("fail2ban", "jail_two", log_parser)
assert sensor1.name == "fail2ban jail_one"
assert sensor2.name == "fail2ban jail_two"
mock_fh = mock_open(read_data=fake_log("multi_jail"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor1.update()
sensor2.update()
assert sensor1.state == "111.111.111.111"
assert sensor1.state_attributes[STATE_CURRENT_BANS] == ["111.111.111.111"]
assert sensor1.state_attributes[STATE_ALL_BANS] == ["111.111.111.111"]
assert sensor2.state == "222.222.222.222"
assert sensor2.state_attributes[STATE_CURRENT_BANS] == ["222.222.222.222"]
assert sensor2.state_attributes[STATE_ALL_BANS] == ["222.222.222.222"]
async def test_ban_active_after_update(hass):
"""Test that ban persists after subsequent update."""
log_parser = BanLogParser("/test/fail2ban.log")
sensor = BanSensor("fail2ban", "jail_one", log_parser)
assert sensor.name == "fail2ban jail_one"
mock_fh = mock_open(read_data=fake_log("single_ban"))
with patch("homeassistant.components.fail2ban.sensor.open", mock_fh, create=True):
sensor.update()
assert sensor.state == "111.111.111.111"
sensor.update()
assert sensor.state == "111.111.111.111"
assert sensor.state_attributes[STATE_CURRENT_BANS] == ["111.111.111.111"]
assert sensor.state_attributes[STATE_ALL_BANS] == ["111.111.111.111"]
|
from kalliope.core.ConfigurationManager import SettingLoader
from sys import version_info
sl = SettingLoader()
settings = sl.settings
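# Select the precompiled SWIG module matching the machine architecture and the running Python version, e.g. "<machine>/python27/_snowboydetect".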
module_file_path = "%s/python%d%d/_snowboydetect" % (settings.machine, version_info[0], version_info[1])
from sys import version_info as _swig_python_version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module(module_file_path, [dirname(__file__)])
except ImportError:
import _snowboydetect
return _snowboydetect
if fp is not None:
try:
_mod = imp.load_module('_snowboydetect', fp, pathname, description)
finally:
fp.close()
return _mod
_snowboydetect = swig_import_helper()
del swig_import_helper
else:
import _snowboydetect
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
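# SWIG proxy class wrapping the native SnowboyDetect hotword detector.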
class SnowboyDetect(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SnowboyDetect, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SnowboyDetect, name)
__repr__ = _swig_repr
def __init__(self, resource_filename, model_str):
this = _snowboydetect.new_SnowboyDetect(resource_filename, model_str)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def Reset(self):
return _snowboydetect.SnowboyDetect_Reset(self)
def RunDetection(self, *args):
return _snowboydetect.SnowboyDetect_RunDetection(self, *args)
def SetSensitivity(self, sensitivity_str):
return _snowboydetect.SnowboyDetect_SetSensitivity(self, sensitivity_str)
def SetHighSensitivity(self, high_sensitivity_str):
return _snowboydetect.SnowboyDetect_SetHighSensitivity(self, high_sensitivity_str)
def GetSensitivity(self):
return _snowboydetect.SnowboyDetect_GetSensitivity(self)
def SetAudioGain(self, audio_gain):
return _snowboydetect.SnowboyDetect_SetAudioGain(self, audio_gain)
def UpdateModel(self):
return _snowboydetect.SnowboyDetect_UpdateModel(self)
def NumHotwords(self):
return _snowboydetect.SnowboyDetect_NumHotwords(self)
def ApplyFrontend(self, apply_frontend):
return _snowboydetect.SnowboyDetect_ApplyFrontend(self, apply_frontend)
def SampleRate(self):
return _snowboydetect.SnowboyDetect_SampleRate(self)
def NumChannels(self):
return _snowboydetect.SnowboyDetect_NumChannels(self)
def BitsPerSample(self):
return _snowboydetect.SnowboyDetect_BitsPerSample(self)
__swig_destroy__ = _snowboydetect.delete_SnowboyDetect
__del__ = lambda self: None
SnowboyDetect_swigregister = _snowboydetect.SnowboyDetect_swigregister
SnowboyDetect_swigregister(SnowboyDetect)
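# SWIG proxy class wrapping the native SnowboyVad voice activity detector.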
class SnowboyVad(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SnowboyVad, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SnowboyVad, name)
__repr__ = _swig_repr
def __init__(self, resource_filename):
this = _snowboydetect.new_SnowboyVad(resource_filename)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def Reset(self):
return _snowboydetect.SnowboyVad_Reset(self)
def RunVad(self, *args):
return _snowboydetect.SnowboyVad_RunVad(self, *args)
def SetAudioGain(self, audio_gain):
return _snowboydetect.SnowboyVad_SetAudioGain(self, audio_gain)
def ApplyFrontend(self, apply_frontend):
return _snowboydetect.SnowboyVad_ApplyFrontend(self, apply_frontend)
def SampleRate(self):
return _snowboydetect.SnowboyVad_SampleRate(self)
def NumChannels(self):
return _snowboydetect.SnowboyVad_NumChannels(self)
def BitsPerSample(self):
return _snowboydetect.SnowboyVad_BitsPerSample(self)
__swig_destroy__ = _snowboydetect.delete_SnowboyVad
__del__ = lambda self: None
SnowboyVad_swigregister = _snowboydetect.SnowboyVad_swigregister
SnowboyVad_swigregister(SnowboyVad)
# This file is compatible with both classic and new-style classes.
|
import dedupe
import dedupe.api
import unittest
import itertools
import random
import numpy
import warnings
from collections import OrderedDict
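# Helper: flatten an iterable of iterables into a single list (itertools.chain.from_iterable).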
def icfi(x):
return list(itertools.chain.from_iterable(x))
DATA_SAMPLE = [({'age': '27', 'name': 'Kyle'},
{'age': '50', 'name': 'Bob'}),
({'age': '27', 'name': 'Kyle'},
{'age': '35', 'name': 'William'}),
({'age': '10', 'name': 'Sue'},
{'age': '35', 'name': 'William'}),
({'age': '27', 'name': 'Kyle'},
{'age': '20', 'name': 'Jimmy'}),
({'age': '75', 'name': 'Charlie'},
{'age': '21', 'name': 'Jimbo'})]
data_dict = OrderedDict(((0, {'name': 'Bob', 'age': '51'}),
(1, {'name': 'Linda', 'age': '50'}),
(2, {'name': 'Gene', 'age': '12'}),
(3, {'name': 'Tina', 'age': '15'}),
(4, {'name': 'Bob B.', 'age': '51'}),
(5, {'name': 'bob belcher', 'age': '51'}),
(6, {'name': 'linda ', 'age': '50'})))
data_dict_2 = OrderedDict(((7, {'name': 'BOB', 'age': '51'}),
(8, {'name': 'LINDA', 'age': '50'}),
(9, {'name': 'GENE', 'age': '12'}),
(10, {'name': 'TINA', 'age': '15'}),
(11, {'name': 'BOB B.', 'age': '51'}),
(12, {'name': 'BOB BELCHER', 'age': '51'}),
(13, {'name': 'LINDA ', 'age': '50'})))
class ActiveMatch(unittest.TestCase):
def setUp(self):
self.field_definition = [{'field': 'name', 'type': 'String'},
{'field': 'age', 'type': 'String'}]
def test_initialize_fields(self):
self.assertRaises(TypeError, dedupe.api.ActiveMatching)
with self.assertRaises(ValueError):
dedupe.api.ActiveMatching([],)
with self.assertRaises(ValueError):
dedupe.api.ActiveMatching([{'field': 'name', 'type': 'Custom', 'comparator': lambda x: 1}],)
with self.assertRaises(ValueError):
dedupe.api.ActiveMatching([{'field': 'name', 'type': 'Custom', 'comparator': lambda x: 1},
{'field': 'age', 'type': 'Custom', 'comparator': lambda x: 1}],)
dedupe.api.ActiveMatching([{'field': 'name', 'type': 'Custom', 'comparator': lambda x: 1},
{'field': 'age', 'type': 'String'}],)
def test_check_record(self):
matcher = dedupe.api.ActiveMatching(self.field_definition)
self.assertRaises(ValueError, matcher._checkRecordPair, ())
self.assertRaises(ValueError, matcher._checkRecordPair, (1, 2))
self.assertRaises(ValueError, matcher._checkRecordPair, (1, 2, 3))
self.assertRaises(ValueError, matcher._checkRecordPair, ({}, {}))
matcher._checkRecordPair(({'name': 'Frank', 'age': '72'},
{'name': 'Bob', 'age': '27'}))
def test_markPair(self):
from collections import OrderedDict
good_training_pairs = OrderedDict((('match', DATA_SAMPLE[3:5]),
('distinct', DATA_SAMPLE[0:3])))
bad_training_pairs = {'non_dupes': DATA_SAMPLE[0:3],
'match': DATA_SAMPLE[3:5]}
matcher = dedupe.api.ActiveMatching(self.field_definition)
self.assertRaises(ValueError, matcher.mark_pairs, bad_training_pairs)
matcher.mark_pairs(good_training_pairs)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
matcher.mark_pairs({'match': [], 'distinct': []})
assert len(w) == 1
assert str(
w[-1].message) == "Didn't return any labeled record pairs"
class DedupeTest(unittest.TestCase):
def setUp(self):
random.seed(123)
numpy.random.seed(456)
field_definition = [{'field': 'name', 'type': 'String'},
{'field': 'age', 'type': 'String'}]
self.deduper = dedupe.Dedupe(field_definition)
def test_randomSample(self):
random.seed(6)
numpy.random.seed(6)
self.deduper._sample(data_dict, 30, 1)
correct_result = [({'age': '50', 'name': 'Linda'},
{'age': '51', 'name': 'bob belcher'}),
({'age': '51', 'name': 'Bob'},
{'age': '51', 'name': 'Bob B.'}),
({'age': '51', 'name': 'Bob'},
{'age': '51', 'name': 'bob belcher'}),
({'age': '51', 'name': 'Bob B.'},
{'age': '51', 'name': 'bob belcher'}),
({'age': '50', 'name': 'Linda'},
{'age': '50', 'name': 'linda '})]
for pair in correct_result:
assert pair in self.deduper.active_learner.candidates
if __name__ == "__main__":
unittest.main()
|
import pytest
from tests.async_mock import patch
TEST_CALENDAR = {
"id": "[email protected]",
"etag": '"3584134138943410"',
"timeZone": "UTC",
"accessRole": "reader",
"foregroundColor": "#000000",
"selected": True,
"kind": "calendar#calendarListEntry",
"backgroundColor": "#16a765",
"description": "Test Calendar",
"summary": "We are, we are, a... Test Calendar",
"colorId": "8",
"defaultReminders": [],
"track": True,
}
@pytest.fixture
def test_calendar():
"""Return a test calendar."""
return TEST_CALENDAR
@pytest.fixture
def mock_next_event():
"""Mock the google calendar data."""
patch_google_cal = patch(
"homeassistant.components.google.calendar.GoogleCalendarData"
)
with patch_google_cal as google_cal_data:
yield google_cal_data
|
import os
flavor = os.environ.get('SETTINGS_FLAVOR', 'dev')
reload = True
bind = '%s:%s' % (
os.environ.get('REGISTRY_HOST', '0.0.0.0'),
os.environ.get('REGISTRY_PORT', '5000')
)
graceful_timeout = int(os.environ.get('GUNICORN_GRACEFUL_TIMEOUT', 3600))
timeout = int(os.environ.get('GUNICORN_SILENT_TIMEOUT', 3600))
worker_class = 'gevent'
max_requests = int(os.environ.get('GUNICORN_MAX_REQUESTS', 100))
workers = int(os.environ.get('GUNICORN_WORKERS', 4))
log_level = 'debug'
debug = True
accesslog = os.environ.get('GUNICORN_ACCESS_LOG_FILE', '-')
errorlog = os.environ.get('GUNICORN_ERROR_LOG_FILE', '-')
access_log_format = ('%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" '
'"%(a)s" %(D)s %({X-Docker-Size}o)s')
if flavor == 'prod' or flavor == 'staging':
reload = False
workers = 8
debug = False
log_level = 'info'
|
import math
from homeassistant.components import emulated_kasa
from homeassistant.components.emulated_kasa.const import (
CONF_POWER,
CONF_POWER_ENTITY,
DOMAIN,
)
from homeassistant.components.fan import (
ATTR_SPEED,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_SPEED,
)
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import (
ATTR_CURRENT_POWER_W,
DOMAIN as SWITCH_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_ENTITIES,
CONF_NAME,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, Mock, patch
ENTITY_SWITCH = "switch.ac"
ENTITY_SWITCH_NAME = "A/C"
ENTITY_SWITCH_POWER = 400.0
ENTITY_LIGHT = "light.bed_light"
ENTITY_LIGHT_NAME = "Bed Room Lights"
ENTITY_FAN = "fan.ceiling_fan"
ENTITY_FAN_NAME = "Ceiling Fan"
ENTITY_FAN_SPEED_LOW = 5
ENTITY_FAN_SPEED_MED = 10
ENTITY_FAN_SPEED_HIGH = 50
ENTITY_SENSOR = "sensor.outside_temperature"
ENTITY_SENSOR_NAME = "Power Sensor"
CONFIG = {
DOMAIN: {
CONF_ENTITIES: {
ENTITY_SWITCH: {
CONF_NAME: ENTITY_SWITCH_NAME,
CONF_POWER: ENTITY_SWITCH_POWER,
},
ENTITY_LIGHT: {
CONF_NAME: ENTITY_LIGHT_NAME,
CONF_POWER_ENTITY: ENTITY_SENSOR,
},
ENTITY_FAN: {
CONF_POWER: "{% if is_state_attr('"
+ ENTITY_FAN
+ "','speed', 'low') %} "
+ str(ENTITY_FAN_SPEED_LOW)
+ "{% elif is_state_attr('"
+ ENTITY_FAN
+ "','speed', 'medium') %} "
+ str(ENTITY_FAN_SPEED_MED)
+ "{% elif is_state_attr('"
+ ENTITY_FAN
+ "','speed', 'high') %} "
+ str(ENTITY_FAN_SPEED_HIGH)
+ "{% endif %}"
},
}
}
}
CONFIG_SWITCH = {
DOMAIN: {
CONF_ENTITIES: {
ENTITY_SWITCH: {
CONF_NAME: ENTITY_SWITCH_NAME,
CONF_POWER: ENTITY_SWITCH_POWER,
},
}
}
}
CONFIG_SWITCH_NO_POWER = {
DOMAIN: {
CONF_ENTITIES: {
ENTITY_SWITCH: {},
}
}
}
CONFIG_LIGHT = {
DOMAIN: {
CONF_ENTITIES: {
ENTITY_LIGHT: {
CONF_NAME: ENTITY_LIGHT_NAME,
CONF_POWER_ENTITY: ENTITY_SENSOR,
},
}
}
}
CONFIG_FAN = {
DOMAIN: {
CONF_ENTITIES: {
ENTITY_FAN: {
CONF_POWER: "{% if is_state_attr('"
+ ENTITY_FAN
+ "','speed', 'low') %} "
+ str(ENTITY_FAN_SPEED_LOW)
+ "{% elif is_state_attr('"
+ ENTITY_FAN
+ "','speed', 'medium') %} "
+ str(ENTITY_FAN_SPEED_MED)
+ "{% elif is_state_attr('"
+ ENTITY_FAN
+ "','speed', 'high') %} "
+ str(ENTITY_FAN_SPEED_HIGH)
+ "{% endif %}"
},
}
}
}
CONFIG_SENSOR = {
DOMAIN: {
CONF_ENTITIES: {
ENTITY_SENSOR: {CONF_NAME: ENTITY_SENSOR_NAME},
}
}
}
def nested_value(ndict, *keys):
"""Return a nested dict value or None if it doesn't exist."""
if len(keys) == 0:
return ndict
key = keys[0]
if not isinstance(ndict, dict) or key not in ndict:
return None
return nested_value(ndict[key], *keys[1:])
async def test_setup(hass):
"""Test that devices are reported correctly."""
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await async_setup_component(hass, DOMAIN, CONFIG) is True
async def test_float(hass):
"""Test a configuration using a simple float."""
config = CONFIG_SWITCH[DOMAIN][CONF_ENTITIES]
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{SWITCH_DOMAIN: {"platform": "demo"}},
)
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await async_setup_component(hass, DOMAIN, CONFIG_SWITCH) is True
await hass.async_block_till_done()
await emulated_kasa.validate_configs(hass, config)
# Turn switch on
await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True
)
switch = hass.states.get(ENTITY_SWITCH)
assert switch.state == STATE_ON
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_SWITCH_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, ENTITY_SWITCH_POWER)
# Turn off
await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True
)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_SWITCH_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 0)
async def test_switch_power(hass):
"""Test a configuration using a simple float."""
config = CONFIG_SWITCH_NO_POWER[DOMAIN][CONF_ENTITIES]
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{SWITCH_DOMAIN: {"platform": "demo"}},
)
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await async_setup_component(hass, DOMAIN, CONFIG_SWITCH_NO_POWER) is True
await hass.async_block_till_done()
await emulated_kasa.validate_configs(hass, config)
# Turn switch on
await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True
)
switch = hass.states.get(ENTITY_SWITCH)
assert switch.state == STATE_ON
power = switch.attributes[ATTR_CURRENT_POWER_W]
assert power == 100
assert switch.name == "AC"
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == "AC"
power = nested_value(plug, "emeter", "get_realtime", "power")
    assert math.isclose(power, 100)
hass.states.async_set(
ENTITY_SWITCH,
STATE_ON,
attributes={ATTR_CURRENT_POWER_W: 120, ATTR_FRIENDLY_NAME: "AC"},
)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == "AC"
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 120)
# Turn off
await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True
)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == "AC"
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 0)
async def test_template(hass):
"""Test a configuration using a complex template."""
config = CONFIG_FAN[DOMAIN][CONF_ENTITIES]
assert await async_setup_component(
hass, FAN_DOMAIN, {FAN_DOMAIN: {"platform": "demo"}}
)
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await async_setup_component(hass, DOMAIN, CONFIG_FAN) is True
await hass.async_block_till_done()
await emulated_kasa.validate_configs(hass, config)
# Turn all devices on to known state
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_FAN}, blocking=True
)
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: ENTITY_FAN, ATTR_SPEED: "low"},
blocking=True,
)
fan = hass.states.get(ENTITY_FAN)
assert fan.state == STATE_ON
# Fan low:
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_FAN_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, ENTITY_FAN_SPEED_LOW)
# Fan High:
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: ENTITY_FAN, ATTR_SPEED: "high"},
blocking=True,
)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_FAN_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, ENTITY_FAN_SPEED_HIGH)
# Fan off:
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_FAN}, blocking=True
)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_FAN_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 0)
async def test_sensor(hass):
"""Test a configuration using a sensor in a template."""
config = CONFIG_LIGHT[DOMAIN][CONF_ENTITIES]
assert await async_setup_component(
hass, LIGHT_DOMAIN, {LIGHT_DOMAIN: {"platform": "demo"}}
)
assert await async_setup_component(
hass,
SENSOR_DOMAIN,
{SENSOR_DOMAIN: {"platform": "demo"}},
)
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await async_setup_component(hass, DOMAIN, CONFIG_LIGHT) is True
await hass.async_block_till_done()
await emulated_kasa.validate_configs(hass, config)
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_LIGHT}, blocking=True
)
hass.states.async_set(ENTITY_SENSOR, 35)
light = hass.states.get(ENTITY_LIGHT)
assert light.state == STATE_ON
sensor = hass.states.get(ENTITY_SENSOR)
assert sensor.state == "35"
# light
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_LIGHT_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 35)
# change power sensor
hass.states.async_set(ENTITY_SENSOR, 40)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_LIGHT_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 40)
# report 0 if device is off
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_LIGHT}, blocking=True
)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_LIGHT_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 0)
async def test_sensor_state(hass):
"""Test a configuration using a sensor in a template."""
config = CONFIG_SENSOR[DOMAIN][CONF_ENTITIES]
assert await async_setup_component(
hass,
SENSOR_DOMAIN,
{SENSOR_DOMAIN: {"platform": "demo"}},
)
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await async_setup_component(hass, DOMAIN, CONFIG_SENSOR) is True
await hass.async_block_till_done()
await emulated_kasa.validate_configs(hass, config)
hass.states.async_set(ENTITY_SENSOR, 35)
sensor = hass.states.get(ENTITY_SENSOR)
assert sensor.state == "35"
# sensor
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_SENSOR_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 35)
# change power sensor
hass.states.async_set(ENTITY_SENSOR, 40)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_SENSOR_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 40)
    # report 0 when the power sensor itself reads 0
hass.states.async_set(ENTITY_SENSOR, 0)
plug_it = emulated_kasa.get_plug_devices(hass, config)
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_SENSOR_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 0)
async def test_multiple_devices(hass):
"""Test that devices are reported correctly."""
config = CONFIG[DOMAIN][CONF_ENTITIES]
assert await async_setup_component(
hass, SWITCH_DOMAIN, {SWITCH_DOMAIN: {"platform": "demo"}}
)
assert await async_setup_component(
hass, LIGHT_DOMAIN, {LIGHT_DOMAIN: {"platform": "demo"}}
)
assert await async_setup_component(
hass, FAN_DOMAIN, {FAN_DOMAIN: {"platform": "demo"}}
)
assert await async_setup_component(
hass,
SENSOR_DOMAIN,
{SENSOR_DOMAIN: {"platform": "demo"}},
)
with patch(
"sense_energy.SenseLink",
return_value=Mock(start=AsyncMock(), close=AsyncMock()),
):
assert await emulated_kasa.async_setup(hass, CONFIG) is True
await hass.async_block_till_done()
await emulated_kasa.validate_configs(hass, config)
# Turn all devices on to known state
await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True
)
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_LIGHT}, blocking=True
)
hass.states.async_set(ENTITY_SENSOR, 35)
await hass.services.async_call(
FAN_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_FAN}, blocking=True
)
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
{ATTR_ENTITY_ID: ENTITY_FAN, ATTR_SPEED: "medium"},
blocking=True,
)
# All of them should now be on
switch = hass.states.get(ENTITY_SWITCH)
assert switch.state == STATE_ON
light = hass.states.get(ENTITY_LIGHT)
assert light.state == STATE_ON
sensor = hass.states.get(ENTITY_SENSOR)
assert sensor.state == "35"
fan = hass.states.get(ENTITY_FAN)
assert fan.state == STATE_ON
plug_it = emulated_kasa.get_plug_devices(hass, config)
# switch
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_SWITCH_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, ENTITY_SWITCH_POWER)
# light
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_LIGHT_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, 35)
# fan
plug = next(plug_it).generate_response()
assert nested_value(plug, "system", "get_sysinfo", "alias") == ENTITY_FAN_NAME
power = nested_value(plug, "emeter", "get_realtime", "power")
assert math.isclose(power, ENTITY_FAN_SPEED_MED)
# No more devices
assert next(plug_it, None) is None
|
import sys
from coverage import env
from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import CoverageException, NoSource, NotPython, ensure_dir_for_file, file_be_gone
def render_report(output_path, reporter, morfs):
"""Run the provided reporter ensuring any required setup and cleanup is done
At a high level this method ensures the output file is ready to be written to. Then writes the
report to it. Then closes the file and deletes any garbage created if necessary.
"""
file_to_close = None
delete_file = False
if output_path:
if output_path == '-':
outfile = sys.stdout
else:
# Ensure that the output directory is created; done here
# because this report pre-opens the output file.
# HTMLReport does this using the Report plumbing because
# its task is more complex, being multiple files.
ensure_dir_for_file(output_path)
open_kwargs = {}
if env.PY3:
open_kwargs['encoding'] = 'utf8'
outfile = open(output_path, "w", **open_kwargs)
file_to_close = outfile
try:
return reporter.report(morfs, outfile=outfile)
except CoverageException:
delete_file = True
raise
finally:
if file_to_close:
file_to_close.close()
if delete_file:
file_be_gone(output_path)
def get_analysis_to_report(coverage, morfs):
"""Get the files to report on.
For each morf in `morfs`, if it should be reported on (based on the omit
and include configuration options), yield a pair, the `FileReporter` and
`Analysis` for the morf.
"""
file_reporters = coverage._get_file_reporters(morfs)
config = coverage.config
if config.report_include:
matcher = FnmatchMatcher(prep_patterns(config.report_include))
file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
if config.report_omit:
matcher = FnmatchMatcher(prep_patterns(config.report_omit))
file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
if not file_reporters:
raise CoverageException("No data to report.")
for fr in sorted(file_reporters):
try:
analysis = coverage._analyze(fr)
except NoSource:
if not config.ignore_errors:
raise
except NotPython:
# Only report errors for .py files, and only if we didn't
# explicitly suppress those errors.
# NotPython is only raised by PythonFileReporter, which has a
# should_be_python() method.
if fr.should_be_python():
if config.ignore_errors:
msg = "Couldn't parse Python file '{}'".format(fr.filename)
coverage._warn(msg, slug="couldnt-parse")
else:
raise
else:
yield (fr, analysis)
|
import logging
from homeassistant.components.media_player import BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_PLAYLIST,
MEDIA_CLASS_TRACK,
)
from homeassistant.components.media_player.errors import BrowseError
class UnknownMediaType(BrowseError):
"""Unknown media type."""
EXCLUDE_ITEMS = {
"Play Album",
"Play Artist",
"Play Playlist",
"Play Composer",
"Play Now",
"Play From Here",
"Queue",
"Start Radio",
"Add Next",
"Play Radio",
"Play Work",
"Settings",
"Search",
"Search Tidal",
"Search Qobuz",
}
# Maximum number of items to pull back from the API
ITEM_LIMIT = 3000
_LOGGER = logging.getLogger(__name__)
def browse_media(zone_id, roon_server, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
try:
_LOGGER.debug("browse_media: %s: %s", media_content_type, media_content_id)
if media_content_type in [None, "library"]:
return library_payload(roon_server, zone_id, media_content_id)
except UnknownMediaType as err:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
) from err
def item_payload(roon_server, item, list_image_id):
"""Create response payload for a single media item."""
title = item["title"]
subtitle = item.get("subtitle")
if subtitle is None:
display_title = title
else:
display_title = f"{title} ({subtitle})"
image_id = item.get("image_key") or list_image_id
image = None
if image_id:
image = roon_server.roonapi.get_image(image_id)
media_content_id = item["item_key"]
media_content_type = "library"
hint = item.get("hint")
if hint == "list":
media_class = MEDIA_CLASS_DIRECTORY
can_expand = True
elif hint == "action_list":
media_class = MEDIA_CLASS_PLAYLIST
can_expand = False
elif hint == "action":
media_content_type = "track"
media_class = MEDIA_CLASS_TRACK
can_expand = False
else:
# Roon API says to treat unknown as a list
media_class = MEDIA_CLASS_DIRECTORY
can_expand = True
_LOGGER.warning("Unknown hint %s - %s", title, hint)
payload = {
"title": display_title,
"media_class": media_class,
"media_content_id": media_content_id,
"media_content_type": media_content_type,
"can_play": True,
"can_expand": can_expand,
"thumbnail": image,
}
return BrowseMedia(**payload)
def library_payload(roon_server, zone_id, media_content_id):
"""Create response payload for the library."""
opts = {
"hierarchy": "browse",
"zone_or_output_id": zone_id,
"count": ITEM_LIMIT,
}
# Roon starts browsing for a zone where it left off - so start from the top unless otherwise specified
if media_content_id is None or media_content_id == "Explore":
opts["pop_all"] = True
content_id = "Explore"
else:
opts["item_key"] = media_content_id
content_id = media_content_id
result_header = roon_server.roonapi.browse_browse(opts)
_LOGGER.debug("Result header %s", result_header)
header = result_header["list"]
title = header.get("title")
subtitle = header.get("subtitle")
if subtitle is None:
list_title = title
else:
list_title = f"{title} ({subtitle})"
total_count = header["count"]
library_image_id = header.get("image_key")
library_info = BrowseMedia(
title=list_title,
media_content_id=content_id,
media_content_type="library",
media_class=MEDIA_CLASS_DIRECTORY,
can_play=False,
can_expand=True,
children=[],
)
result_detail = roon_server.roonapi.browse_load(opts)
_LOGGER.debug("Result detail %s", result_detail)
items = result_detail["items"]
count = len(items)
if count < total_count:
_LOGGER.debug(
"Exceeded limit of %d, loaded %d/%d", ITEM_LIMIT, count, total_count
)
for item in items:
if item.get("title") in EXCLUDE_ITEMS:
continue
entry = item_payload(roon_server, item, library_image_id)
library_info.children.append(entry)
return library_info
|
import unittest
import numpy as np
import chainer
from chainer.backends import cuda
from chainer import testing
from chainercv.links.model.mobilenet import TFConvolution2D
from chainercv.utils.testing import attr
def _add_one(x):
return x + 1
@testing.parameterize(*testing.product({
'pad': [1, 'SAME'],
'args_style': ['explicit', 'None', 'omit'],
}))
class TestTFConvolution2D(unittest.TestCase):
in_channels = 1
out_channels = 1
ksize = 3
stride = 1
pad = 1
dilate = 1
def setUp(self):
self.x = np.random.uniform(
-1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
self.gy = np.random.uniform(
-1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)
# Convolution is the identity function.
initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
dtype=np.float32).reshape((1, 1, 3, 3))
initial_bias = 0
if self.args_style == 'explicit':
self.l = TFConvolution2D(
self.in_channels, self.out_channels, self.ksize,
self.stride, self.pad, self.dilate,
initialW=initialW, initial_bias=initial_bias)
elif self.args_style == 'None':
self.l = TFConvolution2D(
None, self.out_channels, self.ksize, self.stride, self.pad,
self.dilate, initialW=initialW, initial_bias=initial_bias)
elif self.args_style == 'omit':
self.l = TFConvolution2D(
self.out_channels, self.ksize, stride=self.stride,
pad=self.pad, dilate=self.dilate, initialW=initialW,
initial_bias=initial_bias)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with chainer.using_config('train', False):
y = self.l(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.array, self.l.xp.ndarray)
_x_data = x_data
np.testing.assert_almost_equal(
cuda.to_cpu(y.array), cuda.to_cpu(_x_data),
decimal=4
)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = y_grad
y.backward()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
|
import os
import pytest
from homeassistant.components.hassio.handler import HassIO, HassioAPIError
from homeassistant.core import CoreState
from homeassistant.setup import async_setup_component
from . import HASSIO_TOKEN
from tests.async_mock import Mock, patch
@pytest.fixture
def hassio_env():
"""Fixture to inject hassio env."""
with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value={"result": "ok", "data": {}},
), patch.dict(os.environ, {"HASSIO_TOKEN": "123456"}), patch(
"homeassistant.components.hassio.HassIO.get_info",
Mock(side_effect=HassioAPIError()),
):
yield
@pytest.fixture
def hassio_stubs(hassio_env, hass, hass_client, aioclient_mock):
"""Create mock hassio http client."""
with patch(
"homeassistant.components.hassio.HassIO.update_hass_api",
return_value={"result": "ok"},
) as hass_api, patch(
"homeassistant.components.hassio.HassIO.update_hass_timezone",
return_value={"result": "ok"},
), patch(
"homeassistant.components.hassio.HassIO.get_info",
side_effect=HassioAPIError(),
):
hass.state = CoreState.starting
hass.loop.run_until_complete(async_setup_component(hass, "hassio", {}))
return hass_api.call_args[0][1]
@pytest.fixture
def hassio_client(hassio_stubs, hass, hass_client):
"""Return a Hass.io HTTP client."""
return hass.loop.run_until_complete(hass_client())
@pytest.fixture
def hassio_noauth_client(hassio_stubs, hass, aiohttp_client):
"""Return a Hass.io HTTP client without auth."""
return hass.loop.run_until_complete(aiohttp_client(hass.http.app))
@pytest.fixture
async def hassio_client_supervisor(hass, aiohttp_client, hassio_stubs):
"""Return an authenticated HTTP client."""
access_token = hass.auth.async_create_access_token(hassio_stubs)
return await aiohttp_client(
hass.http.app,
headers={"Authorization": f"Bearer {access_token}"},
)
@pytest.fixture
def hassio_handler(hass, aioclient_mock):
"""Create mock hassio handler."""
async def get_client_session():
return hass.helpers.aiohttp_client.async_get_clientsession()
websession = hass.loop.run_until_complete(get_client_session())
with patch.dict(os.environ, {"HASSIO_TOKEN": HASSIO_TOKEN}):
yield HassIO(hass.loop, websession, "127.0.0.1")
|
from datetime import timedelta
from bond_api import Action, DeviceType
from homeassistant import core
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_STOP_COVER,
)
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def shades(name: str):
"""Create motorized shades with given name."""
return {"name": name, "type": DeviceType.MOTORIZED_SHADES}
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(
hass,
COVER_DOMAIN,
shades("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["cover.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_open_cover(hass: core.HomeAssistant):
"""Tests that open cover command delegates to API."""
await setup_platform(
hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_open, patch_bond_device_state():
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_open.assert_called_once_with("test-device-id", Action.open())
async def test_close_cover(hass: core.HomeAssistant):
"""Tests that close cover command delegates to API."""
await setup_platform(
hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_close, patch_bond_device_state():
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_close.assert_called_once_with("test-device-id", Action.close())
async def test_stop_cover(hass: core.HomeAssistant):
"""Tests that stop cover command delegates to API."""
await setup_platform(
hass, COVER_DOMAIN, shades("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_hold, patch_bond_device_state():
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_hold.assert_called_once_with("test-device-id", Action.hold())
async def test_update_reports_open_cover(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports cover is open."""
await setup_platform(hass, COVER_DOMAIN, shades("name-1"))
with patch_bond_device_state(return_value={"open": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("cover.name_1").state == "open"
async def test_update_reports_closed_cover(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports cover is closed."""
await setup_platform(hass, COVER_DOMAIN, shades("name-1"))
with patch_bond_device_state(return_value={"open": 0}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("cover.name_1").state == "closed"
async def test_cover_available(hass: core.HomeAssistant):
"""Tests that available state is updated based on API errors."""
await help_test_entity_available(
hass, COVER_DOMAIN, shades("name-1"), "cover.name_1"
)
|
from homeassistant import config_entries, setup
from homeassistant.components.rachio.const import (
CONF_CUSTOM_URL,
CONF_MANUAL_RUN_MINS,
DOMAIN,
)
from homeassistant.const import CONF_API_KEY
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
def _mock_rachio_return_value(get=None, info=None):
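    """Build a Rachio client mock whose person.get() and person.info() return the given (response, payload) tuples."""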
rachio_mock = MagicMock()
person_mock = MagicMock()
type(person_mock).get = MagicMock(return_value=get)
type(person_mock).info = MagicMock(return_value=info)
type(rachio_mock).person = person_mock
return rachio_mock
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
rachio_mock = _mock_rachio_return_value(
get=({"status": 200}, {"username": "myusername"}),
info=({"status": 200}, {"id": "myid"}),
)
with patch(
"homeassistant.components.rachio.config_flow.Rachio", return_value=rachio_mock
), patch(
"homeassistant.components.rachio.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.rachio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "api_key",
CONF_CUSTOM_URL: "http://custom.url",
CONF_MANUAL_RUN_MINS: 5,
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "myusername"
assert result2["data"] == {
CONF_API_KEY: "api_key",
CONF_CUSTOM_URL: "http://custom.url",
CONF_MANUAL_RUN_MINS: 5,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
rachio_mock = _mock_rachio_return_value(
get=({"status": 200}, {"username": "myusername"}),
info=({"status": 412}, {"error": "auth fail"}),
)
with patch(
"homeassistant.components.rachio.config_flow.Rachio", return_value=rachio_mock
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_KEY: "api_key"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
rachio_mock = _mock_rachio_return_value(
get=({"status": 599}, {"username": "myusername"}),
info=({"status": 200}, {"id": "myid"}),
)
with patch(
"homeassistant.components.rachio.config_flow.Rachio", return_value=rachio_mock
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_KEY: "api_key"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_homekit(hass):
"""Test that we abort from homekit if rachio is already setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "homekit"},
data={"properties": {"id": "AA:BB:CC:DD:EE:FF"}},
)
assert result["type"] == "form"
assert result["errors"] == {}
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == "AA:BB:CC:DD:EE:FF"
entry = MockConfigEntry(domain=DOMAIN, data={CONF_API_KEY: "api_key"})
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "homekit"},
data={"properties": {"id": "AA:BB:CC:DD:EE:FF"}},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
|
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.switch import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a switch."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a switch trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, "trigger", trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_off device - {} - on - off - None".format(
ent1.entity_id
)
hass.states.async_set(ent1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_on device - {} - off - on - None".format(
ent1.entity_id
)
async def test_if_fires_on_state_change_with_for(hass, calls):
"""Test for triggers firing with delay."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
ent1.entity_id
)
|
from homeassistant.components.onewire.const import DEFAULT_SYSBUS_MOUNT_DIR
import homeassistant.components.sensor as sensor
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
async def test_setup_minimum(hass):
"""Test old platform setup with minimum configuration."""
config = {"sensor": {"platform": "onewire"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, sensor.DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_sysbus(hass):
"""Test old platform setup with SysBus configuration."""
config = {
"sensor": {
"platform": "onewire",
"mount_dir": DEFAULT_SYSBUS_MOUNT_DIR,
}
}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, sensor.DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_owserver(hass):
"""Test old platform setup with OWServer configuration."""
config = {"sensor": {"platform": "onewire", "host": "localhost"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, sensor.DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_owserver_with_port(hass):
"""Test old platform setup with OWServer configuration."""
config = {"sensor": {"platform": "onewire", "host": "localhost", "port": "1234"}}
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, sensor.DOMAIN, config)
await hass.async_block_till_done()
|
from datetime import timedelta
from homeassistant.components.cover import (
DOMAIN as SENSOR_DOMAIN,
ENTITY_ID_FORMAT,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import TuyaDevice
from .const import DOMAIN, TUYA_DATA, TUYA_DISCOVERY_NEW
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tuya sensors dynamically through tuya discovery."""
platform = config_entry.data[CONF_PLATFORM]
async def async_discover_sensor(dev_ids):
"""Discover and add a discovered tuya sensor."""
if not dev_ids:
return
entities = await hass.async_add_executor_job(
_setup_entities,
hass,
dev_ids,
platform,
)
async_add_entities(entities)
async_dispatcher_connect(
hass, TUYA_DISCOVERY_NEW.format(SENSOR_DOMAIN), async_discover_sensor
)
devices_ids = hass.data[DOMAIN]["pending"].pop(SENSOR_DOMAIN)
await async_discover_sensor(devices_ids)
def _setup_entities(hass, dev_ids, platform):
"""Set up Tuya Cover device."""
tuya = hass.data[DOMAIN][TUYA_DATA]
entities = []
for dev_id in dev_ids:
device = tuya.get_device_by_id(dev_id)
if device is None:
continue
entities.append(TuyaCover(device, platform))
return entities
class TuyaCover(TuyaDevice, CoverEntity):
"""Tuya cover devices."""
def __init__(self, tuya, platform):
"""Init tuya cover device."""
super().__init__(tuya, platform)
self.entity_id = ENTITY_ID_FORMAT.format(tuya.object_id())
self._was_closing = False
self._was_opening = False
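        # The Tuya state appears to report only the moving direction (1 = opening, 2 = closing in the properties below); remember the last direction so is_closed can infer the resting position.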
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE
if self._tuya.support_stop():
supported_features |= SUPPORT_STOP
return supported_features
@property
def is_opening(self):
"""Return if the cover is opening or not."""
state = self._tuya.state()
if state == 1:
self._was_opening = True
self._was_closing = False
return True
return False
@property
def is_closing(self):
"""Return if the cover is closing or not."""
state = self._tuya.state()
if state == 2:
self._was_opening = False
self._was_closing = True
return True
return False
@property
def is_closed(self):
"""Return if the cover is closed or not."""
state = self._tuya.state()
if state != 2 and self._was_closing:
return True
if state != 1 and self._was_opening:
return False
return None
def open_cover(self, **kwargs):
"""Open the cover."""
self._tuya.open_cover()
def close_cover(self, **kwargs):
"""Close cover."""
self._tuya.close_cover()
def stop_cover(self, **kwargs):
"""Stop the cover."""
if self.is_closed is None:
self._was_opening = False
self._was_closing = False
self._tuya.stop_cover()
|
from copy import deepcopy
from aiounifi.controller import MESSAGE_CLIENT, MESSAGE_CLIENT_REMOVED
from aiounifi.websocket import SIGNAL_DATA
from homeassistant.components.device_tracker import DOMAIN as TRACKER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
from .test_controller import setup_unifi_integration
CLIENTS = [
{
"hostname": "Wired client hostname",
"ip": "10.0.0.1",
"is_wired": True,
"last_seen": 1562600145,
"mac": "00:00:00:00:00:01",
"name": "Wired client name",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
"uptime": 1600094505,
},
{
"hostname": "Wireless client hostname",
"ip": "10.0.0.2",
"is_wired": False,
"last_seen": 1562600145,
"mac": "00:00:00:00:00:02",
"name": "Wireless client name",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 2,
"rx_bytes": 1234000000,
"tx_bytes": 5678000000,
"uptime": 1600094505,
},
]
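# Note on the values asserted below (derived from this fixture): the bandwidth
# sensors report the rx/tx byte counters scaled down by 10**6
# (1234000000 -> "1234.0"), and the uptime sensors render the epoch value
# 1600094505 as the ISO-8601 UTC timestamp "2020-09-14T14:41:45+00:00".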
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a controller."""
assert (
await async_setup_component(
hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: {"platform": UNIFI_DOMAIN}}
)
is True
)
assert UNIFI_DOMAIN not in hass.data
async def test_no_clients(hass):
"""Test the update_clients function when no clients are found."""
controller = await setup_unifi_integration(
hass,
options={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
)
assert len(controller.mock_requests) == 4
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
async def test_sensors(hass):
"""Test the update_items function with some clients."""
controller = await setup_unifi_integration(
hass,
options={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
},
clients_response=CLIENTS,
)
assert len(controller.mock_requests) == 4
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 6
wired_client_rx = hass.states.get("sensor.wired_client_name_rx")
assert wired_client_rx.state == "1234.0"
wired_client_tx = hass.states.get("sensor.wired_client_name_tx")
assert wired_client_tx.state == "5678.0"
wired_client_uptime = hass.states.get("sensor.wired_client_name_uptime")
assert wired_client_uptime.state == "2020-09-14T14:41:45+00:00"
wireless_client_rx = hass.states.get("sensor.wireless_client_name_rx")
assert wireless_client_rx.state == "1234.0"
wireless_client_tx = hass.states.get("sensor.wireless_client_name_tx")
assert wireless_client_tx.state == "5678.0"
wireless_client_uptime = hass.states.get("sensor.wireless_client_name_uptime")
assert wireless_client_uptime.state == "2020-09-14T14:41:45+00:00"
clients = deepcopy(CLIENTS)
clients[0]["is_wired"] = False
clients[1]["rx_bytes"] = 2345000000
clients[1]["tx_bytes"] = 6789000000
clients[1]["uptime"] = 1600180860
event = {"meta": {"message": MESSAGE_CLIENT}, "data": clients}
controller.api.message_handler(event)
await hass.async_block_till_done()
wireless_client_rx = hass.states.get("sensor.wireless_client_name_rx")
assert wireless_client_rx.state == "2345.0"
wireless_client_tx = hass.states.get("sensor.wireless_client_name_tx")
assert wireless_client_tx.state == "6789.0"
wireless_client_uptime = hass.states.get("sensor.wireless_client_name_uptime")
assert wireless_client_uptime.state == "2020-09-15T14:41:00+00:00"
hass.config_entries.async_update_entry(
controller.config_entry,
options={
CONF_ALLOW_BANDWIDTH_SENSORS: False,
CONF_ALLOW_UPTIME_SENSORS: False,
},
)
await hass.async_block_till_done()
wireless_client_rx = hass.states.get("sensor.wireless_client_name_rx")
assert wireless_client_rx is None
wireless_client_tx = hass.states.get("sensor.wireless_client_name_tx")
assert wireless_client_tx is None
wired_client_uptime = hass.states.get("sensor.wired_client_name_uptime")
assert wired_client_uptime is None
wireless_client_uptime = hass.states.get("sensor.wireless_client_name_uptime")
assert wireless_client_uptime is None
hass.config_entries.async_update_entry(
controller.config_entry,
options={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
)
await hass.async_block_till_done()
wireless_client_rx = hass.states.get("sensor.wireless_client_name_rx")
assert wireless_client_rx.state == "2345.0"
wireless_client_tx = hass.states.get("sensor.wireless_client_name_tx")
assert wireless_client_tx.state == "6789.0"
wireless_client_uptime = hass.states.get("sensor.wireless_client_name_uptime")
assert wireless_client_uptime.state == "2020-09-15T14:41:00+00:00"
wired_client_uptime = hass.states.get("sensor.wired_client_name_uptime")
assert wired_client_uptime.state == "2020-09-14T14:41:45+00:00"
# Try to add the sensors again, using a signal
clients_connected = set()
devices_connected = set()
clients_connected.add(clients[0]["mac"])
clients_connected.add(clients[1]["mac"])
async_dispatcher_send(
hass,
controller.signal_update,
clients_connected,
devices_connected,
)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 6
async def test_remove_sensors(hass):
"""Test the remove_items function with some clients."""
controller = await setup_unifi_integration(
hass,
options={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
clients_response=CLIENTS,
)
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 6
assert len(hass.states.async_entity_ids(TRACKER_DOMAIN)) == 2
wired_client_rx = hass.states.get("sensor.wired_client_name_rx")
assert wired_client_rx is not None
wired_client_tx = hass.states.get("sensor.wired_client_name_tx")
assert wired_client_tx is not None
wired_client_uptime = hass.states.get("sensor.wired_client_name_uptime")
assert wired_client_uptime is not None
wireless_client_rx = hass.states.get("sensor.wireless_client_name_rx")
assert wireless_client_rx is not None
wireless_client_tx = hass.states.get("sensor.wireless_client_name_tx")
assert wireless_client_tx is not None
wireless_client_uptime = hass.states.get("sensor.wireless_client_name_uptime")
assert wireless_client_uptime is not None
controller.api.websocket._data = {
"meta": {"message": MESSAGE_CLIENT_REMOVED},
"data": [CLIENTS[0]],
}
controller.api.session_handler(SIGNAL_DATA)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 3
assert len(hass.states.async_entity_ids(TRACKER_DOMAIN)) == 1
wired_client_rx = hass.states.get("sensor.wired_client_name_rx")
assert wired_client_rx is None
wired_client_tx = hass.states.get("sensor.wired_client_name_tx")
assert wired_client_tx is None
wired_client_uptime = hass.states.get("sensor.wired_client_name_uptime")
assert wired_client_uptime is None
wireless_client_rx = hass.states.get("sensor.wireless_client_name_rx")
assert wireless_client_rx is not None
wireless_client_tx = hass.states.get("sensor.wireless_client_name_tx")
assert wireless_client_tx is not None
wireless_client_uptime = hass.states.get("sensor.wireless_client_name_uptime")
assert wireless_client_uptime is not None
|
from datetime import datetime as dt
import numpy as np
import pandas as pd
from pandas import DataFrame, DatetimeIndex
from pandas.util.testing import assert_frame_equal
from arctic.date import DateRange, CLOSED_OPEN, CLOSED_CLOSED, OPEN_OPEN, OPEN_CLOSED
# pandas >= 0.23 changed pd.concat's default sort behaviour; pin sort=False so
# these tests behave the same across pandas versions (compare the full version
# tuple so pandas 1.x and later are handled as well)
if tuple(int(part) for part in pd.__version__.split('.')[:2]) >= (0, 23):
    from functools import partial
    pd.concat = partial(pd.concat, sort=False)
# Issue 384
def test_write_dataframe(chunkstore_lib):
# Create dataframe of time measurements taken every 6 hours
date_range = pd.date_range(start=dt(2017, 5, 1, 1), periods=8, freq='6H')
df = DataFrame(data={'something': [100, 200, 300, 400, 500, 600, 700, 800]},
index=DatetimeIndex(date_range, name='date'))
chunkstore_lib.write('test', df, chunk_size='D')
# Iterate
for chunk in chunkstore_lib.iterator('test'):
assert(len(chunk) > 0)
def test_compression(chunkstore_lib):
"""
Issue 407 - Chunkstore was not removing the 1st segment, with segment id -1
so on an append it would append new chunks with id 0 and 1, and a subsequent read
would still pick up -1 (which should have been removed or overwritten).
Since the -1 segment (which previously indicated a standalone segment) is no
longer needed, the special -1 segment id is now removed
"""
def generate_data(date):
"""
Generates a dataframe that is almost exactly the size of
a segment in chunkstore
"""
df = pd.DataFrame(np.random.randn(10000*16, 12),
columns=['beta', 'btop', 'earnyild', 'growth', 'industry', 'leverage',
'liquidty', 'momentum', 'resvol', 'sid', 'size', 'sizenl'])
df['date'] = date
return df
date = pd.Timestamp('2000-01-01')
df = generate_data(date)
chunkstore_lib.write('test', df, chunk_size='A')
date += pd.Timedelta(1, unit='D')
df2 = generate_data(date)
chunkstore_lib.append('test', df2)
read = chunkstore_lib.read('test')
assert_frame_equal(read, pd.concat([df, df2], ignore_index=True))
# Issue #420 - ChunkStore doesn't respect the DateRange interval
def test_date_interval(chunkstore_lib):
date_range = pd.date_range(start=dt(2017, 5, 1), periods=8, freq='D')
df = DataFrame(data={'data': range(8)},
index=DatetimeIndex(date_range, name='date'))
# test with index
chunkstore_lib.write('test', df, chunk_size='D')
ret = chunkstore_lib.read('test', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), CLOSED_OPEN))
assert_frame_equal(ret, df[1:4])
ret = chunkstore_lib.read('test', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), OPEN_OPEN))
assert_frame_equal(ret, df[2:4])
ret = chunkstore_lib.read('test', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), OPEN_CLOSED))
assert_frame_equal(ret, df[2:5])
ret = chunkstore_lib.read('test', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), CLOSED_CLOSED))
assert_frame_equal(ret, df[1:5])
ret = chunkstore_lib.read('test', chunk_range=DateRange(dt(2017, 5, 2), None, CLOSED_OPEN))
assert_frame_equal(ret, df[1:8])
# test without index
df = DataFrame(data={'data': range(8),
'date': date_range})
chunkstore_lib.write('test2', df, chunk_size='D')
ret = chunkstore_lib.read('test2', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), CLOSED_OPEN))
assert(len(ret) == 3)
ret = chunkstore_lib.read('test2', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), OPEN_OPEN))
assert(len(ret) == 2)
ret = chunkstore_lib.read('test2', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), OPEN_CLOSED))
assert(len(ret) == 3)
ret = chunkstore_lib.read('test2', chunk_range=DateRange(dt(2017, 5, 2), dt(2017, 5, 5), CLOSED_CLOSED))
assert(len(ret) == 4)
ret = chunkstore_lib.read('test2', chunk_range=DateRange(dt(2017, 5, 2), None, CLOSED_OPEN))
assert(len(ret) == 7)
def test_rewrite(chunkstore_lib):
"""
Issue 427
incorrectly storing and updating metadata. dataframes without an index
have no "index" field in their metadata, so updating existing
metadata does not remove the index field.
Also, metadata was incorrectly being stored. symbol, start, and end
are the index for the collection, but metadata was being
stored without an index (so it was defaulting to null,null,null)
"""
date_range = pd.date_range(start=dt(2017, 5, 1, 1), periods=8, freq='6H')
df = DataFrame(data={'something': [100, 200, 300, 400, 500, 600, 700, 800]},
index=DatetimeIndex(date_range, name='date'))
chunkstore_lib.write('test', df, chunk_size='D')
df2 = DataFrame(data={'something': [100, 200, 300, 400, 500, 600, 700, 800],
'date': date_range})
chunkstore_lib.write('test', df2, chunk_size='D')
ret = chunkstore_lib.read('test')
assert_frame_equal(ret, df2)
def test_iterator(chunkstore_lib):
"""
Fixes issue #431 - iterator methods were not taking into account
the fact that symbols can have multiple segments
"""
def generate_data(date):
"""
        Generates a dataframe that is larger than one segment
        in chunkstore
"""
df = pd.DataFrame(np.random.randn(200000, 12),
columns=['beta', 'btop', 'earnyild', 'growth', 'industry', 'leverage',
'liquidty', 'momentum', 'resvol', 'sid', 'size', 'sizenl'])
df['date'] = date
return df
date = pd.Timestamp('2000-01-01')
df = generate_data(date)
chunkstore_lib.write('test', df, chunk_size='A')
ret = chunkstore_lib.get_chunk_ranges('test')
assert(len(list(ret)) == 1)
# Issue 722
def test_missing_cols(chunkstore_lib):
index = DatetimeIndex(pd.date_range('2019-01-01', periods=3, freq='D'), name='date')
index2 = DatetimeIndex(pd.date_range('2019-01-04', periods=3, freq='D'), name='date')
expected_index = DatetimeIndex(pd.date_range('2019-01-01', periods=6, freq='D'), name='date')
expected_df = DataFrame({'A': [1, 2, 3, 40, 50, 60], 'B': [5.0,6.0,7.0, np.nan, np.nan, np.nan]}, index=expected_index)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [5,6,7]}, index=index)
chunkstore_lib.write('test', df, chunk_size='D')
df = pd.DataFrame({'A': [40, 50, 60]}, index=index2)
chunkstore_lib.append('test', df, chunk_size='D')
assert_frame_equal(chunkstore_lib.read('test'), expected_df)
df = chunkstore_lib.read('test', columns=['B'])
assert_frame_equal(df, expected_df['B'].to_frame())
def test_column_copy(chunkstore_lib):
index = DatetimeIndex(pd.date_range('2019-01-01', periods=3, freq='D'), name='date')
df = pd.DataFrame({'A': [1, 2, 3], 'B': [5,6,7]}, index=index)
cols = ['A']
chunkstore_lib.write('test', df)
chunkstore_lib.read('test', columns=cols)
assert cols == ['A']
def test_get_info_empty(chunkstore_lib):
chunkstore_lib.write('test', pd.DataFrame(data={'date': [], 'data': []}))
ret = chunkstore_lib.get_info('test')
assert ret == {'appended_rows': 0,
'chunker': u'date',
'len': 0, 'chunk_size': 0,
'chunk_count': 0,
'serializer': u'FrameToArray',
'metadata': None}
|
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.climate import DOMAIN, const, device_condition
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a climate."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
f"{DOMAIN}.test_5678",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY],
},
)
hass.states.async_set("climate.test_5678", "attributes", {"supported_features": 17})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_hvac_mode",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_preset_mode",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_get_conditions_hvac_only(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a climate."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
f"{DOMAIN}.test_5678",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY],
},
)
hass.states.async_set("climate.test_5678", "attributes", {"supported_features": 1})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_hvac_mode",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
}
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_hvac_mode",
"hvac_mode": "cool",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_hvac_mode - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_preset_mode",
"preset_mode": "away",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_preset_mode - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_hvac_mode - event - test_event1"
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_AUTO,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
},
)
# Should not fire
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_preset_mode - event - test_event2"
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_AUTO,
const.ATTR_PRESET_MODE: const.PRESET_HOME,
},
)
# Should not fire
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_capabilities(hass):
"""Bla."""
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
const.ATTR_HVAC_MODES: [const.HVAC_MODE_COOL, const.HVAC_MODE_OFF],
const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY],
},
)
# Test hvac mode
capabilities = await device_condition.async_get_condition_capabilities(
hass,
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_hvac_mode",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "hvac_mode",
"options": [("cool", "cool"), ("off", "off")],
"required": True,
"type": "select",
}
]
# Test preset mode
capabilities = await device_condition.async_get_condition_capabilities(
hass,
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_preset_mode",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "preset_modes",
"options": [("home", "home"), ("away", "away")],
"required": True,
"type": "select",
}
]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
# paths to mne datasets - sample ECoG and FreeSurfer subject
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subject = 'sample'
subjects_dir = sample_path + '/subjects'
###############################################################################
# Let's load some ECoG electrode locations and names, and turn them into
# a :class:`mne.channels.DigMontage` class. First, use pandas to read in the
# ``.tsv`` file.
# In this tutorial, the electrode coordinates are assumed to be in meters
elec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv',
sep='\t', header=0, index_col=None)
ch_names = elec_df['name'].tolist()
ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)
ch_pos = dict(zip(ch_names, ch_coords))
# Ideally the nasion/LPA/RPA will also be present from the digitization, here
# we use fiducials estimated from the subject's FreeSurfer MNI transformation:
lpa, nasion, rpa = mne.coreg.get_mni_fiducials(
subject, subjects_dir=subjects_dir)
lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']
###############################################################################
# Now we make a :class:`mne.channels.DigMontage` stating that the ECoG
# contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate system.
montage = mne.channels.make_dig_montage(
ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Now we get the :term:`trans` that transforms from our MRI coordinate system
# to the head coordinate frame. This transform will be applied to the
# data when applying the montage so that standard plotting functions like
# :func:`mne.viz.plot_evoked_topomap` will be aligned properly.
trans = mne.channels.compute_native_head_t(montage)
print(trans)
###############################################################################
# Now that we have our montage, we can load in our corresponding
# time-series data and set the montage to the raw data.
# first we'll load in the sample dataset
raw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf')
# drop bad channels
raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])
raw.load_data()
raw.drop_channels(raw.info['bads'])
raw.crop(0, 2) # just process 2 sec of data for speed
# attach montage
raw.set_montage(montage)
# set channel types to ECoG (instead of EEG)
raw.set_channel_types({ch_name: 'ecog' for ch_name in raw.ch_names})
###############################################################################
# We can then plot the locations of our electrodes on our subject's brain.
# We'll use :func:`~mne.viz.snapshot_brain_montage` to save the plot as image
# data (along with xy positions of each electrode in the image), so that later
# we can plot frequency band power on top of it.
#
# .. note:: These are not real electrodes for this subject, so they
# do not align to the cortical surface perfectly.
fig = plot_alignment(raw.info, subject=subject, subjects_dir=subjects_dir,
surfaces=['pial'], trans=trans, coord_frame='mri')
mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, -0.005, 0.03])
xy, im = snapshot_brain_montage(fig, montage)
###############################################################################
# Next, we'll compute the signal power in the gamma (30-90 Hz) and alpha
# (8-12 Hz) bands.
gamma_power_t = raw.copy().filter(30, 90).apply_hilbert(
envelope=True).get_data()
alpha_power_t = raw.copy().filter(8, 12).apply_hilbert(
envelope=True).get_data()
gamma_power = gamma_power_t.mean(axis=-1)
alpha_power = alpha_power_t.mean(axis=-1)
###############################################################################
# Now let's use matplotlib to overplot frequency band power onto the electrodes
# which can be plotted on top of the brain from
# :func:`~mne.viz.snapshot_brain_montage`.
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']])
# colormap to view spectral power
cmap = 'viridis'
# Create a 1x2 figure showing the average power in gamma and alpha bands.
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
# choose a colormap range wide enough for both frequency bands
_gamma_alpha_power = np.concatenate((gamma_power, alpha_power)).flatten()
vmin, vmax = np.percentile(_gamma_alpha_power, [10, 90])
for ax, band_power, band in zip(axs,
[gamma_power, alpha_power],
['Gamma', 'Alpha']):
ax.imshow(im)
ax.set_axis_off()
sc = ax.scatter(*xy_pts.T, c=band_power, s=200,
cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_title(f'{band} band power', size='x-large')
fig.colorbar(sc, ax=axs)
###############################################################################
# Say we want to visualize the evolution of the power in the gamma band,
# instead of just plotting the average. We can use
# `matplotlib.animation.FuncAnimation` to create an animation and apply this
# to the brain figure.
# create an initialization and animation function
# to pass to FuncAnimation
def init():
"""Create an empty frame."""
return paths,
def animate(i, activity):
"""Animate the plot."""
paths.set_array(activity[:, i])
return paths,
# create the figure and apply the animation of the
# gamma frequency band activity
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(im)
ax.set_axis_off()
paths = ax.scatter(*xy_pts.T, c=np.zeros(len(xy_pts)), s=200,
cmap=cmap, vmin=vmin, vmax=vmax)
fig.colorbar(paths, ax=ax)
ax.set_title('Gamma frequency over time (Hilbert transform)',
size='large')
# avoid edge artifacts and decimate, showing just a short chunk
sl = slice(100, 150)
show_power = gamma_power_t[:, sl]
anim = animation.FuncAnimation(fig, animate, init_func=init,
fargs=(show_power,),
frames=show_power.shape[1],
interval=100, blit=True)
###############################################################################
# Alternatively, we can project the sensor data to the nearest locations on
# the pial surface and visualize that:
# sphinx_gallery_thumbnail_number = 4
evoked = mne.EvokedArray(
gamma_power_t[:, sl], raw.info, tmin=raw.times[sl][0])
stc = mne.stc_near_sensors(evoked, trans, subject, subjects_dir=subjects_dir)
clim = dict(kind='value', lims=[vmin * 0.9, vmin, vmax])
brain = stc.plot(surface='pial', hemi='both', initial_time=0.68,
colormap='viridis', clim=clim, views='parietal',
subjects_dir=subjects_dir, size=(500, 500))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=50, interpolation='linear', framerate=10,
# time_viewer=True)
|
import pytest
@pytest.mark.parametrize('js_enabled, expected', [(True, 2.0), (False, None)])
def test_simple_js_webkit(webview, js_enabled, expected):
"""With QtWebKit, evaluateJavaScript works when JS is on."""
# If we get there (because of the webview fixture) we can be certain
# QtWebKit is available
from PyQt5.QtWebKit import QWebSettings
webview.settings().setAttribute(QWebSettings.JavascriptEnabled, js_enabled)
result = webview.page().mainFrame().evaluateJavaScript('1 + 1')
assert result == expected
@pytest.mark.parametrize('js_enabled, expected', [(True, 2.0), (False, 2.0)])
def test_element_js_webkit(webview, js_enabled, expected):
"""With QtWebKit, evaluateJavaScript on an element works with JS off."""
# If we get there (because of the webview fixture) we can be certain
# QtWebKit is available
from PyQt5.QtWebKit import QWebSettings
webview.settings().setAttribute(QWebSettings.JavascriptEnabled, js_enabled)
elem = webview.page().mainFrame().documentElement()
result = elem.evaluateJavaScript('1 + 1')
assert result == expected
@pytest.mark.usefixtures('redirect_webengine_data')
@pytest.mark.parametrize('js_enabled, world, expected', [
# main world
(True, 0, 2.0),
(False, 0, None),
# application world
(True, 1, 2.0),
(False, 1, 2.0),
# user world
(True, 2, 2.0),
(False, 2, 2.0),
])
def test_simple_js_webengine(qtbot, webengineview, qapp,
js_enabled, world, expected):
"""With QtWebEngine, runJavaScript works even when JS is off."""
# If we get there (because of the webengineview fixture) we can be certain
# QtWebEngine is available
from PyQt5.QtWebEngineWidgets import QWebEngineSettings, QWebEngineScript
assert world in [QWebEngineScript.MainWorld,
QWebEngineScript.ApplicationWorld,
QWebEngineScript.UserWorld]
settings = webengineview.settings()
settings.setAttribute(QWebEngineSettings.JavascriptEnabled, js_enabled)
qapp.processEvents()
page = webengineview.page()
with qtbot.wait_callback() as callback:
page.runJavaScript('1 + 1', world, callback)
callback.assert_called_with(expected)
|
from copy import deepcopy
import json
import pytest
from homeassistant.components import vacuum
from homeassistant.components.mqtt import CONF_COMMAND_TOPIC, CONF_STATE_TOPIC
from homeassistant.components.mqtt.vacuum import CONF_SCHEMA, schema_state as mqttvacuum
from homeassistant.components.mqtt.vacuum.schema import services_to_strings
from homeassistant.components.mqtt.vacuum.schema_state import SERVICE_TO_STRING
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_BATTERY_LEVEL,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_DOCKED,
)
from homeassistant.const import (
CONF_NAME,
CONF_PLATFORM,
ENTITY_MATCH_ALL,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.async_mock import patch
from tests.common import async_fire_mqtt_message
from tests.components.vacuum import common
COMMAND_TOPIC = "vacuum/command"
SEND_COMMAND_TOPIC = "vacuum/send_command"
STATE_TOPIC = "vacuum/state"
DEFAULT_CONFIG = {
CONF_PLATFORM: "mqtt",
CONF_SCHEMA: "state",
CONF_NAME: "mqtttest",
CONF_COMMAND_TOPIC: COMMAND_TOPIC,
mqttvacuum.CONF_SEND_COMMAND_TOPIC: SEND_COMMAND_TOPIC,
CONF_STATE_TOPIC: STATE_TOPIC,
mqttvacuum.CONF_SET_FAN_SPEED_TOPIC: "vacuum/set_fan_speed",
mqttvacuum.CONF_FAN_SPEED_LIST: ["min", "medium", "high", "max"],
}
DEFAULT_CONFIG_2 = {
vacuum.DOMAIN: {"platform": "mqtt", "schema": "state", "name": "test"}
}
async def test_default_supported_features(hass, mqtt_mock):
"""Test that the correct supported features."""
assert await async_setup_component(
hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG}
)
await hass.async_block_till_done()
entity = hass.states.get("vacuum.mqtttest")
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(
["start", "stop", "return_home", "battery", "status", "clean_spot"]
)
async def test_all_commands(hass, mqtt_mock):
"""Test simple commands send to the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "start", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "stop", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "pause", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "locate", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "clean_spot", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "return_to_base", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/set_fan_speed", "medium", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(hass, "44 FE 93", entity_id="vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/send_command", "44 FE 93", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
assert json.loads(mqtt_mock.async_publish.mock_calls[-1][1][1]) == {
"command": "44 FE 93",
"key": "value",
}
async def test_commands_without_supported_features(hass, mqtt_mock):
"""Test commands which are not supported by the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
services = mqttvacuum.STRING_TO_SERVICE["status"]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
mqtt_mock.async_publish.assert_not_called()
async def test_status(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
assert state.attributes.get(ATTR_FAN_SPEED) == "max"
message = """{
"battery_level": 61,
"state": "docked",
"fan_speed": "min"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
assert state.attributes.get(ATTR_FAN_SPEED) == "min"
assert state.attributes.get(ATTR_FAN_SPEED_LIST) == ["min", "medium", "high", "max"]
async def test_no_fan_vacuum(hass, mqtt_mock):
"""Test status updates from the vacuum when fan is not supported."""
config = deepcopy(DEFAULT_CONFIG)
del config[mqttvacuum.CONF_FAN_SPEED_LIST]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"state": "cleaning"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 61,
"state": "docked"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
@pytest.mark.no_fail_on_log_exception
async def test_status_invalid_json(hass, mqtt_mock):
"""Test to make sure nothing breaks if the vacuum sends bad JSON."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "vacuum/state", '{"asdfasas false}')
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNKNOWN
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one vacuum per unique_id."""
config = {
vacuum.DOMAIN: [
{
"platform": "mqtt",
"schema": "state",
"name": "Test 1",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"schema": "state",
"name": "Test 2",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, vacuum.DOMAIN, config)
async def test_discovery_removal_vacuum(hass, mqtt_mock, caplog):
"""Test removal of discovered vacuum."""
data = '{ "schema": "state", "name": "test", "command_topic": "test_topic"}'
await help_test_discovery_removal(hass, mqtt_mock, caplog, vacuum.DOMAIN, data)
async def test_discovery_update_vacuum(hass, mqtt_mock, caplog):
"""Test update of discovered vacuum."""
data1 = '{ "schema": "state", "name": "Beer", "command_topic": "test_topic"}'
data2 = '{ "schema": "state", "name": "Milk", "command_topic": "test_topic"}'
await help_test_discovery_update(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
async def test_discovery_update_unchanged_vacuum(hass, mqtt_mock, caplog):
"""Test update of discovered vacuum."""
data1 = '{ "schema": "state", "name": "Beer", "command_topic": "test_topic"}'
with patch(
"homeassistant.components.mqtt.vacuum.schema_state.MqttStateVacuum.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "schema": "state", "name": "Beer", "command_topic": "test_topic#"}'
data2 = '{ "schema": "state", "name": "Milk", "command_topic": "test_topic"}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2, payload="{}"
)
|
from __future__ import division, print_function
from urwid import escape
from urwid.compat import bytes, text_type, text_types
import codecs
str_util = escape.str_util
# bring str_util functions into our namespace
calc_text_pos = str_util.calc_text_pos
calc_width = str_util.calc_width
is_wide_char = str_util.is_wide_char
move_next_char = str_util.move_next_char
move_prev_char = str_util.move_prev_char
within_double_byte = str_util.within_double_byte
def detect_encoding():
# Try to determine if using a supported double-byte encoding
import locale
initial = locale.getlocale()
try:
try:
locale.setlocale(locale.LC_ALL, "")
except locale.Error:
pass
return locale.getlocale()[1] or ""
except ValueError as e:
# with invalid LANG value python will throw ValueError
if e.args and e.args[0].startswith("unknown locale"):
return ""
else:
raise
finally:
try:
locale.setlocale(locale.LC_ALL, initial)
except locale.Error:
pass
if 'detected_encoding' not in locals():
detected_encoding = detect_encoding()
else:
assert 0, "It worked!"
_target_encoding = None
_use_dec_special = True
def set_encoding( encoding ):
"""
Set the byte encoding to assume when processing strings and the
encoding to use when converting unicode strings.
"""
encoding = encoding.lower()
global _target_encoding, _use_dec_special
if encoding in ( 'utf-8', 'utf8', 'utf' ):
str_util.set_byte_encoding("utf8")
_use_dec_special = False
elif encoding in ( 'euc-jp' # JISX 0208 only
, 'euc-kr', 'euc-cn', 'euc-tw' # CNS 11643 plain 1 only
, 'gb2312', 'gbk', 'big5', 'cn-gb', 'uhc'
# these shouldn't happen, should they?
, 'eucjp', 'euckr', 'euccn', 'euctw', 'cncb' ):
str_util.set_byte_encoding("wide")
_use_dec_special = True
else:
str_util.set_byte_encoding("narrow")
_use_dec_special = True
# if encoding is valid for conversion from unicode, remember it
_target_encoding = 'ascii'
try:
if encoding:
u"".encode(encoding)
_target_encoding = encoding
except LookupError: pass
def get_encoding_mode():
"""
Get the mode Urwid is using when processing text strings.
Returns 'narrow' for 8-bit encodings, 'wide' for CJK encodings
or 'utf8' for UTF-8 encodings.
"""
return str_util.get_byte_encoding()
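# Illustrative sketch (doctest-style, not part of the original module): the
# encoding name passed to set_encoding() determines the mode reported by
# get_encoding_mode().
#
#     >>> set_encoding('utf-8')
#     >>> get_encoding_mode()
#     'utf8'
#     >>> set_encoding('euc-jp')
#     >>> get_encoding_mode()
#     'wide'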
def apply_target_encoding( s ):
"""
Return (encoded byte string, character set rle).
"""
if _use_dec_special and type(s) == text_type:
# first convert drawing characters
try:
s = s.translate( escape.DEC_SPECIAL_CHARMAP )
except NotImplementedError:
# python < 2.4 needs to do this the hard way..
for c, alt in zip(escape.DEC_SPECIAL_CHARS,
escape.ALT_DEC_SPECIAL_CHARS):
s = s.replace( c, escape.SO+alt+escape.SI )
if type(s) == text_type:
s = s.replace(escape.SI+escape.SO, u"") # remove redundant shifts
s = codecs.encode(s, _target_encoding, 'replace')
assert isinstance(s, bytes)
SO = escape.SO.encode('ascii')
SI = escape.SI.encode('ascii')
sis = s.split(SO)
assert isinstance(sis[0], bytes)
sis0 = sis[0].replace(SI, bytes())
sout = []
cout = []
if sis0:
sout.append( sis0 )
cout.append( (None,len(sis0)) )
if len(sis)==1:
return sis0, cout
for sn in sis[1:]:
assert isinstance(sn, bytes)
assert isinstance(SI, bytes)
sl = sn.split(SI, 1)
if len(sl) == 1:
sin = sl[0]
assert isinstance(sin, bytes)
sout.append(sin)
rle_append_modify(cout, (escape.DEC_TAG.encode('ascii'), len(sin)))
continue
sin, son = sl
son = son.replace(SI, bytes())
if sin:
sout.append(sin)
            rle_append_modify(cout, (escape.DEC_TAG.encode('ascii'), len(sin)))
if son:
sout.append(son)
rle_append_modify(cout, (None, len(son)))
outstr = bytes().join(sout)
return outstr, cout
######################################################################
# Try to set the encoding using the one detected by the locale module
set_encoding( detected_encoding )
######################################################################
def supports_unicode():
"""
Return True if python is able to convert non-ascii unicode strings
to the current encoding.
"""
return _target_encoding and _target_encoding != 'ascii'
def calc_trim_text( text, start_offs, end_offs, start_col, end_col ):
"""
Calculate the result of trimming text.
start_offs -- offset into text to treat as screen column 0
end_offs -- offset into text to treat as the end of the line
start_col -- screen column to trim at the left
end_col -- screen column to trim at the right
Returns (start, end, pad_left, pad_right), where:
start -- resulting start offset
end -- resulting end offset
pad_left -- 0 for no pad or 1 for one space to be added
pad_right -- 0 for no pad or 1 for one space to be added
"""
spos = start_offs
pad_left = pad_right = 0
if start_col > 0:
spos, sc = calc_text_pos( text, spos, end_offs, start_col )
if sc < start_col:
pad_left = 1
spos, sc = calc_text_pos( text, start_offs,
end_offs, start_col+1 )
run = end_col - start_col - pad_left
pos, sc = calc_text_pos( text, spos, end_offs, run )
if sc < run:
pad_right = 1
return ( spos, pos, pad_left, pad_right )
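# Illustrative sketch (doctest-style, assuming plain ASCII bytes where offsets
# equal screen columns): trimming columns 2..8 of an 11-column line needs no
# padding on either side, so the offsets come straight back.
#
#     >>> calc_trim_text(b"hello world", 0, 11, 2, 8)
#     (2, 8, 0, 0)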
def trim_text_attr_cs( text, attr, cs, start_col, end_col ):
"""
Return ( trimmed text, trimmed attr, trimmed cs ).
"""
spos, epos, pad_left, pad_right = calc_trim_text(
text, 0, len(text), start_col, end_col )
attrtr = rle_subseg( attr, spos, epos )
cstr = rle_subseg( cs, spos, epos )
if pad_left:
al = rle_get_at( attr, spos-1 )
rle_prepend_modify( attrtr, (al, 1) )
rle_prepend_modify( cstr, (None, 1) )
if pad_right:
al = rle_get_at( attr, epos )
rle_append_modify( attrtr, (al, 1) )
rle_append_modify( cstr, (None, 1) )
return (bytes().rjust(pad_left) + text[spos:epos] +
bytes().rjust(pad_right), attrtr, cstr)
def rle_get_at( rle, pos ):
"""
Return the attribute at offset pos.
"""
x = 0
if pos < 0:
return None
for a, run in rle:
if x+run > pos:
return a
x += run
return None
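# Illustrative sketch (doctest-style): with runs [('a', 3), ('b', 2)] offsets
# 0-2 map to 'a', offsets 3-4 map to 'b', and anything past the total run
# returns None.
#
#     >>> rle_get_at([('a', 3), ('b', 2)], 2)
#     'a'
#     >>> rle_get_at([('a', 3), ('b', 2)], 4)
#     'b'
#     >>> rle_get_at([('a', 3), ('b', 2)], 5)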
def rle_subseg( rle, start, end ):
"""Return a sub segment of an rle list."""
l = []
x = 0
for a, run in rle:
if start:
if start >= run:
start -= run
x += run
continue
x += start
run -= start
start = 0
if x >= end:
break
if x+run > end:
run = end-x
x += run
l.append( (a, run) )
return l
def rle_len( rle ):
"""
Return the number of characters covered by a run length
encoded attribute list.
"""
run = 0
for v in rle:
assert type(v) == tuple, repr(rle)
a, r = v
run += r
return run
def rle_prepend_modify(rle, a_r):
"""
    Prepend (a, r) (unpacked from *a_r*) to the beginning of rle.
    Merge with the first run when possible.
    MODIFIES rle parameter contents. Returns None.
"""
a, r = a_r
if not rle:
rle[:] = [(a, r)]
else:
al, run = rle[0]
if a == al:
rle[0] = (a,run+r)
else:
rle[0:0] = [(a, r)]
def rle_append_modify(rle, a_r):
"""
Append (a, r) (unpacked from *a_r*) to the rle list rle.
Merge with last run when possible.
MODIFIES rle parameter contents. Returns None.
"""
a, r = a_r
if not rle or rle[-1][0] != a:
rle.append( (a,r) )
return
la,lr = rle[-1]
rle[-1] = (a, lr+r)
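# Illustrative sketch (doctest-style, not part of the original module):
# appending merges with the last run when the attribute matches, otherwise a
# new run is added; prepending behaves symmetrically with the first run.
#
#     >>> rle = [('a', 3)]
#     >>> rle_append_modify(rle, ('a', 2)); rle
#     [('a', 5)]
#     >>> rle_append_modify(rle, ('b', 1)); rle
#     [('a', 5), ('b', 1)]
#     >>> rle_prepend_modify(rle, ('a', 4)); rle
#     [('a', 9), ('b', 1)]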
def rle_join_modify( rle, rle2 ):
"""
Append attribute list rle2 to rle.
Merge last run of rle with first run of rle2 when possible.
    MODIFIES rle parameter contents. Returns None.
"""
if not rle2:
return
rle_append_modify(rle, rle2[0])
rle += rle2[1:]
def rle_product( rle1, rle2 ):
"""
Merge the runs of rle1 and rle2 like this:
eg.
rle1 = [ ("a", 10), ("b", 5) ]
rle2 = [ ("Q", 5), ("P", 10) ]
rle_product: [ (("a","Q"), 5), (("a","P"), 5), (("b","P"), 5) ]
rle1 and rle2 are assumed to cover the same total run.
"""
i1 = i2 = 1 # rle1, rle2 indexes
if not rle1 or not rle2: return []
a1, r1 = rle1[0]
a2, r2 = rle2[0]
l = []
while r1 and r2:
r = min(r1, r2)
rle_append_modify( l, ((a1,a2),r) )
r1 -= r
if r1 == 0 and i1< len(rle1):
a1, r1 = rle1[i1]
i1 += 1
r2 -= r
if r2 == 0 and i2< len(rle2):
a2, r2 = rle2[i2]
i2 += 1
return l
def rle_factor( rle ):
"""
Inverse of rle_product.
"""
rle1 = []
rle2 = []
for (a1, a2), r in rle:
rle_append_modify( rle1, (a1, r) )
rle_append_modify( rle2, (a2, r) )
return rle1, rle2
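# Illustrative sketch (doctest-style): rle_factor splits a combined run list
# back into the two lists that rle_product would have merged.
#
#     >>> rle_factor([(("a", "Q"), 5), (("a", "P"), 5), (("b", "P"), 5)])
#     ([('a', 10), ('b', 5)], [('Q', 5), ('P', 10)])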
class TagMarkupException(Exception): pass
def decompose_tagmarkup(tm):
"""Return (text string, attribute list) for tagmarkup passed."""
tl, al = _tagmarkup_recurse(tm, None)
# join as unicode or bytes based on type of first element
text = tl[0][:0].join(tl)
if al and al[-1][0] is None:
del al[-1]
return text, al
def _tagmarkup_recurse( tm, attr ):
"""Return (text list, attribute list) for tagmarkup passed.
tm -- tagmarkup
attr -- current attribute or None"""
if type(tm) == list:
# for lists recurse to process each subelement
rtl = []
ral = []
for element in tm:
tl, al = _tagmarkup_recurse( element, attr )
if ral:
# merge attributes when possible
last_attr, last_run = ral[-1]
top_attr, top_run = al[0]
if last_attr == top_attr:
ral[-1] = (top_attr, last_run + top_run)
del al[-1]
rtl += tl
ral += al
return rtl, ral
if type(tm) == tuple:
# tuples mark a new attribute boundary
if len(tm) != 2:
raise TagMarkupException("Tuples must be in the form (attribute, tagmarkup): %r" % (tm,))
attr, element = tm
return _tagmarkup_recurse( element, attr )
if not isinstance(tm, text_types + (bytes,)):
raise TagMarkupException("Invalid markup element: %r" % tm)
# text
return [tm], [(attr, len(tm))]
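# Illustrative sketch (doctest-style): a trailing unattributed run is dropped
# from the attribute list, so only the tagged prefix is reported.
#
#     >>> decompose_tagmarkup([('bold', "hello"), " world"])
#     ('hello world', [('bold', 5)])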
def is_mouse_event( ev ):
return type(ev) == tuple and len(ev)==4 and ev[0].find("mouse")>=0
def is_mouse_press( ev ):
return ev.find("press")>=0
class MetaSuper(type):
"""adding .__super"""
def __init__(cls, name, bases, d):
super(MetaSuper, cls).__init__(name, bases, d)
if hasattr(cls, "_%s__super" % name):
raise AttributeError("Class has same name as one of its super classes")
setattr(cls, "_%s__super" % name, super(cls))
def int_scale(val, val_range, out_range):
"""
Scale val in the range [0, val_range-1] to an integer in the range
[0, out_range-1]. This implementation uses the "round-half-up" rounding
method.
>>> "%x" % int_scale(0x7, 0x10, 0x10000)
'7777'
>>> "%x" % int_scale(0x5f, 0x100, 0x10)
'6'
>>> int_scale(2, 6, 101)
40
>>> int_scale(1, 3, 4)
2
"""
num = int(val * (out_range-1) * 2 + (val_range-1))
dem = ((val_range-1) * 2)
# if num % dem == 0 then we are exactly half-way and have rounded up.
return num // dem
class StoppingContext(object):
"""Context manager that calls ``stop`` on a given object on exit. Used to
make the ``start`` method on `MainLoop` and `BaseScreen` optionally act as
context managers.
"""
def __init__(self, wrapped):
self._wrapped = wrapped
def __enter__(self):
return self
def __exit__(self, *exc_info):
self._wrapped.stop()
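# Illustrative sketch (hypothetical ``loop`` object): per the docstring above,
# start() can return a StoppingContext so it works either imperatively or as a
# context manager that stops the wrapped object on exit.
#
#     with loop.start():
#         ...  # loop.stop() is called automatically when the block exits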
|
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from weblate.machinery.base import MachineTranslation, MissingConfiguration
TOKEN_URL = "https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}"
TOKEN_EXPIRY = timedelta(minutes=9)
class MicrosoftCognitiveTranslation(MachineTranslation):
"""Microsoft Cognitive Services Translator API support."""
name = "Microsoft Translator"
max_score = 90
language_map = {
"zh-hant": "zh-Hant",
"zh-hans": "zh-Hans",
"zh-tw": "zh-Hant",
"zh-cn": "zh-Hans",
"tlh": "tlh-Latn",
"tlh-qaak": "tlh-Piqd",
"nb": "no",
"bs-latn": "bs-Latn",
"sr-latn": "sr-Latn",
"sr-cyrl": "sr-Cyrl",
}
def __init__(self):
"""Check configuration."""
super().__init__()
self._access_token = None
self._token_expiry = None
# check settings for Microsoft region prefix
if settings.MT_MICROSOFT_REGION is None:
region = ""
else:
region = f"{settings.MT_MICROSOFT_REGION}."
self._cognitive_token_url = TOKEN_URL.format(
region,
settings.MT_MICROSOFT_ENDPOINT_URL,
settings.MT_MICROSOFT_COGNITIVE_KEY,
)
if settings.MT_MICROSOFT_COGNITIVE_KEY is None:
raise MissingConfiguration("Microsoft Translator requires credentials")
@staticmethod
def get_url(suffix):
return f"https://{settings.MT_MICROSOFT_BASE_URL}/{suffix}"
def is_token_expired(self):
"""Check whether token is about to expire."""
return self._token_expiry <= timezone.now()
def get_authentication(self):
"""Hook for backends to allow add authentication headers to request."""
return {"Authorization": f"Bearer {self.access_token}"}
@property
def access_token(self):
"""Obtain and caches access token."""
if self._access_token is None or self.is_token_expired():
self._access_token = self.request(
"post", self._cognitive_token_url, skip_auth=True
).text
self._token_expiry = timezone.now() + TOKEN_EXPIRY
return self._access_token
def map_language_code(self, code):
"""Convert language to service specific code."""
return super().map_language_code(code).replace("_", "-")
def download_languages(self):
"""Download list of supported languages from a service.
Example of the response:
['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',
'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',
        'hu', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',
        'mt', 'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'ru', 'sm', 'sr-Cyrl',
'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',
'cy']
"""
response = self.request(
"get", self.get_url("languages"), params={"api-version": "3.0"}
)
# Microsoft tends to use utf-8-sig instead of plain utf-8
response.encoding = response.apparent_encoding
payload = response.json()
        # We should get an object; a string usually means an error
if isinstance(payload, str):
raise Exception(payload)
return payload["translation"].keys()
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Download list of possible translations from a service."""
args = {
"api-version": "3.0",
"from": source,
"to": language,
"category": "general",
}
response = self.request(
"post", self.get_url("translate"), params=args, json=[{"Text": text[:5000]}]
)
# Microsoft tends to use utf-8-sig instead of plain utf-8
response.encoding = "utf-8-sig"
payload = response.json()
yield {
"text": payload[0]["translations"][0]["text"],
"quality": self.max_score,
"service": self.name,
"source": text,
}
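# Hedged note (illustration only, assuming the MT_MICROSOFT_* settings are
# configured): the access_token property lazily fetches a token from the
# issueToken endpoint and caches it for TOKEN_EXPIRY (9 minutes); later calls
# reuse it until is_token_expired() returns True, so per-request code can do:
#
#     machinery = MicrosoftCognitiveTranslation()
#     machinery.get_authentication()  # first call fetches and caches the token
#     machinery.get_authentication()  # subsequent calls reuse the cached token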
|
import os.path as op
import itertools as itt
from numpy.testing import assert_array_equal
import numpy as np
import pytest
from mne import (read_evokeds, read_cov, compute_raw_covariance, pick_types,
pick_info)
from mne.cov import prepare_noise_cov
from mne.datasets import testing
from mne.io import read_raw_fif
from mne.io.pick import _picks_by_type, _get_channel_types
from mne.io.proj import _has_eeg_average_ref_proj
from mne.proj import compute_proj_raw
from mne.rank import (estimate_rank, compute_rank, _get_rank_sss,
_compute_rank_int, _estimate_rank_raw)
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
testing_path = testing.data_path(download=False)
data_dir = op.join(testing_path, 'MEG', 'sample')
mf_fif_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw_sss.fif')
def test_estimate_rank():
"""Test rank estimation."""
data = np.eye(10)
assert_array_equal(estimate_rank(data, return_singular=True)[1],
np.ones(10))
data[0, 0] = 0
assert estimate_rank(data) == 9
pytest.raises(ValueError, estimate_rank, data, 'foo')
@pytest.mark.slowtest
@pytest.mark.parametrize(
'fname, ref_meg', ((raw_fname, False),
(hp_fif_fname, False),
(ctf_fname, False),
(ctf_fname, True)))
@pytest.mark.parametrize(
'scalings', ('norm', dict(mag=1e11, grad=1e9, eeg=1e5)))
@pytest.mark.parametrize('tol_kind, tol', [
('absolute', 1e-4),
('relative', 1e-6),
])
def test_raw_rank_estimation(fname, ref_meg, scalings, tol_kind, tol):
"""Test raw rank estimation."""
if ref_meg and scalings != 'norm':
# Adjust for CTF data (scale factors are quite different)
if tol_kind == 'relative':
scalings = dict(mag=1.)
else:
scalings = dict(mag=1e31)
raw = read_raw_fif(fname)
raw.crop(0, min(4., raw.times[-1])).load_data()
out = _picks_by_type(raw.info, ref_meg=ref_meg, meg_combined=True)
has_eeg = 'eeg' in raw
if has_eeg:
(_, picks_meg), (_, picks_eeg) = out
else:
(_, picks_meg), = out
picks_eeg = []
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
if len(raw.info['proc_history']) == 0:
expected_rank = n_meg + n_eeg
else:
expected_rank = _get_rank_sss(raw.info) + n_eeg
got_rank = _estimate_rank_raw(raw, scalings=scalings, with_ref_meg=ref_meg,
tol=tol, tol_kind=tol_kind)
assert got_rank == expected_rank
if 'sss' in fname:
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
n_proj = len(raw.info['projs'])
want_rank = expected_rank - (0 if 'sss' in fname else n_proj)
got_rank = _estimate_rank_raw(raw, scalings=scalings, with_ref_meg=ref_meg,
tol=tol, tol_kind=tol_kind)
assert got_rank == want_rank
@pytest.mark.slowtest
@pytest.mark.parametrize('meg', ('separate', 'combined'))
@pytest.mark.parametrize('rank_method, proj', [('info', True),
('info', False),
(None, True),
(None, False)])
def test_cov_rank_estimation(rank_method, proj, meg):
"""Test cov rank estimation."""
# Test that our rank estimation works properly on a simple case
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=False)
cov = read_cov(cov_fname)
ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
ch.startswith('EEG')]
cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
assert cov['eig'][0] <= 1e-25 # avg projector should set this to zero
assert (cov['eig'][1:] > 1e-16).all() # all else should be > 0
# Now do some more comprehensive tests
raw_sample = read_raw_fif(raw_fname)
assert not _has_eeg_average_ref_proj(raw_sample.info['projs'])
raw_sss = read_raw_fif(hp_fif_fname)
assert not _has_eeg_average_ref_proj(raw_sss.info['projs'])
raw_sss.add_proj(compute_proj_raw(raw_sss, meg=meg))
cov_sample = compute_raw_covariance(raw_sample)
cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())
cov_sss = compute_raw_covariance(raw_sss)
cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())
picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)
info_sample = pick_info(raw_sample.info, picks_all_sample)
picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
eeg=True))]
picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
picks_stack_sample += [('all',
pick_types(info_sample, meg=True, eeg=True))]
info_sss = pick_info(raw_sss.info, picks_all_sss)
picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
picks_stack_somato += [('all',
pick_types(info_sss, meg=True, eeg=True))]
iter_tests = list(itt.product(
[(cov_sample, picks_stack_sample, info_sample),
(cov_sample_proj, picks_stack_sample, info_sample),
(cov_sss, picks_stack_somato, info_sss),
(cov_sss_proj, picks_stack_somato, info_sss)], # sss
[dict(mag=1e15, grad=1e13, eeg=1e6)],
))
for (cov, picks_list, iter_info), scalings in iter_tests:
rank = compute_rank(cov, rank_method, scalings, iter_info,
proj=proj)
rank['all'] = sum(rank.values())
for ch_type, picks in picks_list:
this_info = pick_info(iter_info, picks)
# compute subset of projs, active and inactive
n_projs_applied = sum(proj['active'] and
len(set(proj['data']['col_names']) &
set(this_info['ch_names'])) > 0
for proj in cov['projs'])
n_projs_info = sum(len(set(proj['data']['col_names']) &
set(this_info['ch_names'])) > 0
for proj in this_info['projs'])
# count channel types
ch_types = _get_channel_types(this_info)
n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
['eeg', 'mag', 'grad']]
n_meg = n_mag + n_grad
has_sss = (n_meg > 0 and len(this_info['proc_history']) > 0)
if has_sss:
n_meg = _get_rank_sss(this_info)
expected_rank = n_meg + n_eeg
if rank_method is None:
if meg == 'combined' or not has_sss:
if proj:
expected_rank -= n_projs_info
else:
expected_rank -= n_projs_applied
else:
# XXX for now it just uses the total count
assert rank_method == 'info'
if proj:
expected_rank -= n_projs_info
assert rank[ch_type] == expected_rank
@testing.requires_testing_data
@pytest.mark.parametrize('fname, rank_orig', ((hp_fif_fname, 120),
(mf_fif_fname, 67)))
@pytest.mark.parametrize('n_proj, meg', ((0, 'combined'),
(10, 'combined'),
(10, 'separate')))
@pytest.mark.parametrize('tol_kind, tol', [
('absolute', 'float32'),
('relative', 'float32'),
('relative', 1e-5),
])
def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg, tol_kind, tol):
"""Test maxfilter rank lookup."""
raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types(meg=True)
assert raw.info['projs'] == []
mf = raw.info['proc_history'][0]['max_info']
assert mf['sss_info']['nfree'] == rank_orig
assert _get_rank_sss(raw) == rank_orig
mult = 1 + (meg == 'separate')
rank = rank_orig - mult * n_proj
if n_proj > 0:
# Let's do some projection
raw.add_proj(compute_proj_raw(raw, n_mag=n_proj, n_grad=n_proj,
meg=meg, verbose=True))
raw.apply_proj()
data_orig = raw[:][0]
# degenerate cases
with pytest.raises(ValueError, match='tol must be'):
_estimate_rank_raw(raw, tol='foo')
with pytest.raises(TypeError, match='must be a string or a number'):
_estimate_rank_raw(raw, tol=None)
allowed_rank = [rank_orig if meg == 'separate' else rank]
if fname == mf_fif_fname:
# Here we permit a -1 because for mf_fif_fname we miss by 1, which is
# probably acceptable. If we use the entire duration instead of 5 sec
# this problem goes away, but the test is much slower.
allowed_rank.append(allowed_rank[0] - 1)
# multiple ways of hopefully getting the same thing
# default tol=1e-4, scalings='norm'
rank_new = _estimate_rank_raw(raw, tol_kind=tol_kind)
assert rank_new in allowed_rank
rank_new = _estimate_rank_raw(
raw, tol=tol, tol_kind=tol_kind)
if fname == mf_fif_fname and tol_kind == 'relative' and tol != 'auto':
pass # does not play nicely with row norms of _estimate_rank_raw
else:
assert rank_new in allowed_rank
rank_new = _estimate_rank_raw(
raw, scalings=dict(), tol=tol, tol_kind=tol_kind)
assert rank_new in allowed_rank
scalings = dict(grad=1e13, mag=1e15)
rank_new = _compute_rank_int(
raw, None, scalings=scalings, tol=tol, tol_kind=tol_kind,
verbose='debug')
assert rank_new in allowed_rank
# XXX default scalings mis-estimate sometimes :(
if fname == hp_fif_fname:
allowed_rank.append(allowed_rank[0] - 2)
rank_new = _compute_rank_int(
raw, None, tol=tol, tol_kind=tol_kind, verbose='debug')
assert rank_new in allowed_rank
del allowed_rank
rank_new = _compute_rank_int(raw, 'info')
assert rank_new == rank
assert_array_equal(raw[:][0], data_orig)
|
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOISTURE,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import DOMAIN
SENSOR_TYPES = {"Rain": DEVICE_CLASS_MOISTURE, "Night": None}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available BloomSky weather binary sensors."""
# Default needed in case of discovery
if discovery_info is not None:
return
sensors = config[CONF_MONITORED_CONDITIONS]
bloomsky = hass.data[DOMAIN]
for device in bloomsky.devices.values():
for variable in sensors:
add_entities([BloomSkySensor(bloomsky, device, variable)], True)
class BloomSkySensor(BinarySensorEntity):
"""Representation of a single binary sensor in a BloomSky device."""
def __init__(self, bs, device, sensor_name):
"""Initialize a BloomSky binary sensor."""
self._bloomsky = bs
self._device_id = device["DeviceID"]
self._sensor_name = sensor_name
self._name = f"{device['DeviceName']} {sensor_name}"
self._state = None
self._unique_id = f"{self._device_id}-{self._sensor_name}"
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the BloomSky device and this sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return SENSOR_TYPES.get(self._sensor_name)
@property
def is_on(self):
"""Return true if binary sensor is on."""
return self._state
def update(self):
"""Request an update from the BloomSky API."""
self._bloomsky.refresh_devices()
self._state = self._bloomsky.devices[self._device_id]["Data"][self._sensor_name]
|
from pprint import pprint
from riko.bado import coroutine
from riko.collections import SyncPipe, AsyncPipe
p385_conf = {'type': 'date'}
p385_in = {'content': '12/2/2014'}
p405_conf = {'format': '%B %d, %Y'}
p393_conf = {
'attrs': [
{'value': {'terminal': 'date', 'path': 'dateformat'}, 'key': 'date'},
{'value': {'terminal': 'year', 'path': 'year'}, 'key': 'year'}]}
p385_kwargs = {'conf': p385_conf, 'inputs': p385_in}
def pipe(test=False):
s1, s2 = (SyncPipe('input', test=test, **p385_kwargs)
.dateformat(conf=p405_conf)
.split()
.output)
p393_kwargs = {'conf': p393_conf, 'date': s1, 'year': s2, 'test': test}
stream = SyncPipe('itembuilder', **p393_kwargs).list
for i in stream:
pprint(i)
return stream
@coroutine
def async_pipe(reactor, test=False):
s1, s2 = yield (AsyncPipe('input', test=test, **p385_kwargs)
.dateformat(conf=p405_conf)
.split()
.output)
p393_kwargs = {'conf': p393_conf, 'date': s1, 'year': s2, 'test': test}
stream = yield AsyncPipe('itembuilder', **p393_kwargs).list
for i in stream:
pprint(i)
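# Hedged usage sketch (not part of the original example): the synchronous
# pipeline can be called directly, while the asynchronous variant needs to be
# driven by Twisted's reactor, e.g.:
#
#     if __name__ == '__main__':
#         from twisted.internet import task
#         pipe(test=True)
#         task.react(lambda reactor: async_pipe(reactor, test=True))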
|
from aioguardian.errors import GuardianError
from homeassistant import data_entry_flow
from homeassistant.components.guardian import CONF_UID, DOMAIN
from homeassistant.components.guardian.config_flow import (
async_get_pin_from_discovery_hostname,
async_get_pin_from_uid,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_IP_ADDRESS, CONF_PORT
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_duplicate_error(hass, ping_client):
"""Test that errors are shown when duplicate entries are added."""
conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777}
MockConfigEntry(domain=DOMAIN, unique_id="guardian_3456", data=conf).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_connect_error(hass):
"""Test that the config entry errors out if the device cannot connect."""
conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777}
with patch(
"aioguardian.client.Client.connect",
side_effect=GuardianError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_IP_ADDRESS: "cannot_connect"}
async def test_get_pin_from_discovery_hostname():
"""Test getting a device PIN from the zeroconf-discovered hostname."""
pin = async_get_pin_from_discovery_hostname("GVC1-3456.local.")
assert pin == "3456"
async def test_get_pin_from_uid():
"""Test getting a device PIN from its UID."""
pin = async_get_pin_from_uid("ABCDEF123456")
assert pin == "3456"
async def test_step_user(hass, ping_client):
"""Test the user step."""
conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "ABCDEF123456"
assert result["data"] == {
CONF_IP_ADDRESS: "192.168.1.100",
CONF_PORT: 7777,
CONF_UID: "ABCDEF123456",
}
async def test_step_zeroconf(hass, ping_client):
"""Test the zeroconf step."""
zeroconf_data = {
"host": "192.168.1.100",
"port": 7777,
"hostname": "GVC1-ABCD.local.",
"type": "_api._udp.local.",
"name": "Guardian Valve Controller API._api._udp.local.",
"properties": {"_raw": {}},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=zeroconf_data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zeroconf_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "ABCDEF123456"
assert result["data"] == {
CONF_IP_ADDRESS: "192.168.1.100",
CONF_PORT: 7777,
CONF_UID: "ABCDEF123456",
}
async def test_step_zeroconf_already_in_progress(hass):
"""Test the zeroconf step aborting because it's already in progress."""
zeroconf_data = {
"host": "192.168.1.100",
"port": 7777,
"hostname": "GVC1-ABCD.local.",
"type": "_api._udp.local.",
"name": "Guardian Valve Controller API._api._udp.local.",
"properties": {"_raw": {}},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=zeroconf_data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zeroconf_confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=zeroconf_data
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_step_zeroconf_no_discovery_info(hass):
"""Test the zeroconf step aborting because no discovery info came along."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
|
import setuptools
HAS_DIST_INFO_CMD = False
try:
import setuptools.command.dist_info
HAS_DIST_INFO_CMD = True
except ImportError:
"""Setuptools version is too old."""
ALL_STRING_TYPES = tuple(map(type, ('', b'', u'')))
MIN_NATIVE_SETUPTOOLS_VERSION = 34, 4, 0
"""Minimal setuptools having good read_configuration implementation."""
RUNTIME_SETUPTOOLS_VERSION = tuple(map(int, setuptools.__version__.split('.')))
"""Setuptools imported now."""
READ_CONFIG_SHIM_NEEDED = (
RUNTIME_SETUPTOOLS_VERSION < MIN_NATIVE_SETUPTOOLS_VERSION
)
def str_if_nested_or_str(s):
"""Turn input into a native string if possible."""
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict, )):
return stringify_dict_contents(s)
return s
def stringify_dict_contents(dct):
"""Turn dict keys and values into native strings."""
return {
str_if_nested_or_str(k): str_if_nested_or_str(v)
for k, v in dct.items()
}
if not READ_CONFIG_SHIM_NEEDED:
from setuptools.config import read_configuration, ConfigOptionsHandler
import setuptools.config
import setuptools.dist
# Set default value for 'use_scm_version'
setattr(setuptools.dist.Distribution, 'use_scm_version', False)
# Attach bool parser to 'use_scm_version' option
class ShimConfigOptionsHandler(ConfigOptionsHandler):
"""Extension class for ConfigOptionsHandler."""
@property
def parsers(self):
"""Return an option mapping with default data type parsers."""
_orig_parsers = super(ShimConfigOptionsHandler, self).parsers
return dict(use_scm_version=self._parse_bool, **_orig_parsers)
def parse_section_packages__find(self, section_options):
find_kwargs = super(
ShimConfigOptionsHandler, self
).parse_section_packages__find(section_options)
return stringify_dict_contents(find_kwargs)
setuptools.config.ConfigOptionsHandler = ShimConfigOptionsHandler
else:
"""This is a shim for setuptools<required."""
import functools
import io
import json
import sys
import warnings
try:
import setuptools.config
def filter_out_unknown_section(i):
def chi(self, *args, **kwargs):
i(self, *args, **kwargs)
self.sections = {
s: v for s, v in self.sections.items()
if s != 'packages.find'
}
return chi
setuptools.config.ConfigHandler.__init__ = filter_out_unknown_section(
setuptools.config.ConfigHandler.__init__,
)
except ImportError:
pass
def ignore_unknown_options(s):
@functools.wraps(s)
def sw(**attrs):
try:
ignore_warning_regex = (
r"Unknown distribution option: "
r"'(license_file|project_urls|python_requires)'"
)
warnings.filterwarnings(
'ignore',
message=ignore_warning_regex,
category=UserWarning,
module='distutils.dist',
)
return s(**attrs)
finally:
warnings.resetwarnings()
return sw
def parse_predicates(python_requires):
import itertools
import operator
sorted_operators_map = tuple(sorted(
{
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'': operator.eq,
}.items(),
key=lambda i: len(i[0]),
reverse=True,
))
def is_decimal(s):
return type(u'')(s).isdecimal()
conditions = map(str.strip, python_requires.split(','))
for c in conditions:
for op_sign, op_func in sorted_operators_map:
if not c.startswith(op_sign):
continue
raw_ver = itertools.takewhile(
is_decimal,
c[len(op_sign):].strip().split('.'),
)
ver = tuple(map(int, raw_ver))
yield op_func, ver
break
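    # Hedged illustration (not part of the original shim): parse_predicates
    # turns a python_requires string into (operator, version-tuple) pairs, e.g.
    # ">=2.7, !=3.0.*" yields roughly
    # [(operator.ge, (2, 7)), (operator.ne, (3, 0))]; the non-decimal "*"
    # component is dropped by the is_decimal() filter.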
def validate_required_python_or_fail(python_requires=None):
if python_requires is None:
return
python_version = sys.version_info
preds = parse_predicates(python_requires)
for op, v in preds:
py_ver_slug = python_version[:max(len(v), 3)]
condition_matches = op(py_ver_slug, v)
if not condition_matches:
raise RuntimeError(
"requires Python '{}' but the running Python is {}".
format(
python_requires,
'.'.join(map(str, python_version[:3])),
)
)
def verify_required_python_runtime(s):
@functools.wraps(s)
def sw(**attrs):
try:
validate_required_python_or_fail(attrs.get('python_requires'))
except RuntimeError as re:
sys.exit('{} {!s}'.format(attrs['name'], re))
return s(**attrs)
return sw
setuptools.setup = ignore_unknown_options(setuptools.setup)
setuptools.setup = verify_required_python_runtime(setuptools.setup)
try:
from configparser import ConfigParser, NoSectionError
except ImportError:
from ConfigParser import ConfigParser, NoSectionError
ConfigParser.read_file = ConfigParser.readfp
def maybe_read_files(d):
"""Read files if the string starts with `file:` marker."""
FILE_FUNC_MARKER = 'file:'
d = d.strip()
if not d.startswith(FILE_FUNC_MARKER):
return d
descs = []
for fname in map(str.strip, str(d[len(FILE_FUNC_MARKER):]).split(',')):
with io.open(fname, encoding='utf-8') as f:
descs.append(f.read())
return ''.join(descs)
def cfg_val_to_list(v):
"""Turn config val to list and filter out empty lines."""
return list(filter(bool, map(str.strip, str(v).strip().splitlines())))
def cfg_val_to_dict(v):
"""Turn config val to dict and filter out empty lines."""
return dict(
map(lambda l: list(map(str.strip, l.split('=', 1))),
filter(bool, map(str.strip, str(v).strip().splitlines())))
)
def cfg_val_to_primitive(v):
"""Parse primitive config val to appropriate data type."""
return json.loads(v.strip().lower())
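    # Hedged examples (illustration only) of how these coercion helpers behave
    # on typical setup.cfg values:
    #     cfg_val_to_list('\n  foo\n  bar\n')     -> ['foo', 'bar']
    #     cfg_val_to_dict('\n  pkg = src/pkg\n')  -> {'pkg': 'src/pkg'}
    #     cfg_val_to_primitive('True')            -> True  (via json.loads)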
def read_configuration(filepath):
"""Read metadata and options from setup.cfg located at filepath."""
cfg = ConfigParser()
with io.open(filepath, encoding='utf-8') as f:
cfg.read_file(f)
md = dict(cfg.items('metadata'))
for list_key in 'classifiers', 'keywords', 'project_urls':
try:
md[list_key] = cfg_val_to_list(md[list_key])
except KeyError:
pass
try:
md['long_description'] = maybe_read_files(md['long_description'])
except KeyError:
pass
opt = dict(cfg.items('options'))
for list_key in 'include_package_data', 'use_scm_version', 'zip_safe':
try:
opt[list_key] = cfg_val_to_primitive(opt[list_key])
except KeyError:
pass
for list_key in 'scripts', 'install_requires', 'setup_requires':
try:
opt[list_key] = cfg_val_to_list(opt[list_key])
except KeyError:
pass
try:
opt['package_dir'] = cfg_val_to_dict(opt['package_dir'])
except KeyError:
pass
try:
opt_package_data = dict(cfg.items('options.package_data'))
if not opt_package_data.get('', '').strip():
opt_package_data[''] = opt_package_data['*']
del opt_package_data['*']
except (KeyError, NoSectionError):
opt_package_data = {}
try:
opt_extras_require = dict(cfg.items('options.extras_require'))
opt['extras_require'] = {}
for k, v in opt_extras_require.items():
opt['extras_require'][k] = cfg_val_to_list(v)
except NoSectionError:
pass
opt['package_data'] = {}
for k, v in opt_package_data.items():
opt['package_data'][k] = cfg_val_to_list(v)
try:
opt_exclude_package_data = dict(
cfg.items('options.exclude_package_data'),
)
if (
not opt_exclude_package_data.get('', '').strip()
and '*' in opt_exclude_package_data
):
opt_exclude_package_data[''] = opt_exclude_package_data['*']
del opt_exclude_package_data['*']
except NoSectionError:
pass
else:
opt['exclude_package_data'] = {}
for k, v in opt_exclude_package_data.items():
opt['exclude_package_data'][k] = cfg_val_to_list(v)
cur_pkgs = opt.get('packages', '').strip()
if '\n' in cur_pkgs:
opt['packages'] = cfg_val_to_list(opt['packages'])
elif cur_pkgs.startswith('find:'):
opt_packages_find = stringify_dict_contents(
dict(cfg.items('options.packages.find'))
)
opt['packages'] = setuptools.find_packages(**opt_packages_find)
return {'metadata': md, 'options': opt}
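# Hedged usage sketch (not part of the original shim): like the native
# setuptools helper, the shimmed read_configuration returns a dict with
# 'metadata' and 'options' keys, e.g.:
#
#     params = read_configuration('setup.cfg')
#     params['metadata']['name']          # the distribution name, if declared
#     params['options'].get('packages')   # resolved via find_packages() for 'find:'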
def cut_local_version_on_upload(version):
"""Generate a PEP440 local version if uploading to PyPI."""
import os
import setuptools_scm.version # only present during setup time
IS_PYPI_UPLOAD = os.getenv('PYPI_UPLOAD') == 'true' # set in tox.ini
return (
'' if IS_PYPI_UPLOAD
else setuptools_scm.version.get_local_node_and_date(version)
)
if HAS_DIST_INFO_CMD:
class patched_dist_info(setuptools.command.dist_info.dist_info):
def run(self):
self.egg_base = str_if_nested_or_str(self.egg_base)
return setuptools.command.dist_info.dist_info.run(self)
declarative_setup_params = read_configuration('setup.cfg')
"""Declarative metadata and options as read by setuptools."""
setup_params = {}
"""Explicit metadata for passing into setuptools.setup() call."""
setup_params = dict(setup_params, **declarative_setup_params['metadata'])
setup_params = dict(setup_params, **declarative_setup_params['options'])
if HAS_DIST_INFO_CMD:
setup_params['cmdclass'] = {
'dist_info': patched_dist_info,
}
setup_params['use_scm_version'] = {
'local_scheme': cut_local_version_on_upload,
}
# Patch incorrectly decoded package_dir option
# ``egg_info`` demands native strings failing with unicode under Python 2
# Ref https://github.com/pypa/setuptools/issues/1136
setup_params = stringify_dict_contents(setup_params)
__name__ == '__main__' and setuptools.setup(**setup_params)
|
from tests.async_mock import MagicMock
def _generate_mock_feed_entry(
external_id,
title,
alert_level,
distance_to_home,
coordinates,
attribution=None,
activity=None,
hazards=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.alert_level = alert_level
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.attribution = attribution
feed_entry.activity = activity
feed_entry.hazards = hazards
return feed_entry
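# Hedged usage sketch (illustrative values only): a test would typically build
# an entry and feed it to the integration under test, e.g.
#
#     entry = _generate_mock_feed_entry(
#         "1234", "Title 1", 2, 15.5, (38.0, -3.0),
#         attribution="Attribution 1", activity="Activity 1", hazards=["H1"],
#     )
#     assert entry.alert_level == 2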
|
import os
import sys
import shlex
import pytest
import pytest_bdd as bdd
from PyQt5.QtNetwork import QSslSocket
bdd.scenarios('downloads.feature')
PROMPT_MSG = ("Asking question <qutebrowser.utils.usertypes.Question "
"default={!r} mode=<PromptMode.download: 5> option=None "
"text=* title='Save file to:'>, *")
@pytest.fixture
def download_dir(tmpdir):
downloads = tmpdir / 'downloads'
downloads.ensure(dir=True)
(downloads / 'subdir').ensure(dir=True)
try:
os.mkfifo(str(downloads / 'fifo'))
except AttributeError:
pass
unwritable = downloads / 'unwritable'
unwritable.ensure(dir=True)
unwritable.chmod(0)
yield downloads
unwritable.chmod(0o755)
@bdd.given("I set up a temporary download dir")
def temporary_download_dir(quteproc, download_dir):
quteproc.set_setting('downloads.location.prompt', 'false')
quteproc.set_setting('downloads.location.remember', 'false')
quteproc.set_setting('downloads.location.directory', str(download_dir))
@bdd.given("I clean old downloads")
def clean_old_downloads(quteproc):
quteproc.send_cmd(':download-cancel --all')
quteproc.send_cmd(':download-clear')
@bdd.when("SSL is supported")
def check_ssl():
if not QSslSocket.supportsSsl():
pytest.skip("QtNetwork SSL not supported")
@bdd.when("the unwritable dir is unwritable")
def check_unwritable(tmpdir):
unwritable = tmpdir / 'downloads' / 'unwritable'
if os.access(str(unwritable), os.W_OK):
# Docker container or similar
pytest.skip("Unwritable dir was writable")
@bdd.when("I wait until the download is finished")
def wait_for_download_finished(quteproc):
quteproc.wait_for(category='downloads', message='Download * finished')
@bdd.when(bdd.parsers.parse("I wait until the download {name} is finished"))
def wait_for_download_finished_name(quteproc, name):
quteproc.wait_for(category='downloads',
message='Download {} finished'.format(name))
@bdd.when(bdd.parsers.parse('I wait for the download prompt for "{path}"'))
def wait_for_download_prompt(tmpdir, quteproc, path):
full_path = path.replace('(tmpdir)', str(tmpdir)).replace('/', os.sep)
quteproc.wait_for(message=PROMPT_MSG.format(full_path))
quteproc.wait_for(message="Entering mode KeyMode.prompt "
"(reason: question asked)")
@bdd.when("I download an SSL page")
def download_ssl_page(quteproc, ssl_server):
quteproc.send_cmd(':download https://localhost:{}/'
.format(ssl_server.port))
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should not exist"))
def download_should_not_exist(filename, tmpdir):
path = tmpdir / 'downloads' / filename
assert not path.check()
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should exist"))
def download_should_exist(filename, tmpdir):
path = tmpdir / 'downloads' / filename
assert path.check()
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should be "
"{size} bytes big"))
def download_size(filename, size, tmpdir):
path = tmpdir / 'downloads' / filename
assert path.size() == int(size)
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should contain "
"{text}"))
def download_contents(filename, text, tmpdir):
path = tmpdir / 'downloads' / filename
assert text in path.read()
@bdd.then(bdd.parsers.parse('The download prompt should be shown with '
'"{path}"'))
def download_prompt(tmpdir, quteproc, path):
full_path = path.replace('(tmpdir)', str(tmpdir)).replace('/', os.sep)
quteproc.wait_for(message=PROMPT_MSG.format(full_path))
quteproc.send_cmd(':leave-mode')
@bdd.when("I set a test python open_dispatcher")
def default_open_dispatcher_python(quteproc, tmpdir):
cmd = '{} -c "import sys; print(sys.argv[1])"'.format(
shlex.quote(sys.executable))
quteproc.set_setting('downloads.open_dispatcher', cmd)
@bdd.when("I open the download")
def download_open(quteproc):
cmd = '{} -c "import sys; print(sys.argv[1])"'.format(
shlex.quote(sys.executable))
quteproc.send_cmd(':download-open {}'.format(cmd))
@bdd.when("I open the download with a placeholder")
def download_open_placeholder(quteproc):
cmd = '{} -c "import sys; print(sys.argv[1])"'.format(
shlex.quote(sys.executable))
quteproc.send_cmd(':download-open {} {{}}'.format(cmd))
@bdd.when("I directly open the download")
def download_open_with_prompt(quteproc):
cmd = '{} -c pass'.format(shlex.quote(sys.executable))
quteproc.send_cmd(':prompt-open-download {}'.format(cmd))
@bdd.when(bdd.parsers.parse("I delete the downloaded file {filename}"))
def delete_file(tmpdir, filename):
(tmpdir / 'downloads' / filename).remove()
@bdd.then("the FIFO should still be a FIFO")
def fifo_should_be_fifo(tmpdir):
download_dir = tmpdir / 'downloads'
assert download_dir.exists()
assert not os.path.isfile(str(download_dir / 'fifo'))
|
import pyvera as pv
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
)
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
from tests.async_mock import MagicMock
async def test_climate(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device = MagicMock(spec=pv.VeraThermostat) # type: pv.VeraThermostat
vera_device.device_id = 1
vera_device.vera_device_id = vera_device.device_id
vera_device.name = "dev1"
vera_device.category = pv.CATEGORY_THERMOSTAT
vera_device.power = 10
vera_device.get_current_temperature.return_value = 71
vera_device.get_hvac_mode.return_value = "Off"
vera_device.get_current_goal_temperature.return_value = 72
entity_id = "climate.dev1_1"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(devices=(vera_device,)),
)
update_callback = component_data.controller_data[0].update_callback
assert hass.states.get(entity_id).state == HVAC_MODE_OFF
await hass.services.async_call(
"climate",
"set_hvac_mode",
{"entity_id": entity_id, "hvac_mode": HVAC_MODE_COOL},
)
await hass.async_block_till_done()
vera_device.turn_cool_on.assert_called()
vera_device.get_hvac_mode.return_value = "CoolOn"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == HVAC_MODE_COOL
await hass.services.async_call(
"climate",
"set_hvac_mode",
{"entity_id": entity_id, "hvac_mode": HVAC_MODE_HEAT},
)
await hass.async_block_till_done()
vera_device.turn_heat_on.assert_called()
vera_device.get_hvac_mode.return_value = "HeatOn"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == HVAC_MODE_HEAT
await hass.services.async_call(
"climate",
"set_hvac_mode",
{"entity_id": entity_id, "hvac_mode": HVAC_MODE_HEAT_COOL},
)
await hass.async_block_till_done()
vera_device.turn_auto_on.assert_called()
vera_device.get_hvac_mode.return_value = "AutoChangeOver"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == HVAC_MODE_HEAT_COOL
await hass.services.async_call(
"climate",
"set_hvac_mode",
{"entity_id": entity_id, "hvac_mode": HVAC_MODE_OFF},
)
await hass.async_block_till_done()
vera_device.turn_auto_on.assert_called()
vera_device.get_hvac_mode.return_value = "Off"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == HVAC_MODE_OFF
await hass.services.async_call(
"climate",
"set_fan_mode",
{"entity_id": entity_id, "fan_mode": "on"},
)
await hass.async_block_till_done()
vera_device.turn_auto_on.assert_called()
vera_device.get_fan_mode.return_value = "ContinuousOn"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).attributes["fan_mode"] == FAN_ON
await hass.services.async_call(
"climate",
"set_fan_mode",
{"entity_id": entity_id, "fan_mode": "off"},
)
await hass.async_block_till_done()
vera_device.turn_auto_on.assert_called()
vera_device.get_fan_mode.return_value = "Auto"
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).attributes["fan_mode"] == FAN_AUTO
await hass.services.async_call(
"climate",
"set_temperature",
{"entity_id": entity_id, "temperature": 30},
)
await hass.async_block_till_done()
vera_device.set_temperature.assert_called_with(30)
vera_device.get_current_goal_temperature.return_value = 30
vera_device.get_current_temperature.return_value = 25
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).attributes["current_temperature"] == 25
assert hass.states.get(entity_id).attributes["temperature"] == 30
async def test_climate_f(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device = MagicMock(spec=pv.VeraThermostat) # type: pv.VeraThermostat
vera_device.device_id = 1
vera_device.vera_device_id = vera_device.device_id
vera_device.name = "dev1"
vera_device.category = pv.CATEGORY_THERMOSTAT
vera_device.power = 10
vera_device.get_current_temperature.return_value = 71
vera_device.get_hvac_mode.return_value = "Off"
vera_device.get_current_goal_temperature.return_value = 72
entity_id = "climate.dev1_1"
def setup_callback(controller: pv.VeraController) -> None:
controller.temperature_units = "F"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
devices=(vera_device,), setup_callback=setup_callback
),
)
update_callback = component_data.controller_data[0].update_callback
await hass.services.async_call(
"climate",
"set_temperature",
{"entity_id": entity_id, "temperature": 30},
)
await hass.async_block_till_done()
vera_device.set_temperature.assert_called_with(86)
vera_device.get_current_goal_temperature.return_value = 30
vera_device.get_current_temperature.return_value = 25
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).attributes["current_temperature"] == -3.9
assert hass.states.get(entity_id).attributes["temperature"] == -1.1
|
from autobahn.twisted.websocket import create_client_agent
from twisted.internet import task
async def main(reactor):
"""
Using the 'agent' interface to talk to the echo server (run
../echo/server.py for the server, for example)
"""
agent = create_client_agent(reactor)
options = {
"headers": {
"x-foo": "bar",
}
}
proto = await agent.open("ws://localhost:9000/ws", options)
def got_message(*args, **kw):
print("on_message: args={} kwargs={}".format(args, kw))
proto.on('message', got_message)
await proto.is_open
proto.sendMessage(b"i am a message\n")
await task.deferLater(reactor, 0, lambda: None)
proto.sendClose(code=1000, reason="byebye")
await proto.is_closed
if __name__ == "__main__":
from twisted.internet.defer import ensureDeferred
task.react(lambda r: ensureDeferred(main(r)))
|
from io import StringIO
import pytest
from lxml import html as lxml_html
import nikola.plugins.compile.rest
import nikola.plugins.compile.rest.listing
from nikola.plugins.compile.rest import vimeo
from nikola.utils import _reload, LocaleBorg
from .helper import FakeSite
def test_ReST_extension():
sample = '.. raw:: html\n\n <iframe src="foo" height="bar">spam</iframe>'
html = get_html_from_rst(sample)
assert_html_contains(html, "iframe", attributes={"src": "foo"}, text="spam")
with pytest.raises(Exception):
assert_html_contains("eggs", {})
def test_math_extension_outputs_tex():
"""Test that math is outputting TeX code."""
sample = r":math:`e^{ix} = \cos x + i\sin x`"
html = get_html_from_rst(sample)
assert_html_contains(
html,
"span",
attributes={"class": "math"},
text=r"\(e^{ix} = \cos x + i\sin x\)",
)
def test_soundcloud_iframe():
"""Test SoundCloud iframe tag generation"""
sample = ".. soundcloud:: SID\n :height: 400\n :width: 600"
html = get_html_from_rst(sample)
assert_html_contains(
html,
"iframe",
attributes={
"src": (
"https://w.soundcloud.com/player/"
"?url=http://api.soundcloud.com/"
"tracks/SID"
),
"height": "400",
"width": "600",
},
)
def test_youtube_iframe():
"""Test Youtube iframe tag generation"""
sample = ".. youtube:: YID\n :height: 400\n :width: 600"
html = get_html_from_rst(sample)
assert_html_contains(
html,
"iframe",
attributes={
"src": (
"https://www.youtube-nocookie.com"
"/embed/YID?rel=0&"
"wmode=transparent"
),
"height": "400",
"width": "600",
"frameborder": "0",
"allowfullscreen": "",
"allow": "encrypted-media",
},
)
def test_vimeo(disable_vimeo_api_query):
"""Test Vimeo iframe tag generation"""
sample = ".. vimeo:: VID\n :height: 400\n :width: 600"
html = get_html_from_rst(sample)
assert_html_contains(
html,
"iframe",
attributes={
"src": ("https://player.vimeo.com/" "video/VID"),
"height": "400",
"width": "600",
},
)
@pytest.mark.parametrize(
"sample",
[
".. code-block:: python\n\n import antigravity",
".. sourcecode:: python\n\n import antigravity",
],
)
def test_rendering_codeblock_alias(sample):
"""Test CodeBlock aliases"""
get_html_from_rst(sample)
def test_doc_doesnt_exist():
with pytest.raises(Exception):
assert_html_contains("anything", {})
def test_doc():
sample = "Sample for testing my :doc:`fake-post`"
html = get_html_from_rst(sample)
assert_html_contains(
html, "a", text="Fake post", attributes={"href": "/posts/fake-post"}
)
def test_doc_titled():
sample = "Sample for testing my :doc:`titled post <fake-post>`"
html = get_html_from_rst(sample)
assert_html_contains(
html, "a", text="titled post", attributes={"href": "/posts/fake-post"}
)
@pytest.fixture(autouse=True, scope="module")
def localeborg_base():
"""A base config of LocaleBorg."""
LocaleBorg.reset()
assert not LocaleBorg.initialized
LocaleBorg.initialize({}, "en")
assert LocaleBorg.initialized
assert LocaleBorg().current_lang == "en"
try:
yield
finally:
LocaleBorg.reset()
assert not LocaleBorg.initialized
def get_html_from_rst(rst):
"""Create html output from rst string"""
compiler = nikola.plugins.compile.rest.CompileRest()
compiler.set_site(FakeSite())
return compiler.compile_string(rst)[0]
class FakePost:
def __init__(self, outfile):
self._depfile = {outfile: []}
def assert_html_contains(html, element, attributes=None, text=None):
"""
Test if HTML document includes an element with the given attributes
and text content.
The HTML is parsed with lxml for checking against the data you
provide. The method takes an element argument, a string representing
the *name* of an HTML tag, like "script" or "iframe".
We will try to find this tag in the document and perform the tests
on it. You can pass a dictionary to the attributes kwarg
representing the name and the value of the tag attributes. The text
kwarg takes a string argument, which will be tested against the
contents of the HTML element.
    One last caveat: you need to URL-unquote your URLs if you are going
    to test attributes like "src" or "link", since the HTML rendered by
    docutils will always be unquoted.
"""
html_doc = lxml_html.parse(StringIO(html))
try:
tag = next(html_doc.iter(element))
except StopIteration:
raise Exception("<{0}> not in {1}".format(element, html))
if attributes:
arg_attrs = set(attributes.items())
tag_attrs = set(tag.items())
assert arg_attrs.issubset(tag_attrs)
if text:
assert text in tag.text
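# Hedged example (not part of the original test module) of the helper in use:
#
#     html = '<div><a href="/posts/fake-post">Fake post</a></div>'
#     assert_html_contains(html, "a",
#                          attributes={"href": "/posts/fake-post"},
#                          text="Fake post")   # passes
#     assert_html_contains(html, "iframe")     # raises: <iframe> not found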
@pytest.fixture
def disable_vimeo_api_query():
"""
    Disable querying the Vimeo API over the wire.
    Set Vimeo.request_size to False to avoid querying the Vimeo API
    over the network.
"""
before = vimeo.Vimeo.request_size
vimeo.Vimeo.request_size = False
try:
_reload(nikola.plugins.compile.rest)
yield
finally:
vimeo.Vimeo.request_size = before
|
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
import numpy as np
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_perform_feature_selection_false_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_selection=False)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
# For some reason, this test now causes a Segmentation fault on Travis when run on Python 3.5.
# home/travis/.travis/job_stages: line 53: 8810 Segmentation fault (core dumped) nosetests -v --with-coverage --cover-package auto_ml tests
# It didn't error previously.
# It appears to be an environment issue (possibly caused by running too many parallelized things, which only happens in a test suite), not an issue with auto_ml. So we'll run this test to make sure the library functionality works, but only on some environments.
if os.environ.get('TRAVIS_PYTHON_VERSION', '0') != '3.5':
def test_compare_all_models_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, compare_all_models=True)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_perform_feature_selection_true_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_selection=True)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.124
def test_perform_feature_scaling_true_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_scaling=True)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_perform_feature_scaling_false_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_scaling=False)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.14
def test_user_input_func_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
def age_bucketing(data):
def define_buckets(age):
if age <= 17:
return 'youth'
elif age <= 40:
return 'adult'
elif age <= 60:
return 'adult2'
else:
return 'over_60'
if isinstance(data, dict):
data['age_bucket'] = define_buckets(data['age'])
else:
data['age_bucket'] = data.age.apply(define_buckets)
return data
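    # Hedged illustration (not part of the original test): the user_input_func
    # is applied to whole DataFrames during training and to single prediction
    # rows at inference time, e.g.
    #     age_bucketing({'age': 35, 'sex': 'female'})
    #     -> {'age': 35, 'sex': 'female', 'age_bucket': 'adult'}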
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
, 'age_bucket': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, user_input_func=age_bucketing)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
    lower_bound = -0.16
    assert lower_bound < first_score < -0.135
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert -0.16 < second_score < -0.135
def test_binary_classification_predict_on_Predictor_instance():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.train_basic_binary_classifier(df_titanic_train)
predictions = ml_predictor.predict(df_titanic_test)
test_score = accuracy_score(predictions, df_titanic_test.survived)
# Make sure our score is good, but not unreasonably good
print(test_score)
assert .77 < test_score < .805
def test_multilabel_classification_predict_on_Predictor_instance():
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
# Note that this does not take 'text' into account, intentionally (as that takes a while longer to train)
ml_predictor = utils.train_basic_multilabel_classifier(df_twitter_train)
predictions = ml_predictor.predict(df_twitter_test)
test_score = accuracy_score(predictions, df_twitter_test.airline_sentiment)
# Make sure our score is good, but not unreasonably good
print('test_score')
print(test_score)
assert 0.72 < test_score < 0.77
def test_binary_classification_predict_proba_on_Predictor_instance():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.train_basic_binary_classifier(df_titanic_train)
predictions = ml_predictor.predict_proba(df_titanic_test)
predictions = [pred[1] for pred in predictions]
test_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
# Make sure our score is good, but not unreasonably good
print(test_score)
assert -0.16 < test_score < -0.135
def test_pass_in_list_of_dictionaries_train_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
list_titanic_train = df_titanic_train.to_dict('records')
ml_predictor.train(list_titanic_train)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_pass_in_list_of_dictionaries_predict_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
list_titanic_train = df_titanic_train.to_dict('records')
ml_predictor.train(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test.to_dict('records'), df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_include_bad_y_vals_train_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
df_titanic_train.iloc[1]['survived'] = None
df_titanic_train.iloc[8]['survived'] = None
df_titanic_train.iloc[26]['survived'] = None
ml_predictor.train(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test.to_dict('records'), df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.17 < test_score < -0.135
def test_include_bad_y_vals_predict_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
df_titanic_test.iloc[1]['survived'] = float('nan')
df_titanic_test.iloc[8]['survived'] = float('inf')
df_titanic_test.iloc[26]['survived'] = None
ml_predictor.train(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test.to_dict('records'), df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_list_of_single_model_name_classification():
np.random.seed(0)
model_name = 'GradientBoostingClassifier'
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names=[model_name])
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
if os.environ.get('TRAVIS_PYTHON_VERSION', '0') != '3.5':
def test_getting_single_predictions_nlp_date_multilabel_classification():
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
column_descriptions = {
'airline_sentiment': 'output'
, 'airline': 'categorical'
, 'text': 'nlp'
, 'tweet_location': 'categorical'
, 'user_timezone': 'categorical'
, 'tweet_created': 'date'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_twitter_train)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_twitter_test_dictionaries = df_twitter_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
first_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = 0.73
assert lower_bound < first_score < 0.79
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_twitter_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_twitter_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('df_twitter_test_dictionaries')
print(df_twitter_test_dictionaries)
second_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < 0.79
|
from weblate.wladmin.models import WeblateModelAdmin
class AuditLogAdmin(WeblateModelAdmin):
list_display = ["get_message", "user", "address", "user_agent", "timestamp"]
search_fields = ["user__username", "user__email", "address", "activity"]
date_hierarchy = "timestamp"
ordering = ("-timestamp",)
def has_delete_permission(self, request, obj=None):
return False
class ProfileAdmin(WeblateModelAdmin):
list_display = ["user", "full_name", "language", "suggested", "translated"]
search_fields = ["user__username", "user__email", "user__full_name"]
list_filter = ["language"]
filter_horizontal = ("languages", "secondary_languages", "watched")
def has_delete_permission(self, request, obj=None):
return False
class VerifiedEmailAdmin(WeblateModelAdmin):
list_display = ("social", "provider", "email")
search_fields = ("email", "social__user__username", "social__user__email")
raw_id_fields = ("social",)
ordering = ("email",)
def has_delete_permission(self, request, obj=None):
return False
|
import unittest
from mock import Mock, call
from trashcli.restore import TrashDirectories2
class TestTrashDirectories2(unittest.TestCase):
def setUp(self):
self.trash_directories = Mock(spec=['all_trash_directories'])
self.volume_of = lambda x: "volume_of(%s)" % x
self.trash_directories2 = TrashDirectories2(self.volume_of,
self.trash_directories)
def test_when_user_dir_is_none(self):
self.trash_directories.all_trash_directories.return_value = \
"os-trash-directories"
result = self.trash_directories2.trash_directories_or_user('volumes',
None)
self.assertEqual([call.all_trash_directories('volumes')],
self.trash_directories.mock_calls)
self.assertEqual('os-trash-directories', result)
def test_when_user_dir_is_specified(self):
self.trash_directories.all_trash_directories.return_value = \
"os-trash-directories"
result = self.trash_directories2.trash_directories_or_user(
'volumes', 'user-trash_dir')
self.assertEqual([], self.trash_directories.mock_calls)
self.assertEqual([('user-trash_dir', 'volume_of(user-trash_dir)')],
result)
|
import voluptuous as vol
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import HassJob, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_track_time_change
# mypy: allow-untyped-defs, no-check-untyped-defs
CONF_HOURS = "hours"
CONF_MINUTES = "minutes"
CONF_SECONDS = "seconds"
class TimePattern:
"""Validate a time pattern value.
:raises Invalid: If the value has a wrong format or is outside the range.
"""
def __init__(self, maximum):
"""Initialize time pattern."""
self.maximum = maximum
def __call__(self, value):
"""Validate input."""
try:
if value == "*":
return value
if isinstance(value, str) and value.startswith("/"):
number = int(value[1:])
else:
value = number = int(value)
if not (0 <= number <= self.maximum):
raise vol.Invalid(f"must be a value between 0 and {self.maximum}")
except ValueError as err:
raise vol.Invalid("invalid time_pattern value") from err
return value
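# An illustrative sketch of how the TimePattern validator above behaves,
# assuming it is used exactly as defined: "*" passes through, "/n" means
# "every n units", and a bare number must lie within [0, maximum].
#   TimePattern(maximum=59)("*")   -> "*"
#   TimePattern(maximum=59)("/5")  -> "/5"
#   TimePattern(maximum=59)(30)    -> 30
#   TimePattern(maximum=59)(75)    -> raises vol.Invalid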
TRIGGER_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_PLATFORM): "time_pattern",
CONF_HOURS: TimePattern(maximum=23),
CONF_MINUTES: TimePattern(maximum=59),
CONF_SECONDS: TimePattern(maximum=59),
}
),
cv.has_at_least_one_key(CONF_HOURS, CONF_MINUTES, CONF_SECONDS),
)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
hours = config.get(CONF_HOURS)
minutes = config.get(CONF_MINUTES)
seconds = config.get(CONF_SECONDS)
job = HassJob(action)
# If larger units are specified, default the smaller units to zero
if minutes is None and hours is not None:
minutes = 0
if seconds is None and minutes is not None:
seconds = 0
@callback
def time_automation_listener(now):
"""Listen for time changes and calls action."""
hass.async_run_hass_job(
job,
{
"trigger": {
"platform": "time_pattern",
"now": now,
"description": "time pattern",
}
},
)
return async_track_time_change(
hass, time_automation_listener, hour=hours, minute=minutes, second=seconds
)
|
import errno
import os
from . import _vc
class Vc(_vc.Vc):
CMD = "hg"
NAME = "Mercurial"
VC_DIR = ".hg"
state_map = {
"?": _vc.STATE_NONE,
"A": _vc.STATE_NEW,
"C": _vc.STATE_NORMAL,
"!": _vc.STATE_MISSING,
"I": _vc.STATE_IGNORED,
"M": _vc.STATE_MODIFIED,
"R": _vc.STATE_REMOVED,
}
def commit(self, runner, files, message):
command = [self.CMD, 'commit', '-m', message]
runner(command, files, refresh=True, working_dir=self.root)
def update(self, runner):
command = [self.CMD, 'pull', '-u']
runner(command, [], refresh=True, working_dir=self.root)
def add(self, runner, files):
command = [self.CMD, 'add']
runner(command, files, refresh=True, working_dir=self.root)
def remove(self, runner, files):
command = [self.CMD, 'rm']
runner(command, files, refresh=True, working_dir=self.root)
def revert(self, runner, files):
command = [self.CMD, 'revert']
runner(command, files, refresh=True, working_dir=self.root)
@classmethod
def valid_repo(cls, path):
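# `hg root` exits with status 0 only inside a repository, so a falsy
# return value from _vc.call means the path is a valid Mercurial checkout.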
return not _vc.call([cls.CMD, "root"], cwd=path)
def get_path_for_repo_file(self, path, commit=None):
if commit is not None:
raise NotImplementedError()
if not path.startswith(self.root + os.path.sep):
raise _vc.InvalidVCPath(self, path, "Path not in repository")
path = path[len(self.root) + 1:]
args = [self.CMD, "cat", path]
return _vc.call_temp_output(args, cwd=self.root)
def _update_tree_state_cache(self, path):
""" Update the state of the file(s) at self._tree_cache['path'] """
while 1:
try:
# Get the status of modified files
proc = _vc.popen([self.CMD, "status", '-A', path],
cwd=self.location)
entries = proc.read().split("\n")[:-1]
# The following command removes duplicate file entries.
# Just in case.
entries = list(set(entries))
break
except OSError as e:
if e.errno != errno.EAGAIN:
raise
if len(entries) == 0 and os.path.isfile(path):
# If we're just updating a single file there's a chance that it
# was previously modified, and has now been edited so that it is
# un-modified. This will result in an empty 'entries' list,
# and self._tree_cache['path'] will still contain stale data.
# When this corner case occurs we force self._tree_cache['path']
# to STATE_NORMAL.
self._tree_cache[path] = _vc.STATE_NORMAL
else:
# There are 1 or more modified files, parse their state
for entry in entries:
# the file name may contain spaces, so split only on the first space
statekey, name = entry.split(" ", 1)
path = os.path.join(self.location, name.strip())
state = self.state_map.get(statekey.strip(), _vc.STATE_NONE)
self._tree_cache[path] = state
self._add_missing_cache_entry(path, state)
|
import pytest
from homeassistant.components.sonos import DOMAIN, media_player
from homeassistant.core import Context
from homeassistant.exceptions import Unauthorized
from homeassistant.setup import async_setup_component
async def setup_platform(hass, config_entry, config):
"""Set up the media player platform for testing."""
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
async def test_async_setup_entry_hosts(hass, config_entry, config, soco):
"""Test static setup."""
await setup_platform(hass, config_entry, config)
entity = hass.data[media_player.DATA_SONOS].entities[0]
assert entity.soco == soco
async def test_async_setup_entry_discover(hass, config_entry, discover):
"""Test discovery setup."""
await setup_platform(hass, config_entry, {})
entity = hass.data[media_player.DATA_SONOS].entities[0]
assert entity.unique_id == "RINCON_test"
async def test_services(hass, config_entry, config, hass_read_only_user):
"""Test join/unjoin requires control access."""
await setup_platform(hass, config_entry, config)
with pytest.raises(Unauthorized):
await hass.services.async_call(
DOMAIN,
media_player.SERVICE_JOIN,
{"master": "media_player.bla", "entity_id": "media_player.blub"},
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
async def test_device_registry(hass, config_entry, config, soco):
"""Test sonos device registered in the device registry."""
await setup_platform(hass, config_entry, config)
device_registry = await hass.helpers.device_registry.async_get_registry()
reg_device = device_registry.async_get_device(
identifiers={("sonos", "RINCON_test")},
connections=set(),
)
assert reg_device.model == "Model Name"
assert reg_device.sw_version == "49.2-64250"
assert reg_device.connections == {("mac", "00:11:22:33:44:55")}
assert reg_device.manufacturer == "Sonos"
assert reg_device.name == "Zone A"
|
import pytest # isort:skip
pytest.importorskip("hypothesis")
import hypothesis.extra.numpy as npst
import hypothesis.strategies as st
from hypothesis import given
import xarray as xr
an_array = npst.arrays(
dtype=st.one_of(
npst.unsigned_integer_dtypes(), npst.integer_dtypes(), npst.floating_dtypes()
),
shape=npst.array_shapes(max_side=3), # max_side specified for performance
)
@pytest.mark.slow
@given(st.data(), an_array)
def test_CFMask_coder_roundtrip(data, arr):
names = data.draw(
st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim, unique=True).map(
tuple
)
)
original = xr.Variable(names, arr)
coder = xr.coding.variables.CFMaskCoder()
roundtripped = coder.decode(coder.encode(original))
xr.testing.assert_identical(original, roundtripped)
@pytest.mark.slow
@given(st.data(), an_array)
def test_CFScaleOffset_coder_roundtrip(data, arr):
names = data.draw(
st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim, unique=True).map(
tuple
)
)
original = xr.Variable(names, arr)
coder = xr.coding.variables.CFScaleOffsetCoder()
roundtripped = coder.decode(coder.encode(original))
xr.testing.assert_identical(original, roundtripped)
|
from app import app, github, __version__
from app.utils import ResponseUtil, RequestUtil, DateUtil
from werkzeug.utils import redirect
from flask.helpers import url_for, flash
from flask.globals import session
from app.database.model import User
@app.route('/', methods=['GET'])
def index():
return ResponseUtil.render_template('index.html', __version__=__version__)
@app.route('/login', methods=['GET'])
def login():
return github.authorize()
@github.access_token_getter
def token_getter():
return session.get('oauth_token', None)
@app.route('/github/callback')
@github.authorized_handler
def github_authorized(oauth_token):
if oauth_token is None:
flash("Authorization failed.")
return redirect(url_for('index'))
session['oauth_token'] = oauth_token
me = github.get('user')
user_id = me['login']
# check whether the user already exists
user = User.query.get(user_id)
if user is None:
# user does not exist yet, create it
user = User(id=user_id)
# update github user information
user.last_login = DateUtil.now_datetime()
user.name = me.get('name', user_id)
user.location = me.get('location', '')
user.avatar = me.get('avatar_url', '')
user.save()
RequestUtil.login_user(user.dict())
return redirect(url_for('index'))
@app.route('/logout', methods=['GET'])
def logout():
RequestUtil.logout()
return redirect(url_for('index'))
|
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout, HEAD_SIZE_DEFAULT)
from mne.channels.layout import (_box_size, _find_topomap_coords,
generate_2d_layout)
from mne.utils import run_tests_if_main
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
from mne.utils import _TempDir
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
lout_path = op.join(io_dir, 'tests', 'data')
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
"""Make test info."""
test_info = _empty_info(1000)
loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
dtype=np.float32)
test_info['chs'] = [
{'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
'unit': -1, 'unit_mul': 0},
{'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
'unit': -1, 'unit_mul': 0},
{'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
test_info._update_redundant()
test_info._check_consistency()
return test_info
def test_io_layout_lout():
"""Test IO with .lout files."""
tempdir = _TempDir()
layout = read_layout('Vectorview-all', scale=False)
layout.save(op.join(tempdir, 'foobar.lout'))
layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
print(layout) # test repr
def test_io_layout_lay():
"""Test IO with .lay files."""
tempdir = _TempDir()
layout = read_layout('CTF151', scale=False)
layout.save(op.join(tempdir, 'foobar.lay'))
layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
def test_find_topomap_coords():
"""Test mapping of coordinates in 3D space to 2D."""
info = read_info(fif_fname)
picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
# Remove extra digitization point, so EEG digitization points match up
# with the EEG channels
del info['dig'][85]
# Use channel locations
kwargs = dict(ignore_overlap=False, to_sphere=True,
sphere=HEAD_SIZE_DEFAULT)
l0 = _find_topomap_coords(info, picks, **kwargs)
# Remove electrode position information, use digitization points from now
# on.
for ch in info['chs']:
ch['loc'].fill(np.nan)
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1, l0, atol=1e-3)
for z_pt in ((HEAD_SIZE_DEFAULT, 0., 0.),
(0., HEAD_SIZE_DEFAULT, 0.)):
info['dig'][-1]['r'] = z_pt
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-6)
# Test plotting mag topomap without channel locations: it should fail
mag_picks = pick_types(info, meg='mag')
with pytest.raises(ValueError, match='Cannot determine location'):
_find_topomap_coords(info, mag_picks, **kwargs)
# Test function with too many EEG digitization points: it should fail
info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Test function with too few EEG digitization points: it should fail
info['dig'] = info['dig'][:-2]
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Electrode positions must be unique
info['dig'].append(info['dig'][-1])
with pytest.raises(ValueError, match='overlapping positions'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without EEG digitization points: it should fail
info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
with pytest.raises(RuntimeError, match='Did not find any digitization'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without any digitization points, it should fail
info['dig'] = None
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
info['dig'] = []
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
def test_make_eeg_layout():
"""Test creation of EEG layout."""
tempdir = _TempDir()
tmp_name = 'foo'
lout_name = 'test_raw'
lout_orig = read_layout(kind=lout_name, path=lout_path)
info = read_info(fif_fname)
info['bads'].append(info['ch_names'][360])
layout = make_eeg_layout(info, exclude=[])
assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
if ch.startswith('EE')]))
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
assert_array_equal(lout_new.kind, tmp_name)
assert_allclose(layout.pos, lout_new.pos, atol=0.1)
assert_array_equal(lout_orig.names, lout_new.names)
# Test input validation
pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)
pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, width=1.1)
pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, height=1.1)
def test_make_grid_layout():
"""Test creation of grid layout."""
tempdir = _TempDir()
tmp_name = 'bar'
lout_name = 'test_ica'
lout_orig = read_layout(kind=lout_name, path=lout_path)
layout = make_grid_layout(_get_test_info())
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir)
assert_array_equal(lout_new.kind, tmp_name)
assert_array_equal(lout_orig.pos, lout_new.pos)
assert_array_equal(lout_orig.names, lout_new.names)
# Test creating grid layout with specified number of columns
layout = make_grid_layout(_get_test_info(), n_col=2)
# Vertical positions should be equal
assert layout.pos[0, 1] == layout.pos[1, 1]
# Horizontal positions should be unequal
assert layout.pos[0, 0] != layout.pos[1, 0]
# Box sizes should be equal
assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
def test_find_layout():
"""Test finding layout."""
pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
sample_info = read_info(fif_fname)
grads = pick_types(sample_info, meg='grad')
sample_info2 = pick_info(sample_info, grads)
mags = pick_types(sample_info, meg='mag')
sample_info3 = pick_info(sample_info, mags)
# mock new convention
sample_info4 = copy.deepcopy(sample_info)
for ii, name in enumerate(sample_info4['ch_names']):
new = name.replace(' ', '')
sample_info4['chs'][ii]['ch_name'] = new
eegs = pick_types(sample_info, meg=False, eeg=True)
sample_info5 = pick_info(sample_info, eegs)
lout = find_layout(sample_info, ch_type=None)
assert lout.kind == 'Vectorview-all'
assert all(' ' in k for k in lout.names)
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
# test new vector-view
lout = find_layout(sample_info4, ch_type=None)
assert_equal(lout.kind, 'Vectorview-all')
assert all(' ' not in k for k in lout.names)
lout = find_layout(sample_info, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2)
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3)
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5)
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
# no common layout, 'meg' option not supported
lout = find_layout(read_info(fname_ctf_raw))
assert_equal(lout.kind, 'CTF-275')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
lout = find_layout(read_info(fname_bti_raw))
assert_equal(lout.kind, 'magnesWH3600')
raw_kit = read_raw_kit(fname_kit_157)
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_kit.info['bads'] = ['MEG 013', 'MEG 014', 'MEG 015', 'MEG 016']
raw_kit.info._check_consistency()
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
# fallback for missing IDs
for val in (35, 52, 54, 1001):
raw_kit.info['kit_system_id'] = val
lout = find_layout(raw_kit.info)
assert lout.kind == 'custom'
raw_umd = read_raw_kit(fname_kit_umd)
lout = find_layout(raw_umd.info)
assert_equal(lout.kind, 'KIT-UMD-3')
# Test plotting
lout.plot()
lout.plot(picks=np.arange(10))
plt.close('all')
def test_box_size():
"""Test calculation of box sizes."""
# No points. Box size should be 1,1.
assert_allclose(_box_size([]), (1.0, 1.0))
# Create one point. Box size should be 1,1.
point = [(0, 0)]
assert_allclose(_box_size(point), (1.0, 1.0))
# Create two points. Box size should be 0.5,1.
points = [(0.25, 0.5), (0.75, 0.5)]
assert_allclose(_box_size(points), (0.5, 1.0))
# Create three points. Box size should be (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points), (0.5, 0.5))
# Create a grid of points. Box size should be (0.1, 0.1).
x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
x, y = x.ravel(), y.ravel()
assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
# Create a random set of points. This should never break the function.
rng = np.random.RandomState(42)
points = rng.rand(100, 2)
width, height = _box_size(points)
assert width is not None
assert height is not None
# Test specifying an existing width.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
# Test specifying an existing width that has influence on the calculated
# height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
# Test specifying an existing height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
# Test specifying an existing height that has influence on the calculated
# width.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
# Test specifying both width and height. The function should simply return
# these.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
# Test specifying a width that will cause unfixable horizontal overlap and
# essentially breaks the function (height will be 0).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=1), (1, 0))
# Test adding some padding.
# Create three points. Box size should be a little less than (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
"""Test creation of a layout from 2d points."""
snobg = 10
sbg = 15
side = range(snobg)
bg_image = np.random.RandomState(42).randn(sbg, sbg)
w, h = [.2, .5]
# Generate fake data
xy = np.array([(i, j) for i in side for j in side])
lt = generate_2d_layout(xy, w=w, h=h)
# Correct points ordering / minmaxing
comp_1, comp_2 = [(5, 0), (7, 0)]
assert lt.pos[:, :2].max() == 1
assert lt.pos[:, :2].min() == 0
with np.errstate(invalid='ignore'): # divide by zero
assert_allclose(xy[comp_2] / float(xy[comp_1]),
lt.pos[comp_2] / float(lt.pos[comp_1]))
assert_allclose(lt.pos[0, [2, 3]], [w, h])
# Correct number of elements
assert lt.pos.shape[1] == 4
assert len(lt.box) == 4
# Make sure background image normalizing is correct
lt_bg = generate_2d_layout(xy, bg_image=bg_image)
assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
run_tests_if_main()
|
import os
import pytest
from nikola import __main__
from .helper import cd, patch_config
from .test_demo_build import prepare_demo_site
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
def test_monthly_archive(build, output_dir):
"""Check that the monthly archive is build."""
assert os.path.isfile(os.path.join(output_dir, "2012", "03", "index.html"))
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
patch_config(
target_dir,
("# CREATE_MONTHLY_ARCHIVE = False", "CREATE_MONTHLY_ARCHIVE = True"),
)
with cd(target_dir):
__main__.main(["build"])
|
import base64
from collections import namedtuple
import logging
import re
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a HUAWEI scanner."""
scanner = HuaweiDeviceScanner(config[DOMAIN])
return scanner
Device = namedtuple("Device", ["name", "ip", "mac", "state"])
class HuaweiDeviceScanner(DeviceScanner):
"""This class queries a router running HUAWEI firmware."""
ARRAY_REGEX = re.compile(r"var UserDevinfo = new Array\((.*)null\);")
DEVICE_REGEX = re.compile(r"new USERDevice\((.*?)\),")
DEVICE_ATTR_REGEX = re.compile(
'"(?P<Domain>.*?)","(?P<IpAddr>.*?)",'
'"(?P<MacAddr>.*?)","(?P<Port>.*?)",'
'"(?P<IpType>.*?)","(?P<DevType>.*?)",'
'"(?P<DevStatus>.*?)","(?P<PortType>.*?)",'
'"(?P<Time>.*?)","(?P<HostName>.*?)",'
'"(?P<IPv4Enabled>.*?)","(?P<IPv6Enabled>.*?)",'
'"(?P<DeviceType>.*?)"'
)
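# The regex above captures the quoted, comma-separated fields of each
# USERDevice(...) entry into named groups (HostName, IpAddr, MacAddr,
# DevStatus, among others) that _get_data() uses below to build Device tuples.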
LOGIN_COOKIE = {"Cookie": "body:Language:portuguese:id=-1"}
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = base64.b64encode(bytes(config[CONF_PASSWORD], "utf-8"))
self.last_results = []
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client.mac for client in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if not self.last_results:
return None
for client in self.last_results:
if client.mac == device:
return client.name
return None
def _update_info(self):
"""Ensure the information from the router is up to date.
Return True if the scan was successful, False otherwise.
"""
data = self._get_data()
if not data:
return False
active_clients = [client for client in data if client.state]
self.last_results = active_clients
_LOGGER.debug(
"Active clients: %s",
"\n".join(f"{client.mac} {client.name}" for client in active_clients),
)
return True
def _get_data(self):
"""Get the devices' data from the router.
Returns a list with all the devices known to the router DHCP server.
"""
array_regex_res = self.ARRAY_REGEX.search(self._get_devices_response())
devices = []
if array_regex_res:
device_regex_res = self.DEVICE_REGEX.findall(array_regex_res.group(1))
for device in device_regex_res:
device_attrs_regex_res = self.DEVICE_ATTR_REGEX.search(device)
devices.append(
Device(
device_attrs_regex_res.group("HostName"),
device_attrs_regex_res.group("IpAddr"),
device_attrs_regex_res.group("MacAddr"),
device_attrs_regex_res.group("DevStatus") == "Online",
)
)
return devices
def _get_devices_response(self):
"""Get the raw string with the devices from the router."""
cnt = requests.post(f"http://{self.host}/asp/GetRandCount.asp")
cnt_str = str(cnt.content, cnt.apparent_encoding, errors="replace")
_LOGGER.debug("Logging in")
cookie = requests.post(
f"http://{self.host}/login.cgi",
data=[
("UserName", self.username),
("PassWord", self.password),
("x.X_HW_Token", cnt_str),
],
cookies=self.LOGIN_COOKIE,
)
_LOGGER.debug("Requesting lan user info update")
# this request is needed or else some devices' state won't be updated
requests.get(
f"http://{self.host}/html/bbsp/common/lanuserinfo.asp",
cookies=cookie.cookies,
)
_LOGGER.debug("Requesting lan user info data")
devices = requests.get(
f"http://{self.host}/html/bbsp/common/GetLanUserDevInfo.asp",
cookies=cookie.cookies,
)
# we need to decode() using the request encoding, then encode() and
# decode('unicode_escape') to replace \\xXX with \xXX
# (i.e. \\x2d -> \x2d)
return (
devices.content.decode(devices.apparent_encoding)
.encode()
.decode("unicode_escape")
)
|
import asyncio
from functools import wraps
from types import ModuleType
from typing import Any, List, MutableMapping
import voluptuous as vol
import voluptuous_serialize
from homeassistant.components import websocket_api
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.loader import IntegrationNotFound
from homeassistant.requirements import async_get_integration_with_requirements
from .exceptions import DeviceNotFound, InvalidDeviceAutomationConfig
# mypy: allow-untyped-calls, allow-untyped-defs
DOMAIN = "device_automation"
TRIGGER_BASE_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "device",
vol.Required(CONF_DOMAIN): str,
vol.Required(CONF_DEVICE_ID): str,
}
)
TYPES = {
# platform name, get automations function, get capabilities function
"trigger": (
"device_trigger",
"async_get_triggers",
"async_get_trigger_capabilities",
),
"condition": (
"device_condition",
"async_get_conditions",
"async_get_condition_capabilities",
),
"action": ("device_action", "async_get_actions", "async_get_action_capabilities"),
}
async def async_setup(hass, config):
"""Set up device automation."""
hass.components.websocket_api.async_register_command(
websocket_device_automation_list_actions
)
hass.components.websocket_api.async_register_command(
websocket_device_automation_list_conditions
)
hass.components.websocket_api.async_register_command(
websocket_device_automation_list_triggers
)
hass.components.websocket_api.async_register_command(
websocket_device_automation_get_action_capabilities
)
hass.components.websocket_api.async_register_command(
websocket_device_automation_get_condition_capabilities
)
hass.components.websocket_api.async_register_command(
websocket_device_automation_get_trigger_capabilities
)
return True
async def async_get_device_automation_platform(
hass: HomeAssistant, domain: str, automation_type: str
) -> ModuleType:
"""Load device automation platform for integration.
Throws InvalidDeviceAutomationConfig if the integration is not found or does not support device automation.
"""
platform_name = TYPES[automation_type][0]
try:
integration = await async_get_integration_with_requirements(hass, domain)
platform = integration.get_platform(platform_name)
except IntegrationNotFound as err:
raise InvalidDeviceAutomationConfig(
f"Integration '{domain}' not found"
) from err
except ImportError as err:
raise InvalidDeviceAutomationConfig(
f"Integration '{domain}' does not support device automation {automation_type}s"
) from err
return platform
async def _async_get_device_automations_from_domain(
hass, domain, automation_type, device_id
):
"""List device automations."""
try:
platform = await async_get_device_automation_platform(
hass, domain, automation_type
)
except InvalidDeviceAutomationConfig:
return None
function_name = TYPES[automation_type][1]
return await getattr(platform, function_name)(hass, device_id)
async def _async_get_device_automations(hass, automation_type, device_id):
"""List device automations."""
device_registry, entity_registry = await asyncio.gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
)
domains = set()
automations: List[MutableMapping[str, Any]] = []
device = device_registry.async_get(device_id)
if device is None:
raise DeviceNotFound
for entry_id in device.config_entries:
config_entry = hass.config_entries.async_get_entry(entry_id)
domains.add(config_entry.domain)
entity_entries = async_entries_for_device(entity_registry, device_id)
for entity_entry in entity_entries:
domains.add(entity_entry.domain)
device_automations = await asyncio.gather(
*(
_async_get_device_automations_from_domain(
hass, domain, automation_type, device_id
)
for domain in domains
)
)
for device_automation in device_automations:
if device_automation is not None:
automations.extend(device_automation)
return automations
async def _async_get_device_automation_capabilities(hass, automation_type, automation):
"""List device automations."""
try:
platform = await async_get_device_automation_platform(
hass, automation[CONF_DOMAIN], automation_type
)
except InvalidDeviceAutomationConfig:
return {}
function_name = TYPES[automation_type][2]
if not hasattr(platform, function_name):
# The device automation has no capabilities
return {}
try:
capabilities = await getattr(platform, function_name)(hass, automation)
except InvalidDeviceAutomationConfig:
return {}
capabilities = capabilities.copy()
extra_fields = capabilities.get("extra_fields")
if extra_fields is None:
capabilities["extra_fields"] = []
else:
capabilities["extra_fields"] = voluptuous_serialize.convert(
extra_fields, custom_serializer=cv.custom_serializer
)
return capabilities
def handle_device_errors(func):
"""Handle device automation errors."""
@wraps(func)
async def with_error_handling(hass, connection, msg):
try:
await func(hass, connection, msg)
except DeviceNotFound:
connection.send_error(
msg["id"], websocket_api.const.ERR_NOT_FOUND, "Device not found"
)
return with_error_handling
@websocket_api.websocket_command(
{
vol.Required("type"): "device_automation/action/list",
vol.Required("device_id"): str,
}
)
@websocket_api.async_response
@handle_device_errors
async def websocket_device_automation_list_actions(hass, connection, msg):
"""Handle request for device actions."""
device_id = msg["device_id"]
actions = await _async_get_device_automations(hass, "action", device_id)
connection.send_result(msg["id"], actions)
@websocket_api.websocket_command(
{
vol.Required("type"): "device_automation/condition/list",
vol.Required("device_id"): str,
}
)
@websocket_api.async_response
@handle_device_errors
async def websocket_device_automation_list_conditions(hass, connection, msg):
"""Handle request for device conditions."""
device_id = msg["device_id"]
conditions = await _async_get_device_automations(hass, "condition", device_id)
connection.send_result(msg["id"], conditions)
@websocket_api.websocket_command(
{
vol.Required("type"): "device_automation/trigger/list",
vol.Required("device_id"): str,
}
)
@websocket_api.async_response
@handle_device_errors
async def websocket_device_automation_list_triggers(hass, connection, msg):
"""Handle request for device triggers."""
device_id = msg["device_id"]
triggers = await _async_get_device_automations(hass, "trigger", device_id)
connection.send_result(msg["id"], triggers)
@websocket_api.websocket_command(
{
vol.Required("type"): "device_automation/action/capabilities",
vol.Required("action"): dict,
}
)
@websocket_api.async_response
@handle_device_errors
async def websocket_device_automation_get_action_capabilities(hass, connection, msg):
"""Handle request for device action capabilities."""
action = msg["action"]
capabilities = await _async_get_device_automation_capabilities(
hass, "action", action
)
connection.send_result(msg["id"], capabilities)
@websocket_api.websocket_command(
{
vol.Required("type"): "device_automation/condition/capabilities",
vol.Required("condition"): dict,
}
)
@websocket_api.async_response
@handle_device_errors
async def websocket_device_automation_get_condition_capabilities(hass, connection, msg):
"""Handle request for device condition capabilities."""
condition = msg["condition"]
capabilities = await _async_get_device_automation_capabilities(
hass, "condition", condition
)
connection.send_result(msg["id"], capabilities)
@websocket_api.websocket_command(
{
vol.Required("type"): "device_automation/trigger/capabilities",
vol.Required("trigger"): dict,
}
)
@websocket_api.async_response
@handle_device_errors
async def websocket_device_automation_get_trigger_capabilities(hass, connection, msg):
"""Handle request for device trigger capabilities."""
trigger = msg["trigger"]
capabilities = await _async_get_device_automation_capabilities(
hass, "trigger", trigger
)
connection.send_result(msg["id"], capabilities)
|
import os
import re
import cherrypy
from cherrypy.process import servers
from cherrypy.test import helper
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
def read_process(cmd, args=''):
pipein, pipeout = os.popen4('%s %s' % (cmd, args))
try:
firstline = pipeout.readline()
if (re.search(r'(not recognized|No such file|not found)', firstline,
re.IGNORECASE)):
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
APACHE_PATH = 'apache2ctl'
CONF_PATH = 'fastcgi.conf'
conf_fastcgi = """
# Apache2 server conf file for testing CherryPy with mod_fastcgi.
# fumanchu: I had to hard-code paths due to crazy Debian layouts :(
ServerRoot /usr/lib/apache2
User #1000
ErrorLog %(root)s/mod_fastcgi.error.log
DocumentRoot "%(root)s"
ServerName 127.0.0.1
Listen %(port)s
LoadModule fastcgi_module modules/mod_fastcgi.so
LoadModule rewrite_module modules/mod_rewrite.so
Options +ExecCGI
SetHandler fastcgi-script
RewriteEngine On
RewriteRule ^(.*)$ /fastcgi.pyc [L]
FastCgiExternalServer "%(server)s" -host 127.0.0.1:4000
"""
def erase_script_name(environ, start_response):
environ['SCRIPT_NAME'] = ''
return cherrypy.tree(environ, start_response)
class ModFCGISupervisor(helper.LocalWSGISupervisor):
httpserver_class = 'cherrypy.process.servers.FlupFCGIServer'
using_apache = True
using_wsgi = True
template = conf_fastcgi
def __str__(self):
return 'FCGI Server on %s:%s' % (self.host, self.port)
def start(self, modulename):
cherrypy.server.httpserver = servers.FlupFCGIServer(
application=erase_script_name, bindAddress=('127.0.0.1', 4000))
cherrypy.server.httpserver.bind_addr = ('127.0.0.1', 4000)
cherrypy.server.socket_port = 4000
# For FCGI, we both start apache...
self.start_apache()
# ...and our local server
cherrypy.engine.start()
self.sync_apps()
def start_apache(self):
fcgiconf = CONF_PATH
if not os.path.isabs(fcgiconf):
fcgiconf = os.path.join(curdir, fcgiconf)
# Write the Apache conf file.
f = open(fcgiconf, 'wb')
try:
server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1]
output = self.template % {'port': self.port, 'root': curdir,
'server': server}
output = output.replace('\r\n', '\n')
f.write(output)
finally:
f.close()
result = read_process(APACHE_PATH, '-k start -f %s' % fcgiconf)
if result:
print(result)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, '-k stop')
helper.LocalWSGISupervisor.stop(self)
def sync_apps(self):
cherrypy.server.httpserver.fcgiserver.application = self.get_app(
erase_script_name)
|
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from django.test import TestCase
from zinnia.admin.forms import CategoryAdminForm
from zinnia.admin.forms import EntryAdminForm
from zinnia.models import Category
class EntryAdminFormTestCase(TestCase):
def test_categories_has_related_widget(self):
form = EntryAdminForm()
self.assertTrue(
isinstance(form.fields['categories'].widget,
RelatedFieldWidgetWrapper))
class CategoryAdminFormTestCase(TestCase):
def test_parent_has_related_widget(self):
form = CategoryAdminForm()
self.assertTrue(
isinstance(form.fields['parent'].widget,
RelatedFieldWidgetWrapper))
def test_clean_parent(self):
category = Category.objects.create(
title='Category 1', slug='cat-1')
datas = {'parent': category.pk,
'title': category.title,
'slug': category.slug}
form = CategoryAdminForm(datas, instance=category)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors['parent']), 1)
subcategory = Category.objects.create(
title='Category 2', slug='cat-2')
self.assertEqual(subcategory.parent, None)
datas = {'parent': category.pk,
'title': subcategory.title,
'slug': subcategory.slug}
form = CategoryAdminForm(datas, instance=subcategory)
self.assertTrue(form.is_valid())
|
import abc
import copy
import datetime
import posixpath
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hadoop
flags.DEFINE_string('spark_static_cluster_id', None,
'If set, the name of the Spark cluster, assumed to be '
'ready.')
flags.DEFINE_enum('spark_service_log_level', 'INFO', ['DEBUG', 'INFO', 'FATAL'],
'Supported log levels when submitting jobs to spark service'
' clusters.')
# Cloud to use for pkb-created Spark service.
PKB_MANAGED = 'pkb_managed'
PROVIDER_MANAGED = 'managed'
SUCCESS = 'success'
RUNTIME = 'running_time'
WAITING = 'pending_time'
SPARK_JOB_TYPE = 'spark'
PYSPARK_JOB_TYPE = 'pyspark'
HADOOP_JOB_TYPE = 'hadoop'
SPARK_VM_GROUPS = ('master_group', 'worker_group')
# This is used for error messages.
FLAGS = flags.FLAGS
def GetSparkServiceClass(cloud, service_type):
"""Get the Spark class corresponding to 'cloud'."""
if service_type == PKB_MANAGED:
return PkbSparkService
return resource.GetResourceClass(BaseSparkService, CLOUD=cloud)
class BaseSparkService(resource.BaseResource):
"""Object representing a Spark Service."""
RESOURCE_TYPE = 'BaseSparkService'
SPARK_SAMPLE_LOCATION = ('file:///usr/lib/spark/examples/jars/'
'spark-examples.jar')
HADOOP_SAMPLE_LOCATION = ('file:///usr/lib/hadoop-mapreduce/'
'hadoop-mapreduce-examples.jar')
def __init__(self, spark_service_spec):
"""Initialize the Apache Spark Service object.
Args:
spark_service_spec: spec of the spark service.
"""
is_user_managed = spark_service_spec.static_cluster_id is not None
super(BaseSparkService, self).__init__(user_managed=is_user_managed)
self.spec = spark_service_spec
# If only the worker group is specified, assume the master group is
# configured the same way.
if spark_service_spec.master_group is None:
self.spec.master_group = copy.copy(self.spec.worker_group)
self.spec.master_group.vm_count = 1
self.cluster_id = spark_service_spec.static_cluster_id
assert (spark_service_spec.master_group.vm_spec.zone ==
spark_service_spec.worker_group.vm_spec.zone)
self.zone = spark_service_spec.master_group.vm_spec.zone
@abc.abstractmethod
def SubmitJob(self, job_jar, class_name,
job_script=None,
job_poll_interval=None,
job_stdout_file=None, job_arguments=None,
job_type=SPARK_JOB_TYPE):
"""Submit a job to the spark service.
Submits a job and waits for it to complete.
Args:
job_jar: Jar file to execute.
class_name: Name of the main class.
job_script: PySpark script to run. job_jar and class_name must be None.
job_poll_interval: integer saying how often to poll for job
completion. Not used by providers for which submit job is a
synchronous operation.
job_stdout_file: String giving the location of the file in
which to put the standard out of the job.
job_arguments: Arguments to pass to class_name. These are
not the arguments passed to the wrapper that submits the
job.
Returns:
dictionary, where success is true if the job succeeded,
false otherwise. The dictionary may also contain an entry for
running_time and pending_time if the platform reports those
metrics.
"""
pass
@abc.abstractmethod
def ExecuteOnMaster(self, script_path, script_args):
"""Execute a script on the master node.
Args:
script_path: local path of the script to execute.
script_args: arguments to pass to the script.
"""
pass
@abc.abstractmethod
def CopyFromMaster(self, remote_path, local_path):
"""Copy a file from the master node.
Args:
remote_path: path of the file to copy.
local_path: destination to copy to.
"""
pass
def GetMetadata(self):
"""Return a dictionary of the metadata for this cluster."""
basic_data = {
'spark_service': self.SERVICE_NAME,
'spark_svc_cloud': self.CLOUD,
'spark_cluster_id': self.cluster_id,
'spark_cluster_zone': getattr(self, 'zone', None) or 'unknown'
}
# TODO grab this information for user_managed clusters.
if not self.user_managed:
basic_data.update({'num_workers': str(self.spec.worker_group.vm_count),
'worker_machine_type':
str(self.spec.worker_group.vm_spec.machine_type)})
return basic_data
@classmethod
def GetExampleJar(cls, job_type):
if job_type == SPARK_JOB_TYPE:
return cls.SPARK_SAMPLE_LOCATION
elif job_type == HADOOP_JOB_TYPE:
return cls.HADOOP_SAMPLE_LOCATION
else:
raise NotImplementedError()
class PkbSparkService(BaseSparkService):
"""A Spark service created from vms.
This class will create a Spark service by creating VMs and installing
the necessary software. (Similar to how the hbase benchmark currently
runs.) It should work across all or almost all providers.
"""
CLOUD = PKB_MANAGED
SERVICE_NAME = 'pkb-managed'
def __init__(self, spark_service_spec):
super(PkbSparkService, self).__init__(spark_service_spec)
assert self.cluster_id is None
self.vms = {}
def _Create(self):
"""Create an Apache Spark cluster."""
# need to fix this to install spark
def InstallHadoop(vm):
vm.Install('hadoop')
vm_util.RunThreaded(InstallHadoop, self.vms['worker_group'] +
self.vms['master_group'])
self.leader = self.vms['master_group'][0]
hadoop.ConfigureAndStart(self.leader,
self.vms['worker_group'])
def _Delete(self):
pass
def SubmitJob(self, jar_file, class_name, job_poll_interval=None,
job_stdout_file=None, job_arguments=None,
job_type=SPARK_JOB_TYPE):
"""Submit the jar file."""
if job_type == SPARK_JOB_TYPE:
raise NotImplementedError()
cmd_list = [posixpath.join(hadoop.HADOOP_BIN, 'hadoop'),
'jar', jar_file]
if class_name:
cmd_list.append(class_name)
if job_arguments:
cmd_list += job_arguments
cmd_string = ' '.join(cmd_list)
start_time = datetime.datetime.now()
stdout, _ = self.leader.RemoteCommand(cmd_string)
end_time = datetime.datetime.now()
if job_stdout_file:
with open(job_stdout_file, 'w') as f:
f.write(stdout)
return {SUCCESS: True,
RUNTIME: (end_time - start_time).total_seconds()}
@classmethod
def GetExampleJar(cls, job_type):
if job_type == HADOOP_JOB_TYPE:
return posixpath.join(
hadoop.HADOOP_DIR, 'share', 'hadoop', 'mapreduce',
'hadoop-mapreduce-examples-{0}.jar'.format(FLAGS.hadoop_version))
else:
raise NotImplementedError()
def ExecuteOnMaster(self, script_path, script_args):
pass
def CopyFromMaster(self, remote_path, local_path):
pass
|
from __future__ import division
from datetime import date, datetime, time
from math import floor, log
from pygal._compat import to_str, u
from pygal.util import float_format
class Formatter(object):
pass
class HumanReadable(Formatter):
"""Format a number to engineer scale"""
ORDERS = u("yzafpnµm kMGTPEZY")
def __init__(self, none_char=u('∅')):
self.none_char = none_char
def __call__(self, val):
if val is None:
return self.none_char
order = val and int(floor(log(abs(val)) / log(1000)))
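# The split below picks the sub-multiple prefixes ("yzafpnµm") when
# order <= 0 and the multiples ("kMGTPEZY") when order > 0; an order of 0
# leaves the value unprefixed.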
orders = self.ORDERS.split(" ")[int(order > 0)]
if order == 0 or order > len(orders):
return float_format(val / (1000**int(order)))
return (
float_format(val / (1000**int(order))) +
orders[int(order) - int(order > 0)]
)
class Significant(Formatter):
"""Show precision significant digit of float"""
def __init__(self, precision=10):
self.format = '%%.%dg' % precision
def __call__(self, val):
if val is None:
return ''
return self.format % val
class Integer(Formatter):
"""Cast number to integer"""
def __call__(self, val):
if val is None:
return ''
return '%d' % val
class Raw(Formatter):
"""Cast everything to string"""
def __call__(self, val):
if val is None:
return ''
return to_str(val)
class IsoDateTime(Formatter):
"""Iso format datetimes"""
def __call__(self, val):
if val is None:
return ''
return val.isoformat()
class Default(Significant, IsoDateTime, Raw):
"""Try to guess best format from type"""
def __call__(self, val):
if val is None:
return ''
if isinstance(val, (int, float)):
return Significant.__call__(self, val)
if isinstance(val, (date, time, datetime)):
return IsoDateTime.__call__(self, val)
return Raw.__call__(self, val)
# Formatters with default options
human_readable = HumanReadable()
significant = Significant()
integer = Integer()
raw = Raw()
# Default config formatter
default = Default()
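# Rough usage sketch for the instances above, assuming pygal's float_format
# trims trailing zeros: human_readable(1234000) -> '1.234M', integer(3.7) -> '3',
# raw(None) -> '', and default() dispatches to Significant for numbers and to
# IsoDateTime for date/time values.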
|
from __future__ import print_function, unicode_literals
from plumbum.cmd import pdflatex, convert
from plumbum import local, cli, FG
from plumbum.path.utils import delete
def image_comp(item):
pdflatex["-shell-escape", item] & FG
print("Converting", item)
convert[item.with_suffix(".svg"),
item.with_suffix(".png")] & FG
delete(item.with_suffix(".log"),
item.with_suffix(".aux"),
)
class MyApp(cli.Application):
def main(self, *srcfiles):
print("Tex files should start with:")
print(r"\documentclass[tikz,convert={outfile=\jobname.svg}]{standalone}")
items = map(cli.ExistingFile, srcfiles) if srcfiles else local.cwd // "*.tex"
for item in items:
image_comp(item)
if __name__ == "__main__":
MyApp.run()
|
import asyncio
from datetime import timedelta
import logging
from meteofrance.client import MeteoFranceClient
from meteofrance.helpers import is_valid_warning_department
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_CITY,
COORDINATOR_ALERT,
COORDINATOR_FORECAST,
COORDINATOR_RAIN,
DOMAIN,
PLATFORMS,
UNDO_UPDATE_LISTENER,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL_RAIN = timedelta(minutes=5)
SCAN_INTERVAL = timedelta(minutes=15)
CITY_SCHEMA = vol.Schema({vol.Required(CONF_CITY): cv.string})
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [CITY_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up Meteo-France from legacy config file."""
conf = config.get(DOMAIN)
if not conf:
return True
for city_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=city_conf
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up an Meteo-France account from a config entry."""
hass.data.setdefault(DOMAIN, {})
latitude = entry.data.get(CONF_LATITUDE)
client = MeteoFranceClient()
# Migrate from previous config
if not latitude:
places = await hass.async_add_executor_job(
client.search_places, entry.data[CONF_CITY]
)
hass.config_entries.async_update_entry(
entry,
title=f"{places[0]}",
data={
CONF_LATITUDE: places[0].latitude,
CONF_LONGITUDE: places[0].longitude,
},
)
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
async def _async_update_data_forecast_forecast():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(
client.get_forecast, latitude, longitude
)
async def _async_update_data_rain():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(client.get_rain, latitude, longitude)
async def _async_update_data_alert():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(
client.get_warning_current_phenomenoms, department, 0, True
)
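# Note: `department` above is a closure variable; it is assigned further down
# (from the forecast coordinator data) before coordinator_alert, the only
# caller of this helper, performs its first refresh.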
coordinator_forecast = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France forecast for city {entry.title}",
update_method=_async_update_data_forecast_forecast,
update_interval=SCAN_INTERVAL,
)
coordinator_rain = None
coordinator_alert = None
# Fetch initial data so we have data when entities subscribe
await coordinator_forecast.async_refresh()
if not coordinator_forecast.last_update_success:
raise ConfigEntryNotReady
# Check if rain forecast is available.
if coordinator_forecast.data.position.get("rain_product_available") == 1:
coordinator_rain = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France rain for city {entry.title}",
update_method=_async_update_data_rain,
update_interval=SCAN_INTERVAL_RAIN,
)
await coordinator_rain.async_refresh()
if not coordinator_rain.last_update_success:
raise ConfigEntryNotReady
else:
_LOGGER.warning(
"1 hour rain forecast not available. %s is not in covered zone",
entry.title,
)
department = coordinator_forecast.data.position.get("dept")
_LOGGER.debug(
"Department corresponding to %s is %s",
entry.title,
department,
)
if is_valid_warning_department(department):
if not hass.data[DOMAIN].get(department):
coordinator_alert = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France alert for department {department}",
update_method=_async_update_data_alert,
update_interval=SCAN_INTERVAL,
)
await coordinator_alert.async_refresh()
if not coordinator_alert.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][department] = True
else:
_LOGGER.warning(
"Weather alert for department %s won't be added with city %s, as it has already been added within another city",
department,
entry.title,
)
else:
_LOGGER.warning(
"Weather alert not available: The city %s is not in metropolitan France or Andorre.",
entry.title,
)
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR_FORECAST: coordinator_forecast,
COORDINATOR_RAIN: coordinator_rain,
COORDINATOR_ALERT: coordinator_alert,
UNDO_UPDATE_LISTENER: undo_listener,
}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
if hass.data[DOMAIN][entry.entry_id][COORDINATOR_ALERT]:
department = hass.data[DOMAIN][entry.entry_id][
COORDINATOR_FORECAST
].data.position.get("dept")
hass.data[DOMAIN][department] = False
_LOGGER.debug(
"Weather alert for depatment %s unloaded and released. It can be added now by another city.",
department,
)
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
async def _async_update_listener(hass: HomeAssistantType, entry: ConfigEntry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
|
from typing import Any, Callable, List, Optional
import pyvera as veraApi
from homeassistant.components.climate import (
DOMAIN as PLATFORM_DOMAIN,
ENTITY_ID_FORMAT,
ClimateEntity,
)
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert
from . import VeraDevice
from .common import ControllerData, get_controller_data
FAN_OPERATION_LIST = [FAN_ON, FAN_AUTO]
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
SUPPORT_HVAC = [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF]
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up the sensor config entry."""
controller_data = get_controller_data(hass, entry)
async_add_entities(
[
VeraThermostat(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
]
)
class VeraThermostat(VeraDevice[veraApi.VeraThermostat], ClimateEntity):
"""Representation of a Vera Thermostat."""
def __init__(
self, vera_device: veraApi.VeraThermostat, controller_data: ControllerData
):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def supported_features(self) -> Optional[int]:
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
mode = self.vera_device.get_hvac_mode()
if mode == "HeatOn":
return HVAC_MODE_HEAT
if mode == "CoolOn":
return HVAC_MODE_COOL
if mode == "AutoChangeOver":
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return SUPPORT_HVAC
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting."""
mode = self.vera_device.get_fan_mode()
if mode == "ContinuousOn":
return FAN_ON
return FAN_AUTO
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return a list of available fan modes."""
return FAN_OPERATION_LIST
def set_fan_mode(self, fan_mode) -> None:
"""Set new target temperature."""
if fan_mode == FAN_ON:
self.vera_device.fan_on()
else:
self.vera_device.fan_auto()
self.schedule_update_ha_state()
@property
def current_power_w(self) -> Optional[float]:
"""Return the current power usage in W."""
power = self.vera_device.power
if power:
return convert(power, float, 0.0)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
vera_temp_units = self.vera_device.vera_controller.temperature_units
if vera_temp_units == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self.vera_device.get_current_temperature()
@property
def operation(self) -> str:
"""Return current operation ie. heat, cool, idle."""
return self.vera_device.get_hvac_mode()
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self.vera_device.get_current_goal_temperature()
def set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self.vera_device.set_temperature(kwargs.get(ATTR_TEMPERATURE))
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self.vera_device.turn_off()
elif hvac_mode == HVAC_MODE_HEAT_COOL:
self.vera_device.turn_auto_on()
elif hvac_mode == HVAC_MODE_COOL:
self.vera_device.turn_cool_on()
elif hvac_mode == HVAC_MODE_HEAT:
self.vera_device.turn_heat_on()
self.schedule_update_ha_state()
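# --- Hedged illustration (added; not part of the original platform) ---
# The hvac_mode property above maps pyvera mode strings onto Home Assistant
# constants roughly as follows; anything unrecognized falls back to HVAC_MODE_OFF.
_EXAMPLE_VERA_TO_HA_HVAC = {
    "HeatOn": HVAC_MODE_HEAT,
    "CoolOn": HVAC_MODE_COOL,
    "AutoChangeOver": HVAC_MODE_HEAT_COOL,
}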
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
import re
import sys
import time
from copy import copy
from os.path import exists
from six import integer_types
# python >= 2.3
from optparse import OptionParser as BaseParser, Option as BaseOption, \
OptionGroup, OptionContainer, OptionValueError, OptionError, \
Values, HelpFormatter, NO_DEFAULT, SUPPRESS_HELP
try:
from mx import DateTime
HAS_MX_DATETIME = True
except ImportError:
HAS_MX_DATETIME = False
from logilab.common.textutils import splitstrip, TIME_UNITS, BYTE_UNITS, \
apply_units
def check_regexp(option, opt, value):
"""check a regexp value by trying to compile it
return the compiled regexp
"""
if hasattr(value, 'pattern'):
return value
try:
return re.compile(value)
except ValueError:
raise OptionValueError(
"option %s: invalid regexp value: %r" % (opt, value))
def check_csv(option, opt, value):
"""check a csv value by trying to split it
return the list of separated values
"""
if isinstance(value, (list, tuple)):
return value
try:
return splitstrip(value)
except ValueError:
raise OptionValueError(
"option %s: invalid csv value: %r" % (opt, value))
def check_yn(option, opt, value):
"""check a yn value
return true for yes and false for no
"""
if isinstance(value, int):
return bool(value)
if value in ('y', 'yes'):
return True
if value in ('n', 'no'):
return False
msg = "option %s: invalid yn value %r, should be in (y, yes, n, no)"
raise OptionValueError(msg % (opt, value))
def check_named(option, opt, value):
"""check a named value
return a dictionary containing (name, value) associations
"""
if isinstance(value, dict):
return value
values = []
for value in check_csv(option, opt, value):
if value.find('=') != -1:
values.append(value.split('=', 1))
elif value.find(':') != -1:
values.append(value.split(':', 1))
if values:
return dict(values)
msg = "option %s: invalid named value %r, should be <NAME>=<VALUE> or \
<NAME>:<VALUE>"
raise OptionValueError(msg % (opt, value))
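# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# check_named() accepts either an existing dict or a comma separated string of
# NAME=VALUE / NAME:VALUE pairs and returns a {name: value} mapping; the option
# name '--named-opt' below is a placeholder.
def _example_check_named():
    """Illustrative helper only: returns {'port': '8080', 'host': 'localhost'}."""
    return check_named(None, '--named-opt', 'port=8080,host:localhost')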
def check_password(option, opt, value):
"""check a password value (can't be empty)
"""
# no actual checking, monkey patch if you want more
return value
def check_file(option, opt, value):
"""check a file value
return the filepath
"""
if exists(value):
return value
msg = "option %s: file %r does not exist"
raise OptionValueError(msg % (opt, value))
# XXX use python datetime
def check_date(option, opt, value):
"""check a file value
return the filepath
"""
try:
return DateTime.strptime(value, "%Y/%m/%d")
    except DateTime.Error:
raise OptionValueError(
"expected format of %s is yyyy/mm/dd" % opt)
def check_color(option, opt, value):
"""check a color value and returns it
/!\ does *not* check color labels (like 'red', 'green'), only
checks hexadecimal forms
"""
# Case (1) : color label, we trust the end-user
if re.match('[a-z0-9 ]+$', value, re.I):
return value
# Case (2) : only accepts hexadecimal forms
if re.match('#[a-f0-9]{6}', value, re.I):
return value
    # Else : neither a color label nor a valid hexadecimal form => error
msg = "option %s: invalid color : %r, should be either hexadecimal \
value or predefined color"
raise OptionValueError(msg % (opt, value))
def check_time(option, opt, value):
if isinstance(value, integer_types + (float,)):
return value
return apply_units(value, TIME_UNITS)
def check_bytes(option, opt, value):
if hasattr(value, '__int__'):
return value
return apply_units(value, BYTE_UNITS, final=int)
class Option(BaseOption):
"""override optik.Option to add some new option types
"""
TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
'multiple_choice', 'file', 'color',
'time', 'bytes')
ATTRS = BaseOption.ATTRS + ['hide', 'level']
TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
TYPE_CHECKER['regexp'] = check_regexp
TYPE_CHECKER['csv'] = check_csv
TYPE_CHECKER['yn'] = check_yn
TYPE_CHECKER['named'] = check_named
TYPE_CHECKER['multiple_choice'] = check_csv
TYPE_CHECKER['file'] = check_file
TYPE_CHECKER['color'] = check_color
TYPE_CHECKER['password'] = check_password
TYPE_CHECKER['time'] = check_time
TYPE_CHECKER['bytes'] = check_bytes
if HAS_MX_DATETIME:
TYPES += ('date',)
TYPE_CHECKER['date'] = check_date
def __init__(self, *opts, **attrs):
BaseOption.__init__(self, *opts, **attrs)
if hasattr(self, "hide") and self.hide:
self.help = SUPPRESS_HELP
def _check_choice(self):
"""FIXME: need to override this due to optik misdesign"""
if self.type in ("choice", "multiple_choice"):
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
BaseOption.CHECK_METHODS[2] = _check_choice
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
if self.type == 'named':
existant = getattr(values, self.dest)
if existant:
existant.update(value)
value = existant
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
class OptionParser(BaseParser):
"""override optik.OptionParser to use our Option class
"""
def __init__(self, option_class=Option, *args, **kwargs):
        BaseParser.__init__(self, option_class=option_class, *args, **kwargs)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
outputlevel = getattr(formatter, 'output_level', 0)
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading("Options"))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
if group.level <= outputlevel and (
group.description or level_options(group, outputlevel)):
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
OptionGroup.level = 0
def level_options(group, outputlevel):
return [option for option in group.option_list
if (getattr(option, 'level', 0) or 0) <= outputlevel
and not option.help is SUPPRESS_HELP]
def format_option_help(self, formatter):
result = []
outputlevel = getattr(formatter, 'output_level', 0) or 0
for option in level_options(self, outputlevel):
result.append(formatter.format_option(option))
return "".join(result)
OptionContainer.format_option_help = format_option_help
class ManHelpFormatter(HelpFormatter):
"""Format help using man pages ROFF format"""
def __init__ (self,
indent_increment=0,
max_help_position=24,
width=79,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_heading(self, heading):
return '.SH %s\n' % heading.upper()
def format_description(self, description):
return description
def format_option(self, option):
try:
optstring = option.option_strings
except AttributeError:
optstring = self.format_option_strings(option)
if option.help:
help_text = self.expand_default(option)
help = ' '.join([l.strip() for l in help_text.splitlines()])
else:
help = ''
return '''.IP "%s"
%s
''' % (optstring, help)
def format_head(self, optparser, pkginfo, section=1):
long_desc = ""
try:
pgm = optparser._get_prog_name()
except AttributeError:
# py >= 2.4.X (dunno which X exactly, at least 2)
pgm = optparser.get_prog_name()
short_desc = self.format_short_description(pgm, pkginfo.description)
if hasattr(pkginfo, "long_desc"):
long_desc = self.format_long_description(pgm, pkginfo.long_desc)
return '%s\n%s\n%s\n%s' % (self.format_title(pgm, section),
short_desc, self.format_synopsis(pgm),
long_desc)
def format_title(self, pgm, section):
date = '-'.join([str(num) for num in time.localtime()[:3]])
return '.TH %s %s "%s" %s' % (pgm, section, date, pgm)
def format_short_description(self, pgm, short_desc):
return '''.SH NAME
.B %s
\- %s
''' % (pgm, short_desc.strip())
def format_synopsis(self, pgm):
return '''.SH SYNOPSIS
.B %s
[
.I OPTIONS
] [
.I <arguments>
]
''' % pgm
def format_long_description(self, pgm, long_desc):
long_desc = '\n'.join([line.lstrip()
for line in long_desc.splitlines()])
long_desc = long_desc.replace('\n.\n', '\n\n')
if long_desc.lower().startswith(pgm):
long_desc = long_desc[len(pgm):]
return '''.SH DESCRIPTION
.B %s
%s
''' % (pgm, long_desc.strip())
def format_tail(self, pkginfo):
tail = '''.SH SEE ALSO
/usr/share/doc/pythonX.Y-%s/
.SH BUGS
Please report bugs on the project\'s mailing list:
%s
.SH AUTHOR
%s <%s>
''' % (getattr(pkginfo, 'debian_name', pkginfo.modname),
pkginfo.mailinglist, pkginfo.author, pkginfo.author_email)
if hasattr(pkginfo, "copyright"):
tail += '''
.SH COPYRIGHT
%s
''' % pkginfo.copyright
return tail
def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout, level=0):
"""generate a man page from an optik parser"""
formatter = ManHelpFormatter()
formatter.output_level = level
formatter.parser = optparser
print(formatter.format_head(optparser, pkginfo, section), file=stream)
print(optparser.format_option_help(formatter), file=stream)
print(formatter.format_tail(pkginfo), file=stream)
__all__ = ('OptionParser', 'Option', 'OptionGroup', 'OptionValueError',
'Values')
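# --- Hedged usage sketch (added; illustration only, option names below are placeholders) ---
# A parser built from this module can declare the extra option types registered on
# the custom Option class above, such as 'csv' and 'yn':
def _example_build_parser():
    parser = OptionParser()
    parser.add_option('--tags', type='csv', dest='tags',
                      help='comma separated list of tags')
    parser.add_option('--verbose', type='yn', dest='verbose', default=False,
                      help='verbose output (y/n)')
    # parser.parse_args(['--tags', 'a, b ,c', '--verbose', 'yes']) would then yield
    # options.tags == ['a', 'b', 'c'] and options.verbose is True.
    return parser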
|
import base64
import io
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_HOST, CONF_TIMEOUT, HTTP_OK, PERCENTAGE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DURATION = "duration"
CONF_FONTSIZE = "fontsize"
CONF_POSITION = "position"
CONF_TRANSPARENCY = "transparency"
CONF_COLOR = "color"
CONF_INTERRUPT = "interrupt"
DEFAULT_DURATION = 5
DEFAULT_FONTSIZE = "medium"
DEFAULT_POSITION = "bottom-right"
DEFAULT_TRANSPARENCY = "default"
DEFAULT_COLOR = "grey"
DEFAULT_INTERRUPT = False
DEFAULT_TIMEOUT = 5
DEFAULT_ICON = (
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP6zwAAAgcBApo"
"cMXEAAAAASUVORK5CYII="
)
ATTR_DURATION = "duration"
ATTR_FONTSIZE = "fontsize"
ATTR_POSITION = "position"
ATTR_TRANSPARENCY = "transparency"
ATTR_COLOR = "color"
ATTR_BKGCOLOR = "bkgcolor"
ATTR_INTERRUPT = "interrupt"
ATTR_IMAGE = "filename2"
ATTR_FILE = "file"
# Attributes contained in file
ATTR_FILE_URL = "url"
ATTR_FILE_PATH = "path"
ATTR_FILE_USERNAME = "username"
ATTR_FILE_PASSWORD = "password"
ATTR_FILE_AUTH = "auth"
# Any other value or absence of 'auth' leads to basic authentication being used
ATTR_FILE_AUTH_DIGEST = "digest"
FONTSIZES = {"small": 1, "medium": 0, "large": 2, "max": 3}
POSITIONS = {
"bottom-right": 0,
"bottom-left": 1,
"top-right": 2,
"top-left": 3,
"center": 4,
}
TRANSPARENCIES = {
"default": 0,
f"0{PERCENTAGE}": 1,
f"25{PERCENTAGE}": 2,
f"50{PERCENTAGE}": 3,
f"75{PERCENTAGE}": 4,
f"100{PERCENTAGE}": 5,
}
COLORS = {
"grey": "#607d8b",
"black": "#000000",
"indigo": "#303F9F",
"green": "#4CAF50",
"red": "#F44336",
"cyan": "#00BCD4",
"teal": "#009688",
"amber": "#FFC107",
"pink": "#E91E63",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Coerce(int),
vol.Optional(CONF_FONTSIZE, default=DEFAULT_FONTSIZE): vol.In(FONTSIZES.keys()),
vol.Optional(CONF_POSITION, default=DEFAULT_POSITION): vol.In(POSITIONS.keys()),
vol.Optional(CONF_TRANSPARENCY, default=DEFAULT_TRANSPARENCY): vol.In(
TRANSPARENCIES.keys()
),
vol.Optional(CONF_COLOR, default=DEFAULT_COLOR): vol.In(COLORS.keys()),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
vol.Optional(CONF_INTERRUPT, default=DEFAULT_INTERRUPT): cv.boolean,
}
)
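# --- Hedged illustration (added; not part of the original platform) ---
# A minimal config dict that should satisfy PLATFORM_SCHEMA above; the platform
# name "nfandroidtv" and the host address are assumptions used only for this sketch.
_EXAMPLE_PLATFORM_CONFIG = {
    "platform": "nfandroidtv",
    CONF_HOST: "192.168.1.10",
    CONF_DURATION: 8,
    CONF_COLOR: "teal",
}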
def get_service(hass, config, discovery_info=None):
"""Get the Notifications for Android TV notification service."""
remoteip = config.get(CONF_HOST)
duration = config.get(CONF_DURATION)
fontsize = config.get(CONF_FONTSIZE)
position = config.get(CONF_POSITION)
transparency = config.get(CONF_TRANSPARENCY)
color = config.get(CONF_COLOR)
interrupt = config.get(CONF_INTERRUPT)
timeout = config.get(CONF_TIMEOUT)
return NFAndroidTVNotificationService(
remoteip,
duration,
fontsize,
position,
transparency,
color,
interrupt,
timeout,
hass.config.is_allowed_path,
)
class NFAndroidTVNotificationService(BaseNotificationService):
"""Notification service for Notifications for Android TV."""
def __init__(
self,
remoteip,
duration,
fontsize,
position,
transparency,
color,
interrupt,
timeout,
is_allowed_path,
):
"""Initialize the service."""
self._target = f"http://{remoteip}:7676"
self._default_duration = duration
self._default_fontsize = fontsize
self._default_position = position
self._default_transparency = transparency
self._default_color = color
self._default_interrupt = interrupt
self._timeout = timeout
self._icon_file = io.BytesIO(base64.b64decode(DEFAULT_ICON))
self.is_allowed_path = is_allowed_path
def send_message(self, message="", **kwargs):
"""Send a message to a Android TV device."""
_LOGGER.debug("Sending notification to: %s", self._target)
payload = {
"filename": (
"icon.png",
self._icon_file,
"application/octet-stream",
{"Expires": "0"},
),
"type": "0",
"title": kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
"msg": message,
"duration": "%i" % self._default_duration,
"fontsize": "%i" % FONTSIZES.get(self._default_fontsize),
"position": "%i" % POSITIONS.get(self._default_position),
"bkgcolor": "%s" % COLORS.get(self._default_color),
"transparency": "%i" % TRANSPARENCIES.get(self._default_transparency),
"offset": "0",
"app": ATTR_TITLE_DEFAULT,
"force": "true",
"interrupt": "%i" % self._default_interrupt,
}
data = kwargs.get(ATTR_DATA)
if data:
if ATTR_DURATION in data:
duration = data.get(ATTR_DURATION)
try:
payload[ATTR_DURATION] = "%i" % int(duration)
except ValueError:
_LOGGER.warning("Invalid duration-value: %s", str(duration))
if ATTR_FONTSIZE in data:
fontsize = data.get(ATTR_FONTSIZE)
if fontsize in FONTSIZES:
payload[ATTR_FONTSIZE] = "%i" % FONTSIZES.get(fontsize)
else:
_LOGGER.warning("Invalid fontsize-value: %s", str(fontsize))
if ATTR_POSITION in data:
position = data.get(ATTR_POSITION)
if position in POSITIONS:
payload[ATTR_POSITION] = "%i" % POSITIONS.get(position)
else:
_LOGGER.warning("Invalid position-value: %s", str(position))
if ATTR_TRANSPARENCY in data:
transparency = data.get(ATTR_TRANSPARENCY)
if transparency in TRANSPARENCIES:
payload[ATTR_TRANSPARENCY] = "%i" % TRANSPARENCIES.get(transparency)
else:
_LOGGER.warning("Invalid transparency-value: %s", str(transparency))
if ATTR_COLOR in data:
color = data.get(ATTR_COLOR)
if color in COLORS:
payload[ATTR_BKGCOLOR] = "%s" % COLORS.get(color)
else:
_LOGGER.warning("Invalid color-value: %s", str(color))
if ATTR_INTERRUPT in data:
interrupt = data.get(ATTR_INTERRUPT)
try:
payload[ATTR_INTERRUPT] = "%i" % cv.boolean(interrupt)
except vol.Invalid:
_LOGGER.warning("Invalid interrupt-value: %s", str(interrupt))
filedata = data.get(ATTR_FILE) if data else None
if filedata is not None:
# Load from file or URL
file_as_bytes = self.load_file(
url=filedata.get(ATTR_FILE_URL),
local_path=filedata.get(ATTR_FILE_PATH),
username=filedata.get(ATTR_FILE_USERNAME),
password=filedata.get(ATTR_FILE_PASSWORD),
auth=filedata.get(ATTR_FILE_AUTH),
)
if file_as_bytes:
payload[ATTR_IMAGE] = (
"image",
file_as_bytes,
"application/octet-stream",
{"Expires": "0"},
)
try:
_LOGGER.debug("Payload: %s", str(payload))
response = requests.post(self._target, files=payload, timeout=self._timeout)
if response.status_code != HTTP_OK:
_LOGGER.error("Error sending message: %s", str(response))
except requests.exceptions.ConnectionError as err:
_LOGGER.error("Error communicating with %s: %s", self._target, str(err))
def load_file(
self, url=None, local_path=None, username=None, password=None, auth=None
):
"""Load image/document/etc from a local path or URL."""
try:
if url is not None:
# Check whether authentication parameters are provided
if username is not None and password is not None:
# Use digest or basic authentication
if ATTR_FILE_AUTH_DIGEST == auth:
auth_ = HTTPDigestAuth(username, password)
else:
auth_ = HTTPBasicAuth(username, password)
# Load file from URL with authentication
req = requests.get(url, auth=auth_, timeout=DEFAULT_TIMEOUT)
else:
# Load file from URL without authentication
req = requests.get(url, timeout=DEFAULT_TIMEOUT)
return req.content
if local_path is not None:
# Check whether path is whitelisted in configuration.yaml
if self.is_allowed_path(local_path):
return open(local_path, "rb")
_LOGGER.warning("'%s' is not secure to load data from!", local_path)
else:
_LOGGER.warning("Neither URL nor local path found in params!")
except OSError as error:
_LOGGER.error("Can't load from url or local path: %s", error)
return None
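# --- Hedged illustration (added; not part of the original service) ---
# Example of the optional service-call "data" payload handled by send_message()
# above; keys mirror the ATTR_* constants defined near the top of this file and
# the URL is a placeholder.
_EXAMPLE_NOTIFY_DATA = {
    ATTR_DURATION: 10,
    ATTR_POSITION: "center",
    ATTR_COLOR: "red",
    ATTR_INTERRUPT: True,
    ATTR_FILE: {ATTR_FILE_URL: "http://example.com/image.png"},
}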
|
from homeassistant.components.sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
from .devolo_device import DevoloDeviceEntity
DEVICE_CLASS_MAPPING = {
"battery": DEVICE_CLASS_BATTERY,
"temperature": DEVICE_CLASS_TEMPERATURE,
"light": DEVICE_CLASS_ILLUMINANCE,
"humidity": DEVICE_CLASS_HUMIDITY,
"current": DEVICE_CLASS_POWER,
"total": DEVICE_CLASS_POWER,
}
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Get all sensor devices and setup them via config entry."""
entities = []
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]:
for device in gateway.multi_level_sensor_devices:
for multi_level_sensor in device.multi_level_sensor_property:
entities.append(
DevoloGenericMultiLevelDeviceEntity(
homecontrol=gateway,
device_instance=device,
element_uid=multi_level_sensor,
)
)
for device in gateway.devices.values():
if hasattr(device, "consumption_property"):
for consumption in device.consumption_property:
for consumption_type in ["current", "total"]:
entities.append(
DevoloConsumptionEntity(
homecontrol=gateway,
device_instance=device,
element_uid=consumption,
consumption=consumption_type,
)
)
if hasattr(device, "battery_level"):
entities.append(
DevoloBatteryEntity(
homecontrol=gateway,
device_instance=device,
element_uid=f"devolo.BatterySensor:{device.uid}",
)
)
async_add_entities(entities, False)
class DevoloMultiLevelDeviceEntity(DevoloDeviceEntity):
"""Abstract representation of a multi level sensor within devolo Home Control."""
@property
def device_class(self) -> str:
"""Return device class."""
return self._device_class
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._unit
class DevoloGenericMultiLevelDeviceEntity(DevoloMultiLevelDeviceEntity):
"""Representation of a generic multi level sensor within devolo Home Control."""
def __init__(
self,
homecontrol,
device_instance,
element_uid,
):
"""Initialize a devolo multi level sensor."""
self._multi_level_sensor_property = device_instance.multi_level_sensor_property[
element_uid
]
super().__init__(
homecontrol=homecontrol,
device_instance=device_instance,
element_uid=element_uid,
)
self._device_class = DEVICE_CLASS_MAPPING.get(
self._multi_level_sensor_property.sensor_type
)
self._value = self._multi_level_sensor_property.value
self._unit = self._multi_level_sensor_property.unit
if self._device_class is None:
self._name += f" {self._multi_level_sensor_property.sensor_type}"
if element_uid.startswith("devolo.VoltageMultiLevelSensor:"):
self._enabled_default = False
class DevoloBatteryEntity(DevoloMultiLevelDeviceEntity):
"""Representation of a battery entity within devolo Home Control."""
def __init__(self, homecontrol, device_instance, element_uid):
"""Initialize a battery sensor."""
super().__init__(
homecontrol=homecontrol,
device_instance=device_instance,
element_uid=element_uid,
)
self._device_class = DEVICE_CLASS_MAPPING.get("battery")
self._value = device_instance.battery_level
self._unit = PERCENTAGE
class DevoloConsumptionEntity(DevoloMultiLevelDeviceEntity):
"""Representation of a consumption entity within devolo Home Control."""
def __init__(self, homecontrol, device_instance, element_uid, consumption):
"""Initialize a devolo consumption sensor."""
super().__init__(
homecontrol=homecontrol,
device_instance=device_instance,
element_uid=element_uid,
)
self._sensor_type = consumption
self._device_class = DEVICE_CLASS_MAPPING.get(consumption)
self._value = getattr(
device_instance.consumption_property[element_uid], consumption
)
self._unit = getattr(
device_instance.consumption_property[element_uid], f"{consumption}_unit"
)
self._name += f" {consumption}"
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._unique_id}_{self._sensor_type}"
def _sync(self, message):
"""Update the consumption sensor state."""
if message[0] == self._unique_id:
self._value = getattr(
self._device_instance.consumption_property[self._unique_id],
self._sensor_type,
)
else:
self._generic_message(message)
self.schedule_update_ha_state()
|
import os
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from memory_cgroup import MemoryCgroupCollector
dirname = os.path.dirname(__file__)
fixtures_path = os.path.join(dirname, 'fixtures/')
fixtures = []
for root, dirnames, filenames in os.walk(fixtures_path):
fixtures.append([root, dirnames, filenames])
class TestMemoryCgroupCollector(CollectorTestCase):
def test_import(self):
self.assertTrue(MemoryCgroupCollector)
@patch('__builtin__.open')
@patch('os.walk', Mock(return_value=iter(fixtures)))
@patch.object(Collector, 'publish')
def test_should_open_all_memory_stat(self, publish_mock, open_mock):
config = get_collector_config('MemoryCgroupCollector', {
'interval': 10,
'byte_unit': 'megabyte'
})
self.collector = MemoryCgroupCollector(config, None)
open_mock.side_effect = lambda x: StringIO('')
self.collector.collect()
open_mock.assert_any_call(
fixtures_path + 'lxc/testcontainer/memory.stat')
open_mock.assert_any_call(fixtures_path + 'lxc/memory.stat')
open_mock.assert_any_call(fixtures_path + 'memory.stat')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
config = get_collector_config('MemoryCgroupCollector', {
'interval': 10,
'byte_unit': 'megabyte',
'memory_path': fixtures_path
})
self.collector = MemoryCgroupCollector(config, None)
self.collector.collect()
self.assertPublishedMany(publish_mock, {
'lxc.testcontainer.cache': 1,
'lxc.testcontainer.rss': 1,
'lxc.testcontainer.swap': 1,
'lxc.cache': 1,
'lxc.rss': 1,
'lxc.swap': 1,
'system.cache': 1,
'system.rss': 1,
'system.swap': 1,
'lxc.testcontainer.total_cache': 1,
'lxc.testcontainer.total_rss': 1,
'lxc.testcontainer.total_swap': 1,
'lxc.total_cache': 1,
'lxc.total_rss': 1,
'lxc.total_swap': 1,
'system.total_cache': 1,
'system.total_rss': 1,
'system.total_swap': 1,
})
@patch.object(Collector, 'publish')
def test_should_not_include_filtered_metrics(self, publish_mock):
config = get_collector_config('MemoryCgroupCollector', {
'interval': 10,
'byte_unit': 'megabyte',
'memory_path': fixtures_path,
'skip': 'test\w+ner',
})
self.collector = MemoryCgroupCollector(config, None)
self.collector.collect()
should_be_published = {
'lxc.cache': 1,
'lxc.rss': 1,
'lxc.swap': 1,
'system.cache': 1,
'system.rss': 1,
'system.swap': 1,
'lxc.total_cache': 1,
'lxc.total_rss': 1,
'lxc.total_swap': 1,
'system.total_cache': 1,
'system.total_rss': 1,
'system.total_swap': 1,
}
should_not_be_published = {
'lxc.testcontainer.cache': 1,
'lxc.testcontainer.rss': 1,
'lxc.testcontainer.swap': 1,
'lxc.testcontainer.total_cache': 1,
'lxc.testcontainer.total_rss': 1,
'lxc.testcontainer.total_swap': 1,
}
[self.assertPublished(publish_mock, k, v)
for k, v in should_be_published.iteritems()]
[self.assertUnpublished(publish_mock, k, v)
for k, v in should_not_be_published.iteritems()]
if __name__ == "__main__":
unittest.main()
|
import asyncio
from homeassistant import data_entry_flow
from homeassistant.auth import auth_manager_from_config, models as auth_models
from homeassistant.auth.mfa_modules import auth_mfa_module_from_config
from homeassistant.components.notify import NOTIFY_SERVICE_SCHEMA
from tests.async_mock import patch
from tests.common import MockUser, async_mock_service
MOCK_CODE = "123456"
MOCK_CODE_2 = "654321"
async def test_validating_mfa(hass):
"""Test validating mfa code."""
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
await notify_auth_module.async_setup_user("test-user", {"notify_service": "dummy"})
with patch("pyotp.HOTP.verify", return_value=True):
assert await notify_auth_module.async_validate("test-user", {"code": MOCK_CODE})
async def test_validating_mfa_invalid_code(hass):
"""Test validating an invalid mfa code."""
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
await notify_auth_module.async_setup_user("test-user", {"notify_service": "dummy"})
with patch("pyotp.HOTP.verify", return_value=False):
assert (
await notify_auth_module.async_validate("test-user", {"code": MOCK_CODE})
is False
)
async def test_validating_mfa_invalid_user(hass):
"""Test validating an mfa code with invalid user."""
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
await notify_auth_module.async_setup_user("test-user", {"notify_service": "dummy"})
assert (
await notify_auth_module.async_validate("invalid-user", {"code": MOCK_CODE})
is False
)
async def test_validating_mfa_counter(hass):
"""Test counter will move only after generate code."""
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
await notify_auth_module.async_setup_user(
"test-user", {"counter": 0, "notify_service": "dummy"}
)
async_mock_service(hass, "notify", "dummy")
assert notify_auth_module._user_settings
notify_setting = list(notify_auth_module._user_settings.values())[0]
init_count = notify_setting.counter
assert init_count is not None
with patch("pyotp.HOTP.at", return_value=MOCK_CODE):
await notify_auth_module.async_initialize_login_mfa_step("test-user")
notify_setting = list(notify_auth_module._user_settings.values())[0]
after_generate_count = notify_setting.counter
assert after_generate_count != init_count
with patch("pyotp.HOTP.verify", return_value=True):
assert await notify_auth_module.async_validate("test-user", {"code": MOCK_CODE})
notify_setting = list(notify_auth_module._user_settings.values())[0]
assert after_generate_count == notify_setting.counter
with patch("pyotp.HOTP.verify", return_value=False):
assert (
await notify_auth_module.async_validate("test-user", {"code": MOCK_CODE})
is False
)
notify_setting = list(notify_auth_module._user_settings.values())[0]
assert after_generate_count == notify_setting.counter
async def test_setup_depose_user(hass):
"""Test set up and despose user."""
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
await notify_auth_module.async_setup_user("test-user", {})
assert len(notify_auth_module._user_settings) == 1
await notify_auth_module.async_setup_user("test-user", {})
assert len(notify_auth_module._user_settings) == 1
await notify_auth_module.async_depose_user("test-user")
assert len(notify_auth_module._user_settings) == 0
await notify_auth_module.async_setup_user("test-user2", {"secret": "secret-code"})
assert len(notify_auth_module._user_settings) == 1
async def test_login_flow_validates_mfa(hass):
"""Test login flow with mfa enabled."""
hass.auth = await auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[{"type": "notify"}],
)
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(hass.auth)
await hass.auth.async_link_user(
user,
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
),
)
notify_calls = async_mock_service(
hass, "notify", "test-notify", NOTIFY_SERVICE_SCHEMA
)
await hass.auth.async_enable_user_mfa(
user, "notify", {"notify_service": "test-notify"}
)
provider = hass.auth.auth_providers[0]
result = await hass.auth.login_flow.async_init((provider.type, provider.id))
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "incorrect-user", "password": "test-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_auth"
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "test-user", "password": "incorrect-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_auth"
with patch("pyotp.HOTP.at", return_value=MOCK_CODE):
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "mfa"
assert result["data_schema"].schema.get("code") == str
# wait service call finished
await hass.async_block_till_done()
assert len(notify_calls) == 1
notify_call = notify_calls[0]
assert notify_call.domain == "notify"
assert notify_call.service == "test-notify"
message = notify_call.data["message"]
message.hass = hass
assert MOCK_CODE in message.async_render()
with patch("pyotp.HOTP.verify", return_value=False):
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"code": "invalid-code"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "mfa"
assert result["errors"]["base"] == "invalid_code"
# wait service call finished
await hass.async_block_till_done()
# would not send new code, allow user retry
assert len(notify_calls) == 1
# retry twice
with patch("pyotp.HOTP.verify", return_value=False), patch(
"pyotp.HOTP.at", return_value=MOCK_CODE_2
):
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"code": "invalid-code"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "mfa"
assert result["errors"]["base"] == "invalid_code"
# after the 3rd failure, flow abort
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"code": "invalid-code"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "too_many_retry"
# wait service call finished
await hass.async_block_till_done()
# restart login
result = await hass.auth.login_flow.async_init((provider.type, provider.id))
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
with patch("pyotp.HOTP.at", return_value=MOCK_CODE):
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "mfa"
assert result["data_schema"].schema.get("code") == str
# wait service call finished
await hass.async_block_till_done()
assert len(notify_calls) == 2
notify_call = notify_calls[1]
assert notify_call.domain == "notify"
assert notify_call.service == "test-notify"
message = notify_call.data["message"]
message.hass = hass
assert MOCK_CODE in message.async_render()
with patch("pyotp.HOTP.verify", return_value=True):
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"code": MOCK_CODE}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"].id == "mock-user"
async def test_setup_user_notify_service(hass):
"""Test allow select notify service during mfa setup."""
notify_calls = async_mock_service(hass, "notify", "test1", NOTIFY_SERVICE_SCHEMA)
async_mock_service(hass, "notify", "test2", NOTIFY_SERVICE_SCHEMA)
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
services = notify_auth_module.aync_get_available_notify_services()
assert services == ["test1", "test2"]
flow = await notify_auth_module.async_setup_flow("test-user")
step = await flow.async_step_init()
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "init"
schema = step["data_schema"]
schema({"notify_service": "test2"})
with patch("pyotp.HOTP.at", return_value=MOCK_CODE):
step = await flow.async_step_init({"notify_service": "test1"})
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "setup"
# wait service call finished
await hass.async_block_till_done()
assert len(notify_calls) == 1
notify_call = notify_calls[0]
assert notify_call.domain == "notify"
assert notify_call.service == "test1"
message = notify_call.data["message"]
message.hass = hass
assert MOCK_CODE in message.async_render()
with patch("pyotp.HOTP.at", return_value=MOCK_CODE_2):
step = await flow.async_step_setup({"code": "invalid"})
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "setup"
assert step["errors"]["base"] == "invalid_code"
# wait service call finished
await hass.async_block_till_done()
assert len(notify_calls) == 2
notify_call = notify_calls[1]
assert notify_call.domain == "notify"
assert notify_call.service == "test1"
message = notify_call.data["message"]
message.hass = hass
assert MOCK_CODE_2 in message.async_render()
with patch("pyotp.HOTP.verify", return_value=True):
step = await flow.async_step_setup({"code": MOCK_CODE_2})
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_include_exclude_config(hass):
"""Test allow include exclude config."""
async_mock_service(hass, "notify", "include1", NOTIFY_SERVICE_SCHEMA)
async_mock_service(hass, "notify", "include2", NOTIFY_SERVICE_SCHEMA)
async_mock_service(hass, "notify", "exclude1", NOTIFY_SERVICE_SCHEMA)
async_mock_service(hass, "notify", "exclude2", NOTIFY_SERVICE_SCHEMA)
async_mock_service(hass, "other", "include3", NOTIFY_SERVICE_SCHEMA)
async_mock_service(hass, "other", "exclude3", NOTIFY_SERVICE_SCHEMA)
notify_auth_module = await auth_mfa_module_from_config(
hass, {"type": "notify", "exclude": ["exclude1", "exclude2", "exclude3"]}
)
services = notify_auth_module.aync_get_available_notify_services()
assert services == ["include1", "include2"]
notify_auth_module = await auth_mfa_module_from_config(
hass, {"type": "notify", "include": ["include1", "include2", "include3"]}
)
services = notify_auth_module.aync_get_available_notify_services()
assert services == ["include1", "include2"]
    # exclude has higher priority than include
notify_auth_module = await auth_mfa_module_from_config(
hass,
{
"type": "notify",
"include": ["include1", "include2", "include3"],
"exclude": ["exclude1", "exclude2", "include2"],
},
)
services = notify_auth_module.aync_get_available_notify_services()
assert services == ["include1"]
async def test_setup_user_no_notify_service(hass):
"""Test setup flow abort if there is no available notify service."""
async_mock_service(hass, "notify", "test1", NOTIFY_SERVICE_SCHEMA)
notify_auth_module = await auth_mfa_module_from_config(
hass, {"type": "notify", "exclude": "test1"}
)
services = notify_auth_module.aync_get_available_notify_services()
assert services == []
flow = await notify_auth_module.async_setup_flow("test-user")
step = await flow.async_step_init()
assert step["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert step["reason"] == "no_available_service"
async def test_not_raise_exception_when_service_not_exist(hass):
"""Test login flow will not raise exception when notify service error."""
hass.auth = await auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[{"type": "notify"}],
)
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(hass.auth)
await hass.auth.async_link_user(
user,
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
),
)
await hass.auth.async_enable_user_mfa(
user, "notify", {"notify_service": "invalid-notify"}
)
provider = hass.auth.auth_providers[0]
result = await hass.auth.login_flow.async_init((provider.type, provider.id))
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
with patch("pyotp.HOTP.at", return_value=MOCK_CODE):
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "unknown_error"
# wait service call finished
await hass.async_block_till_done()
async def test_race_condition_in_data_loading(hass):
"""Test race condition in the data loading."""
counter = 0
async def mock_load(_):
"""Mock homeassistant.helpers.storage.Store.async_load."""
nonlocal counter
counter += 1
await asyncio.sleep(0)
notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"})
with patch("homeassistant.helpers.storage.Store.async_load", new=mock_load):
task1 = notify_auth_module.async_validate("user", {"code": "value"})
task2 = notify_auth_module.async_validate("user", {"code": "value"})
results = await asyncio.gather(task1, task2, return_exceptions=True)
assert counter == 1
assert results[0] is False
assert results[1] is False
|
import logging
import numpy as np
from gensim import utils
from gensim.models.keyedvectors import KeyedVectors
logger = logging.getLogger(__name__)
class VarEmbed(KeyedVectors):
"""Python wrapper using `Varembed <https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings>`_.
Warnings
--------
    This is **only** a Python wrapper for `Varembed <https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings>`_;
    it allows loading pre-trained models only.
"""
def __init__(self):
super(VarEmbed, self).__init__(vector_size=0)
self.vocab_size = 0
@classmethod
def load_varembed_format(cls, vectors, morfessor_model=None):
"""Load the word vectors into matrix from the varembed output vector files.
Parameters
----------
vectors : dict
Pickle file containing the word vectors.
morfessor_model : str, optional
Path to the trained morfessor model.
Returns
-------
:class:`~gensim.models.wrappers.varembed.VarEmbed`
Ready to use instance.
"""
result = cls()
if vectors is None:
raise Exception("Please provide vectors binary to load varembed model")
d = utils.unpickle(vectors)
word_to_ix = d['word_to_ix']
morpho_to_ix = d['morpho_to_ix']
word_embeddings = d['word_embeddings']
morpho_embeddings = d['morpheme_embeddings']
result.load_word_embeddings(word_embeddings, word_to_ix)
if morfessor_model:
try:
import morfessor
morfessor_model = morfessor.MorfessorIO().read_binary_model_file(morfessor_model)
result.add_morphemes_to_embeddings(morfessor_model, morpho_embeddings, morpho_to_ix)
except ImportError:
# Morfessor Package not found.
logger.error('Could not import morfessor. Not using morpheme embeddings')
raise ImportError('Could not import morfessor.')
logger.info('Loaded varembed model vectors from %s', vectors)
return result
def load_word_embeddings(self, word_embeddings, word_to_ix):
"""Loads the word embeddings.
Parameters
----------
word_embeddings : numpy.ndarray
Matrix with word-embeddings.
word_to_ix : dict of (str, int)
Mapping word to index.
"""
logger.info("Loading the vocabulary")
self.key_to_index = {}
self.index_to_key = []
counts = {}
for word in word_to_ix:
counts[word] = counts.get(word, 0) + 1
self.vocab_size = len(counts)
self.vector_size = word_embeddings.shape[1]
self.vectors = np.zeros((self.vocab_size, self.vector_size))
self.index_to_key = [None] * self.vocab_size
logger.info("Corpus has %i words", len(self))
for word_id, word in enumerate(counts):
self.index_to_key[word_id] = word
self.key_to_index[word] = word_id
self.set_vecattr(word, 'count', counts[word])
self.vectors[word_id] = word_embeddings[word_to_ix[word]]
assert((len(self.key_to_index), self.vector_size) == self.vectors.shape)
logger.info("Loaded matrix of %d size and %d dimensions", self.vocab_size, self.vector_size)
def add_morphemes_to_embeddings(self, morfessor_model, morpho_embeddings, morpho_to_ix):
"""Include morpheme embeddings into vectors.
Parameters
----------
morfessor_model : :class:`morfessor.baseline.BaselineModel`
Morfessor model.
morpho_embeddings : dict
Pickle file containing morpheme embeddings.
morpho_to_ix : dict
Mapping morpheme to index.
"""
for word in self.key_to_index:
morpheme_embedding = np.array(
[
morpho_embeddings[morpho_to_ix.get(m, -1)]
for m in morfessor_model.viterbi_segment(word)[0]
]
).sum(axis=0)
self.vectors[self.get_index(word)] += morpheme_embedding
logger.info("Added morphemes to word vectors")
|
from flask import current_app
from lemur import database
from lemur.logs.models import Log
from lemur.users.models import User
from lemur.certificates.models import Certificate
def create(user, type, certificate=None):
"""
    Create a log entry for a given action.
:param user:
:param type:
:param certificate:
:return:
"""
current_app.logger.info(
"[lemur-audit] action: {0}, user: {1}, certificate: {2}.".format(
type, user.email, certificate.name
)
)
view = Log(user_id=user.id, log_type=type, certificate_id=certificate.id)
database.add(view)
database.commit()
def get_all():
"""
Retrieve all logs from the database.
:return:
"""
query = database.session_query(Log)
return database.find_all(query, Log, {}).all()
def render(args):
"""
Helper that paginates and filters data when requested
    through the REST API
:param args:
:return:
"""
query = database.session_query(Log)
filt = args.pop("filter")
if filt:
terms = filt.split(";")
if "certificate.name" in terms:
sub_query = database.session_query(Certificate.id).filter(
Certificate.name.ilike("%{0}%".format(terms[1]))
)
query = query.filter(Log.certificate_id.in_(sub_query))
elif "user.email" in terms:
sub_query = database.session_query(User.id).filter(
User.email.ilike("%{0}%".format(terms[1]))
)
query = query.filter(Log.user_id.in_(sub_query))
else:
query = database.filter(query, Log, terms)
return database.sort_and_page(query, Log, args)
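# --- Hedged illustration (added; not part of the original service) ---
# Example of the `args` dict render() expects: the filter is "<field>;<term>"; the
# remaining pagination/sorting keys are assumptions about what database.sort_and_page
# consumes, based on how it is called above.
_EXAMPLE_RENDER_ARGS = {
    "filter": "certificate.name;www.example.com",
    "page": 1,
    "count": 10,
    "sort_by": "id",
    "sort_dir": "desc",
}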
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import abstract_arch
from compare_gan.architectures.arch_ops import conv2d
from compare_gan.architectures.arch_ops import deconv2d
from compare_gan.architectures.arch_ops import linear
from compare_gan.architectures.arch_ops import lrelu
import numpy as np
import tensorflow as tf
def conv_out_size_same(size, stride):
return int(np.ceil(float(size) / float(stride)))
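# --- Hedged illustration (added; not part of the original architecture) ---
# conv_out_size_same() mirrors "SAME"-padding striding. For a 64x64 target image the
# generator below works back up through 4 -> 8 -> 16 -> 32 -> 64; computed top-down:
def _example_deconv_sizes(s_h=64, s_w=64, num_halvings=4):
    sizes = [(s_h, s_w)]
    for _ in range(num_halvings):
        s_h, s_w = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
        sizes.append((s_h, s_w))
    return sizes  # [(64, 64), (32, 32), (16, 16), (8, 8), (4, 4)]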
class Generator(abstract_arch.AbstractGenerator):
"""DCGAN generator.
Details are available at https://arxiv.org/abs/1511.06434. Notable changes
include BatchNorm in the generator, ReLu instead of LeakyReLu and ReLu in the
generator, except for output which uses tanh.
"""
def apply(self, z, y, is_training):
"""Build the generator network for the given inputs.
Args:
z: `Tensor` of shape [batch_size, z_dim] with latent code.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
      is_training: boolean, whether we are in train or eval mode.
Returns:
A tensor of size [batch_size] + self._image_shape with values in [0, 1].
"""
gf_dim = 64 # Dimension of filters in first convolutional layer.
bs = z.shape[0].value
s_h, s_w, colors = self._image_shape
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
    net = linear(z, gf_dim * 8 * s_h16 * s_w16, scope="g_fc1")
net = tf.reshape(net, [-1, s_h16, s_w16, gf_dim * 8])
net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn1")
net = tf.nn.relu(net)
net = deconv2d(net, [bs, s_h8, s_w8, gf_dim*4], 5, 5, 2, 2, name="g_dc1")
net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn2")
net = tf.nn.relu(net)
net = deconv2d(net, [bs, s_h4, s_w4, gf_dim*2], 5, 5, 2, 2, name="g_dc2")
net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn3")
net = tf.nn.relu(net)
net = deconv2d(net, [bs, s_h2, s_w2, gf_dim*1], 5, 5, 2, 2, name="g_dc3")
net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn4")
net = tf.nn.relu(net)
net = deconv2d(net, [bs, s_h, s_w, colors], 5, 5, 2, 2, name="g_dc4")
net = 0.5 * tf.nn.tanh(net) + 0.5
return net
class Discriminator(abstract_arch.AbstractDiscriminator):
"""DCGAN discriminator.
Details are available at https://arxiv.org/abs/1511.06434. Notable changes
include BatchNorm in the discriminator and LeakyReLU for all layers.
"""
def apply(self, x, y, is_training):
"""Apply the discriminator on a input.
Args:
x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: Boolean, whether the architecture should be constructed for
training or inference.
Returns:
Tuple of 3 Tensors, the final prediction of the discriminator, the logits
      before the final output activation function, and logits from the
      second-to-last layer.
"""
bs = x.shape[0].value
df_dim = 64 # Dimension of filters in the first convolutional layer.
net = lrelu(conv2d(x, df_dim, 5, 5, 2, 2, name="d_conv1",
use_sn=self._spectral_norm))
net = conv2d(net, df_dim * 2, 5, 5, 2, 2, name="d_conv2",
use_sn=self._spectral_norm)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn1")
net = lrelu(net)
net = conv2d(net, df_dim * 4, 5, 5, 2, 2, name="d_conv3",
use_sn=self._spectral_norm)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn2")
net = lrelu(net)
net = conv2d(net, df_dim * 8, 5, 5, 2, 2, name="d_conv4",
use_sn=self._spectral_norm)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn3")
net = lrelu(net)
out_logit = linear(
tf.reshape(net, [bs, -1]), 1, scope="d_fc4", use_sn=self._spectral_norm)
out = tf.nn.sigmoid(out_logit)
return out, out_logit, net
|
import diamond.collector
import subprocess
import os
from diamond.collector import str_to_bool
class PowerDNSCollector(diamond.collector.Collector):
_GAUGE_KEYS = [
'cache-bytes', 'cache-entries', 'chain-resends',
'concurrent-queries', 'dlg-only-drops', 'dont-outqueries',
'ipv6-outqueries', 'latency', 'max-mthread-stack', 'negcache-entries',
'nsspeeds-entries',
'packetcache-bytes', 'packetcache-entries', 'packetcache-size',
'qa-latency', 'throttle-entries']
def get_default_config_help(self):
config_help = super(PowerDNSCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the pdns_control binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PowerDNSCollector, self).get_default_config()
config.update({
'bin': '/usr/bin/pdns_control',
'path': 'powerdns',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
if not os.access(self.config['bin'], os.X_OK):
self.log.error("%s is not executable", self.config['bin'])
return False
command = [self.config['bin'], 'list']
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
data = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
for metric in data.split(','):
if not metric.strip():
continue
metric, value = metric.split('=')
try:
value = float(value)
            except ValueError:
pass
if metric not in self._GAUGE_KEYS:
value = self.derivative(metric, value)
if value < 0:
continue
self.publish(metric, value)
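# --- Hedged illustration (added; the sample output string is an assumption) ---
# `pdns_control list` prints comma separated "name=value" pairs; collect() above
# splits on ',' then '=', publishes gauges directly, and wraps counter-style metrics
# (anything not in _GAUGE_KEYS) in self.derivative(). A minimal parsing sketch:
def _example_parse_pdns_list(data="cache-entries=1024,questions=4242,"):
    metrics = {}
    for metric in data.split(','):
        if not metric.strip():
            continue
        name, value = metric.split('=')
        metrics[name] = float(value)
    return metrics  # {'cache-entries': 1024.0, 'questions': 4242.0}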
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
"""Builds Inception-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_a(inputs, scope=None, reuse=None):
"""Builds Reduction-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
"""
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionV4', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 299 x 299 x 3
net = slim.conv2d(inputs, 32, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 149 x 149 x 32
net = slim.conv2d(net, 32, [3, 3], padding='VALID',
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 147 x 147 x 64
with tf.variable_scope('Mixed_3a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_0a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
scope='Conv2d_0a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_3a', net): return net, end_points
# 73 x 73 x 160
with tf.variable_scope('Mixed_4a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_4a', net): return net, end_points
# 71 x 71 x 192
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_5a', net): return net, end_points
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
block_scope = 'Mixed_5' + chr(ord('b') + idx)
net = block_inception_a(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 'Mixed_6a')
if add_and_check_final('Mixed_6a', net): return net, end_points
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
block_scope = 'Mixed_6' + chr(ord('b') + idx)
net = block_inception_b(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 'Mixed_7a')
if add_and_check_final('Mixed_7a', net): return net, end_points
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
block_scope = 'Mixed_7' + chr(ord('b') + idx)
net = block_inception_c(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
create_aux_logits=True):
"""Creates the Inception V4 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether the network is being trained or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxiliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v4_base(inputs, scope=scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# Auxiliary Head logits
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
# 17 x 17 x 1024
aux_logits = end_points['Mixed_6h']
aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
aux_logits = slim.conv2d(aux_logits, 768,
aux_logits.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a')
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes,
activation_fn=None,
scope='Aux_logits')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a')
# 1 x 1 x 1536
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
# 1536
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_v4.default_image_size = 299
inception_v4_arg_scope = inception_utils.inception_arg_scope
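# A minimal usage sketch for the functions above (assumes TF 1.x slim and the
# inception_utils module imported earlier in this file; shapes and names are
# illustrative only):
#
#   images = tf.placeholder(tf.float32, [None, 299, 299, 3])
#   with slim.arg_scope(inception_v4_arg_scope()):
#       logits, end_points = inception_v4(images, num_classes=1001,
#                                         is_training=False)
#   predictions = end_points['Predictions']  # softmax over num_classes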
|
from datetime import timedelta
import functools
import logging
import socket
import threading
from pilight import pilight
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_PROTOCOL,
CONF_WHITELIST,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_SEND_DELAY = "send_delay"
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 5001
DEFAULT_SEND_DELAY = 0.0
DOMAIN = "pilight"
EVENT = "pilight_received"
# The Pilight code schema depends on the protocol, so only the protocol
# information is required here. Ensure that protocol is wrapped in a list,
# otherwise the pilight-daemon segfaults, see
# https://github.com/pilight/pilight/issues/296
RF_CODE_SCHEMA = vol.Schema(
{vol.Required(CONF_PROTOCOL): vol.All(cv.ensure_list, [cv.string])},
extra=vol.ALLOW_EXTRA,
)
SERVICE_NAME = "send"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_WHITELIST, default={}): {cv.string: [cv.string]},
vol.Optional(CONF_SEND_DELAY, default=DEFAULT_SEND_DELAY): vol.Coerce(
float
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
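# Illustrative configuration.yaml entry accepted by the schema above (the
# whitelist keys and values depend on the pilight protocols in use):
#
# pilight:
#   host: 127.0.0.1
#   port: 5001
#   send_delay: 0.4
#   whitelist:
#     protocol:
#       - intertechno_switch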
def setup(hass, config):
"""Set up the Pilight component."""
host = config[DOMAIN][CONF_HOST]
port = config[DOMAIN][CONF_PORT]
send_throttler = CallRateDelayThrottle(hass, config[DOMAIN][CONF_SEND_DELAY])
try:
pilight_client = pilight.Client(host=host, port=port)
except (OSError, socket.timeout) as err:
_LOGGER.error("Unable to connect to %s on port %s: %s", host, port, err)
return False
def start_pilight_client(_):
"""Run when Home Assistant starts."""
pilight_client.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_pilight_client)
def stop_pilight_client(_):
"""Run once when Home Assistant stops."""
pilight_client.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_pilight_client)
@send_throttler.limited
def send_code(call):
"""Send RF code to the pilight-daemon."""
# Change type to dict from mappingproxy since data has to be JSON
# serializable
message_data = dict(call.data)
try:
pilight_client.send_code(message_data)
except OSError:
_LOGGER.error("Pilight send failed for %s", str(message_data))
hass.services.register(DOMAIN, SERVICE_NAME, send_code, schema=RF_CODE_SCHEMA)
# Publish received codes on the HA event bus
# A whitelist of codes to be published in the event bus
whitelist = config[DOMAIN].get(CONF_WHITELIST)
def handle_received_code(data):
"""Run when RF codes are received."""
# Flatten the nested dict so that the event data can be filtered on
# directly in automation rules
data = dict(
{"protocol": data["protocol"], "uuid": data["uuid"]}, **data["message"]
)
# No whitelist defined, put data on event bus
if not whitelist:
hass.bus.fire(EVENT, data)
# Check if data matches the defined whitelist
elif all(str(data[key]) in whitelist[key] for key in whitelist):
hass.bus.fire(EVENT, data)
pilight_client.set_callback(handle_received_code)
return True
class CallRateDelayThrottle:
"""Helper class to provide service call rate throttling.
This class provides a decorator to decorate service methods that need
to be throttled to not exceed a certain call rate per second.
One instance can be used on multiple service methods to achieve
an overall throttling.
As this uses track_point_in_utc_time to schedule delayed executions
it should not block the mainloop.
"""
def __init__(self, hass, delay_seconds: float) -> None:
"""Initialize the delay handler."""
self._delay = timedelta(seconds=max(0.0, delay_seconds))
self._queue = []
self._active = False
self._lock = threading.Lock()
self._next_ts = dt_util.utcnow()
self._schedule = functools.partial(track_point_in_utc_time, hass)
def limited(self, method):
"""Decorate to delay calls on a certain method."""
@functools.wraps(method)
def decorated(*args, **kwargs):
"""Delay a call."""
if self._delay.total_seconds() == 0.0:
method(*args, **kwargs)
return
def action(event):
"""Wrap an action that gets scheduled."""
method(*args, **kwargs)
with self._lock:
self._next_ts = dt_util.utcnow() + self._delay
if not self._queue:
self._active = False
else:
next_action = self._queue.pop(0)
self._schedule(next_action, self._next_ts)
with self._lock:
if self._active:
self._queue.append(action)
else:
self._active = True
schedule_ts = max(dt_util.utcnow(), self._next_ts)
self._schedule(action, schedule_ts)
return decorated
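# A minimal usage sketch of the throttle above (names are illustrative):
#
#   throttler = CallRateDelayThrottle(hass, delay_seconds=0.5)
#
#   @throttler.limited
#   def send_code(call):
#       ...  # invocations are spaced at least 0.5 seconds apart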
|
from typing import Optional
from pychromecast.controllers.homeassistant import HomeAssistantController
import voluptuous as vol
from homeassistant import auth, config_entries, core
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv, dispatcher
from homeassistant.helpers.network import get_url
from .const import DOMAIN, SIGNAL_HASS_CAST_SHOW_VIEW
SERVICE_SHOW_VIEW = "show_lovelace_view"
ATTR_VIEW_PATH = "view_path"
ATTR_URL_PATH = "dashboard_path"
async def async_setup_ha_cast(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up Home Assistant Cast."""
user_id: Optional[str] = entry.data.get("user_id")
user: Optional[auth.models.User] = None
if user_id is not None:
user = await hass.auth.async_get_user(user_id)
if user is None:
user = await hass.auth.async_create_system_user(
"Home Assistant Cast", [auth.GROUP_ID_ADMIN]
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, "user_id": user.id}
)
if user.refresh_tokens:
refresh_token: auth.models.RefreshToken = list(user.refresh_tokens.values())[0]
else:
refresh_token = await hass.auth.async_create_refresh_token(user)
async def handle_show_view(call: core.ServiceCall):
"""Handle a Show View service call."""
hass_url = get_url(hass, require_ssl=True, prefer_external=True)
controller = HomeAssistantController(
# If you are developing Home Assistant Cast, uncomment and set to your dev app id.
# app_id="5FE44367",
hass_url=hass_url,
client_id=None,
refresh_token=refresh_token.token,
)
dispatcher.async_dispatcher_send(
hass,
SIGNAL_HASS_CAST_SHOW_VIEW,
controller,
call.data[ATTR_ENTITY_ID],
call.data[ATTR_VIEW_PATH],
call.data.get(ATTR_URL_PATH),
)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_SHOW_VIEW,
handle_show_view,
vol.Schema(
{
ATTR_ENTITY_ID: cv.entity_id,
ATTR_VIEW_PATH: str,
vol.Optional(ATTR_URL_PATH): str,
}
),
)
|
__docformat__ = "restructuredtext en"
from six.moves import range
from logilab.common.ureports import HTMLWriter
class DocbookWriter(HTMLWriter):
"""format layouts as HTML"""
def begin_format(self, layout):
"""begin to format a layout"""
super(HTMLWriter, self).begin_format(layout)
if self.snippet is None:
self.writeln('<?xml version="1.0" encoding="ISO-8859-1"?>')
self.writeln("""
<book xmlns:xi='http://www.w3.org/2001/XInclude'
lang='fr'>
""")
def end_format(self, layout):
"""finished to format a layout"""
if self.snippet is None:
self.writeln('</book>')
def visit_section(self, layout):
"""display a section (using <chapter> (level 0) or <section>)"""
if self.section == 0:
tag = "chapter"
else:
tag = "section"
self.section += 1
self.writeln(self._indent('<%s%s>' % (tag, self.handle_attrs(layout))))
self.format_children(layout)
self.writeln(self._indent('</%s>'% tag))
self.section -= 1
def visit_title(self, layout):
"""display a title using <title>"""
self.write(self._indent(' <title%s>' % self.handle_attrs(layout)))
self.format_children(layout)
self.writeln('</title>')
def visit_table(self, layout):
"""display a table as html"""
self.writeln(self._indent(' <table%s><title>%s</title>' \
% (self.handle_attrs(layout), layout.title)))
self.writeln(self._indent(' <tgroup cols="%s">'% layout.cols))
for i in range(layout.cols):
self.writeln(self._indent(' <colspec colname="c%s" colwidth="1*"/>' % i))
table_content = self.get_table_content(layout)
# write headers
if layout.cheaders:
self.writeln(self._indent(' <thead>'))
self._write_row(table_content[0])
self.writeln(self._indent(' </thead>'))
table_content = table_content[1:]
elif layout.rcheaders:
self.writeln(self._indent(' <thead>'))
self._write_row(table_content[-1])
self.writeln(self._indent(' </thead>'))
table_content = table_content[:-1]
# write body
self.writeln(self._indent(' <tbody>'))
for i in range(len(table_content)):
row = table_content[i]
self.writeln(self._indent(' <row>'))
for j in range(len(row)):
cell = row[j] or ' '
self.writeln(self._indent(' <entry>%s</entry>' % cell))
self.writeln(self._indent(' </row>'))
self.writeln(self._indent(' </tbody>'))
self.writeln(self._indent(' </tgroup>'))
self.writeln(self._indent(' </table>'))
def _write_row(self, row):
"""write content of row (using <row> <entry>)"""
self.writeln(' <row>')
for j in range(len(row)):
cell = row[j] or ' '
self.writeln(' <entry>%s</entry>' % cell)
self.writeln(self._indent(' </row>'))
def visit_list(self, layout):
"""display a list (using <itemizedlist>)"""
self.writeln(self._indent(' <itemizedlist%s>' % self.handle_attrs(layout)))
for row in list(self.compute_content(layout)):
self.writeln(' <listitem><para>%s</para></listitem>' % row)
self.writeln(self._indent(' </itemizedlist>'))
def visit_paragraph(self, layout):
"""display links (using <para>)"""
self.write(self._indent(' <para>'))
self.format_children(layout)
self.writeln('</para>')
def visit_span(self, layout):
"""display links (using <p>)"""
#TODO: translate in docbook
self.write('<literal %s>' % self.handle_attrs(layout))
self.format_children(layout)
self.write('</literal>')
def visit_link(self, layout):
"""display links (using <ulink>)"""
self.write('<ulink url="%s"%s>%s</ulink>' % (layout.url,
self.handle_attrs(layout),
layout.label))
def visit_verbatimtext(self, layout):
"""display verbatim text (using <programlisting>)"""
self.writeln(self._indent(' <programlisting>'))
self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
self.writeln(self._indent(' </programlisting>'))
def visit_text(self, layout):
"""add some text"""
self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
def _indent(self, string):
"""correctly indent string according to section"""
return ' ' * 2*(self.section) + string
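# A minimal usage sketch (assumes a ureports layout tree built elsewhere; the
# format() call follows the logilab.common.ureports base writer API, which is
# an assumption here):
#
#   import sys
#   writer = DocbookWriter()
#   writer.format(layout, stream=sys.stdout)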
|
import numpy as np
from scipy.special import ndtr
from scattertext.termranking import AbsoluteFrequencyRanker
from scattertext.termsignificance.TermSignificance import TermSignificance
def z_to_p_val(z_scores):
# return norm.sf(-z_scores) - 0.5 + 0.5
return ndtr(z_scores)
class LogOddsRatioUninformativeDirichletPrior(TermSignificance):
'''
Implements the log-odds-ratio with an uninformative dirichlet prior from
Monroe, B. L., Colaresi, M. P., & Quinn, K. M. (2008). Fightin' words: Lexical feature selection and evaluation for identifying the content of political conflict. Political Analysis, 16(4), 372–403.
'''
def __init__(self, alpha_w=0.001, ranker=AbsoluteFrequencyRanker):
'''
Parameters
----------
alpha_w : np.float
The constant prior.
'''
self.alpha_w = alpha_w
def get_name(self):
return "Log-Odds-Ratio w/ Uninformative Prior Z-Score"
def get_p_vals(self, X):
'''
Parameters
----------
X : np.array
Array of word counts, shape (N, 2) where N is the vocab size. X[:,0] is the
positive class, while X[:,1] is the negative class.
Returns
-------
np.array of p-values
'''
# Eqs 19-22
return z_to_p_val(self.get_zeta_i_j(X))
def get_p_vals_given_separate_counts(self, y_i, y_j):
'''
Parameters
----------
y_i, np.array(int)
Arrays of word counts of words occurring in positive class
y_j, np.array(int)
Arrays of word counts of words occurring in negative class
Returns
-------
np.array of p-values
'''
return z_to_p_val(self.get_zeta_i_j_given_separate_counts(y_i, y_j))
def get_zeta_i_j_given_separate_counts(self, y_i, y_j):
'''
Parameters
----------
y_i, np.array(int)
Arrays of word counts of words occurring in positive class
y_j, np.array(int)
Arrays of word counts of words occurring in negative class
Returns
-------
np.array of z-scores
'''
yp_i = y_i + self.alpha_w
yp_j = y_j + self.alpha_w
np_i = np.sum(yp_i)
np_j = np.sum(yp_j)
delta_i_j = np.log(yp_i / (np_i - yp_i)) - np.log(yp_j / (np_j - yp_j))
var_delta_i_j = (1. / (yp_i)
+ 1. / (np_i - yp_i)
+ 1. / (yp_j)
+ 1. / (np_j - yp_j))
zeta_i_j = delta_i_j / np.sqrt(var_delta_i_j)
return zeta_i_j
def get_zeta_i_j(self, X):
'''
Parameters
----------
X : np.array
Array of word counts, shape (N, 2) where N is the vocab size. X[:,0] is the
positive class, while X[:,1] is the negative class.
Returns
-------
np.array of z-scores
'''
y_i, y_j = X.T[0], X.T[1]
return self.get_zeta_i_j_given_separate_counts(y_i, y_j)
def get_default_score(self):
return 0
def get_p_values_from_counts(self, y_i, y_j):
return ndtr(self.get_zeta_i_j_given_separate_counts(y_i, y_j))
def get_scores(self, y_i, y_j):
'''
Same function as get_zeta_i_j_given_separate_counts
Parameters
----------
y_i, np.array(int)
Arrays of word counts of words occurring in positive class
y_j, np.array(int)
Arrays of word counts of words occurring in negative class
Returns
-------
np.array of z-scores
'''
z_scores = self.get_zeta_i_j_given_separate_counts(y_i, y_j)
# scaled_scores = scale_neg_1_to_1_with_zero_mean_abs_max(z_scores)
return z_scores
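# Tiny worked example of the scoring above; the counts are illustrative only.
if __name__ == '__main__':
    example_prior = LogOddsRatioUninformativeDirichletPrior(alpha_w=0.01)
    example_y_i = np.array([10, 2, 5])  # word counts in the positive class
    example_y_j = np.array([1, 8, 5])   # word counts in the negative class
    # z-scores of the smoothed log-odds-ratios (Monroe et al. 2008)
    print(example_prior.get_zeta_i_j_given_separate_counts(example_y_i, example_y_j))
    # the same scores mapped to p-values through the normal CDF
    print(example_prior.get_p_vals(np.array([example_y_i, example_y_j]).T))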
|
import subprocess
import diamond.collector
from diamond.collector import str_to_bool
class PostqueueCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(PostqueueCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the postqueue binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PostqueueCollector, self).get_default_config()
config.update({
'path': 'postqueue',
'bin': '/usr/bin/postqueue',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def get_postqueue_output(self):
try:
command = [self.config['bin'], '-p']
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
except OSError:
return ""
def collect(self):
output = self.get_postqueue_output()
try:
# The last line of `postqueue -p` output normally looks like
# "-- 4 Kbytes in 3 Requests."; grab the request count from it.
postqueue_count = int(output.strip().split("\n")[-1].split()[-2])
except (IndexError, ValueError, TypeError):
postqueue_count = 0
self.publish('count', postqueue_count)
|
from datetime import timedelta
import logging
from asmog import AmpioSmog
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.const import CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Ampio"
CONF_STATION_ID = "station_id"
SCAN_INTERVAL = timedelta(minutes=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_STATION_ID): cv.string, vol.Optional(CONF_NAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Ampio Smog air quality platform."""
name = config.get(CONF_NAME)
station_id = config[CONF_STATION_ID]
session = async_get_clientsession(hass)
api = AmpioSmogMapData(AmpioSmog(station_id, hass.loop, session))
await api.async_update()
if not api.api.data:
_LOGGER.error("Station %s is not available", station_id)
return
async_add_entities([AmpioSmogQuality(api, station_id, name)], True)
class AmpioSmogQuality(AirQualityEntity):
"""Implementation of an Ampio Smog air quality entity."""
def __init__(self, api, station_id, name):
"""Initialize the air quality entity."""
self._ampio = api
self._station_id = station_id
self._name = name or api.api.name
@property
def name(self):
"""Return the name of the air quality entity."""
return self._name
@property
def unique_id(self):
"""Return unique_name."""
return f"ampio_smog_{self._station_id}"
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._ampio.api.pm2_5
@property
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self._ampio.api.pm10
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
async def async_update(self):
"""Get the latest data from the AmpioMap API."""
await self._ampio.async_update()
class AmpioSmogMapData:
"""Get the latest data and update the states."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
@Throttle(SCAN_INTERVAL)
async def async_update(self):
"""Get the latest data from AmpioMap."""
await self.api.get_data()
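# Illustrative configuration.yaml entry for this platform (the platform name
# and station_id value are assumptions for the sake of the example):
#
# air_quality:
#   - platform: ampio
#     station_id: "AM10001"
#     name: Ampio Smog Sensor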
|
from __future__ import unicode_literals
import re
from xml.etree.ElementTree import SubElement
from markdown.blockprocessors import BlockProcessor
from markdown.extensions import Extension
from markdown.extensions.codehilite import CodeHilite
from markdown.preprocessors import Preprocessor
from markdown.util import HTML_PLACEHOLDER_RE
try:
from pymdownx.highlight import Highlight
except ImportError:
Highlight = None
POSMAP_MARKER_RE = re.compile(r'__posmapmarker__\d+\n\n')
class PosMapExtension(Extension):
""" Position Map Extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Insert the PosMapExtension blockprocessor before any other
extensions to make sure our own markers, inserted by the
preprocessor, are removed before any other extensions get confused
by them.
"""
md.preprocessors.register(PosMapMarkPreprocessor(md), 'posmap_mark', 50)
md.preprocessors.register(PosMapCleanPreprocessor(md), 'posmap_clean', 5)
md.parser.blockprocessors.register(PosMapBlockProcessor(md.parser), 'posmap', 150)
# Monkey patch CodeHilite constructor to remove the posmap markers from
# text before highlighting it
orig_codehilite_init = CodeHilite.__init__
def new_codehilite_init(self, src=None, *args, **kwargs):
src = POSMAP_MARKER_RE.sub('', src)
orig_codehilite_init(self, src=src, *args, **kwargs)
CodeHilite.__init__ = new_codehilite_init
# Same for PyMdown Extensions if it is available
if Highlight is not None:
orig_highlight_highlight = Highlight.highlight
def new_highlight_highlight(self, src, *args, **kwargs):
src = POSMAP_MARKER_RE.sub('', src)
return orig_highlight_highlight(self, src, *args, **kwargs)
Highlight.highlight = new_highlight_highlight
class PosMapMarkPreprocessor(Preprocessor):
""" PosMapMarkPreprocessor - insert __posmapmarker__linenr entries at each empty line """
def run(self, lines):
new_text = []
for i, line in enumerate(lines):
new_text.append(line)
if line == '':
new_text.append('__posmapmarker__%d' % i)
new_text.append('')
return new_text
class PosMapCleanPreprocessor(Preprocessor):
""" PosMapCleanPreprocessor - remove __posmapmarker__linenr entries that
accidentally ended up in the htmlStash. This could have happened
because they were inside html tags or a fenced code block.
"""
def run(self, lines):
for i in range(self.md.htmlStash.html_counter):
block = self.md.htmlStash.rawHtmlBlocks[i]
block = re.sub(POSMAP_MARKER_RE, '', block)
self.md.htmlStash.rawHtmlBlocks[i] = block
return lines
class PosMapBlockProcessor(BlockProcessor):
""" PosMapBlockProcessor - remove each marker and add a data-posmap
attribute to the previous HTML element
"""
def test(self, parent, block):
return block.startswith('__posmapmarker__')
def run(self, parent, blocks):
block = blocks.pop(0)
line_nr = block.split('__')[2]
last_child = self.lastChild(parent)
if last_child is not None:
# Avoid setting the attribute on HTML placeholders, because it
# would interfere with later replacement with literal HTML
# fragments. In this case just add an empty <p> with the attribute.
if last_child.text and re.match(HTML_PLACEHOLDER_RE, last_child.text):
last_child = SubElement(parent, 'p')
last_child.set('data-posmap', line_nr)
def makeExtension(*args, **kwargs):
return PosMapExtension(*args, **kwargs)
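# Small self-contained demonstration of the extension; the input text is
# illustrative only.
if __name__ == '__main__':
    import markdown

    example_source = "First paragraph.\n\nSecond paragraph.\n"
    html = markdown.markdown(example_source, extensions=[PosMapExtension()])
    # Block-level elements in the output carry data-posmap="<line number>"
    # attributes pointing back at the source lines.
    print(html)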
|
import argparse
_stash = globals()["_stash"]
def logout(n):
"""
Quit StaSh
:param n: exitcode for the shell (not implemented)
:type n: int
"""
import threading
t = threading.Thread(target=_stash.close, name="close thread")
t.daemon = True
t.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Quits a shell")
parser.add_argument("n", nargs="?", default=0, type=int, help="exit the shell with this code. Not implemented.")
ns = parser.parse_args()
logout(ns.n)
|
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from .common import setup_platform
from tests.async_mock import patch
DEVICE_ID = "light.living_room_lamp"
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, LIGHT_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get(DEVICE_ID)
assert entry.unique_id == "741385f4388b2637df4c6b398fe50581"
async def test_attributes(hass):
"""Test the light attributes are correct."""
await setup_platform(hass, LIGHT_DOMAIN)
state = hass.states.get(DEVICE_ID)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BRIGHTNESS) == 204
assert state.attributes.get(ATTR_RGB_COLOR) == (0, 63, 255)
assert state.attributes.get(ATTR_COLOR_TEMP) == 280
assert state.attributes.get(ATTR_DEVICE_ID) == "ZB:db5b1a"
assert not state.attributes.get("battery_low")
assert not state.attributes.get("no_response")
assert state.attributes.get("device_type") == "RGB Dimmer"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Living Room Lamp"
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 19
async def test_switch_off(hass):
"""Test the light can be turned off."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.switch_off") as mock_switch_off:
assert await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_off.assert_called_once()
async def test_switch_on(hass):
"""Test the light can be turned on."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.switch_on") as mock_switch_on:
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_on.assert_called_once()
async def test_set_brightness(hass):
"""Test the brightness can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_level") as mock_set_level:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "brightness": 100},
blocking=True,
)
await hass.async_block_till_done()
# Brightness is converted in abode.light.AbodeLight.turn_on
mock_set_level.assert_called_once_with(39)
async def test_set_color(hass):
"""Test the color can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_color") as mock_set_color:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "hs_color": [240, 100]},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_once_with((240.0, 100.0))
async def test_set_color_temp(hass):
"""Test the color temp can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_color_temp") as mock_set_color_temp:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "color_temp": 309},
blocking=True,
)
await hass.async_block_till_done()
# Color temp is converted in abode.light.AbodeLight.turn_on
mock_set_color_temp.assert_called_once_with(3236)
|
import unittest
import tempfile
import sys
from os.path import basename, dirname, splitext
from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtGui import QColor, QFont
from PyQt5.QtWidgets import QApplication
from ReText import readListFromSettings, writeListToSettings, \
readFromSettings, writeToSettings
from ReText.highlighter import colorScheme, updateColorScheme
# For this particular test, QCoreApplication is enough. However, we should
# only have one QCoreApplication instance for all tests in a process. As
# other tests need QApplication, we should not create a bare QCoreApplication
# here. Also, keep a reference so it is not garbage collected.
QApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
app = QApplication.instance() or QApplication(sys.argv)
class TestSettings(unittest.TestCase):
def setUp(self):
self.tempFile = tempfile.NamedTemporaryFile(prefix='retext-', suffix='.ini')
baseName = splitext(basename(self.tempFile.name))[0]
QSettings.setPath(QSettings.IniFormat, QSettings.UserScope,
dirname(self.tempFile.name))
self.settings = QSettings(QSettings.IniFormat,
QSettings.UserScope, baseName)
def tearDown(self):
del self.settings # this should be deleted before tempFile
def test_storingLists(self):
data = (
['1', '2', '3', 'test'],
[],
['1'],
['true'],
['foo, bar', 'foo, bar']
)
for l in data:
writeListToSettings('testList', l, self.settings)
lnew = readListFromSettings('testList', self.settings)
self.assertListEqual(lnew, l)
def test_storingBooleans(self):
writeToSettings('testBool', 1, None, self.settings)
self.assertTrue(readFromSettings('testBool', bool, self.settings))
writeToSettings('testBool', 'false', None, self.settings)
self.assertFalse(readFromSettings('testBool', bool, self.settings))
writeToSettings('testBool', 0, None, self.settings)
self.assertFalse(readFromSettings('testBool', bool, self.settings))
def test_storingFonts(self):
font = QFont()
font.setFamily('my family')
font.setPointSize(20)
writeToSettings('testFont', font, None, self.settings)
family = readFromSettings('testFont', str, self.settings)
size = readFromSettings('testFontSize', int, self.settings)
self.assertEqual(family, 'my family')
self.assertEqual(size, 20)
newFont = readFromSettings('testFont', QFont, self.settings, QFont())
self.assertEqual(newFont.family(), family)
self.assertEqual(newFont.pointSize(), size)
def test_storingColors(self):
self.settings.setValue('ColorScheme/htmlTags', 'green')
self.settings.setValue('ColorScheme/htmlSymbols', '#ff8800')
self.settings.setValue('ColorScheme/htmlComments', '#abc')
updateColorScheme(self.settings)
self.assertEqual(colorScheme['htmlTags'], QColor(0x00, 0x80, 0x00))
self.assertEqual(colorScheme['htmlSymbols'], QColor(0xff, 0x88, 0x00))
self.assertEqual(colorScheme['htmlStrings'], Qt.darkYellow) # default
self.assertEqual(colorScheme['htmlComments'], QColor(0xaa, 0xbb, 0xcc))
if __name__ == '__main__':
unittest.main()
|
import json
import traceback
from datetime import datetime
import pyramid
import pytz
from paasta_tools.api import settings
try:
import clog
except ImportError:
clog = None
DEFAULT_REQUEST_LOG_NAME = "tmp_paasta_api_requests"
def includeme(config):
if clog is not None:
config.add_tween(
"paasta_tools.api.tweens.request_logger.request_logger_tween_factory",
under=pyramid.tweens.INGRESS,
)
class request_logger_tween_factory:
"""Tween that logs information about requests"""
def __init__(self, handler, registry):
self.handler = handler
self.registry = registry
self.log_name = registry.settings.get(
"request_log_name", DEFAULT_REQUEST_LOG_NAME,
)
def _log(
self, timestamp=None, level="INFO", additional_fields=None,
):
if clog is not None:
# `settings` values are set by paasta_tools.api.api:setup_paasta_api
if not timestamp:
timestamp = datetime.now(pytz.utc)
dct = {
"human_timestamp": timestamp.strftime("%Y-%m-%dT%H:%M:%S%Z"),
"unix_timestamp": timestamp.timestamp(),
"hostname": settings.hostname,
"level": level,
"cluster": settings.cluster,
}
if additional_fields is not None:
dct.update(additional_fields)
line = json.dumps(dct, sort_keys=True)
clog.log_line(self.log_name, line)
def __call__(self, request):
start_time = datetime.now(pytz.utc) # start clock for response time
request_fields = {
"path": request.path,
"params": request.params.mixed(),
"client_addr": request.client_addr,
"http_method": request.method,
"headers": dict(request.headers), # incls user agent
}
response_fields = {}
log_level = "INFO"
try:
response = self.handler(request)
response_fields["status_code"] = response.status_int
if 300 <= response.status_int < 400:
log_level = "WARNING"
elif 400 <= response.status_int < 600:
log_level = "ERROR"
response_fields["body"] = response.body.decode("utf-8")
return response
except Exception as e:
log_level = "ERROR"
response_fields.update(
{
"status_code": 500,
"exc_type": type(e).__name__,
"exc_info": traceback.format_exc(),
}
)
raise
finally:
response_time_ms = (
datetime.now(pytz.utc) - start_time
).total_seconds() * 1000
response_fields["response_time_ms"] = response_time_ms
self._log(
timestamp=start_time,
level=log_level,
additional_fields={
"request": request_fields,
"response": response_fields,
},
)
|
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_POST_UPDATE
from weblate.addons.forms import DiscoveryForm
from weblate.trans.discovery import ComponentDiscovery
class DiscoveryAddon(BaseAddon):
events = (EVENT_POST_UPDATE,)
name = "weblate.discovery.discovery"
verbose = _("Component discovery")
description = _(
"Automatically adds or removes project components based on file changes "
"in the version control system."
)
settings_form = DiscoveryForm
multiple = True
icon = "magnify.svg"
repo_scope = True
trigger_update = True
def post_update(self, component, previous_head: str, skip_push: bool):
self.discovery.perform(
remove=self.instance.configuration["remove"], background=True
)
def get_settings_form(self, user, **kwargs):
"""Return configuration form for this addon."""
if "data" not in kwargs:
kwargs["data"] = self.instance.configuration
kwargs["data"]["confirm"] = False
return super().get_settings_form(user, **kwargs)
@cached_property
def discovery(self):
# Handle old settings which did not have this set
if "new_base_template" not in self.instance.configuration:
self.instance.configuration["new_base_template"] = ""
return ComponentDiscovery(
self.instance.component,
**ComponentDiscovery.extract_kwargs(self.instance.configuration)
)
|
from datetime import timedelta
import logging
from typing import Callable, List, Optional
import pyzerproc
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import HomeAssistantType
import homeassistant.util.color as color_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORT_ZERPROC = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
DISCOVERY_INTERVAL = timedelta(seconds=60)
PARALLEL_UPDATES = 0
def connect_lights(lights: List[pyzerproc.Light]) -> List[pyzerproc.Light]:
"""Attempt to connect to lights, and return the connected lights."""
connected = []
for light in lights:
try:
light.connect(auto_reconnect=True)
connected.append(light)
except pyzerproc.ZerprocException:
_LOGGER.debug("Unable to connect to '%s'", light.address, exc_info=True)
return connected
def discover_entities(hass: HomeAssistant) -> List[Entity]:
"""Attempt to discover new lights."""
lights = pyzerproc.discover()
# Filter out already discovered lights
new_lights = [
light for light in lights if light.address not in hass.data[DOMAIN]["addresses"]
]
entities = []
for light in connect_lights(new_lights):
# Double-check the light hasn't been added in another thread
if light.address not in hass.data[DOMAIN]["addresses"]:
hass.data[DOMAIN]["addresses"].add(light.address)
entities.append(ZerprocLight(light))
return entities
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Abode light devices."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
if "addresses" not in hass.data[DOMAIN]:
hass.data[DOMAIN]["addresses"] = set()
warned = False
async def discover(*args):
"""Wrap discovery to include params."""
nonlocal warned
try:
entities = await hass.async_add_executor_job(discover_entities, hass)
async_add_entities(entities, update_before_add=True)
warned = False
except pyzerproc.ZerprocException:
if warned is False:
_LOGGER.warning("Error discovering Zerproc lights", exc_info=True)
warned = True
# Initial discovery
hass.async_create_task(discover())
# Perform recurring discovery of new devices
async_track_time_interval(hass, discover, DISCOVERY_INTERVAL)
class ZerprocLight(LightEntity):
"""Representation of an Zerproc Light."""
def __init__(self, light):
"""Initialize a Zerproc light."""
self._light = light
self._name = None
self._is_on = None
self._hs_color = None
self._brightness = None
self._available = True
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
self.async_on_remove(
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, self.on_hass_shutdown
)
)
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
await self.hass.async_add_executor_job(self._light.disconnect)
def on_hass_shutdown(self, event):
"""Execute when Home Assistant is shutting down."""
self._light.disconnect()
@property
def name(self):
"""Return the display name of this light."""
return self._light.name
@property
def unique_id(self):
"""Return the ID of this light."""
return self._light.address
@property
def device_info(self):
"""Device info for this light."""
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Zerproc",
}
@property
def icon(self) -> Optional[str]:
"""Return the icon to use in the frontend."""
return "mdi:string-lights"
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_ZERPROC
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Return the hs color."""
return self._hs_color
@property
def is_on(self):
"""Return true if light is on."""
return self._is_on
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
if ATTR_BRIGHTNESS in kwargs or ATTR_HS_COLOR in kwargs:
default_hs = (0, 0) if self._hs_color is None else self._hs_color
hue_sat = kwargs.get(ATTR_HS_COLOR, default_hs)
default_brightness = 255 if self._brightness is None else self._brightness
brightness = kwargs.get(ATTR_BRIGHTNESS, default_brightness)
rgb = color_util.color_hsv_to_RGB(*hue_sat, brightness / 255 * 100)
self._light.set_color(*rgb)
else:
self._light.turn_on()
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._light.turn_off()
def update(self):
"""Fetch new state data for this light."""
try:
state = self._light.get_state()
except pyzerproc.ZerprocException:
if self._available:
_LOGGER.warning("Unable to connect to %s", self.entity_id)
self._available = False
return
if self._available is False:
_LOGGER.info("Reconnected to %s", self.entity_id)
self._available = True
self._is_on = state.is_on
hsv = color_util.color_RGB_to_hsv(*state.color)
self._hs_color = hsv[:2]
self._brightness = int(round((hsv[2] / 100) * 255))
|
import datetime as dt
from ipaddress import ip_address
import logging
from telegram.error import TimedOut
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
HTTP_BAD_REQUEST,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.network import get_url
from . import (
CONF_ALLOWED_CHAT_IDS,
CONF_TRUSTED_NETWORKS,
CONF_URL,
BaseTelegramBotEntity,
initialize_bot,
)
_LOGGER = logging.getLogger(__name__)
TELEGRAM_HANDLER_URL = "/api/telegram_webhooks"
REMOVE_HANDLER_URL = ""
async def async_setup_platform(hass, config):
"""Set up the Telegram webhooks platform."""
bot = initialize_bot(config)
current_status = await hass.async_add_executor_job(bot.getWebhookInfo)
base_url = config.get(
CONF_URL, get_url(hass, require_ssl=True, allow_internal=False)
)
# Some logging of Bot current status:
last_error_date = getattr(current_status, "last_error_date", None)
if (last_error_date is not None) and (isinstance(last_error_date, int)):
last_error_date = dt.datetime.fromtimestamp(last_error_date)
_LOGGER.info(
"telegram webhook last_error_date: %s. Status: %s",
last_error_date,
current_status,
)
else:
_LOGGER.debug("telegram webhook Status: %s", current_status)
handler_url = f"{base_url}{TELEGRAM_HANDLER_URL}"
if not handler_url.startswith("https"):
_LOGGER.error("Invalid telegram webhook %s must be https", handler_url)
return False
def _try_to_set_webhook():
retry_num = 0
while retry_num < 3:
try:
return bot.setWebhook(handler_url, timeout=5)
except TimedOut:
retry_num += 1
_LOGGER.warning("Timeout trying to set webhook (retry #%d)", retry_num)
if current_status and current_status["url"] != handler_url:
result = await hass.async_add_executor_job(_try_to_set_webhook)
if result:
_LOGGER.info("Set new telegram webhook %s", handler_url)
else:
_LOGGER.error("Set telegram webhook failed %s", handler_url)
return False
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, lambda event: bot.setWebhook(REMOVE_HANDLER_URL)
)
hass.http.register_view(
BotPushReceiver(
hass, config[CONF_ALLOWED_CHAT_IDS], config[CONF_TRUSTED_NETWORKS]
)
)
return True
class BotPushReceiver(HomeAssistantView, BaseTelegramBotEntity):
"""Handle pushes from Telegram."""
requires_auth = False
url = TELEGRAM_HANDLER_URL
name = "telegram_webhooks"
def __init__(self, hass, allowed_chat_ids, trusted_networks):
"""Initialize the class."""
BaseTelegramBotEntity.__init__(self, hass, allowed_chat_ids)
self.trusted_networks = trusted_networks
async def post(self, request):
"""Accept the POST from telegram."""
real_ip = ip_address(request.remote)
if not any(real_ip in net for net in self.trusted_networks):
_LOGGER.warning("Access denied from %s", real_ip)
return self.json_message("Access denied", HTTP_UNAUTHORIZED)
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
if not self.process_message(data):
return self.json_message("Invalid message", HTTP_BAD_REQUEST)
return None
|
from math import floor
import numpy as np
class Position(object):
"""
Handles the accounting of entering a new position in an
Asset along with subsequent modifications via additional
trades.
The approach taken here separates the long and short side
for accounting purposes. It also includes an unrealised and
realised running profit & loss of the position.
Parameters
----------
asset : `str`
The Asset symbol string.
current_price : `float`
The initial price of the Position.
current_dt : `pd.Timestamp`
The time at which the Position was created.
buy_quantity : `int`
The amount of the asset bought.
sell_quantity : `int`
The amount of the asset sold.
avg_bought : `float`
The initial price paid for buying assets.
avg_sold : `float`
The initial price paid for selling assets.
buy_commission : `float`
The commission spent on buying assets for this position.
sell_commission : `float`
The commission spent on selling assets for this position.
"""
def __init__(
self,
asset,
current_price,
current_dt,
buy_quantity,
sell_quantity,
avg_bought,
avg_sold,
buy_commission,
sell_commission
):
self.asset = asset
self.current_price = current_price
self.current_dt = current_dt
self.buy_quantity = buy_quantity
self.sell_quantity = sell_quantity
self.avg_bought = avg_bought
self.avg_sold = avg_sold
self.buy_commission = buy_commission
self.sell_commission = sell_commission
@classmethod
def open_from_transaction(cls, transaction):
"""
Constructs a new Position instance from the provided
Transaction.
Parameters
----------
transaction : `Transaction`
The transaction with which to open the Position.
Returns
-------
`Position`
The instantiated position.
"""
asset = transaction.asset
current_price = transaction.price
current_dt = transaction.dt
if transaction.quantity > 0:
buy_quantity = transaction.quantity
sell_quantity = 0
avg_bought = current_price
avg_sold = 0.0
buy_commission = transaction.commission
sell_commission = 0.0
else:
buy_quantity = 0
sell_quantity = -1.0 * transaction.quantity
avg_bought = 0.0
avg_sold = current_price
buy_commission = 0.0
sell_commission = transaction.commission
return cls(
asset,
current_price,
current_dt,
buy_quantity,
sell_quantity,
avg_bought,
avg_sold,
buy_commission,
sell_commission
)
def _check_set_dt(self, dt):
"""
Checks that the provided timestamp is valid and if so sets
the new current time of the Position.
Parameters
----------
dt : `pd.Timestamp`
The timestamp to be checked and potentially used as
the new current time.
"""
if dt is not None:
if (dt < self.current_dt):
raise ValueError(
'Supplied update time of "%s" is earlier than '
'the current time of "%s".' % (dt, self.current_dt)
)
else:
self.current_dt = dt
@property
def direction(self):
"""
Returns an integer value representing the direction.
Returns
-------
`int`
1 - Long, 0 - No direction, -1 - Short.
"""
if self.net_quantity == 0:
return 0
else:
return np.copysign(1, self.net_quantity)
@property
def market_value(self):
"""
Return the market value (respecting the direction) of the
Position based on the current price available to the Position.
Returns
-------
`float`
The current market value of the Position.
"""
return self.current_price * self.net_quantity
@property
def avg_price(self):
"""
The average price paid for all assets on the long or short side.
Returns
-------
`float`
The average price on either the long or short side.
"""
if self.net_quantity == 0:
return 0.0
elif self.net_quantity > 0:
return (self.avg_bought * self.buy_quantity + self.buy_commission) / self.buy_quantity
else:
return (self.avg_sold * self.sell_quantity - self.sell_commission) / self.sell_quantity
@property
def net_quantity(self):
"""
The difference in the quantity of assets bought and sold to date.
Returns
-------
`int`
The net quantity of assets.
"""
return self.buy_quantity - self.sell_quantity
@property
def total_bought(self):
"""
Calculates the total average cost of assets bought.
Returns
-------
`float`
The total average cost of assets bought.
"""
return self.avg_bought * self.buy_quantity
@property
def total_sold(self):
"""
Calculates the total average cost of assets sold.
Returns
-------
`float`
The total average cost of assets sold.
"""
return self.avg_sold * self.sell_quantity
@property
def net_total(self):
"""
Calculates the net total average cost of assets
bought and sold.
Returns
-------
`float`
The net total average cost of assets bought
and sold.
"""
return self.total_sold - self.total_bought
@property
def commission(self):
"""
Calculates the total commission from assets bought and sold.
Returns
-------
`float`
The total commission from assets bought and sold.
"""
return self.buy_commission + self.sell_commission
@property
def net_incl_commission(self):
"""
Calculates the net total average cost of assets bought
and sold including the commission.
Returns
-------
`float`
The net total average cost of assets bought and
sold including the commission.
"""
return self.net_total - self.commission
@property
def realised_pnl(self):
"""
Calculates the profit & loss (P&L) that has been 'realised' via
two opposing asset transactions in the Position to date.
Returns
-------
`float`
The calculated realised P&L.
"""
if self.direction == 1:
if self.sell_quantity == 0:
return 0.0
else:
return (
((self.avg_sold - self.avg_bought) * self.sell_quantity) -
((self.sell_quantity / self.buy_quantity) * self.buy_commission) -
self.sell_commission
)
elif self.direction == -1:
if self.buy_quantity == 0:
return 0.0
else:
return (
((self.avg_sold - self.avg_bought) * self.buy_quantity) -
((self.buy_quantity / self.sell_quantity) * self.sell_commission) -
self.buy_commission
)
else:
return self.net_incl_commission
@property
def unrealised_pnl(self):
"""
Calculates the profit & loss (P&L) that has yet to be 'realised'
in the remaining non-zero quantity of assets, due to the current
market price.
Returns
-------
`float`
The calculated unrealised P&L.
"""
return (self.current_price - self.avg_price) * self.net_quantity
@property
def total_pnl(self):
"""
Calculates the sum of the unrealised and realised profit & loss (P&L).
Returns
-------
`float`
The sum of the unrealised and realised P&L.
"""
return self.realised_pnl + self.unrealised_pnl
def update_current_price(self, market_price, dt=None):
"""
Updates the Position's awareness of the current market price
of the Asset, with an optional timestamp.
Parameters
----------
market_price : `float`
The current market price.
dt : `pd.Timestamp`, optional
The optional timestamp of the current market price.
"""
self._check_set_dt(dt)
if market_price <= 0.0:
raise ValueError(
'Market price "%s" of asset "%s" must be positive to '
'update the position.' % (market_price, self.asset)
)
else:
self.current_price = market_price
def _transact_buy(self, quantity, price, commission):
"""
Handle the accounting for creating a new long leg for the
Position.
Parameters
----------
quantity : `int`
The additional quantity of assets to purchase.
price : `float`
The price at which this leg was purchased.
commission : `float`
The commission paid to the broker for the purchase.
"""
self.avg_bought = ((self.avg_bought * self.buy_quantity) + (quantity * price)) / (self.buy_quantity + quantity)
self.buy_quantity += quantity
self.buy_commission += commission
def _transact_sell(self, quantity, price, commission):
"""
Handle the accounting for creating a new short leg for the
Position.
Parameters
----------
quantity : `int`
The additional quantity of assets to sell.
price : `float`
The price at which this leg was sold.
commission : `float`
The commission paid to the broker for the sale.
"""
self.avg_sold = ((self.avg_sold * self.sell_quantity) + (quantity * price)) / (self.sell_quantity + quantity)
self.sell_quantity += quantity
self.sell_commission += commission
def transact(self, transaction):
"""
Calculates the adjustments to the Position that occur
once new units in an Asset are bought and sold.
Parameters
----------
transaction : `Transaction`
The Transaction to update the Position with.
"""
if self.asset != transaction.asset:
raise ValueError(
'Failed to update Position with asset %s when '
'carrying out transaction in asset %s. ' % (
self.asset, transaction.asset
)
)
# Nothing to do if the transaction has no quantity
if int(floor(transaction.quantity)) == 0:
return
# Depending upon the direction of the transaction
# ensure the correct calculation is called
if transaction.quantity > 0:
self._transact_buy(
transaction.quantity,
transaction.price,
transaction.commission
)
else:
self._transact_sell(
-1.0 * transaction.quantity,
transaction.price,
transaction.commission
)
# Update the current trade information
self.update_current_price(transaction.price, transaction.dt)
self.current_dt = transaction.dt
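# Minimal worked example; a SimpleNamespace stands in for the project's
# Transaction type purely for illustration.
if __name__ == '__main__':
    from datetime import datetime
    from types import SimpleNamespace

    example_txn = SimpleNamespace(
        asset='EQ:ACME', quantity=100, price=150.0, commission=1.0,
        dt=datetime(2021, 1, 4, 14, 30)
    )
    position = Position.open_from_transaction(example_txn)
    position.update_current_price(152.0, datetime(2021, 1, 4, 15, 30))
    print(position.avg_price)       # (150.0 * 100 + 1.0) / 100 = 150.01
    print(position.unrealised_pnl)  # (152.0 - 150.01) * 100 ~= 199.0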
|
import asyncio
import itertools
from . import exceptions
async def get_files_for_tasks(task_list, file_list, max_workers):
no_files_found = True
async def process(task_fname):
task, fname = task_fname
try:
fobj = await task.file(fname)
except exceptions.SlaveDoesNotExist:
if task is None:
print(f"(Unknown Task):{fname} (Slave no longer exists)")
else:
print(f"{task['id']}:{task_fname} (Slave no longer exists)")
raise exceptions.SkipResult
if await fobj.exists():
return fobj
elements = itertools.chain(
*[[(task, fname) for fname in file_list] for task in task_list]
)
futures = [asyncio.ensure_future(process(element)) for element in elements]
if futures:
for result in asyncio.as_completed(futures):
try:
result = await result
if result:
no_files_found = False
yield result
except exceptions.SkipResult:
pass
if no_files_found:
raise exceptions.FileNotFoundForTaskException(
"None of the tasks in {} contain the files in list {}".format(
",".join([task["id"] for task in task_list]), ",".join(file_list)
)
)
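# A minimal usage sketch of the async generator above (illustrative; the task
# objects come from the surrounding Mesos tooling):
#
#   async def print_task_files(tasks):
#       async for fobj in get_files_for_tasks(tasks, ["stdout", "stderr"], 5):
#           print(fobj)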
|
import argparse
import getpass
from homeassistant.util.yaml import _SECRET_NAMESPACE
# mypy: allow-untyped-defs
REQUIREMENTS = ["credstash==1.15.0"]
def run(args):
"""Handle credstash script."""
parser = argparse.ArgumentParser(
description=(
"Modify Home Assistant secrets in credstash."
"Use the secrets in configuration files with: "
"!secret <name>"
)
)
parser.add_argument("--script", choices=["credstash"])
parser.add_argument(
"action",
choices=["get", "put", "del", "list"],
help="Get, put or delete a secret, or list all available secrets",
)
parser.add_argument("name", help="Name of the secret", nargs="?", default=None)
parser.add_argument(
"value", help="The value to save when putting a secret", nargs="?", default=None
)
# pylint: disable=import-error, no-member, import-outside-toplevel
import credstash
args = parser.parse_args(args)
table = _SECRET_NAMESPACE
try:
credstash.listSecrets(table=table)
except Exception: # pylint: disable=broad-except
credstash.createDdbTable(table=table)
if args.action == "list":
secrets = [i["name"] for i in credstash.listSecrets(table=table)]
deduped_secrets = sorted(set(secrets))
print("Saved secrets:")
for secret in deduped_secrets:
print(secret)
return 0
if args.name is None:
parser.print_help()
return 1
if args.action == "put":
if args.value:
the_secret = args.value
else:
the_secret = getpass.getpass(f"Please enter the secret for {args.name}: ")
current_version = credstash.getHighestVersion(args.name, table=table)
credstash.putSecret(
args.name, the_secret, version=int(current_version) + 1, table=table
)
print(f"Secret {args.name} put successfully")
elif args.action == "get":
the_secret = credstash.getSecret(args.name, table=table)
if the_secret is None:
print(f"Secret {args.name} not found")
else:
print(f"Secret {args.name}={the_secret}")
elif args.action == "del":
credstash.deleteSecrets(args.name, table=table)
print(f"Deleted secret {args.name}")
|
import unittest
from pandas import DataFrame
from numpy import NaN
from pgmpy.models import BayesianModel
from pgmpy.estimators import ParameterEstimator
class TestParameterEstimator(unittest.TestCase):
def setUp(self):
self.m1 = BayesianModel([("A", "C"), ("B", "C"), ("D", "B")])
self.d1 = DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.d2 = DataFrame(
data={
"A": [0, NaN, 1],
"B": [0, 1, 0],
"C": [1, 1, NaN],
"D": [NaN, "Y", NaN],
}
)
def test_state_count(self):
e = ParameterEstimator(self.m1, self.d1)
self.assertEqual(e.state_counts("A").values.tolist(), [[2], [1]])
self.assertEqual(
e.state_counts("C").values.tolist(),
[[0.0, 0.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]],
)
def test_missing_data(self):
e = ParameterEstimator(
self.m1, self.d2, state_names={"C": [0, 1]}, complete_samples_only=False
)
self.assertEqual(
e.state_counts("A", complete_samples_only=True).values.tolist(), [[0], [0]]
)
self.assertEqual(e.state_counts("A").values.tolist(), [[1], [1]])
self.assertEqual(
e.state_counts("C", complete_samples_only=True).values.tolist(),
[[0, 0, 0, 0], [0, 0, 0, 0]],
)
self.assertEqual(
e.state_counts("C").values.tolist(), [[0, 0, 0, 0], [1, 0, 0, 0]]
)
def tearDown(self):
del self.m1
del self.d1
|
from __future__ import print_function
###{standalone
#
#
# Lark Stand-alone Generator Tool
# ----------------------------------
# Generates a stand-alone LALR(1) parser with a standard lexer
#
# Git: https://github.com/erezsh/lark
# Author: Erez Shinan ([email protected])
#
#
# >>> LICENSE
#
# This tool and its generated code use a separate license from Lark,
# and are subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
#
# If you wish to purchase a commercial license for this tool and its
# generated code, you may contact me via email or otherwise.
#
# If MPL2 is incompatible with your free or open-source project,
# contact me and we'll work it out.
#
#
from io import open
###}
import sys
import token, tokenize
import os
from os import path
from collections import defaultdict
from functools import partial
from argparse import ArgumentParser, SUPPRESS
from warnings import warn
import lark
from lark import Lark
from lark.tools import lalr_argparser, build_lalr, make_warnings_comments
from lark.grammar import RuleOptions, Rule
from lark.lexer import TerminalDef
_dir = path.dirname(__file__)
_larkdir = path.join(_dir, path.pardir)
EXTRACT_STANDALONE_FILES = [
'tools/standalone.py',
'exceptions.py',
'utils.py',
'tree.py',
'visitors.py',
'indenter.py',
'grammar.py',
'lexer.py',
'common.py',
'parse_tree_builder.py',
'parsers/lalr_parser.py',
'parsers/lalr_analysis.py',
'parser_frontends.py',
'lark.py',
]
def extract_sections(lines):
section = None
text = []
sections = defaultdict(list)
for l in lines:
if l.startswith('###'):
if l[3] == '{':
section = l[4:].strip()
elif l[3] == '}':
sections[section] += text
section = None
text = []
else:
raise ValueError(l)
elif section:
text.append(l)
return {name:''.join(text) for name, text in sections.items()}
def strip_docstrings(line_gen):
""" Strip comments and docstrings from a file.
Based on code from: https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings
"""
res = []
prev_toktype = token.INDENT
last_lineno = -1
last_col = 0
tokgen = tokenize.generate_tokens(line_gen)
for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
if slineno > last_lineno:
last_col = 0
if scol > last_col:
res.append(" " * (scol - last_col))
if toktype == token.STRING and prev_toktype == token.INDENT:
# Docstring
res.append("#--")
elif toktype == tokenize.COMMENT:
# Comment
res.append("##\n")
else:
res.append(ttext)
prev_toktype = toktype
last_col = ecol
last_lineno = elineno
return ''.join(res)
def main(fobj, start, print=print):
warn('`lark.tools.standalone.main` is being redesigned. Use `gen_standalone`', DeprecationWarning)
lark_inst = Lark(fobj, parser="lalr", lexer="contextual", start=start)
gen_standalone(lark_inst, print)
def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False):
if output is None:
output = partial(print, file=out)
import pickle, zlib, base64
def compressed_output(obj):
s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
c = zlib.compress(s)
output(repr(base64.b64encode(c)))
def output_decompress(name):
output('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals())
output('# The file was automatically generated by Lark v%s' % lark.__version__)
output('__version__ = "%s"' % lark.__version__)
output()
for i, pyfile in enumerate(EXTRACT_STANDALONE_FILES):
with open(os.path.join(_larkdir, pyfile)) as f:
code = extract_sections(f)['standalone']
if i: # if not this file
code = strip_docstrings(partial(next, iter(code.splitlines(True))))
output(code)
data, m = lark_inst.memo_serialize([TerminalDef, Rule])
output('import pickle, zlib, base64')
if compress:
output('DATA = (')
compressed_output(data)
output(')')
output_decompress('DATA')
output('MEMO = (')
compressed_output(m)
output(')')
output_decompress('MEMO')
else:
output('DATA = (')
output(data)
output(')')
output('MEMO = (')
output(m)
output(')')
output('Shift = 0')
output('Reduce = 1')
output("def Lark_StandAlone(**kwargs):")
output(" return Lark._load_from_dict(DATA, MEMO, **kwargs)")
def main():
make_warnings_comments()
    parser = ArgumentParser(prog='python -m lark.tools.standalone', description="Lark Stand-alone Generator Tool",
parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options')
parser.add_argument("old_start", nargs='?', help=SUPPRESS)
parser.add_argument('-c', '--compress', action='store_true', default=0, help="Enable compression")
ns = parser.parse_args()
if ns.old_start is not None:
warn('The syntax `python -m lark.tools.standalone <grammar-file> <start>` is deprecated. Use the -s option')
ns.start.append(ns.old_start)
lark_inst, out = build_lalr(ns)
gen_standalone(lark_inst, out=out, compress=ns.compress)
if __name__ == '__main__':
main()
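# Example (not part of the module above): a minimal sketch of driving gen_standalone
# outside the CLI. The grammar string and output filename are hypothetical; it assumes
# gen_standalone is importable from this module as defined above.
from lark import Lark

example_grammar = r"""
start: "hello" "world"
%ignore " "
"""
example_parser = Lark(example_grammar, parser="lalr", lexer="contextual")
with open("hello_parser.py", "w") as example_out:
    gen_standalone(example_parser, out=example_out)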
|
import asyncio
from datetime import timedelta
import logging
import aiohttp
import async_timeout
from hass_nabucasa import cloud_api
from homeassistant.components.alexa import (
config as alexa_config,
entities as alexa_entities,
errors as alexa_errors,
state_report as alexa_state_report,
)
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES, HTTP_BAD_REQUEST
from homeassistant.core import callback, split_entity_id
from homeassistant.helpers import entity_registry
from homeassistant.helpers.event import async_call_later
from homeassistant.util.dt import utcnow
from .const import CONF_ENTITY_CONFIG, CONF_FILTER, PREF_SHOULD_EXPOSE, RequireRelink
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
# Time to wait after entity preferences have changed before syncing them to
# the cloud.
SYNC_DELAY = 1
class AlexaConfig(alexa_config.AbstractConfig):
"""Alexa Configuration."""
def __init__(self, hass, config, prefs: CloudPreferences, cloud):
"""Initialize the Alexa config."""
super().__init__(hass)
self._config = config
self._prefs = prefs
self._cloud = cloud
self._token = None
self._token_valid = None
self._cur_entity_prefs = prefs.alexa_entity_configs
self._cur_default_expose = prefs.alexa_default_expose
self._alexa_sync_unsub = None
self._endpoint = None
prefs.async_listen_updates(self._async_prefs_updated)
hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
@property
def enabled(self):
"""Return if Alexa is enabled."""
return self._prefs.alexa_enabled
@property
def supports_auth(self):
"""Return if config supports auth."""
return True
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self._prefs.alexa_report_state
@property
def endpoint(self):
"""Endpoint for report state."""
if self._endpoint is None:
raise ValueError("No endpoint available. Fetch access token first")
return self._endpoint
@property
def locale(self):
"""Return config locale."""
# Not clear how to determine locale atm.
return "en-US"
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
def should_expose(self, entity_id):
"""If an entity should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config[CONF_FILTER].empty_filter:
return self._config[CONF_FILTER](entity_id)
entity_configs = self._prefs.alexa_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
default_expose = self._prefs.alexa_default_expose
# Backwards compat
if default_expose is None:
return True
return split_entity_id(entity_id)[0] in default_expose
@callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
self._token_valid = None
async def async_get_access_token(self):
"""Get an access token."""
if self._token_valid is not None and self._token_valid > utcnow():
return self._token
resp = await cloud_api.async_alexa_access_token(self._cloud)
body = await resp.json()
if resp.status == HTTP_BAD_REQUEST:
if body["reason"] in ("RefreshTokenNotFound", "UnknownRegion"):
if self.should_report_state:
await self._prefs.async_update(alexa_report_state=False)
self.hass.components.persistent_notification.async_create(
f"There was an error reporting state to Alexa ({body['reason']}). "
"Please re-link your Alexa skill via the Alexa app to "
"continue using it.",
"Alexa state reporting disabled",
"cloud_alexa_report",
)
raise RequireRelink
raise alexa_errors.NoTokenAvailable
self._token = body["access_token"]
self._endpoint = body["event_endpoint"]
self._token_valid = utcnow() + timedelta(seconds=body["expires_in"])
return self._token
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if self.should_report_state != self.is_reporting_states:
if self.should_report_state:
await self.async_enable_proactive_mode()
else:
await self.async_disable_proactive_mode()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities()
return
# If user has filter in config.yaml, don't sync.
if not self._config[CONF_FILTER].empty_filter:
return
# If entity prefs are the same, don't sync.
if (
self._cur_entity_prefs is prefs.alexa_entity_configs
and self._cur_default_expose is prefs.alexa_default_expose
):
return
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = None
if self._cur_default_expose is not prefs.alexa_default_expose:
await self.async_sync_entities()
return
self._alexa_sync_unsub = async_call_later(
self.hass, SYNC_DELAY, self._sync_prefs
)
async def _sync_prefs(self, _now):
"""Sync the updated preferences to Alexa."""
self._alexa_sync_unsub = None
old_prefs = self._cur_entity_prefs
new_prefs = self._prefs.alexa_entity_configs
seen = set()
to_update = []
to_remove = []
for entity_id, info in old_prefs.items():
seen.add(entity_id)
old_expose = info.get(PREF_SHOULD_EXPOSE)
if entity_id in new_prefs:
new_expose = new_prefs[entity_id].get(PREF_SHOULD_EXPOSE)
else:
new_expose = None
if old_expose == new_expose:
continue
if new_expose:
to_update.append(entity_id)
else:
to_remove.append(entity_id)
# Now all the ones that are in new prefs but never were in old prefs
for entity_id, info in new_prefs.items():
if entity_id in seen:
continue
new_expose = info.get(PREF_SHOULD_EXPOSE)
if new_expose is None:
continue
# Only test if we should expose. It can never be a remove action,
# as it didn't exist in old prefs object.
if new_expose:
to_update.append(entity_id)
# We only set the prefs when update is successful, that way we will
# retry when next change comes in.
if await self._sync_helper(to_update, to_remove):
self._cur_entity_prefs = new_prefs
async def async_sync_entities(self):
"""Sync all entities to Alexa."""
# Remove any pending sync
if self._alexa_sync_unsub:
self._alexa_sync_unsub()
self._alexa_sync_unsub = None
to_update = []
to_remove = []
for entity in alexa_entities.async_get_entities(self.hass, self):
if self.should_expose(entity.entity_id):
to_update.append(entity.entity_id)
else:
to_remove.append(entity.entity_id)
return await self._sync_helper(to_update, to_remove)
async def _sync_helper(self, to_update, to_remove) -> bool:
"""Sync entities to Alexa.
Return boolean if it was successful.
"""
if not to_update and not to_remove:
return True
        # Make sure the access token is valid.
await self.async_get_access_token()
tasks = []
if to_update:
tasks.append(
alexa_state_report.async_send_add_or_update_message(
self.hass, self, to_update
)
)
if to_remove:
tasks.append(
alexa_state_report.async_send_delete_message(self.hass, self, to_remove)
)
try:
with async_timeout.timeout(10):
await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
return True
except asyncio.TimeoutError:
_LOGGER.warning("Timeout trying to sync entities to Alexa")
return False
except aiohttp.ClientError as err:
_LOGGER.warning("Error trying to sync entities to Alexa: %s", err)
return False
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
entity_id = event.data["entity_id"]
if not self.should_expose(entity_id):
return
action = event.data["action"]
to_update = []
to_remove = []
if action == "create":
to_update.append(entity_id)
elif action == "remove":
to_remove.append(entity_id)
elif action == "update" and bool(
set(event.data["changes"]) & entity_registry.ENTITY_DESCRIBING_ATTRIBUTES
):
to_update.append(entity_id)
if "old_entity_id" in event.data:
to_remove.append(event.data["old_entity_id"])
try:
await self._sync_helper(to_update, to_remove)
except alexa_errors.NoTokenAvailable:
pass
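# Example (not part of the module above): a self-contained sketch of the precedence
# implemented by AlexaConfig.should_expose — never-exposed entities first, then a
# configured filter, then the per-entity preference, then the default-exposed domains.
# The entity ids, preference dict and domain list below are illustrative only.
NEVER_EXPOSED_EXAMPLE = {"group.all_locks"}  # stand-in for CLOUD_NEVER_EXPOSED_ENTITIES

def example_should_expose(entity_id, entity_prefs, default_expose, config_filter=None):
    if entity_id in NEVER_EXPOSED_EXAMPLE:
        return False
    if config_filter is not None:
        # A filter configured in config.yaml wins over UI preferences.
        return config_filter(entity_id)
    pref = entity_prefs.get(entity_id, {}).get("should_expose")
    if pref is not None:
        return pref
    if default_expose is None:
        return True  # backwards compat: expose everything
    return entity_id.split(".")[0] in default_expose

print(example_should_expose("light.kitchen", {}, ["light", "switch"]))                       # True
print(example_should_expose("camera.porch", {"camera.porch": {"should_expose": True}}, []))  # True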
|
import pytest
from homeassistant.components.stream.const import (
ATTR_STREAMS,
CONF_LOOKBACK,
CONF_STREAM_SOURCE,
DOMAIN,
SERVICE_RECORD,
)
from homeassistant.const import CONF_FILENAME
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, MagicMock, patch
async def test_record_service_invalid_file(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
data = {CONF_STREAM_SOURCE: "rtsp://my.video", CONF_FILENAME: "/my/invalid/path"}
with pytest.raises(HomeAssistantError):
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
async def test_record_service_init_stream(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
data = {CONF_STREAM_SOURCE: "rtsp://my.video", CONF_FILENAME: "/my/invalid/path"}
with patch("homeassistant.components.stream.Stream") as stream_mock, patch.object(
hass.config, "is_allowed_path", return_value=True
):
# Setup stubs
stream_mock.return_value.outputs = {}
# Call Service
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
# Assert
assert stream_mock.called
async def test_record_service_existing_record_session(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
source = "rtsp://my.video"
data = {CONF_STREAM_SOURCE: source, CONF_FILENAME: "/my/invalid/path"}
# Setup stubs
stream_mock = MagicMock()
stream_mock.return_value.outputs = {"recorder": MagicMock()}
hass.data[DOMAIN][ATTR_STREAMS][source] = stream_mock
with patch.object(hass.config, "is_allowed_path", return_value=True), pytest.raises(
HomeAssistantError
):
# Call Service
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
async def test_record_service_lookback(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
data = {
CONF_STREAM_SOURCE: "rtsp://my.video",
CONF_FILENAME: "/my/invalid/path",
CONF_LOOKBACK: 4,
}
with patch("homeassistant.components.stream.Stream") as stream_mock, patch.object(
hass.config, "is_allowed_path", return_value=True
):
# Setup stubs
hls_mock = MagicMock()
hls_mock.target_duration = 2
hls_mock.recv = AsyncMock(return_value=None)
stream_mock.return_value.outputs = {"hls": hls_mock}
# Call Service
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
assert stream_mock.called
stream_mock.return_value.add_provider.assert_called_once_with("recorder")
assert hls_mock.recv.called
|
import logging
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
BENCHMARK_NAME = 'cluster_boot'
BENCHMARK_CONFIG = """
cluster_boot:
description: >
Create a cluster, record all times to boot.
Specify the cluster size with --num_vms.
vm_groups:
default:
vm_spec:
AWS:
machine_type: m5.large
zone: us-east-1
Azure:
machine_type: Standard_D2s_v3
zone: eastus
boot_disk_type: StandardSSD_LRS
GCP:
machine_type: n1-standard-2
zone: us-central1-a
boot_disk_type: pd-ssd
Kubernetes:
image: null
vm_count: null
flags:
    # We don't want boot time samples to be affected by retrying, so don't
# retry cluster_boot when rate limited.
retry_on_rate_limited: False
"""
flags.DEFINE_boolean(
'cluster_boot_time_reboot', False,
'Whether to reboot the VMs during the cluster boot benchmark to measure '
'reboot performance.')
flags.DEFINE_boolean(
'cluster_boot_test_port_listening', False,
    'Test the time it takes to successfully connect to the port that is '
    'used to run the remote command.')
FLAGS = flags.FLAGS
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(unused_benchmark_spec):
pass
def GetTimeToBoot(vms):
"""Creates Samples for the boot time of a list of VMs.
The boot time is the time difference from before the VM is created to when
the VM is responsive to SSH commands.
Args:
vms: List of BaseVirtualMachine subclasses.
Returns:
List of Samples containing the boot times and an overall cluster boot time.
"""
if not vms:
return []
min_create_start_time = min(vm.create_start_time for vm in vms)
max_create_delay_sec = 0
max_boot_time_sec = 0
max_port_listening_time_sec = 0
max_rdp_port_listening_time_sec = 0
samples = []
os_types = set()
for i, vm in enumerate(vms):
assert vm.bootable_time
assert vm.create_start_time
assert vm.bootable_time >= vm.create_start_time
os_types.add(vm.OS_TYPE)
create_delay_sec = vm.create_start_time - min_create_start_time
max_create_delay_sec = max(max_create_delay_sec, create_delay_sec)
metadata = {
'machine_instance': i,
'num_vms': len(vms),
'os_type': vm.OS_TYPE,
'create_delay_sec': '%0.1f' % create_delay_sec
}
boot_time_sec = vm.bootable_time - min_create_start_time
max_boot_time_sec = max(max_boot_time_sec, boot_time_sec)
samples.append(
sample.Sample('Boot Time', boot_time_sec, 'seconds', metadata))
if FLAGS.cluster_boot_test_port_listening:
assert vm.port_listening_time
assert vm.port_listening_time >= vm.create_start_time
port_listening_time_sec = vm.port_listening_time - min_create_start_time
max_port_listening_time_sec = max(max_port_listening_time_sec,
port_listening_time_sec)
samples.append(
sample.Sample('Port Listening Time', port_listening_time_sec,
'seconds', metadata))
# TODO(user): refactor so Windows specifics aren't in linux_benchmarks
if FLAGS.cluster_boot_test_rdp_port_listening:
assert vm.rdp_port_listening_time
assert vm.rdp_port_listening_time >= vm.create_start_time
rdp_port_listening_time_sec = (
vm.rdp_port_listening_time - min_create_start_time)
max_rdp_port_listening_time_sec = max(max_rdp_port_listening_time_sec,
rdp_port_listening_time_sec)
samples.append(
sample.Sample('RDP Port Listening Time', rdp_port_listening_time_sec,
'seconds', metadata))
# Add a total cluster boot sample as the maximum boot time.
metadata = {
'num_vms': len(vms),
'os_type': ','.join(sorted(os_types)),
'max_create_delay_sec': '%0.1f' % max_create_delay_sec
}
samples.append(
sample.Sample('Cluster Boot Time', max_boot_time_sec, 'seconds',
metadata))
if FLAGS.cluster_boot_test_port_listening:
samples.append(
sample.Sample('Cluster Port Listening Time',
max_port_listening_time_sec, 'seconds', metadata))
if FLAGS.cluster_boot_test_rdp_port_listening:
samples.append(
sample.Sample('Cluster RDP Port Listening Time',
max_rdp_port_listening_time_sec, 'seconds', metadata))
if max_create_delay_sec > 1:
logging.warning(
'The maximum delay between starting VM creations is %0.1fs.',
max_create_delay_sec)
return samples
def _MeasureReboot(vms):
"""Measures the time to reboot the cluster of VMs.
Args:
vms: List of BaseVirtualMachine subclasses.
Returns:
List of Samples containing the reboot times and an overall cluster reboot
time.
"""
samples = []
before_reboot_timestamp = time.time()
reboot_times = vm_util.RunThreaded(lambda vm: vm.Reboot(), vms)
cluster_reboot_time = time.time() - before_reboot_timestamp
os_types = set()
for i, vm in enumerate(vms):
metadata = {
'machine_instance': i,
'num_vms': len(vms),
'os_type': vm.OS_TYPE
}
os_types.add(vm.OS_TYPE)
samples.append(
sample.Sample('Reboot Time', reboot_times[i], 'seconds', metadata))
metadata = {'num_vms': len(vms), 'os_type': ','.join(sorted(os_types))}
samples.append(
sample.Sample('Cluster Reboot Time', cluster_reboot_time, 'seconds',
metadata))
return samples
def Run(benchmark_spec):
"""Measure the boot time for all VMs.
Args:
benchmark_spec: The benchmark specification.
Returns:
    A list of reboot samples when --cluster_boot_time_reboot is set; otherwise
    an empty list (all boot samples will be added later).
"""
samples = []
if FLAGS.cluster_boot_time_reboot:
samples.extend(_MeasureReboot(benchmark_spec.vms))
return samples
def Cleanup(unused_benchmark_spec):
pass
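# Example (not part of the module above): the arithmetic behind GetTimeToBoot, with
# hypothetical timestamps instead of real BaseVirtualMachine objects. Each VM's boot
# time is measured from the earliest create_start_time in the cluster, and the
# cluster boot time is the maximum of the per-VM values.
example_vms = [
    {'create_start_time': 100.0, 'bootable_time': 142.5},
    {'create_start_time': 101.2, 'bootable_time': 150.0},
]
min_create_start = min(vm['create_start_time'] for vm in example_vms)
boot_times_sec = [vm['bootable_time'] - min_create_start for vm in example_vms]
print(boot_times_sec)       # [42.5, 50.0] -> per-VM 'Boot Time' samples
print(max(boot_times_sec))  # 50.0 -> the 'Cluster Boot Time' sample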
|
import numpy as np
import six
import unittest
from chainercv.chainer_experimental.datasets.sliceable import SliceableDataset
from chainercv.utils import testing
class SampleDataset(SliceableDataset):
def __len__(self):
return 10
@property
def keys(self):
return ('item0', 'item1', 'item2')
def get_example_by_keys(self, i, key_indices):
return tuple(
'{:s}({:d})'.format(self.keys[key_index], i)
for key_index in key_indices)
@testing.parameterize(
{'iterable': tuple},
{'iterable': list},
{'iterable': np.array},
)
class TestSliceableDataset(unittest.TestCase):
def setUp(self):
self.dataset = SampleDataset()
def test_base(self):
self.assertEqual(
self.dataset[0], ('item0(0)', 'item1(0)', 'item2(0)'))
def test_slice_keys_single_name(self):
dataset = self.dataset.slice[:, 'item0']
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, 'item0')
self.assertEqual(dataset[1], 'item0(1)')
def test_slice_keys_single_index(self):
dataset = self.dataset.slice[:, 0]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, 'item0')
self.assertEqual(dataset[1], 'item0(1)')
def test_slice_keys_single_tuple_name(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
dataset = self.dataset.slice[:, self.iterable(('item1',))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, ('item1',))
self.assertEqual(dataset[2], ('item1(2)',))
def test_slice_keys_single_tuple_index(self):
dataset = self.dataset.slice[:, self.iterable((1,))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, ('item1',))
self.assertEqual(dataset[2], ('item1(2)',))
def test_slice_keys_multiple_name(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
dataset = self.dataset.slice[:, self.iterable(('item0', 'item2'))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, ('item0', 'item2'))
self.assertEqual(dataset[3], ('item0(3)', 'item2(3)'))
def test_slice_keys_multiple_index(self):
dataset = self.dataset.slice[:, self.iterable((0, 2))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, ('item0', 'item2'))
self.assertEqual(dataset[3], ('item0(3)', 'item2(3)'))
def test_slice_keys_multiple_bool(self):
dataset = self.dataset.slice[:, self.iterable((True, False, True))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, ('item0', 'item2'))
self.assertEqual(dataset[3], ('item0(3)', 'item2(3)'))
def test_slice_keys_multiple_mixed(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
dataset = self.dataset.slice[:, self.iterable(('item0', 2))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), len(self.dataset))
self.assertEqual(dataset.keys, ('item0', 'item2'))
self.assertEqual(dataset[3], ('item0(3)', 'item2(3)'))
def test_slice_keys_invalid_name(self):
with self.assertRaises(KeyError):
self.dataset.slice[:, 'invalid']
def test_slice_keys_invalid_index(self):
with self.assertRaises(IndexError):
self.dataset.slice[:, 3]
def test_slice_keys_invalid_bool(self):
with self.assertRaises(ValueError):
self.dataset.slice[:, (True, False)]
def test_slice_indices_slice(self):
dataset = self.dataset.slice[3:8:2]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), 3)
self.assertEqual(dataset.keys, self.dataset.keys)
self.assertEqual(
dataset[1], ('item0(5)', 'item1(5)', 'item2(5)'))
def test_slice_indices_list(self):
if self.iterable is tuple:
self.skipTest('tuple indices is not supported')
dataset = self.dataset.slice[self.iterable((2, 1, 5))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), 3)
self.assertEqual(dataset.keys, self.dataset.keys)
self.assertEqual(
dataset[0], ('item0(2)', 'item1(2)', 'item2(2)'))
def test_slice_indices_bool(self):
if self.iterable is tuple:
self.skipTest('tuple indices is not supported')
dataset = self.dataset.slice[self.iterable(
(False, True, False, False, True,
True, False, False, True, False))]
self.assertIsInstance(dataset, SliceableDataset)
self.assertEqual(len(dataset), 4)
self.assertEqual(dataset.keys, self.dataset.keys)
self.assertEqual(
dataset[1], ('item0(4)', 'item1(4)', 'item2(4)'))
def test_slice_indices_invalid_bool(self):
with self.assertRaises(ValueError):
self.dataset.slice[[False, True]]
def test_iter(self):
it = iter(self.dataset)
for i in six.moves.range(len(self.dataset)):
self.assertEqual(
next(it), (
'item0({:d})'.format(i),
'item1({:d})'.format(i),
'item2({:d})'.format(i),
))
with self.assertRaises(StopIteration):
next(it)
testing.run_module(__name__, __file__)
|
import logging
import os
import time
from flask import jsonify, Blueprint
from flask import request
from werkzeug.utils import secure_filename
from kalliope import Utils
from kalliope.core.ConfigurationManager import BrainLoader, SettingEditor
from kalliope.core.ConfigurationManager.ConfigurationChecker import KalliopeModuleNotFoundError, ConfigurationChecker, \
InvalidSynapeName, NoSynapeNeurons, NoSynapeSignals
from kalliope.core.Cortex import Cortex
from kalliope.core.Lifo.LifoManager import LifoManager
from kalliope.core.Models import Synapse
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.OrderListener import OrderListener
from kalliope.core.RestAPI import utils
from kalliope.core.RestAPI.utils import requires_auth
from kalliope.core.SynapseLauncher import SynapseLauncher
logging.basicConfig()
logger = logging.getLogger("kalliope")
UPLOAD_FOLDER = '/tmp/kalliope/tmp_uploaded_audio'
ALLOWED_EXTENSIONS = {'wav'}
class SynapsesView(Blueprint):
def __init__(self, name, import_name, app, brain=None, settings=None):
self.brain = brain
self.settings = settings
self.app = app
super(SynapsesView, self).__init__(name, import_name)
# api_response sent by the Order Analyser when using the /synapses/start/audio URL
self.api_response = None
        # boolean used to notify the main process that we got the list of returned synapses
self.order_analyser_return = False
# routes
self.add_url_rule('/synapses', view_func=self.get_synapses, methods=['GET'])
self.add_url_rule('/synapses', view_func=self.create_synapses, methods=['POST'])
self.add_url_rule('/synapses/<synapse_name>', view_func=self.get_synapse, methods=['GET'])
self.add_url_rule('/synapses/<synapse_name>', view_func=self.delete_synapse, methods=['DELETE'])
self.add_url_rule('/synapses/start/id/<synapse_name>', view_func=self.run_synapse_by_name, methods=['POST'])
self.add_url_rule('/synapses/start/order', view_func=self.run_synapse_by_order, methods=['POST'])
self.add_url_rule('/synapses/start/audio', view_func=self.run_synapse_by_audio, methods=['POST'])
def _get_synapse_by_name(self, synapse_name):
"""
Find a synapse in the brain by its name
:param synapse_name:
:return:
"""
all_synapse = self.brain.synapses
for synapse in all_synapse:
try:
if synapse.name == synapse_name:
return synapse
except KeyError:
pass
return None
@requires_auth
def create_synapses(self):
"""
curl -i -H "Content-Type: application/json" \
--user admin:secret \
-X POST \
-d '{
"name": "Say-hello",
"signals": [
{
"order": "je suis nicolas"
}
],
"neurons": [
{
"say": {
"message": "je sais"
}
}
]
}' \
http://127.0.0.1:5000/synapses
:return:
"""
if not request.get_json() or 'name' not in request.get_json():
data = {
"Error": "Wrong parameters, 'name' not set"
}
return jsonify(error=data), 400
new_synapse = request.get_json()
try:
ConfigurationChecker().check_synape_dict(new_synapse)
except (InvalidSynapeName, NoSynapeNeurons, NoSynapeSignals) as e:
data = {
"error": "%s" % e
}
return jsonify(data), 400
try:
name = new_synapse["name"]
neurons = BrainLoader.get_neurons(new_synapse["neurons"], self.settings)
signals = BrainLoader.get_signals(new_synapse["signals"])
new_synapse_instance = Synapse(name=name, neurons=neurons, signals=signals)
self.brain.synapses.append(new_synapse_instance)
# TODO save the brain in yaml
return jsonify(new_synapse_instance.serialize()), 201
except KalliopeModuleNotFoundError as e:
data = {
"error": "%s" % e
}
return jsonify(data), 400
@requires_auth
def get_synapses(self):
"""
get all synapses.
test with curl:
curl -i --user admin:secret -X GET http://127.0.0.1:5000/synapses
"""
logger.debug("[FlaskAPI] get_synapses: all")
data = jsonify(synapses=[e.serialize() for e in self.brain.synapses])
return data, 200
@requires_auth
def get_synapse(self, synapse_name):
"""
get a synapse by its name
test with curl:
curl --user admin:secret -i -X GET http://127.0.0.1:5000/synapses/say-hello-en
"""
logger.debug("[FlaskAPI] get_synapse: synapse_name -> %s" % synapse_name)
synapse_target = self._get_synapse_by_name(synapse_name)
if synapse_target is not None:
data = jsonify(synapses=synapse_target.serialize())
return data, 200
data = {
"synapse name not found": "%s" % synapse_name
}
return jsonify(error=data), 404
@requires_auth
def delete_synapse(self, synapse_name):
"""
delete a synapse by its name
test with curl:
curl --user admin:secret -i -X DELETE http://127.0.0.1:5000/synapses/say-hello-en
"""
logger.debug("[FlaskAPI] delete_synapse -> %s" % synapse_name)
synapse_target = self._get_synapse_by_name(synapse_name)
if synapse_target is not None:
# delete from brain
self._delete_synapse_by_name(synapse_name)
return '', 204
data = {
"synapse name not found": "%s" % synapse_name
}
return jsonify(error=data), 404
@requires_auth
def run_synapse_by_name(self, synapse_name):
"""
Run a synapse by its name
test with curl:
curl -i --user admin:secret -X POST http://127.0.0.1:5000/synapses/start/id/say-hello-fr
        Run a synapse without making Kalliope speak
curl -i -H "Content-Type: application/json" --user admin:secret -X POST \
-d '{"mute":"true"}' http://127.0.0.1:5000/synapses/start/id/say-hello-fr
Run a synapse by its name and pass order's parameters
curl -i -H "Content-Type: application/json" --user admin:secret -X POST \
-d '{"mute":"true", "parameters": {"parameter1": "value1" }}' \
http://127.0.0.1:5000/synapses/start/id/say-hello-fr
:param synapse_name: name(id) of the synapse to execute
:return:
"""
# get a synapse object from the name
logger.debug("[FlaskAPI] run_synapse_by_name: synapse name -> %s" % synapse_name)
synapse_target = BrainLoader().brain.get_synapse_by_name(synapse_name=synapse_name)
        # Store the mute value, then apply it depending on the request parameters
old_mute_value = self.settings.options.mute
mute = utils.get_value_flag_from_request(http_request=request,
flag_to_find="mute",
is_boolean=True)
if mute is not None:
SettingEditor.set_mute_status(mute=mute)
# get parameters
parameters = utils.get_parameters_from_request(request)
if synapse_target is None:
data = {
"synapse name not found": "%s" % synapse_name
}
if mute is not None:
SettingEditor.set_mute_status(mute=old_mute_value)
return jsonify(error=data), 404
else:
# generate a MatchedSynapse from the synapse
matched_synapse = MatchedSynapse(matched_synapse=synapse_target, overriding_parameter=parameters)
# get the current LIFO buffer from the singleton
lifo_buffer = LifoManager.get_singleton_lifo()
lifo_buffer.add_synapse_list_to_lifo([matched_synapse])
response = lifo_buffer.execute(is_api_call=True)
data = jsonify(response)
if mute is not None:
SettingEditor.set_mute_status(mute=old_mute_value)
return data, 201
@requires_auth
def run_synapse_by_order(self):
"""
        Give an order to Kalliope via the API as if it were a spoken one
Test with curl
curl -i --user admin:secret -H "Content-Type: application/json" -X POST \
-d '{"order":"my order"}' http://localhost:5000/synapses/start/order
In case of quotes in the order or accents, use a file
cat post.json:
{"order":"j'aime"}
curl -i --user admin:secret -H "Content-Type: application/json" -X POST \
--data @post.json http://localhost:5000/order/
Can be used with mute flag
curl -i --user admin:secret -H "Content-Type: application/json" -X POST \
-d '{"order":"my order", "mute":"true"}' http://localhost:5000/synapses/start/order
:return:
"""
if not request.get_json() or 'order' not in request.get_json():
data = {
"Error": "Wrong parameters, 'order' not set"
}
return jsonify(error=data), 400
order = request.get_json('order')
        # Store the mute value, then apply it depending on the request parameters
old_mute_value = self.settings.options.mute
mute = utils.get_value_flag_from_request(http_request=request,
flag_to_find="mute",
is_boolean=True)
if mute is not None:
SettingEditor.set_mute_status(mute=mute)
if order is not None:
# get the order
order_to_run = order["order"]
logger.debug("[FlaskAPI] run_synapse_by_order: order to run -> %s" % order_to_run)
api_response = SynapseLauncher.run_matching_synapse_from_order(order_to_run,
self.brain,
self.settings,
is_api_call=True)
Cortex.save('kalliope_last_order', order_to_run)
data = jsonify(api_response)
if mute is not None:
SettingEditor.set_mute_status(mute=old_mute_value)
return data, 201
else:
data = {
"error": "order cannot be null"
}
if mute is not None:
SettingEditor.set_mute_status(mute=old_mute_value)
return jsonify(error=data), 400
@requires_auth
def run_synapse_by_audio(self):
"""
Give an order to Kalliope with an audio file
Test with curl
curl -i --user admin:secret -X POST http://localhost:5000/synapses/start/audio -F "file=@/path/to/input.wav"
With mute flag
curl -i --user admin:secret -X POST \
http://localhost:5000/synapses/start/audio -F "file=@path/to/file.wav" -F mute="true"
:return:
"""
# check if the post request has the file part
if 'file' not in request.files:
data = {
"error": "No file provided"
}
return jsonify(error=data), 400
uploaded_file = request.files['file']
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
if uploaded_file.filename == '':
data = {
"error": "No file provided"
}
return jsonify(error=data), 400
        # Store the mute value, then apply it depending on the request parameters
old_mute_value = self.settings.options.mute
if request.form.get("mute"):
SettingEditor.set_mute_status(mute=Utils.str_to_bool(request.form.get("mute")))
# save the file
filename = secure_filename(uploaded_file.filename)
base_path = os.path.join(self.app.config['UPLOAD_FOLDER'])
uploaded_file.save(os.path.join(base_path, filename))
        # now start analysing the audio with the STT engine
audio_path = base_path + os.sep + filename
logger.debug("[FlaskAPI] run_synapse_by_audio: with file path %s" % audio_path)
if not self.allowed_file(audio_path):
audio_path = self._convert_to_wav(audio_file_path=audio_path)
ol = OrderListener(callback=self.audio_analyser_callback, audio_file_path=audio_path)
ol.start()
ol.join()
        # wait for the Order Analyser to finish processing. We need to wait in this thread to keep the context
while not self.order_analyser_return:
time.sleep(0.1)
self.order_analyser_return = False
if self.api_response is not None and self.api_response:
data = jsonify(self.api_response)
self.api_response = None
logger.debug("[FlaskAPI] run_synapse_by_audio: data %s" % data)
if request.form.get("mute"):
SettingEditor.set_mute_status(mute=old_mute_value)
return data, 201
else:
data = {
"error": "The given order doesn't match any synapses"
}
if request.form.get("mute"):
SettingEditor.set_mute_status(mute=old_mute_value)
return jsonify(error=data), 400
def audio_analyser_callback(self, order):
"""
Callback of the OrderListener. Called after the processing of the audio file
This method will
- call the Order Analyser to analyse the order and launch corresponding synapse as usual.
- get a list of launched synapse.
        - give the response to the main process via self.api_response
- notify that the processing is over via order_analyser_return
:param order: string order to analyse
:return:
"""
logger.debug("[FlaskAPI] audio_analyser_callback: order to process -> %s" % order)
api_response = SynapseLauncher.run_matching_synapse_from_order(order,
self.brain,
self.settings,
is_api_call=True)
self.api_response = api_response
Cortex.save('kalliope_last_order', order)
        # this boolean will notify the main process that the order has been processed
self.order_analyser_return = True
@staticmethod
def _convert_to_wav(audio_file_path):
"""
        If not already .wav, convert an incoming audio file to wav format using the system ffmpeg binary
:param audio_file_path: the current full file path
:return: Wave file path
"""
        # Not an allowed extension, so convert to wav using ffmpeg
base = os.path.splitext(audio_file_path)[0]
extension = os.path.splitext(audio_file_path)[1]
if extension != ".wav":
current_file_path = audio_file_path
logger.debug("Converting file " + current_file_path + " to .wav")
audio_file_path = base + ".wav"
os.system("ffmpeg -loglevel panic -y -i " + current_file_path + " " + audio_file_path) # --> deprecated
# subprocess.call(['avconv', '-y', '-i', audio_path, new_file_path], shell=True) # Not working ...
return audio_file_path
@staticmethod
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def _delete_synapse_by_name(self, synapse_name):
all_synapse = self.brain.synapses
for synapse in all_synapse:
try:
if synapse.name == synapse_name:
logger.debug("[FlaskAPI] remove synapse from the brain: '%s'" % synapse_name)
all_synapse.remove(synapse)
# TODO save the brain in yaml
except KeyError:
pass
return None
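# Example (not part of the module above): the docstrings document the endpoints with
# curl; an equivalent call from Python might look like this sketch, assuming a
# Kalliope REST API on localhost:5000 with basic auth admin/secret (all values here
# are placeholders).
import requests

example_response = requests.post(
    "http://localhost:5000/synapses/start/order",
    json={"order": "my order", "mute": "true"},
    auth=("admin", "secret"),
)
print(example_response.status_code, example_response.json())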
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS
from .const import (
ADVANTAGE_AIR_STATE_CLOSE,
ADVANTAGE_AIR_STATE_OFF,
ADVANTAGE_AIR_STATE_ON,
ADVANTAGE_AIR_STATE_OPEN,
DOMAIN as ADVANTAGE_AIR_DOMAIN,
)
from .entity import AdvantageAirEntity
ADVANTAGE_AIR_HVAC_MODES = {
"heat": HVAC_MODE_HEAT,
"cool": HVAC_MODE_COOL,
"vent": HVAC_MODE_FAN_ONLY,
"dry": HVAC_MODE_DRY,
}
HASS_HVAC_MODES = {v: k for k, v in ADVANTAGE_AIR_HVAC_MODES.items()}
ADVANTAGE_AIR_FAN_MODES = {
"auto": FAN_AUTO,
"low": FAN_LOW,
"medium": FAN_MEDIUM,
"high": FAN_HIGH,
}
HASS_FAN_MODES = {v: k for k, v in ADVANTAGE_AIR_FAN_MODES.items()}
FAN_SPEEDS = {FAN_LOW: 30, FAN_MEDIUM: 60, FAN_HIGH: 100}
AC_HVAC_MODES = [
HVAC_MODE_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_DRY,
]
ZONE_HVAC_MODES = [HVAC_MODE_OFF, HVAC_MODE_FAN_ONLY]
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up AdvantageAir climate platform."""
instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id]
entities = []
for ac_key, ac_device in instance["coordinator"].data["aircons"].items():
entities.append(AdvantageAirAC(instance, ac_key))
for zone_key, zone in ac_device["zones"].items():
# Only add zone climate control when zone is in temperature control
if zone["type"] != 0:
entities.append(AdvantageAirZone(instance, ac_key, zone_key))
async_add_entities(entities)
class AdvantageAirClimateEntity(AdvantageAirEntity, ClimateEntity):
"""AdvantageAir Climate class."""
@property
def temperature_unit(self):
"""Return the temperature unit."""
return TEMP_CELSIUS
@property
def target_temperature_step(self):
"""Return the supported temperature step."""
return PRECISION_WHOLE
@property
def max_temp(self):
"""Return the maximum supported temperature."""
return 32
@property
def min_temp(self):
"""Return the minimum supported temperature."""
return 16
class AdvantageAirAC(AdvantageAirClimateEntity):
"""AdvantageAir AC unit."""
@property
def name(self):
"""Return the name."""
return self._ac["name"]
@property
def unique_id(self):
"""Return a unique id."""
return f'{self.coordinator.data["system"]["rid"]}-{self.ac_key}'
@property
def target_temperature(self):
"""Return the current target temperature."""
return self._ac["setTemp"]
@property
def hvac_mode(self):
"""Return the current HVAC modes."""
if self._ac["state"] == ADVANTAGE_AIR_STATE_ON:
return ADVANTAGE_AIR_HVAC_MODES.get(self._ac["mode"])
return HVAC_MODE_OFF
@property
def hvac_modes(self):
"""Return the supported HVAC modes."""
return AC_HVAC_MODES
@property
def fan_mode(self):
"""Return the current fan modes."""
return ADVANTAGE_AIR_FAN_MODES.get(self._ac["fan"])
@property
def fan_modes(self):
"""Return the supported fan modes."""
return [FAN_AUTO, FAN_LOW, FAN_MEDIUM, FAN_HIGH]
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
async def async_set_hvac_mode(self, hvac_mode):
"""Set the HVAC Mode and State."""
if hvac_mode == HVAC_MODE_OFF:
await self.async_change(
{self.ac_key: {"info": {"state": ADVANTAGE_AIR_STATE_OFF}}}
)
else:
await self.async_change(
{
self.ac_key: {
"info": {
"state": ADVANTAGE_AIR_STATE_ON,
"mode": HASS_HVAC_MODES.get(hvac_mode),
}
}
}
)
async def async_set_fan_mode(self, fan_mode):
"""Set the Fan Mode."""
await self.async_change(
{self.ac_key: {"info": {"fan": HASS_FAN_MODES.get(fan_mode)}}}
)
async def async_set_temperature(self, **kwargs):
"""Set the Temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
await self.async_change({self.ac_key: {"info": {"setTemp": temp}}})
class AdvantageAirZone(AdvantageAirClimateEntity):
"""AdvantageAir Zone control."""
@property
def name(self):
"""Return the name."""
return self._zone["name"]
@property
def unique_id(self):
"""Return a unique id."""
return f'{self.coordinator.data["system"]["rid"]}-{self.ac_key}-{self.zone_key}'
@property
def current_temperature(self):
"""Return the current temperature."""
return self._zone["measuredTemp"]
@property
def target_temperature(self):
"""Return the target temperature."""
return self._zone["setTemp"]
@property
def hvac_mode(self):
"""Return the current HVAC modes."""
if self._zone["state"] == ADVANTAGE_AIR_STATE_OPEN:
return HVAC_MODE_FAN_ONLY
return HVAC_MODE_OFF
@property
def hvac_modes(self):
"""Return supported HVAC modes."""
return ZONE_HVAC_MODES
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_TARGET_TEMPERATURE
async def async_set_hvac_mode(self, hvac_mode):
"""Set the HVAC Mode and State."""
if hvac_mode == HVAC_MODE_OFF:
await self.async_change(
{
self.ac_key: {
"zones": {self.zone_key: {"state": ADVANTAGE_AIR_STATE_CLOSE}}
}
}
)
else:
await self.async_change(
{
self.ac_key: {
"zones": {self.zone_key: {"state": ADVANTAGE_AIR_STATE_OPEN}}
}
}
)
async def async_set_temperature(self, **kwargs):
"""Set the Temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
await self.async_change(
{self.ac_key: {"zones": {self.zone_key: {"setTemp": temp}}}}
)
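# Example (not part of the module above): the async_change calls always send a nested
# ac -> zones -> zone payload. A tiny sketch of that shape, with "ac1"/"z01" as
# hypothetical ac and zone keys and plain strings standing in for the
# ADVANTAGE_AIR_STATE_* constants.
def example_zone_state_payload(ac_key, zone_key, state):
    # Matches the structure passed to async_change in AdvantageAirZone above.
    return {ac_key: {"zones": {zone_key: {"state": state}}}}

print(example_zone_state_payload("ac1", "z01", "open"))
# {'ac1': {'zones': {'z01': {'state': 'open'}}}}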
|