import unittest
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import relational_db
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.gcp import gce_virtual_machine
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
_BENCHMARK_NAME = 'name'
_BENCHMARK_UID = 'benchmark_uid'
_COMPONENT = 'test_component'
def _mergeDicts(dict1, dict2):
result = dict1.copy()
result.update(dict2)
return result
class FakeRelationalDb(relational_db.BaseRelationalDb):
def GetEndpoint(self):
pass
def GetPort(self):
pass
def _Create(self):
pass
def _Delete(self):
pass
def GetDefaultEngineVersion(self, _):
pass
def _FailoverHA(self):
pass
class RelationalDbSpecTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(RelationalDbSpecTestCase, self).setUp()
FLAGS['run_uri'].parse('123')
FLAGS['use_managed_db'].parse(True)
self.minimal_spec = {
'cloud': 'GCP',
'engine': 'mysql',
'db_spec': {
'GCP': {
'machine_type': 'n1-standard-1'
}
},
'db_disk_spec': {
'GCP': {
'disk_size': 500
}
}
}
relational_db._MANAGED_RELATIONAL_DB_REGISTRY = {
'GCP': FakeRelationalDb(None)
}
def tearDown(self):
super(RelationalDbSpecTestCase, self).tearDown()
relational_db._MANAGED_RELATIONAL_DB_REGISTRY = {}
def testMinimalConfig(self):
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.minimal_spec)
self.assertEqual(result.engine, 'mysql')
self.assertEqual(result.cloud, 'GCP')
self.assertIsInstance(result.db_spec, gce_virtual_machine.GceVmSpec)
def testDefaultDatabaseName(self):
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.minimal_spec)
self.assertEqual(result.database_name, 'pkb-db-123')
def testCustomDatabaseName(self):
spec = _mergeDicts(self.minimal_spec, {'database_name': 'fakename'})
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **spec)
self.assertEqual(result.database_name, 'fakename')
def testCustomDatabaseVersion(self):
spec = _mergeDicts(self.minimal_spec, {'engine_version': '6.6'})
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **spec)
self.assertEqual(result.engine_version, '6.6')
def testDefaultDatabasePassword(self):
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.minimal_spec)
self.assertIsInstance(result.database_password, str)
self.assertEqual(len(result.database_password), 13)
def testRandomDatabasePassword(self):
spec = _mergeDicts(self.minimal_spec, {'database_password': 'fakepassword'})
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **spec)
self.assertEqual(result.database_password, 'fakepassword')
def testDefaultHighAvailability(self):
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.minimal_spec)
self.assertEqual(result.high_availability, False)
def testCustomHighAvailability(self):
spec = _mergeDicts(self.minimal_spec, {'high_availability': True})
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **spec)
self.assertEqual(result.high_availability, True)
def testDefaultBackupEnabled(self):
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.minimal_spec)
self.assertEqual(result.backup_enabled, True)
def testCustomBackupEnabled(self):
spec = _mergeDicts(self.minimal_spec, {'backup_enabled': False})
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **spec)
self.assertEqual(result.backup_enabled, False)
def testDefaultBackupTime(self):
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.minimal_spec)
self.assertEqual(result.backup_start_time, '07:00')
def testCustomBackupTime(self):
spec = _mergeDicts(self.minimal_spec, {'backup_start_time': '08:00'})
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **spec)
self.assertEqual(result.backup_start_time, '08:00')
class RelationalDbMinimalSpecTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(RelationalDbMinimalSpecTestCase, self).setUp()
FLAGS['run_uri'].parse('123')
self.spec = {
'cloud': 'GCP',
'engine': 'mysql',
'db_spec': {
'GCP': {
'machine_type': 'n1-standard-1'
}
},
'db_disk_spec': {
'GCP': {
'disk_size': 500
}
}
}
def testDiskSpecRequired(self):
del self.spec['db_disk_spec']
with self.assertRaisesRegexp(errors.Config.MissingOption, 'db_disk_spec'):
benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.spec)
def testVmSpecRequired(self):
del self.spec['db_spec']
with self.assertRaisesRegexp(errors.Config.MissingOption, 'db_spec'):
benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.spec)
class RelationalDbFlagsTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(RelationalDbFlagsTestCase, self).setUp()
FLAGS['run_uri'].parse('123')
self.full_spec = {
'cloud': 'GCP',
'engine': 'mysql',
'database_name': 'fake_name',
'database_password': 'fake_password',
'backup_enabled': True,
'backup_start_time': '07:00',
'db_spec': {
'GCP': {
'machine_type': 'n1-standard-1',
'zone': 'us-west1-a',
}
},
'db_disk_spec': {
'GCP': {
'disk_size': 500,
}
},
'vm_groups': {
'clients': {
'vm_spec': {
'GCP': {
'zone': 'us-central1-c',
'machine_type': 'n1-standard-1'
}
},
'disk_spec': {
'GCP': {
'disk_size': 500,
'disk_type': 'pd-ssd'
}
}
},
'servers': {
'vm_spec': {
'GCP': {
'zone': 'us-central1-c',
'machine_type': 'n1-standard-1'
}
},
'disk_spec': {
'GCP': {
'disk_size': 500,
'disk_type': 'pd-ssd'
}
}
}
}
}
relational_db._MANAGED_RELATIONAL_DB_REGISTRY = {
'GCP': FakeRelationalDb(None)
}
def tearDown(self):
super(RelationalDbFlagsTestCase, self).tearDown()
relational_db._MANAGED_RELATIONAL_DB_REGISTRY = {}
# Not testing this yet, because it requires the implementation of a
# relational_db provider for a cloud other than GCP. That provider could
# perhaps be mocked.
def testCloudFlag(self):
pass
# TODO(jerlawson): Rename flags 'managed_db_' -> 'db_'.
def testDatabaseFlag(self):
FLAGS['managed_db_engine'].parse('postgres')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.engine, 'postgres')
def testDatabaseNameFlag(self):
FLAGS['managed_db_database_name'].parse('fakedbname')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.database_name, 'fakedbname')
def testDatabasePasswordFlag(self):
FLAGS['managed_db_database_password'].parse('fakepassword')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.database_password, 'fakepassword')
def testHighAvailabilityFlag(self):
FLAGS['managed_db_high_availability'].parse(True)
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.high_availability, True)
def testDatabaseVersionFlag(self):
FLAGS['managed_db_engine_version'].parse('5.6')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.engine_version, '5.6')
def testBackupEnabledFlag(self):
FLAGS['managed_db_backup_enabled'].parse(False)
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.backup_enabled, False)
def testBackupStartTimeFlag(self):
FLAGS['managed_db_backup_start_time'].parse('12:23')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.backup_start_time, '12:23')
def testZoneFlag(self):
FLAGS['managed_db_zone'].parse('us-east1-b')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.db_spec.zone, 'us-east1-b')
self.assertEqual(result.vm_groups['servers'].vm_spec.zone, 'us-east1-b')
def testClientVmZoneFlag(self):
FLAGS['client_vm_zone'].parse('us-east1-b')
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.vm_groups['clients'].vm_spec.zone, 'us-east1-b')
def testDiskSizeFlag(self):
FLAGS['managed_db_disk_size'].parse(2000)
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.db_disk_spec.disk_size, 2000)
self.assertEqual(result.vm_groups['servers'].disk_spec.disk_size, 2000)
def testClientVmDiskSizeFlag(self):
FLAGS['client_vm_disk_size'].parse(2000)
result = benchmark_config_spec._RelationalDbSpec(
_COMPONENT, flag_values=FLAGS, **self.full_spec)
self.assertEqual(result.vm_groups['clients'].disk_spec.disk_size, 2000)
if __name__ == '__main__':
unittest.main()
|
import warnings
from six import add_metaclass
from logilab.common.testlib import TestCase, unittest_main
from logilab.common import deprecation
class RawInputTC(TestCase):
# XXX with Python 2.6+ we could assert on the emitted warnings
# (http://docs.python.org/library/warnings.html#testing-warnings);
# instead we just make sure the calls do not crash. See the sketch at the
# end of this module.
def mock_warn(self, *args, **kwargs):
self.messages.append(args[0])
def setUp(self):
self.messages = []
deprecation.warn = self.mock_warn
def tearDown(self):
deprecation.warn = warnings.warn
def mk_func(self):
def any_func():
pass
return any_func
def test_class_deprecated(self):
@add_metaclass(deprecation.class_deprecated)
class AnyClass(object):
pass
AnyClass()
self.assertEqual(self.messages,
['AnyClass is deprecated'])
def test_deprecated_func(self):
any_func = deprecation.deprecated()(self.mk_func())
any_func()
any_func = deprecation.deprecated('message')(self.mk_func())
any_func()
self.assertEqual(self.messages,
['The function "any_func" is deprecated', 'message'])
def test_deprecated_decorator(self):
@deprecation.deprecated()
def any_func():
pass
any_func()
@deprecation.deprecated('message')
def any_func():
pass
any_func()
self.assertEqual(self.messages,
['The function "any_func" is deprecated', 'message'])
def test_moved(self):
module = 'data.deprecation'
any_func = deprecation.moved(module, 'moving_target')
any_func()
self.assertEqual(self.messages,
['object moving_target has been moved to module data.deprecation'])
def test_deprecated_manager(self):
deprecator = deprecation.DeprecationManager("module_name")
deprecator.compatibility('1.3')
# These warnings should be printed.
deprecator.warn('1.1', "Major deprecation message.", 1)
deprecator.warn('1.1')
@deprecator.deprecated('1.2', 'Major deprecation message.')
def any_func():
pass
any_func()
@deprecator.deprecated('1.2')
def other_func():
pass
other_func()
self.assertListEqual(self.messages,
['[module_name 1.1] Major deprecation message.',
'[module_name 1.1] ',
'[module_name 1.2] Major deprecation message.',
'[module_name 1.2] The function "other_func" is deprecated'])
def test_class_deprecated_manager(self):
deprecator = deprecation.DeprecationManager("module_name")
deprecator.compatibility('1.3')
@add_metaclass(deprecator.class_deprecated('1.2'))
class AnyClass(object):
pass
AnyClass()
self.assertEqual(self.messages,
['[module_name 1.2] AnyClass is deprecated'])
def test_deprecated_manager_noprint(self):
deprecator = deprecation.DeprecationManager("module_name")
deprecator.compatibility('1.3')
# This warning should not be printed.
deprecator.warn('1.3', "Minor deprecation message.", 1)
@deprecator.deprecated('1.3', 'Minor deprecation message.')
def any_func():
pass
any_func()
@deprecator.deprecated('1.20')
def other_func():
pass
other_func()
@deprecator.deprecated('1.4')
def other_func():
pass
other_func()
class AnyClass(object):
__metaclass__ = deprecator.class_deprecated((1,5))
AnyClass()
self.assertFalse(self.messages)
if __name__ == '__main__':
unittest_main()
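# Sketch of the warnings-based approach mentioned in the XXX comment above.
# It is kept as a comment because these tests monkeypatch deprecation.warn;
# with the real warnings.warn in place, something like the following would
# assert on the emitted warning (any_func is a hypothetical plain function):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter('always')
#         deprecation.deprecated('message')(any_func)()
#         assert str(caught[0].message) == 'message'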
|
import asyncio
from datetime import timedelta
import json
import pytest
from homeassistant.const import (
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.async_mock import Mock, patch
from tests.common import async_fire_time_changed
MOCK_VERSION = 1
MOCK_KEY = "storage-test"
MOCK_DATA = {"hello": "world"}
MOCK_DATA2 = {"goodbye": "cruel world"}
@pytest.fixture
def store(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
yield storage.Store(hass, MOCK_VERSION, MOCK_KEY)
async def test_loading(hass, store):
"""Test we can save and load data."""
await store.async_save(MOCK_DATA)
data = await store.async_load()
assert data == MOCK_DATA
async def test_custom_encoder(hass):
"""Test we can save and load data."""
class JSONEncoder(json.JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY, encoder=JSONEncoder)
await store.async_save(Mock())
data = await store.async_load()
assert data == "9"
async def test_loading_non_existing(hass, store):
"""Test we can save and load data."""
with patch("homeassistant.util.json.open", side_effect=FileNotFoundError):
data = await store.async_load()
assert data is None
async def test_loading_parallel(hass, store, hass_storage, caplog):
"""Test we can save and load data."""
hass_storage[store.key] = {"version": MOCK_VERSION, "data": MOCK_DATA}
results = await asyncio.gather(store.async_load(), store.async_load())
assert results[0] is MOCK_DATA
assert results[1] is MOCK_DATA
assert caplog.text.count(f"Loading data for {store.key}")
async def test_saving_with_delay(hass, store, hass_storage):
"""Test saving data after a delay."""
store.async_delay_save(lambda: MOCK_DATA, 1)
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_saving_on_final_write(hass, hass_storage):
"""Test delayed saves trigger when we quit Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 5)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.state = CoreState.stopping
await hass.async_block_till_done()
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_not_delayed_saving_while_stopping(hass, hass_storage):
"""Test delayed saves don't write after the stop event has fired."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
hass.state = CoreState.stopping
store.async_delay_save(lambda: MOCK_DATA, 1)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
assert store.key not in hass_storage
async def test_not_delayed_saving_after_stopping(hass, hass_storage):
"""Test delayed saves don't write after stop if issued before stopping Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 10)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.state = CoreState.stopping
await hass.async_block_till_done()
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=15))
await hass.async_block_till_done()
assert store.key not in hass_storage
async def test_not_saving_while_stopping(hass, hass_storage):
"""Test saves don't write when stopping Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
hass.state = CoreState.stopping
await store.async_save(MOCK_DATA)
assert store.key not in hass_storage
async def test_loading_while_delay(hass, store, hass_storage):
"""Test we load new data even if not written yet."""
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "yes"}
async def test_writing_while_writing_delay(hass, store, hass_storage):
"""Test a write while a write with delay is active."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_multiple_delay_save_calls(hass, store, hass_storage):
"""Test a write while a write with changing delays."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
store.async_delay_save(lambda: {"delay": "yes"}, 2)
store.async_delay_save(lambda: {"delay": "yes"}, 3)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_multiple_save_calls(hass, store, hass_storage):
"""Test multiple write tasks."""
assert store.key not in hass_storage
tasks = [store.async_save({"savecount": savecount}) for savecount in range(6)]
await asyncio.gather(*tasks)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"key": MOCK_KEY,
"data": {"savecount": 5},
}
data = await store.async_load()
assert data == {"savecount": 5}
async def test_migrator_no_existing_config(hass, store, hass_storage):
"""Test migrator with no existing config."""
with patch("os.path.isfile", return_value=False), patch.object(
store, "async_load", return_value={"cur": "config"}
):
data = await storage.async_migrator(hass, "old-path", store)
assert data == {"cur": "config"}
assert store.key not in hass_storage
async def test_migrator_existing_config(hass, store, hass_storage):
"""Test migrating existing config."""
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass, "old-path", store, old_conf_load_func=lambda _: {"old": "config"}
)
assert len(mock_remove.mock_calls) == 1
assert data == {"old": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"data": data,
}
async def test_migrator_transforming_config(hass, store, hass_storage):
"""Test migrating config to new format."""
async def old_conf_migrate_func(old_config):
"""Migrate old config to new format."""
return {"new": old_config["old"]}
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass,
"old-path",
store,
old_conf_migrate_func=old_conf_migrate_func,
old_conf_load_func=lambda _: {"old": "config"},
)
assert len(mock_remove.mock_calls) == 1
assert data == {"new": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"data": data,
}
|
from collections import OrderedDict
from django.core.exceptions import NON_FIELD_ERRORS
from tablib import Dataset
class Error:
def __init__(self, error, traceback=None, row=None):
self.error = error
self.traceback = traceback
self.row = row
class RowResult:
IMPORT_TYPE_UPDATE = 'update'
IMPORT_TYPE_NEW = 'new'
IMPORT_TYPE_DELETE = 'delete'
IMPORT_TYPE_SKIP = 'skip'
IMPORT_TYPE_ERROR = 'error'
IMPORT_TYPE_INVALID = 'invalid'
valid_import_types = frozenset([
IMPORT_TYPE_NEW,
IMPORT_TYPE_UPDATE,
IMPORT_TYPE_DELETE,
IMPORT_TYPE_SKIP,
])
def __init__(self):
self.errors = []
self.validation_error = None
self.diff = None
self.import_type = None
self.raw_values = {}
class InvalidRow:
"""A row that resulted in one or more ``ValidationError`` being raised during import."""
def __init__(self, number, validation_error, values):
self.number = number
self.error = validation_error
self.values = values
try:
self.error_dict = validation_error.message_dict
except AttributeError:
self.error_dict = {NON_FIELD_ERRORS: validation_error.messages}
@property
def field_specific_errors(self):
"""Returns a dictionary of field-specific validation errors for this row."""
return {
key: value for key, value in self.error_dict.items()
if key != NON_FIELD_ERRORS
}
@property
def non_field_specific_errors(self):
"""Returns a list of non field-specific validation errors for this row."""
return self.error_dict.get(NON_FIELD_ERRORS, [])
@property
def error_count(self):
"""Returns the total number of validation errors for this row."""
count = 0
for error_list in self.error_dict.values():
count += len(error_list)
return count
class Result:
def __init__(self, *args, **kwargs):
super().__init__()
self.base_errors = []
self.diff_headers = []
self.rows = [] # RowResults
self.invalid_rows = [] # InvalidRow
self.failed_dataset = Dataset()
self.totals = OrderedDict([(RowResult.IMPORT_TYPE_NEW, 0),
(RowResult.IMPORT_TYPE_UPDATE, 0),
(RowResult.IMPORT_TYPE_DELETE, 0),
(RowResult.IMPORT_TYPE_SKIP, 0),
(RowResult.IMPORT_TYPE_ERROR, 0),
(RowResult.IMPORT_TYPE_INVALID, 0)])
self.total_rows = 0
def valid_rows(self):
return [
r for r in self.rows
if r.import_type in RowResult.valid_import_types
]
def append_row_result(self, row_result):
self.rows.append(row_result)
def append_base_error(self, error):
self.base_errors.append(error)
def add_dataset_headers(self, headers):
self.failed_dataset.headers = headers + ["Error"]
def append_failed_row(self, row, error):
row_values = [v for (k, v) in row.items()]
try:
row_values.append(str(error.error))
except AttributeError:
row_values.append(str(error))
self.failed_dataset.append(row_values)
def append_invalid_row(self, number, row, validation_error):
# NOTE: value order must match diff_headers order, so that row
# values and column headers match in the UI when displayed
values = tuple(row.get(col, "---") for col in self.diff_headers)
self.invalid_rows.append(
InvalidRow(number=number, validation_error=validation_error, values=values)
)
def increment_row_result_total(self, row_result):
if row_result.import_type:
self.totals[row_result.import_type] += 1
def row_errors(self):
return [(i + 1, row.errors)
for i, row in enumerate(self.rows) if row.errors]
def has_errors(self):
"""Returns a boolean indicating whether the import process resulted in
any critical (non-validation) errors for this result."""
return bool(self.base_errors or self.row_errors())
def has_validation_errors(self):
"""Returns a boolean indicating whether the import process resulted in
any validation errors for this result."""
return bool(self.invalid_rows)
def __iter__(self):
return iter(self.rows)
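# Minimal usage sketch (hypothetical driver code, not part of this module)
# showing how append_invalid_row pads values to match diff_headers, as the
# NOTE in that method describes: columns missing from the row become "---".
if __name__ == '__main__':
    from django.core.exceptions import ValidationError

    result = Result()
    result.diff_headers = ['id', 'name']
    result.append_invalid_row(
        number=1,
        row={'name': 'widget'},  # the row has no 'id' column
        validation_error=ValidationError({'name': ['This field is required.']}),
    )
    assert result.invalid_rows[0].values == ('---', 'widget')
    assert result.invalid_rows[0].error_count == 1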
|
from homeassistant.components import weather
from homeassistant.components.weather import (
ATTR_FORECAST,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_WEATHER_ATTRIBUTION,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_OZONE,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
async def test_attributes(hass):
"""Test weather attributes."""
assert await async_setup_component(
hass, weather.DOMAIN, {"weather": {"platform": "demo"}}
)
hass.config.units = METRIC_SYSTEM
await hass.async_block_till_done()
state = hass.states.get("weather.demo_weather_south")
assert state is not None
assert state.state == "sunny"
data = state.attributes
assert data.get(ATTR_WEATHER_TEMPERATURE) == 21.6
assert data.get(ATTR_WEATHER_HUMIDITY) == 92
assert data.get(ATTR_WEATHER_PRESSURE) == 1099
assert data.get(ATTR_WEATHER_WIND_SPEED) == 0.5
assert data.get(ATTR_WEATHER_WIND_BEARING) is None
assert data.get(ATTR_WEATHER_OZONE) is None
assert data.get(ATTR_WEATHER_ATTRIBUTION) == "Powered by Home Assistant"
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_CONDITION) == "rainy"
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_PRECIPITATION) == 1
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_PRECIPITATION_PROBABILITY) == 60
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_TEMP) == 22
assert data.get(ATTR_FORECAST)[0].get(ATTR_FORECAST_TEMP_LOW) == 15
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_CONDITION) == "fog"
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_PRECIPITATION) == 0.2
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_TEMP) == 21
assert data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_TEMP_LOW) == 12
assert (
data.get(ATTR_FORECAST)[6].get(ATTR_FORECAST_PRECIPITATION_PROBABILITY) == 100
)
assert len(data.get(ATTR_FORECAST)) == 7
async def test_temperature_convert(hass):
"""Test temperature conversion."""
assert await async_setup_component(
hass, weather.DOMAIN, {"weather": {"platform": "demo"}}
)
hass.config.units = METRIC_SYSTEM
await hass.async_block_till_done()
state = hass.states.get("weather.demo_weather_north")
assert state is not None
assert state.state == "rainy"
data = state.attributes
assert data.get(ATTR_WEATHER_TEMPERATURE) == -24
|
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array, shape (n_samples, n_features)
Training set.
y : array, shape (n_samples,)
Target values.
**fit_params : dict
Additional fitting parameters passed to ``self.fit``.
Returns
-------
X_new : array, shape (n_samples, n_features_new)
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
class EstimatorMixin(object):
"""Mixin class for estimators."""
def get_params(self, deep=True):
"""Get the estimator params.
Parameters
----------
deep : bool
Deep.
"""
return
def set_params(self, **params):
"""Set parameters (mimics sklearn API).
Parameters
----------
**params : dict
Extra parameters.
Returns
-------
inst : object
The instance.
"""
if not params:
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
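# Usage sketch (illustrative; _DemoEstimator is hypothetical and not part of
# this module). set_params mirrors the scikit-learn convention: plain keys set
# attributes directly, and keys containing '__' are routed to the nested
# object stored under the prefix.
if __name__ == '__main__':
    class _DemoEstimator(EstimatorMixin):
        def __init__(self, alpha=1.0, sub=None):
            self.alpha = alpha
            self.sub = sub

        def get_params(self, deep=True):
            return {'alpha': self.alpha, 'sub': self.sub}

    est = _DemoEstimator(sub=_DemoEstimator(alpha=2.0))
    est.set_params(alpha=0.5, sub__alpha=3.0)
    assert est.alpha == 0.5 and est.sub.alpha == 3.0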
|
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from weblate.auth.models import User
from weblate.trans.models import AutoComponentList, Translation, Unit
from weblate.trans.util import sort_choices
from weblate.wladmin.models import WeblateModelAdmin
class RepoAdminMixin:
def force_commit(self, request, queryset):
"""Commit pending changes for selected components."""
for obj in queryset:
obj.commit_pending("admin", request)
self.message_user(
request, f"Flushed changes in {queryset.count():d} git repos."
)
force_commit.short_description = _("Commit pending changes")
def update_from_git(self, request, queryset):
"""Update selected components from git."""
for obj in queryset:
obj.do_update(request)
self.message_user(request, f"Updated {queryset.count():d} git repos.")
update_from_git.short_description = _("Update VCS repository")
def get_qs_units(self, queryset):
raise NotImplementedError()
def get_qs_translations(self, queryset):
raise NotImplementedError()
def update_checks(self, request, queryset):
"""Recalculate checks for selected components."""
units = self.get_qs_units(queryset)
for unit in units:
unit.run_checks()
for translation in self.get_qs_translations(queryset):
translation.invalidate_cache()
self.message_user(request, "Updated checks for {:d} units.".format(len(units)))
update_checks.short_description = _("Update quality checks")
class ProjectAdmin(WeblateModelAdmin, RepoAdminMixin):
list_display = (
"name",
"slug",
"web",
"list_admins",
"access_control",
"enable_hooks",
"num_vcs",
"get_total",
"get_source_words",
"get_language_count",
)
prepopulated_fields = {"slug": ("name",)}
search_fields = ["name", "slug", "web"]
actions = ["update_from_git", "update_checks", "force_commit"]
def list_admins(self, obj):
return ", ".join(
User.objects.all_admins(obj).values_list("username", flat=True)
)
list_admins.short_description = _("Administrators")
def get_total(self, obj):
return obj.stats.source_strings
get_total.short_description = _("Source strings")
def get_source_words(self, obj):
return obj.stats.source_words
get_source_words.short_description = _("Source words")
def get_language_count(self, obj):
"""Return number of languages used in this project."""
return obj.stats.languages
get_language_count.short_description = _("Languages")
def num_vcs(self, obj):
return obj.component_set.with_repo().count()
num_vcs.short_description = _("VCS repositories")
def get_qs_units(self, queryset):
return Unit.objects.filter(translation__component__project__in=queryset)
def get_qs_translations(self, queryset):
return Translation.objects.filter(component__project__in=queryset)
class ComponentAdmin(WeblateModelAdmin, RepoAdminMixin):
list_display = ["name", "slug", "project", "repo", "branch", "vcs", "file_format"]
prepopulated_fields = {"slug": ("name",)}
search_fields = ["name", "slug", "repo", "branch", "project__name", "project__slug"]
list_filter = ["project", "vcs", "file_format"]
actions = ["update_from_git", "update_checks", "force_commit"]
ordering = ["project__name", "name"]
def get_qs_units(self, queryset):
return Unit.objects.filter(translation__component__in=queryset)
def get_qs_translations(self, queryset):
return Translation.objects.filter(component__in=queryset)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""Wrapper to sort languages by localized names."""
result = super().formfield_for_foreignkey(db_field, request, **kwargs)
if db_field.name == "source_language":
result.choices = sort_choices(result.choices)
return result
class TranslationAdmin(WeblateModelAdmin):
list_display = ["component", "language", "revision", "filename"]
search_fields = ["component__name", "language__code", "revision", "filename"]
list_filter = ["component__project", "component", "language"]
class UnitAdmin(WeblateModelAdmin):
list_display = ["source", "target", "position", "state"]
search_fields = ["source", "target"]
list_filter = ["translation__component", "translation__language", "state"]
class SuggestionAdmin(WeblateModelAdmin):
list_display = ["target", "unit", "user"]
search_fields = ["unit__source", "target"]
class CommentAdmin(WeblateModelAdmin):
list_display = ["comment", "unit", "user"]
search_fields = ["unit__source", "comment"]
class ChangeAdmin(WeblateModelAdmin):
list_display = ["unit", "user", "timestamp"]
date_hierarchy = "timestamp"
list_filter = ["component", "project", "language"]
raw_id_fields = ("unit",)
class AnnouncementAdmin(WeblateModelAdmin):
list_display = ["message", "project", "component", "language"]
search_fields = ["message"]
list_filter = ["project", "language"]
class AutoComponentListAdmin(admin.TabularInline):
model = AutoComponentList
extra = 0
class ComponentListAdmin(WeblateModelAdmin):
list_display = ["name", "show_dashboard"]
list_filter = ["show_dashboard"]
prepopulated_fields = {"slug": ("name",)}
filter_horizontal = ("components",)
inlines = [AutoComponentListAdmin]
ordering = ["name"]
class ContributorAgreementAdmin(WeblateModelAdmin):
list_display = ["user", "component", "timestamp"]
date_hierarchy = "timestamp"
ordering = ("user__username", "component__project__name", "component__name")
|
import pygogo as gogo
from io import open
from tempfile import NamedTemporaryFile
from os import remove
from meza.compat import encode
from . import coroutine, return_value
try:
from twisted.test.proto_helpers import AccumulatingProtocol
except ImportError:
AccumulatingProtocol = object
else:
from twisted.internet.reactor import callLater
from twisted.protocols.basic import FileSender
from twisted.web.client import getPage, downloadPage
from twisted.test.proto_helpers import StringTransport
logger = gogo.Gogo(__name__, monolog=True).logger
# http://stackoverflow.com/q/26314586/408556
# http://stackoverflow.com/q/8157197/408556
# http://stackoverflow.com/a/33708936/408556
class FileReader(AccumulatingProtocol):
def __init__(self, filename, transform=None, delay=0, verbose=False):
self.f = open(filename, 'rb')
self.transform = transform
self.delay = delay
self.producer = FileSender()
self.logger = gogo.Gogo(__name__, verbose=verbose).logger
def cleanup(self, *args):
self.f.close()
self.producer.stopProducing()
def resumeProducing(self):
chunk = self.file.read(self.CHUNK_SIZE) if self.file else ''
if not chunk:
self.file = None
self.consumer.unregisterProducer()
if self.deferred and self.delay:
callLater(self.delay, self.deferred.callback, self.lastSent)
elif self.deferred:
self.deferred.callback(self.lastSent)
self.deferred = None
return
def connectionLost(self, reason):
self.logger.debug('connectionLost: %s', reason)
self.cleanup()
def connectionMade(self):
self.logger.debug('Connection made from %s', self.transport.getPeer())
args = (self.f, self.transport, self.transform)
self.d = self.closedDeferred = self.producer.beginFileTransfer(*args)
while not self.d.called:
self.producer.resumeProducing()
self.d.addErrback(self.logger.error)
self.d.addBoth(self.cleanup)
@coroutine
def async_read_file(filename, transport, protocol=FileReader, **kwargs):
proto = protocol(filename.replace('file://', ''), **kwargs)
proto.makeConnection(transport)
yield proto.d
# return_value(proto.data)
return_value(proto.transport.value())
@coroutine
def async_get_file(filename, transport, protocol=FileReader, **kwargs):
proto = protocol(filename.replace('file://', ''), **kwargs)
proto.makeConnection(transport)
yield proto.d
proto.transport.io.seek(0)
return_value(proto.transport.io)
@coroutine
def async_url_open(url, timeout=0, **kwargs):
if url.startswith('http'):
page = NamedTemporaryFile(delete=False)
new_url = page.name
yield downloadPage(encode(url), page, timeout=timeout)
else:
page, new_url = None, url
f = yield async_get_file(new_url, StringTransport(), **kwargs)
if not hasattr(f, 'name') and url.startswith('file'):
f.name = url.split('://')[1]
if page:
page.close()
remove(page.name)
return_value(f)
def async_url_read(url, timeout=0, **kwargs):
if url.startswith('http'):
content = getPage(encode(url), timeout=timeout)
else:
content = async_read_file(url, StringTransport(), **kwargs)
return content
|
import pytest
from PyQt5.QtCore import QObject
from qutebrowser.config import stylesheet
class StyleObj(QObject):
def __init__(self, stylesheet=None, parent=None):
super().__init__(parent)
if stylesheet is not None:
self.STYLESHEET = stylesheet # noqa: N801,N806 pylint: disable=invalid-name
self.rendered_stylesheet = None
def setStyleSheet(self, stylesheet):
self.rendered_stylesheet = stylesheet
def test_get_stylesheet(config_stub):
config_stub.val.colors.hints.fg = 'magenta'
observer = stylesheet._StyleSheetObserver(
StyleObj(), stylesheet="{{ conf.colors.hints.fg }}", update=False)
assert observer._get_stylesheet() == 'magenta'
@pytest.mark.parametrize('delete', [True, False])
@pytest.mark.parametrize('stylesheet_param', [True, False])
@pytest.mark.parametrize('update', [True, False])
def test_set_register_stylesheet(delete, stylesheet_param, update, qtbot,
config_stub, caplog):
config_stub.val.colors.hints.fg = 'magenta'
qss = "{{ conf.colors.hints.fg }}"
with caplog.at_level(9): # VDEBUG
if stylesheet_param:
obj = StyleObj()
stylesheet.set_register(obj, qss, update=update)
else:
obj = StyleObj(qss)
stylesheet.set_register(obj, update=update)
assert caplog.messages[-1] == 'stylesheet for StyleObj: magenta'
assert obj.rendered_stylesheet == 'magenta'
if delete:
with qtbot.waitSignal(obj.destroyed):
obj.deleteLater()
config_stub.val.colors.hints.fg = 'yellow'
expected = 'magenta' if delete or not update else 'yellow'
assert obj.rendered_stylesheet == expected
|
import os.path as op
from mne import Annotations
from mne.io import read_raw_fif
from mne.preprocessing.eog import find_eog_events
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
event_fname = op.join(data_path, 'test-eve.fif')
proj_fname = op.join(data_path, 'test-proj.fif')
def test_find_eog():
"""Test find EOG peaks."""
raw = read_raw_fif(raw_fname)
raw.set_annotations(Annotations([14, 21], [1, 1], 'BAD_blink'))
events = find_eog_events(raw)
assert len(events) == 4
assert not all(events[:, 0] < 29000)
events = find_eog_events(raw, reject_by_annotation=True)
assert all(events[:, 0] < 29000)
# threshold option
events_thr = find_eog_events(raw, thresh=100e-6)
assert len(events_thr) == 5
|
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DATA_CONFIG_ENTRIES, DATA_DEVICE, DATA_UPDATED, DOMAIN, YeelightEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up Yeelight from a config entry."""
device = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][config_entry.entry_id][DATA_DEVICE]
if device.is_nightlight_supported:
_LOGGER.debug("Adding nightlight mode sensor for %s", device.name)
async_add_entities([YeelightNightlightModeSensor(device, config_entry)])
class YeelightNightlightModeSensor(YeelightEntity, BinarySensorEntity):
"""Representation of a Yeelight nightlight mode sensor."""
async def async_added_to_hass(self):
"""Handle entity which will be added."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
DATA_UPDATED.format(self._device.host),
self.async_write_ha_state,
)
)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._device.name} nightlight"
@property
def is_on(self):
"""Return true if nightlight mode is on."""
return self._device.is_nightlight_enabled
|
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_RESOURCE,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BAD_REQUEST,
HTTP_OK,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
CONF_BODY_OFF = "body_off"
CONF_BODY_ON = "body_on"
CONF_IS_ON_TEMPLATE = "is_on_template"
CONF_STATE_RESOURCE = "state_resource"
DEFAULT_METHOD = "post"
DEFAULT_BODY_OFF = "OFF"
DEFAULT_BODY_ON = "ON"
DEFAULT_NAME = "REST Switch"
DEFAULT_TIMEOUT = 10
DEFAULT_VERIFY_SSL = True
SUPPORT_REST_METHODS = ["post", "put"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_STATE_RESOURCE): cv.url,
vol.Optional(CONF_HEADERS): {cv.string: cv.string},
vol.Optional(CONF_BODY_OFF, default=DEFAULT_BODY_OFF): cv.template,
vol.Optional(CONF_BODY_ON, default=DEFAULT_BODY_ON): cv.template,
vol.Optional(CONF_IS_ON_TEMPLATE): cv.template,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.All(
vol.Lower, vol.In(SUPPORT_REST_METHODS)
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
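# A configuration.yaml entry this schema would accept (illustrative only; the
# resource URL and template are placeholders, not defaults of this platform):
#
# switch:
#   - platform: rest
#     resource: http://192.168.1.10/relay
#     body_on: "ON"
#     body_off: "OFF"
#     is_on_template: "{{ value_json.is_active }}"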
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the RESTful switch."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
body_off = config.get(CONF_BODY_OFF)
body_on = config.get(CONF_BODY_ON)
is_on_template = config.get(CONF_IS_ON_TEMPLATE)
method = config.get(CONF_METHOD)
headers = config.get(CONF_HEADERS)
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
resource = config.get(CONF_RESOURCE)
state_resource = config.get(CONF_STATE_RESOURCE) or resource
verify_ssl = config.get(CONF_VERIFY_SSL)
auth = None
if username:
auth = aiohttp.BasicAuth(username, password=password)
if is_on_template is not None:
is_on_template.hass = hass
if body_on is not None:
body_on.hass = hass
if body_off is not None:
body_off.hass = hass
timeout = config.get(CONF_TIMEOUT)
try:
switch = RestSwitch(
name,
resource,
state_resource,
method,
headers,
auth,
body_on,
body_off,
is_on_template,
timeout,
verify_ssl,
)
req = await switch.get_device_state(hass)
if req.status >= HTTP_BAD_REQUEST:
_LOGGER.error("Got non-ok response from resource: %s", req.status)
else:
async_add_entities([switch])
except (TypeError, ValueError):
_LOGGER.error(
"Missing resource or schema in configuration. "
"Add http:// or https:// to your URL"
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("No route to resource/endpoint: %s", resource)
class RestSwitch(SwitchEntity):
"""Representation of a switch that can be toggled using REST."""
def __init__(
self,
name,
resource,
state_resource,
method,
headers,
auth,
body_on,
body_off,
is_on_template,
timeout,
verify_ssl,
):
"""Initialize the REST switch."""
self._state = None
self._name = name
self._resource = resource
self._state_resource = state_resource
self._method = method
self._headers = headers
self._auth = auth
self._body_on = body_on
self._body_off = body_off
self._is_on_template = is_on_template
self._timeout = timeout
self._verify_ssl = verify_ssl
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
body_on_t = self._body_on.async_render(parse_result=False)
try:
req = await self.set_device_state(body_on_t)
if req.status == HTTP_OK:
self._state = True
else:
_LOGGER.error(
"Can't turn on %s. Is resource/endpoint offline?", self._resource
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Error while switching on %s", self._resource)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
body_off_t = self._body_off.async_render(parse_result=False)
try:
req = await self.set_device_state(body_off_t)
if req.status == HTTP_OK:
self._state = False
else:
_LOGGER.error(
"Can't turn off %s. Is resource/endpoint offline?", self._resource
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Error while switching off %s", self._resource)
async def set_device_state(self, body):
"""Send a state update to the device."""
websession = async_get_clientsession(self.hass, self._verify_ssl)
with async_timeout.timeout(self._timeout):
req = await getattr(websession, self._method)(
self._resource,
auth=self._auth,
data=bytes(body, "utf-8"),
headers=self._headers,
)
return req
async def async_update(self):
"""Get the current state, catching errors."""
try:
await self.get_device_state(self.hass)
except asyncio.TimeoutError:
_LOGGER.exception("Timed out while fetching data")
except aiohttp.ClientError as err:
_LOGGER.exception("Error while fetching data: %s", err)
async def get_device_state(self, hass):
"""Get the latest data from REST API and update the state."""
websession = async_get_clientsession(hass, self._verify_ssl)
with async_timeout.timeout(self._timeout):
req = await websession.get(
self._state_resource, auth=self._auth, headers=self._headers
)
text = await req.text()
if self._is_on_template is not None:
text = self._is_on_template.async_render_with_possible_json_value(
text, "None"
)
text = text.lower()
if text == "true":
self._state = True
elif text == "false":
self._state = False
else:
self._state = None
else:
if text == self._body_on.template:
self._state = True
elif text == self._body_off.template:
self._state = False
else:
self._state = None
return req
|
from pydexcom import AccountError, SessionError
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.dexcom.const import DOMAIN, MG_DL, MMOL_L
from homeassistant.const import CONF_UNIT_OF_MEASUREMENT, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
from tests.components.dexcom import CONFIG
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom.create_session",
return_value="test_session_id",
), patch(
"homeassistant.components.dexcom.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.dexcom.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == CONFIG[CONF_USERNAME]
assert result2["data"] == CONFIG
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_account_error(hass):
"""Test we handle account error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom",
side_effect=AccountError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_session_error(hass):
"""Test we handle session error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom",
side_effect=SessionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass):
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.dexcom.config_flow.Dexcom",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_option_flow_default(hass):
"""Test config flow options."""
entry = MockConfigEntry(
domain=DOMAIN,
data=CONFIG,
options=None,
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_UNIT_OF_MEASUREMENT: MG_DL,
}
async def test_option_flow(hass):
"""Test config flow options."""
entry = MockConfigEntry(
domain=DOMAIN,
data=CONFIG,
options={CONF_UNIT_OF_MEASUREMENT: MG_DL},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_UNIT_OF_MEASUREMENT: MMOL_L},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_UNIT_OF_MEASUREMENT: MMOL_L,
}
|
import logging
import os
from absl import flags
import blinker
from perfkitbenchmarker import data
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
_events = blinker.Namespace()
initialization_complete = _events.signal('system-ready', doc="""
Signal sent once after the system is initialized (command-line flags
parsed, temporary directory initialized, run_uri set).
Sender: None
Payload: parsed_flags, the parsed FLAGS object.""")
provider_imported = _events.signal('provider-imported', doc="""
Signal sent after a cloud provider's modules have been imported.
Sender: string. Cloud provider name chosen from providers.VALID_CLOUDS.""")
benchmark_start = _events.signal('benchmark-start', doc="""
Signal sent at the beginning of a benchmark before any resources are
provisioned.
Sender: None
Payload: benchmark_spec.""")
on_vm_startup = _events.signal('on-vm-startup', doc="""
Signal sent on vm startup.
Sender: None
Payload: vm (VirtualMachine object).""")
benchmark_end = _events.signal('benchmark-end', doc="""
Signal sent at the end of a benchmark after any resources have been
torn down (if run_stage includes teardown).
Sender: None
Payload: benchmark_spec.""")
RUN_PHASE = 'run'
before_phase = _events.signal('before-phase', doc="""
Signal sent immediately before a phase runs.
Sender: the phase. Currently only RUN_PHASE.
Payload: benchmark_spec.""")
after_phase = _events.signal('after-phase', doc="""
Signal sent immediately after a phase runs, regardless of whether it was
successful.
Sender: the phase. Currently only RUN_PHASE.
Payload: benchmark_spec.""")
samples_created = _events.signal('samples-created', doc="""
Signal sent immediately after samples are created, with the samples list and
the benchmark spec.
The samples' metadata is mutable, and may be updated by the subscriber.
Sender: the phase. Currently only RUN_PHASE.
Payload: benchmark_spec (BenchmarkSpec), samples (list of sample.Sample).""")
record_event = _events.signal('record-event', doc="""
Signal sent after an event has occurred, recording the event's start
timestamp, end timestamp, and metadata for analysis.
Sender: None
Payload: event (string), start_timestamp (float), end_timestamp (float),
metadata (dict).""")
def RegisterTracingEvents():
record_event.connect(AddEvent, weak=False)
class TracingEvent(object):
"""Represents an event object.
Attributes:
sender: string. Name of the sending class/object.
event: string. Name of the event.
start_timestamp: float. Represents the start timestamp of the event.
end_timestamp: float. Represents the end timestamp of the event.
metadata: dict. Additional metadata of the event.
"""
events = []
def __init__(self, sender, event, start_timestamp, end_timestamp, metadata):
self.sender = sender
self.event = event
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.metadata = metadata
def AddEvent(sender, event, start_timestamp, end_timestamp, metadata):
"""Record a TracingEvent."""
TracingEvent.events.append(
TracingEvent(sender, event, start_timestamp, end_timestamp, metadata))
@on_vm_startup.connect
def _RunStartupScript(unused_sender, vm):
"""Run startup script if necessary."""
if FLAGS.startup_script:
vm.RemoteCopy(data.ResourcePath(FLAGS.startup_script))
vm.startup_script_output = vm.RemoteCommand(
'./%s' % os.path.basename(FLAGS.startup_script))
@samples_created.connect
def _AddScriptSamples(unused_sender, benchmark_spec, samples):
def _ScriptResultToMetadata(out):
return {'stdout': out[0], 'stderr': out[1]}
for vm in benchmark_spec.vms:
if FLAGS.startup_script:
samples.append(sample.Sample(
'startup', 0, '', _ScriptResultToMetadata(vm.startup_script_output)))
if FLAGS.postrun_script:
samples.append(sample.Sample(
'postrun', 0, '', _ScriptResultToMetadata(vm.postrun_script_output)))
@after_phase.connect
def _RunPostRunScript(sender, benchmark_spec):
if sender != RUN_PHASE:
logging.info(
'Received after_phase signal from %s; not '
'triggering _RunPostRunScript.', sender)
return
if FLAGS.postrun_script:
for vm in benchmark_spec.vms:
vm.RemoteCopy(FLAGS.postrun_script)
vm.postrun_script_output = vm.RemoteCommand(
'./%s' % os.path.basename(FLAGS.postrun_script))
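# Minimal sketch (not part of the original module) showing the sending side of
# the record_event signal documented above; the event name and metadata are
# placeholders.
if __name__ == '__main__':
    import time

    RegisterTracingEvents()  # connects AddEvent as a receiver
    _start = time.time()
    record_event.send(
        None, event='example-event', start_timestamp=_start,
        end_timestamp=time.time(), metadata={'note': 'demo'})
    assert TracingEvent.events and TracingEvent.events[0].event == 'example-event'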
|
from pytest import mark
from cerberus import errors, Validator
from cerberus.tests import assert_normalized
def must_not_be_called(*args, **kwargs):
raise AssertionError('This shall not be called.')
@mark.parametrize(
'default', ({'default': 'bar_value'}, {'default_setter': lambda doc: 'bar_value'})
)
def test_default_in_schema_with_missing_value(default):
assert_normalized(
schema={
'thing': {
'type': 'dict',
'schema': {
'foo': {'type': 'string'},
'bar': {'type': 'string', **default},
},
}
},
document={'thing': {'foo': 'foo_value'}},
expected={'thing': {'foo': 'foo_value', 'bar': 'bar_value'}},
)
def test_default_setters_with_document_reference():
assert_normalized(
schema={
'a': {'type': 'integer'},
'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1},
'c': {'type': 'integer', 'default_setter': lambda d: d['b'] * 2},
'd': {'type': 'integer', 'default_setter': lambda d: d['b'] + d['c']},
},
document={'a': 1},
expected={'a': 1, 'b': 2, 'c': 4, 'd': 6},
)
def test_default_setters_with_circular_document_reference(validator):
validator(
document={},
schema={
'a': {'default_setter': lambda d: d['b'] + 1},
'b': {'default_setter': lambda d: d['a'] + 1},
},
)
assert errors.SETTING_DEFAULT_FAILED in validator._errors
@mark.parametrize(
'default', ({'default': 'bar_value'}, {'default_setter': must_not_be_called})
)
def test_default_with_existing_value(default):
assert_normalized(
schema={'foo': {'type': 'string'}, 'bar': {'type': 'string', **default}},
document={'foo': 'foo_value', 'bar': 'non_default'},
expected={'foo': 'foo_value', 'bar': 'non_default'},
)
@mark.parametrize(
'default', ({'default': 'bar_value'}, {'default_setter': lambda doc: 'bar_value'})
)
def test_default_with_missing_value(default):
assert_normalized(
schema={'foo': {'type': 'string'}, 'bar': {'type': 'string', **default}},
document={'foo': 'foo_value'},
expected={'foo': 'foo_value', 'bar': 'bar_value'},
)
@mark.parametrize(
'default', ({'default': 'bar_value'}, {'default_setter': lambda doc: 'bar_value'})
)
def test_default_with_non_nullable_field(default):
assert_normalized(
schema={
'foo': {'type': 'string'},
'bar': {'type': 'string', 'nullable': False, **default},
},
document={'foo': 'foo_value', 'bar': None},
expected={'foo': 'foo_value', 'bar': 'bar_value'},
)
def test_default_with_none_as_value_on_nullable_field():
assert_normalized(
schema={
'foo': {'type': 'string'},
'bar': {'type': 'string', 'nullable': True, 'default': None},
},
document={'foo': 'foo_value'},
expected={'foo': 'foo_value', 'bar': None},
)
@mark.parametrize(
'default', ({'default': 'bar_value'}, {'default_setter': must_not_be_called})
)
def test_default_with_nullable_field(default):
assert_normalized(
schema={
'foo': {'type': 'string'},
'bar': {'type': 'string', 'nullable': True, **default},
},
document={'foo': 'foo_value', 'bar': None},
expected={'foo': 'foo_value', 'bar': None},
)
@mark.parametrize(
"default",
[{'default': 'cfg.yaml'}, {'default_setter': lambda document: 'cfg.yaml'}],
)
def test_default_in_schema_in_allow_unknown(default):
validator = Validator(
allow_unknown={
'type': 'dict',
'schema': {
'cfg_path': {'type': 'string', **default},
'package': {'type': 'string'},
},
}
)
assert_normalized(
schema={'meta': {'type': 'dict'}, 'version': {'type': 'string'}},
document={'version': '1.2.3', 'plugin_foo': {'package': 'foo'}},
expected={
'version': '1.2.3',
'plugin_foo': {'package': 'foo', 'cfg_path': 'cfg.yaml'},
},
validator=validator,
)
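# A minimal sketch of the behaviour exercised above, using the public Cerberus API
# directly (schema and values below are illustrative, not taken from the suite):
# during normalization a missing key picks up its 'default' / 'default_setter'
# value, while a key that is already present is left untouched.
#
#     from cerberus import Validator
#     v = Validator({'bar': {'type': 'string', 'default': 'bar_value'}})
#     assert v.normalized({}) == {'bar': 'bar_value'}
#     assert v.normalized({'bar': 'kept'}) == {'bar': 'kept'}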
|
from unittest import TestCase
from scattertext.CorpusFromScikit import CorpusFromScikit
class TestCorpusFromScikit(TestCase):
def test_main(self):
# omitting for travis ci
pass
def _te_ss_t_build(self):
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
count_vectorizer = CountVectorizer()
X_counts = count_vectorizer.fit_transform(newsgroups_train.data)
corpus = CorpusFromScikit(
X=X_counts,
y=newsgroups_train.target,
feature_vocabulary=count_vectorizer.vocabulary_,
category_names=newsgroups_train.target_names,
raw_texts=newsgroups_train.data
).build()
self.assertEqual(corpus.get_categories()[:2], ['alt.atheism', 'comp.graphics'])
self.assertEqual(corpus
.get_term_freq_df()
.assign(score=corpus.get_scaled_f_scores('alt.atheism'))
.sort_values(by='score', ascending=False).index.tolist()[:5],
['atheism', 'atheists', 'islam', 'atheist', 'belief'])
self.assertGreater(len(corpus.get_texts()[0]), 5)
|
import pytest
from coverage.misc import contract, dummy_decorator_with_args, file_be_gone
from coverage.misc import Hasher, one_of, substitute_variables
from coverage.misc import CoverageException, USE_CONTRACTS
from tests.coveragetest import CoverageTest
class HasherTest(CoverageTest):
"""Test our wrapper of md5 hashing."""
run_in_temp_dir = False
def test_string_hashing(self):
h1 = Hasher()
h1.update("Hello, world!")
h2 = Hasher()
h2.update("Goodbye!")
h3 = Hasher()
h3.update("Hello, world!")
self.assertNotEqual(h1.hexdigest(), h2.hexdigest())
self.assertEqual(h1.hexdigest(), h3.hexdigest())
def test_bytes_hashing(self):
h1 = Hasher()
h1.update(b"Hello, world!")
h2 = Hasher()
h2.update(b"Goodbye!")
self.assertNotEqual(h1.hexdigest(), h2.hexdigest())
def test_unicode_hashing(self):
h1 = Hasher()
h1.update(u"Hello, world! \N{SNOWMAN}")
h2 = Hasher()
h2.update(u"Goodbye!")
self.assertNotEqual(h1.hexdigest(), h2.hexdigest())
def test_dict_hashing(self):
h1 = Hasher()
h1.update({'a': 17, 'b': 23})
h2 = Hasher()
h2.update({'b': 23, 'a': 17})
self.assertEqual(h1.hexdigest(), h2.hexdigest())
def test_dict_collision(self):
h1 = Hasher()
h1.update({'a': 17, 'b': {'c': 1, 'd': 2}})
h2 = Hasher()
h2.update({'a': 17, 'b': {'c': 1}, 'd': 2})
self.assertNotEqual(h1.hexdigest(), h2.hexdigest())
class RemoveFileTest(CoverageTest):
"""Tests of misc.file_be_gone."""
def test_remove_nonexistent_file(self):
# It's OK to try to remove a file that doesn't exist.
file_be_gone("not_here.txt")
def test_remove_actual_file(self):
# It really does remove a file that does exist.
self.make_file("here.txt", "We are here, we are here, we are here!")
file_be_gone("here.txt")
self.assert_doesnt_exist("here.txt")
def test_actual_errors(self):
# Errors can still happen.
# ". is a directory" on Unix, or "Access denied" on Windows
with self.assertRaises(OSError):
file_be_gone(".")
class ContractTest(CoverageTest):
"""Tests of our contract decorators."""
run_in_temp_dir = False
def setUp(self):
super(ContractTest, self).setUp()
if not USE_CONTRACTS:
self.skipTest("Contracts are disabled")
def test_bytes(self):
@contract(text='bytes|None')
def need_bytes(text=None):
return text
assert need_bytes(b"Hey") == b"Hey"
assert need_bytes() is None
with pytest.raises(Exception):
need_bytes(u"Oops")
def test_unicode(self):
@contract(text='unicode|None')
def need_unicode(text=None):
return text
assert need_unicode(u"Hey") == u"Hey"
assert need_unicode() is None
with pytest.raises(Exception):
need_unicode(b"Oops")
def test_one_of(self):
@one_of("a, b, c")
def give_me_one(a=None, b=None, c=None):
return (a, b, c)
assert give_me_one(a=17) == (17, None, None)
assert give_me_one(b=set()) == (None, set(), None)
assert give_me_one(c=17) == (None, None, 17)
with pytest.raises(AssertionError):
give_me_one(a=17, b=set())
with pytest.raises(AssertionError):
give_me_one()
def test_dummy_decorator_with_args(self):
@dummy_decorator_with_args("anything", this=17, that="is fine")
def undecorated(a=None, b=None):
return (a, b)
assert undecorated() == (None, None)
assert undecorated(17) == (17, None)
assert undecorated(b=23) == (None, 23)
assert undecorated(b=42, a=3) == (3, 42)
VARS = {
'FOO': 'fooey',
'BAR': 'xyzzy',
}
@pytest.mark.parametrize("before, after", [
("Nothing to do", "Nothing to do"),
("Dollar: $$", "Dollar: $"),
("Simple: $FOO is fooey", "Simple: fooey is fooey"),
("Braced: X${FOO}X.", "Braced: XfooeyX."),
("Missing: x${NOTHING}y is xy", "Missing: xy is xy"),
("Multiple: $$ $FOO $BAR ${FOO}", "Multiple: $ fooey xyzzy fooey"),
("Ill-formed: ${%5} ${{HI}} ${", "Ill-formed: ${%5} ${{HI}} ${"),
("Strict: ${FOO?} is there", "Strict: fooey is there"),
("Defaulted: ${WUT-missing}!", "Defaulted: missing!"),
("Defaulted empty: ${WUT-}!", "Defaulted empty: !"),
])
def test_substitute_variables(before, after):
assert substitute_variables(before, VARS) == after
@pytest.mark.parametrize("text", [
"Strict: ${NOTHING?} is an error",
])
def test_substitute_variables_errors(text):
with pytest.raises(CoverageException) as exc_info:
substitute_variables(text, VARS)
assert text in str(exc_info.value)
assert "Variable NOTHING is undefined" in str(exc_info.value)
|
from __future__ import print_function
import random
from datetime import datetime as dt
from multiprocessing.pool import ThreadPool
import numpy as np
import pandas as pd
import arctic._compression as c
from arctic.serialization.numpy_records import DataFrameSerializer
c.enable_parallel_lz4(True)
c.BENCHMARK_MODE = True
def get_random_df(nrows, ncols):
ret_df = pd.DataFrame(np.random.randn(nrows, ncols),
index=pd.date_range('20170101',
periods=nrows, freq='S'),
columns=["".join([chr(random.randint(ord('A'), ord('Z'))) for _ in range(8)]) for _ in
range(ncols)])
ret_df.index.name = 'index'
ret_df.index = ret_df.index.tz_localize('UTC')
return ret_df
def construct_test_data(df_length, append_mul):
serializer = DataFrameSerializer()
tmp_df = get_random_df(df_length, 10)
recs = serializer.serialize(tmp_df)[0]
_str = recs.tostring()
if append_mul > 1:
_str = "".join([_str] * append_mul)
return _str
def bench_compression_comparison(n_chunks, df_length, append_mul, pool_size, pool_step, repeats,
use_raw_lz4, use_HC):
_str = construct_test_data(df_length, append_mul)
chunk_size = len(_str) / 1024 ** 2.0
_strarr = [_str] * n_chunks
# Single threaded
# ---------------
measurements = bench_single(repeats, _strarr, use_HC)
print_results(1, chunk_size, n_chunks, chunk_size*n_chunks, measurements)
single_mean = np.mean(measurements)
# Multi-threaded
# --------------
for sz in range(2, pool_size + 1, pool_step):
if use_raw_lz4:
pool = ThreadPool(sz)
else:
pool = None
c.set_compression_pool_size(sz)
measurements = bench_multi(repeats, _strarr, use_HC, pool=pool)
print_results(sz, chunk_size, n_chunks, chunk_size * n_chunks, measurements, compare=single_mean)
if pool:
pool.close()
pool.join()
print("")
def bench_single(repeats, _strarr, use_HC):
# Arctic compress single
measurements = []
for i in range(repeats):
now = dt.now()
if use_HC:
res = [c.compressHC(x) for x in _strarr]
else:
res = [c.compress(x) for x in _strarr]
sample = (dt.now() - now).total_seconds()
assert all(res)
measurements.append(sample)
return measurements
def bench_multi(repeats, _strarr, use_HC, pool=None):
measurements = []
for j in range(repeats):
now = dt.now()
if pool:
# Raw LZ4 lib
if use_HC:
res = pool.map(c.lz4_compressHC, _strarr)
else:
res = pool.map(c.lz4_compress, _strarr)
else:
# Arctic's compression layer
if use_HC:
res = c.compressHC_array(_strarr)
else:
res = c.compress_array(_strarr, withHC=False)
sample = (dt.now() - now).total_seconds()
assert len(res) == len(_strarr)
assert all(res)
measurements.append(sample)
return measurements
def print_results(n_threads, chunk_size, n_chunks, total_mb, measurements, compare=None):
mymean = np.mean(measurements)
xfaster = (compare/float(mymean)) if compare is not None else 0
measurements = n_threads, chunk_size, n_chunks, total_mb, \
mymean, np.min(measurements), np.max(measurements), np.std(measurements), \
("{:.2f}x faster than single threaded".format(xfaster) if xfaster > 1 else "")
print("(x{:<3}threads) ({:.1f} MB/chunk, x{:<4} chunks, total {:.1f} MB) \t "
"mean={:.6f} min={:.6f} max={:.6f} std={:.8f} {}".format(*measurements))
def main():
use_HC = False
for df_length in (1000, 3000, 10000, 30000):
for n_chunks in (1, 2, 4, 8, 16, 32, 64, 128):
print("\n\n----------- High compression: {}, Chunks: {}, DataFrame size: {} ------------".format(use_HC, n_chunks, df_length))
bench_compression_comparison(
n_chunks=n_chunks,
df_length=df_length,
append_mul=1,
pool_size=10,
pool_step=2,
repeats=30,
use_raw_lz4=False,
use_HC=use_HC)
if __name__ == '__main__':
main()
|
import asyncio
from functools import partial, wraps
import inspect
import logging
import logging.handlers
import queue
import traceback
from typing import Any, Callable, Coroutine
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.core import HomeAssistant, callback
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text: str) -> None:
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record: logging.LogRecord) -> bool:
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, "*******")
return True
class HomeAssistantQueueHandler(logging.handlers.QueueHandler):
"""Process the log in another thread."""
def emit(self, record: logging.LogRecord) -> None:
"""Emit a log record."""
try:
self.enqueue(record)
except asyncio.CancelledError:
raise
except Exception: # pylint: disable=broad-except
self.handleError(record)
def handle(self, record: logging.LogRecord) -> Any:
"""
Conditionally emit the specified logging record.
Depending on which filters have been added to the handler, the record
may be pushed onto the backing Queue.
The default Python logging Handler acquires a lock
in its parent class, which we do not need here because
SimpleQueue is already thread-safe.
See https://bugs.python.org/issue24645
"""
return_value = self.filter(record)
if return_value:
self.emit(record)
return return_value
@callback
def async_activate_log_queue_handler(hass: HomeAssistant) -> None:
"""
Migrate the existing log handlers to use the queue.
This allows us to avoid blocking I/O and formatting messages
in the event loop as log messages are written in another thread.
"""
simple_queue = queue.SimpleQueue() # type: ignore
queue_handler = HomeAssistantQueueHandler(simple_queue)
logging.root.addHandler(queue_handler)
migrated_handlers = []
for handler in logging.root.handlers[:]:
if handler is queue_handler:
continue
logging.root.removeHandler(handler)
migrated_handlers.append(handler)
listener = logging.handlers.QueueListener(simple_queue, *migrated_handlers)
listener.start()
@callback
def _async_stop_queue_handler(_: Any) -> None:
"""Cleanup handler."""
logging.root.removeHandler(queue_handler)
listener.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_stop_queue_handler)
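# Usage sketch (illustrative): this is expected to be called once during startup,
# after the regular logging handlers have been configured, e.g.
#     async_activate_log_queue_handler(hass)
# after which records flow root logger -> SimpleQueue -> QueueListener thread,
# keeping formatting and I/O out of the event loop.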
def log_exception(format_err: Callable[..., Any], *args: Any) -> None:
"""Log an exception with additional context."""
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the source files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/core/issues/24982
module_name = __name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
def catch_log_exception(
func: Callable[..., Any], format_err: Callable[..., Any], *args: Any
) -> Callable[[], None]:
"""Decorate a callback to catch and log exceptions."""
# Check for partials to properly determine if coroutine function
check_func = func
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
await func(*args)
except Exception: # pylint: disable=broad-except
log_exception(format_err, *args)
wrapper_func = async_wrapper
else:
@wraps(func)
def wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
func(*args)
except Exception: # pylint: disable=broad-except
log_exception(format_err, *args)
wrapper_func = wrapper
return wrapper_func
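# Usage sketch (callback and formatter below are hypothetical, not part of this module):
#     async def handle_event(event):
#         ...
#     safe_handler = catch_log_exception(
#         handle_event, lambda *args: "Error while handling {}".format(args))
# Exceptions raised by the wrapped callable are logged instead of propagating.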
def catch_log_coro_exception(
target: Coroutine[Any, Any, Any], format_err: Callable[..., Any], *args: Any
) -> Coroutine[Any, Any, Any]:
"""Decorate a coroutine to catch and log exceptions."""
async def coro_wrapper(*args: Any) -> Any:
"""Catch and log exception."""
try:
return await target
except Exception: # pylint: disable=broad-except
log_exception(format_err, *args)
return None
return coro_wrapper()
def async_create_catching_coro(target: Coroutine) -> Coroutine:
"""Wrap a coroutine to catch and log exceptions.
The exception will be logged together with a stacktrace of where the
coroutine was wrapped.
target: target coroutine.
"""
trace = traceback.extract_stack()
wrapped_target = catch_log_coro_exception(
target,
lambda *args: "Exception in {} called from\n {}".format(
target.__name__,
"".join(traceback.format_list(trace[:-1])),
),
)
return wrapped_target
|
from pathlib import Path
from aiohttp import web
from aiohttp.hdrs import (
ACCESS_CONTROL_ALLOW_HEADERS,
ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_REQUEST_HEADERS,
ACCESS_CONTROL_REQUEST_METHOD,
AUTHORIZATION,
ORIGIN,
)
import pytest
from homeassistant.components.http.cors import setup_cors
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.setup import async_setup_component
from . import HTTP_HEADER_HA_AUTH
from tests.async_mock import patch
TRUSTED_ORIGIN = "https://home-assistant.io"
async def test_cors_middleware_loaded_by_default(hass):
"""Test accessing to server from banned IP when feature is off."""
with patch("homeassistant.components.http.setup_cors") as mock_setup:
await async_setup_component(hass, "http", {"http": {}})
assert len(mock_setup.mock_calls) == 1
async def test_cors_middleware_loaded_from_config(hass):
"""Test accessing to server from banned IP when feature is off."""
with patch("homeassistant.components.http.setup_cors") as mock_setup:
await async_setup_component(
hass,
"http",
{"http": {"cors_allowed_origins": ["http://home-assistant.io"]}},
)
assert len(mock_setup.mock_calls) == 1
async def mock_handler(request):
"""Return if request was authenticated."""
return web.Response(status=200)
@pytest.fixture
def client(loop, aiohttp_client):
"""Fixture to set up a web.Application."""
app = web.Application()
app.router.add_get("/", mock_handler)
setup_cors(app, [TRUSTED_ORIGIN])
return loop.run_until_complete(aiohttp_client(app))
async def test_cors_requests(client):
"""Test cross origin requests."""
req = await client.get("/", headers={ORIGIN: TRUSTED_ORIGIN})
assert req.status == 200
assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN
# With password in URL
req = await client.get(
"/", params={"api_password": "some-pass"}, headers={ORIGIN: TRUSTED_ORIGIN}
)
assert req.status == 200
assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN
# With password in headers
req = await client.get(
"/", headers={HTTP_HEADER_HA_AUTH: "some-pass", ORIGIN: TRUSTED_ORIGIN}
)
assert req.status == 200
assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN
# With auth token in headers
req = await client.get(
"/", headers={AUTHORIZATION: "Bearer some-token", ORIGIN: TRUSTED_ORIGIN}
)
assert req.status == 200
assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN
async def test_cors_preflight_allowed(client):
"""Test cross origin resource sharing preflight (OPTIONS) request."""
req = await client.options(
"/",
headers={
ORIGIN: TRUSTED_ORIGIN,
ACCESS_CONTROL_REQUEST_METHOD: "GET",
ACCESS_CONTROL_REQUEST_HEADERS: "x-requested-with",
},
)
assert req.status == 200
assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN
assert req.headers[ACCESS_CONTROL_ALLOW_HEADERS] == "X-REQUESTED-WITH"
async def test_cors_middleware_with_cors_allowed_view(hass):
"""Test that we can configure cors and have a cors_allowed view."""
class MyView(HomeAssistantView):
"""Test view that allows CORS."""
requires_auth = False
cors_allowed = True
def __init__(self, url, name):
"""Initialize test view."""
self.url = url
self.name = name
async def get(self, request):
"""Test response."""
return "test"
assert await async_setup_component(
hass, "http", {"http": {"cors_allowed_origins": ["http://home-assistant.io"]}}
)
hass.http.register_view(MyView("/api/test", "api:test"))
hass.http.register_view(MyView("/api/test", "api:test2"))
hass.http.register_view(MyView("/api/test2", "api:test"))
hass.http.app._on_startup.freeze()
await hass.http.app.startup()
async def test_cors_works_with_frontend(hass, hass_client):
"""Test CORS works with the frontend."""
assert await async_setup_component(
hass,
"frontend",
{"http": {"cors_allowed_origins": ["http://home-assistant.io"]}},
)
client = await hass_client()
resp = await client.get("/")
assert resp.status == 200
async def test_cors_on_static_files(hass, hass_client):
"""Test that we enable CORS for static files."""
assert await async_setup_component(
hass, "frontend", {"http": {"cors_allowed_origins": ["http://www.example.com"]}}
)
hass.http.register_static_path("/something", str(Path(__file__).parent))
client = await hass_client()
resp = await client.options(
"/something/__init__.py",
headers={
"origin": "http://www.example.com",
ACCESS_CONTROL_REQUEST_METHOD: "GET",
},
)
assert resp.status == 200
assert resp.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == "http://www.example.com"
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
import sys
import types
from time import clock, time
from inspect import isgeneratorfunction, getargspec
from logilab.common.compat import method_type
# XXX rewrite so we can use the decorator syntax when keyarg has to be specified
class cached_decorator(object):
def __init__(self, cacheattr=None, keyarg=None):
self.cacheattr = cacheattr
self.keyarg = keyarg
def __call__(self, callableobj=None):
assert not isgeneratorfunction(callableobj), \
'cannot cache generator function: %s' % callableobj
if len(getargspec(callableobj).args) == 1 or self.keyarg == 0:
cache = _SingleValueCache(callableobj, self.cacheattr)
elif self.keyarg:
cache = _MultiValuesKeyArgCache(callableobj, self.keyarg, self.cacheattr)
else:
cache = _MultiValuesCache(callableobj, self.cacheattr)
return cache.closure()
class _SingleValueCache(object):
def __init__(self, callableobj, cacheattr=None):
self.callable = callableobj
if cacheattr is None:
self.cacheattr = '_%s_cache_' % callableobj.__name__
else:
assert cacheattr != callableobj.__name__
self.cacheattr = cacheattr
def __call__(__me, self, *args):
try:
return self.__dict__[__me.cacheattr]
except KeyError:
value = __me.callable(self, *args)
setattr(self, __me.cacheattr, value)
return value
def closure(self):
def wrapped(*args, **kwargs):
return self.__call__(*args, **kwargs)
wrapped.cache_obj = self
try:
wrapped.__doc__ = self.callable.__doc__
wrapped.__name__ = self.callable.__name__
except:
pass
return wrapped
def clear(self, holder):
holder.__dict__.pop(self.cacheattr, None)
class _MultiValuesCache(_SingleValueCache):
def _get_cache(self, holder):
try:
_cache = holder.__dict__[self.cacheattr]
except KeyError:
_cache = {}
setattr(holder, self.cacheattr, _cache)
return _cache
def __call__(__me, self, *args, **kwargs):
_cache = __me._get_cache(self)
try:
return _cache[args]
except KeyError:
_cache[args] = __me.callable(self, *args)
return _cache[args]
class _MultiValuesKeyArgCache(_MultiValuesCache):
def __init__(self, callableobj, keyarg, cacheattr=None):
super(_MultiValuesKeyArgCache, self).__init__(callableobj, cacheattr)
self.keyarg = keyarg
def __call__(__me, self, *args, **kwargs):
_cache = __me._get_cache(self)
key = args[__me.keyarg-1]
try:
return _cache[key]
except KeyError:
_cache[key] = __me.callable(self, *args, **kwargs)
return _cache[key]
def cached(callableobj=None, keyarg=None, **kwargs):
"""Simple decorator to cache result of method call."""
kwargs['keyarg'] = keyarg
decorator = cached_decorator(**kwargs)
if callableobj is None:
return decorator
else:
return decorator(callableobj)
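# A small illustrative sketch (class and helper names are made up):
#     class Client(object):
#         @cached
#         def fetch(self):
#             return expensive_call()        # computed once, then memoized per instance
#         @cached(keyarg=1)
#         def fetch_one(self, name):
#             return expensive_call(name)    # memoized per value of `name`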
class cachedproperty(object):
""" Provides a cached property equivalent to the stacking of
@cached and @property, but more efficient.
After first usage, the <property_name> becomes part of the object's
__dict__. Deleting the attribute with ``del obj.<property_name>`` empties the cache.
Idea taken from the pyramid_ framework and the mercurial_ project.
.. _pyramid: http://pypi.python.org/pypi/pyramid
.. _mercurial: http://pypi.python.org/pypi/Mercurial
"""
__slots__ = ('wrapped',)
def __init__(self, wrapped):
try:
wrapped.__name__
except AttributeError:
raise TypeError('%s must have a __name__ attribute' %
wrapped)
self.wrapped = wrapped
@property
def __doc__(self):
doc = getattr(self.wrapped, '__doc__', None)
return ('<wrapped by the cachedproperty decorator>%s'
% ('\n%s' % doc if doc else ''))
def __get__(self, inst, objtype=None):
if inst is None:
return self
val = self.wrapped(inst)
setattr(inst, self.wrapped.__name__, val)
return val
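# Illustrative sketch: the first access computes the value and stores it in the
# instance's __dict__, so later accesses bypass the descriptor entirely.
#     class Report(object):
#         @cachedproperty
#         def data(self):
#             return compute()    # hypothetical helper; runs only on first access
#     r = Report()
#     r.data                      # computed, then cached on the instance
#     del r.data                  # empties the cache for this instance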
def get_cache_impl(obj, funcname):
cls = obj.__class__
member = getattr(cls, funcname)
if isinstance(member, property):
member = member.fget
return member.cache_obj
def clear_cache(obj, funcname):
"""Clear a cache handled by the :func:`cached` decorator. If 'x' class has
@cached on its method `foo`, type
>>> clear_cache(x, 'foo')
to purge this method's cache on the instance.
"""
get_cache_impl(obj, funcname).clear(obj)
def copy_cache(obj, funcname, cacheobj):
"""Copy cache for <funcname> from cacheobj to obj."""
cacheattr = get_cache_impl(obj, funcname).cacheattr
try:
setattr(obj, cacheattr, cacheobj.__dict__[cacheattr])
except KeyError:
pass
class wproperty(object):
"""Simple descriptor expecting to take a modifier function as first argument
and looking for a _<function name> to retrieve the attribute.
"""
def __init__(self, setfunc):
self.setfunc = setfunc
self.attrname = '_%s' % setfunc.__name__
def __set__(self, obj, value):
self.setfunc(obj, value)
def __get__(self, obj, cls):
assert obj is not None
return getattr(obj, self.attrname)
class classproperty(object):
"""this is a simple property-like class but for class attributes.
"""
def __init__(self, get):
self.get = get
def __get__(self, inst, cls):
return self.get(cls)
class iclassmethod(object):
'''Descriptor for a method which is available as a class method when
accessed on the class, and as an instance method when accessed on an instance.
'''
def __init__(self, func):
self.func = func
def __get__(self, instance, objtype):
if instance is None:
return method_type(self.func, objtype, objtype.__class__)
return method_type(self.func, instance, objtype)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def timed(f):
def wrap(*args, **kwargs):
t = time()
c = clock()
res = f(*args, **kwargs)
print('%s clock: %.9f / time: %.9f' % (f.__name__,
clock() - c, time() - t))
return res
return wrap
def locked(acquire, release):
"""Decorator taking two methods to acquire/release a lock as argument,
returning a decorator function which will call the inner method after
having called acquire(self) et will call release(self) afterwards.
"""
def decorator(f):
def wrapper(self, *args, **kwargs):
acquire(self)
try:
return f(self, *args, **kwargs)
finally:
release(self)
return wrapper
return decorator
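# Illustrative sketch, assuming the instance carries its own threading.Lock:
#     import threading
#     class Counter(object):
#         def __init__(self):
#             self._lock = threading.Lock()
#             self.value = 0
#         @locked(lambda self: self._lock.acquire(), lambda self: self._lock.release())
#         def incr(self):
#             self.value += 1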
def monkeypatch(klass, methodname=None):
"""Decorator extending class with the decorated callable. This is basically
a syntactic sugar vs class assignment.
>>> class A:
... pass
>>> @monkeypatch(A)
... def meth(self):
... return 12
...
>>> a = A()
>>> a.meth()
12
>>> @monkeypatch(A, 'foo')
... def meth(self):
... return 12
...
>>> a.foo()
12
"""
def decorator(func):
try:
name = methodname or func.__name__
except AttributeError:
raise AttributeError('%s has no __name__ attribute: '
'you should provide an explicit `methodname`'
% func)
setattr(klass, name, func)
return func
return decorator
|
from typing import Dict, Sequence
from PyQt5.QtCore import QAbstractItemModel
from qutebrowser.completion.models import (completionmodel, listcategory,
histcategory)
from qutebrowser.browser import history
from qutebrowser.utils import log, objreg
from qutebrowser.config import config
_URLCOL = 0
_TEXTCOL = 1
def _delete_history(data):
urlstr = data[_URLCOL]
log.completion.debug('Deleting history entry {}'.format(urlstr))
history.web_history.delete_url(urlstr)
def _delete_bookmark(data: Sequence[str]) -> None:
urlstr = data[_URLCOL]
log.completion.debug('Deleting bookmark {}'.format(urlstr))
bookmark_manager = objreg.get('bookmark-manager')
bookmark_manager.delete(urlstr)
def _delete_quickmark(data: Sequence[str]) -> None:
name = data[_TEXTCOL]
quickmark_manager = objreg.get('quickmark-manager')
log.completion.debug('Deleting quickmark {}'.format(name))
quickmark_manager.delete(name)
def url(*, info):
"""A model which combines various URLs.
This combines:
- bookmarks
- quickmarks
- search engines
- web history URLs
Used for the `open` command.
"""
model = completionmodel.CompletionModel(column_widths=(40, 50, 10))
# pylint: disable=bad-config-option
quickmarks = [(url, name) for (name, url)
in objreg.get('quickmark-manager').marks.items()]
bookmarks = objreg.get('bookmark-manager').marks.items()
searchengines = [(k, v) for k, v
in sorted(config.val.url.searchengines.items())
if k != 'DEFAULT']
# pylint: enable=bad-config-option
categories = config.val.completion.open_categories
models: Dict[str, QAbstractItemModel] = {}
if searchengines and 'searchengines' in categories:
models['searchengines'] = listcategory.ListCategory(
'Search engines', searchengines, sort=False)
if quickmarks and 'quickmarks' in categories:
models['quickmarks'] = listcategory.ListCategory(
'Quickmarks', quickmarks, delete_func=_delete_quickmark,
sort=False)
if bookmarks and 'bookmarks' in categories:
models['bookmarks'] = listcategory.ListCategory(
'Bookmarks', bookmarks, delete_func=_delete_bookmark, sort=False)
history_disabled = info.config.get('completion.web_history.max_items') == 0
if not history_disabled and 'history' in categories:
hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)
models['history'] = hist_cat
for category in categories:
if category in models:
model.add_category(models[category])
return model
|
import datetime
from kalliope import Utils
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
class Debug(NeuronModule):
def __init__(self, **kwargs):
super(Debug, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
# check if parameters have been provided
if self._is_parameters_ok():
Utils.print_warning("[Debug neuron, %s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
self.message))
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.message is None:
raise MissingParameterException("You must specify a message string or a list of messages as parameter")
return True
|
from datetime import timedelta
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
WeatherEntity,
)
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
import homeassistant.util.dt as dt_util
CONDITION_CLASSES = {
"cloudy": [],
"fog": [],
"hail": [],
"lightning": [],
"lightning-rainy": [],
"partlycloudy": [],
"pouring": [],
"rainy": ["shower rain"],
"snowy": [],
"snowy-rainy": [],
"sunny": ["sunshine"],
"windy": [],
"windy-variant": [],
"exceptional": [],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
setup_platform(hass, {}, async_add_entities)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Demo weather."""
add_entities(
[
DemoWeather(
"South",
"Sunshine",
21.6414,
92,
1099,
0.5,
TEMP_CELSIUS,
[
["rainy", 1, 22, 15, 60],
["rainy", 5, 19, 8, 30],
["cloudy", 0, 15, 9, 10],
["sunny", 0, 12, 6, 0],
["partlycloudy", 2, 14, 7, 20],
["rainy", 15, 18, 7, 0],
["fog", 0.2, 21, 12, 100],
],
),
DemoWeather(
"North",
"Shower rain",
-12,
54,
987,
4.8,
TEMP_FAHRENHEIT,
[
["snowy", 2, -10, -15, 60],
["partlycloudy", 1, -13, -14, 25],
["sunny", 0, -18, -22, 70],
["sunny", 0.1, -23, -23, 90],
["snowy", 4, -19, -20, 40],
["sunny", 0.3, -14, -19, 0],
["sunny", 0, -9, -12, 0],
],
),
]
)
class DemoWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(
self,
name,
condition,
temperature,
humidity,
pressure,
wind_speed,
temperature_unit,
forecast,
):
"""Initialize the Demo weather."""
self._name = name
self._condition = condition
self._temperature = temperature
self._temperature_unit = temperature_unit
self._humidity = humidity
self._pressure = pressure
self._wind_speed = wind_speed
self._forecast = forecast
@property
def name(self):
"""Return the name of the sensor."""
return f"Demo Weather {self._name}"
@property
def should_poll(self):
"""No polling needed for a demo weather condition."""
return False
@property
def temperature(self):
"""Return the temperature."""
return self._temperature
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._temperature_unit
@property
def humidity(self):
"""Return the humidity."""
return self._humidity
@property
def wind_speed(self):
"""Return the wind speed."""
return self._wind_speed
@property
def pressure(self):
"""Return the pressure."""
return self._pressure
@property
def condition(self):
"""Return the weather condition."""
return [
k for k, v in CONDITION_CLASSES.items() if self._condition.lower() in v
][0]
@property
def attribution(self):
"""Return the attribution."""
return "Powered by Home Assistant"
@property
def forecast(self):
"""Return the forecast."""
reftime = dt_util.now().replace(hour=16, minute=00)
forecast_data = []
for entry in self._forecast:
data_dict = {
ATTR_FORECAST_TIME: reftime.isoformat(),
ATTR_FORECAST_CONDITION: entry[0],
ATTR_FORECAST_PRECIPITATION: entry[1],
ATTR_FORECAST_TEMP: entry[2],
ATTR_FORECAST_TEMP_LOW: entry[3],
ATTR_FORECAST_PRECIPITATION_PROBABILITY: entry[4],
}
reftime = reftime + timedelta(hours=4)
forecast_data.append(data_dict)
return forecast_data
|
from unittest import mock
from homeassistant.components.local_file.const import DOMAIN, SERVICE_UPDATE_FILE_PATH
from homeassistant.setup import async_setup_component
from tests.common import mock_registry
async def test_loading_file(hass, hass_client):
"""Test that it loads image from disk."""
mock_registry(hass)
with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch(
"os.access", mock.Mock(return_value=True)
):
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "local_file",
"file_path": "mock.file",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
m_open = mock.mock_open(read_data=b"hello")
with mock.patch(
"homeassistant.components.local_file.camera.open", m_open, create=True
):
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == 200
body = await resp.text()
assert body == "hello"
async def test_file_not_readable(hass, caplog):
"""Test a warning is shown setup when file is not readable."""
mock_registry(hass)
with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch(
"os.access", mock.Mock(return_value=False)
):
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "local_file",
"file_path": "mock.file",
}
},
)
await hass.async_block_till_done()
assert "Could not read" in caplog.text
assert "config_test" in caplog.text
assert "mock.file" in caplog.text
async def test_camera_content_type(hass, hass_client):
"""Test local_file camera content_type."""
cam_config_jpg = {
"name": "test_jpg",
"platform": "local_file",
"file_path": "/path/to/image.jpg",
}
cam_config_png = {
"name": "test_png",
"platform": "local_file",
"file_path": "/path/to/image.png",
}
cam_config_svg = {
"name": "test_svg",
"platform": "local_file",
"file_path": "/path/to/image.svg",
}
cam_config_noext = {
"name": "test_no_ext",
"platform": "local_file",
"file_path": "/path/to/image",
}
await async_setup_component(
hass,
"camera",
{"camera": [cam_config_jpg, cam_config_png, cam_config_svg, cam_config_noext]},
)
await hass.async_block_till_done()
client = await hass_client()
image = "hello"
m_open = mock.mock_open(read_data=image.encode())
with mock.patch(
"homeassistant.components.local_file.camera.open", m_open, create=True
):
resp_1 = await client.get("/api/camera_proxy/camera.test_jpg")
resp_2 = await client.get("/api/camera_proxy/camera.test_png")
resp_3 = await client.get("/api/camera_proxy/camera.test_svg")
resp_4 = await client.get("/api/camera_proxy/camera.test_no_ext")
assert resp_1.status == 200
assert resp_1.content_type == "image/jpeg"
body = await resp_1.text()
assert body == image
assert resp_2.status == 200
assert resp_2.content_type == "image/png"
body = await resp_2.text()
assert body == image
assert resp_3.status == 200
assert resp_3.content_type == "image/svg+xml"
body = await resp_3.text()
assert body == image
# default mime type
assert resp_4.status == 200
assert resp_4.content_type == "image/jpeg"
body = await resp_4.text()
assert body == image
async def test_update_file_path(hass):
"""Test update_file_path service."""
# Setup platform
mock_registry(hass)
with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch(
"os.access", mock.Mock(return_value=True)
):
camera_1 = {"platform": "local_file", "file_path": "mock/path.jpg"}
camera_2 = {
"platform": "local_file",
"name": "local_file_camera_2",
"file_path": "mock/path_2.jpg",
}
await async_setup_component(hass, "camera", {"camera": [camera_1, camera_2]})
await hass.async_block_till_done()
# Fetch state and check motion detection attribute
state = hass.states.get("camera.local_file")
assert state.attributes.get("friendly_name") == "Local File"
assert state.attributes.get("file_path") == "mock/path.jpg"
service_data = {"entity_id": "camera.local_file", "file_path": "new/path.jpg"}
await hass.services.async_call(DOMAIN, SERVICE_UPDATE_FILE_PATH, service_data)
await hass.async_block_till_done()
state = hass.states.get("camera.local_file")
assert state.attributes.get("file_path") == "new/path.jpg"
# Check that local_file_camera_2 file_path is still as configured
state = hass.states.get("camera.local_file_camera_2")
assert state.attributes.get("file_path") == "mock/path_2.jpg"
|
from __future__ import print_function
import argparse
import sys
_stash = globals()["_stash"]
class ConsoleOpenin(object):
def __init__(self, args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("file", action="store", help="file to open")
ns = p.parse_args(args)
self.filename = ns.file
def open_in(self):
_stash.libdist.open_in(self.filename)
if __name__ == "__main__":
ConsoleOpenin(sys.argv[1:]).open_in()
|
from pytouchline import PyTouchline
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, CONF_HOST, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
PRESET_MODES = {
"Normal": {"mode": 0, "program": 0},
"Night": {"mode": 1, "program": 0},
"Holiday": {"mode": 2, "program": 0},
"Pro 1": {"mode": 0, "program": 1},
"Pro 2": {"mode": 0, "program": 2},
"Pro 3": {"mode": 0, "program": 3},
}
TOUCHLINE_HA_PRESETS = {
(settings["mode"], settings["program"]): preset
for preset, settings in PRESET_MODES.items()
}
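# TOUCHLINE_HA_PRESETS inverts PRESET_MODES so the (mode, program) pair reported
# by the device maps back to a preset name, e.g. (1, 0) -> "Night" and (0, 2) -> "Pro 2".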
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_HOST): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Touchline devices."""
host = config[CONF_HOST]
py_touchline = PyTouchline()
number_of_devices = int(py_touchline.get_number_of_devices(host))
devices = []
for device_id in range(0, number_of_devices):
devices.append(Touchline(PyTouchline(device_id)))
add_entities(devices, True)
class Touchline(ClimateEntity):
"""Representation of a Touchline device."""
def __init__(self, touchline_thermostat):
"""Initialize the Touchline device."""
self.unit = touchline_thermostat
self._name = None
self._current_temperature = None
self._target_temperature = None
self._current_operation_mode = None
self._preset_mode = None
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def update(self):
"""Update thermostat attributes."""
self.unit.update()
self._name = self.unit.get_name()
self._current_temperature = self.unit.get_current_temperature()
self._target_temperature = self.unit.get_target_temperature()
self._preset_mode = TOUCHLINE_HA_PRESETS.get(
(self.unit.get_operation_mode(), self.unit.get_week_program())
)
@property
def hvac_mode(self):
"""Return current HVAC mode.
Needs to be one of HVAC_MODE_*.
"""
return HVAC_MODE_HEAT
@property
def hvac_modes(self):
"""Return list of possible operation modes."""
return [HVAC_MODE_HEAT]
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def preset_mode(self):
"""Return the current preset mode."""
return self._preset_mode
@property
def preset_modes(self):
"""Return available preset modes."""
return list(PRESET_MODES)
def set_preset_mode(self, preset_mode):
"""Set new target preset mode."""
self.unit.set_operation_mode(PRESET_MODES[preset_mode]["mode"])
self.unit.set_week_program(PRESET_MODES[preset_mode]["program"])
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
self._current_operation_mode = HVAC_MODE_HEAT
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
self.unit.set_target_temperature(self._target_temperature)
|
revision = "984178255c83"
down_revision = "f2383bf08fbc"
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column("pending_certs", sa.Column("resolved", sa.Boolean(), nullable=True))
op.add_column(
"pending_certs", sa.Column("resolved_cert_id", sa.Integer(), nullable=True)
)
def downgrade():
op.drop_column("pending_certs", "resolved_cert_id")
op.drop_column("pending_certs", "resolved")
|
import logging
import os
from pyps4_2ndscreen.ddp import async_create_ddp_endpoint
from pyps4_2ndscreen.media_art import COUNTRIES
import voluptuous as vol
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_REGION,
CONF_TOKEN,
)
from homeassistant.core import split_entity_id
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import location
from homeassistant.util.json import load_json, save_json
from .config_flow import PlayStation4FlowHandler # noqa: pylint: disable=unused-import
from .const import ATTR_MEDIA_IMAGE_URL, COMMANDS, DOMAIN, GAMES_FILE, PS4_DATA
_LOGGER = logging.getLogger(__name__)
SERVICE_COMMAND = "send_command"
PS4_COMMAND_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_COMMAND): vol.In(list(COMMANDS)),
}
)
class PS4Data:
"""Init Data Class."""
def __init__(self):
"""Init Class."""
self.devices = []
self.protocol = None
async def async_setup(hass, config):
"""Set up the PS4 Component."""
hass.data[PS4_DATA] = PS4Data()
transport, protocol = await async_create_ddp_endpoint()
hass.data[PS4_DATA].protocol = protocol
_LOGGER.debug("PS4 DDP endpoint created: %s, %s", transport, protocol)
service_handle(hass)
return True
async def async_setup_entry(hass, config_entry):
"""Set up PS4 from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "media_player")
)
return True
async def async_unload_entry(hass, entry):
"""Unload a PS4 config entry."""
await hass.config_entries.async_forward_entry_unload(entry, "media_player")
return True
async def async_migrate_entry(hass, entry):
"""Migrate old entry."""
config_entries = hass.config_entries
data = entry.data
version = entry.version
_LOGGER.debug("Migrating PS4 entry from Version %s", version)
reason = {
1: "Region codes have changed",
2: "Format for Unique ID for entity registry has changed",
}
# Migrate Version 1 -> Version 2: New region codes.
if version == 1:
loc = await location.async_detect_location_info(
hass.helpers.aiohttp_client.async_get_clientsession()
)
if loc:
country = loc.country_name
if country in COUNTRIES:
for device in data["devices"]:
device[CONF_REGION] = country
version = entry.version = 2
config_entries.async_update_entry(entry, data=data)
_LOGGER.info(
"PlayStation 4 Config Updated: \
Region changed to: %s",
country,
)
# Migrate Version 2 -> Version 3: Update identifier format.
if version == 2:
# Prevent changing entity_id. Updates entity registry.
registry = await entity_registry.async_get_registry(hass)
for entity_id, e_entry in registry.entities.items():
if e_entry.config_entry_id == entry.entry_id:
unique_id = e_entry.unique_id
# Remove old entity entry.
registry.async_remove(entity_id)
# Format old unique_id.
unique_id = format_unique_id(entry.data[CONF_TOKEN], unique_id)
# Create new entry with old entity_id.
new_id = split_entity_id(entity_id)[1]
registry.async_get_or_create(
"media_player",
DOMAIN,
unique_id,
suggested_object_id=new_id,
config_entry=entry,
device_id=e_entry.device_id,
)
entry.version = 3
_LOGGER.info(
"PlayStation 4 identifier for entity: %s \
has changed",
entity_id,
)
config_entries.async_update_entry(entry)
return True
msg = f"""{reason[version]} for the PlayStation 4 Integration.
Please remove the PS4 Integration and re-configure
[here](/config/integrations)."""
hass.components.persistent_notification.async_create(
title="PlayStation 4 Integration Configuration Requires Update",
message=msg,
notification_id="config_entry_migration",
)
return False
def format_unique_id(creds, mac_address):
"""Use last 4 Chars of credential as suffix. Unique ID per PSN user."""
suffix = creds[-4:]
return f"{mac_address}_{suffix}"
def load_games(hass: HomeAssistantType, unique_id: str) -> dict:
"""Load games for sources."""
g_file = hass.config.path(GAMES_FILE.format(unique_id))
try:
games = load_json(g_file)
except HomeAssistantError as error:
games = {}
_LOGGER.error("Failed to load games file: %s", error)
if not isinstance(games, dict):
_LOGGER.error("Games file was not parsed correctly")
games = {}
# If file exists
if os.path.isfile(g_file):
games = _reformat_data(hass, games, unique_id)
return games
def save_games(hass: HomeAssistantType, games: dict, unique_id: str):
"""Save games to file."""
g_file = hass.config.path(GAMES_FILE.format(unique_id))
try:
save_json(g_file, games)
except OSError as error:
_LOGGER.error("Could not save game list, %s", error)
def _reformat_data(hass: HomeAssistantType, games: dict, unique_id: str) -> dict:
"""Reformat data to correct format."""
data_reformatted = False
for game, data in games.items():
# Convert str format to dict format.
if not isinstance(data, dict):
# Use existing title. Assign defaults.
games[game] = {
ATTR_LOCKED: False,
ATTR_MEDIA_TITLE: data,
ATTR_MEDIA_IMAGE_URL: None,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
}
data_reformatted = True
_LOGGER.debug("Reformatting media data for item: %s, %s", game, data)
if data_reformatted:
save_games(hass, games, unique_id)
return games
def service_handle(hass: HomeAssistantType):
"""Handle for services."""
async def async_service_command(call):
"""Service for sending commands."""
entity_ids = call.data[ATTR_ENTITY_ID]
command = call.data[ATTR_COMMAND]
for device in hass.data[PS4_DATA].devices:
if device.entity_id in entity_ids:
await device.async_send_command(command)
hass.services.async_register(
DOMAIN, SERVICE_COMMAND, async_service_command, schema=PS4_COMMAND_SCHEMA
)
|
import homeassistant.components.remember_the_milk as rtm
from .const import JSON_STRING, PROFILE, TOKEN
from tests.async_mock import Mock, mock_open, patch
def test_create_new(hass):
"""Test creating a new config file."""
with patch("builtins.open", mock_open()), patch(
"os.path.isfile", Mock(return_value=False)
), patch.object(rtm.RememberTheMilkConfiguration, "save_config"):
config = rtm.RememberTheMilkConfiguration(hass)
config.set_token(PROFILE, TOKEN)
assert config.get_token(PROFILE) == TOKEN
def test_load_config(hass):
"""Test loading an existing token from the file."""
with patch("builtins.open", mock_open(read_data=JSON_STRING)), patch(
"os.path.isfile", Mock(return_value=True)
):
config = rtm.RememberTheMilkConfiguration(hass)
assert config.get_token(PROFILE) == TOKEN
def test_invalid_data(hass):
"""Test starts with invalid data and should not raise an exception."""
with patch("builtins.open", mock_open(read_data="random characters")), patch(
"os.path.isfile", Mock(return_value=True)
):
config = rtm.RememberTheMilkConfiguration(hass)
assert config is not None
def test_id_map(hass):
"""Test the hass to rtm task is mapping."""
hass_id = "hass-id-1234"
list_id = "mylist"
timeseries_id = "my_timeseries"
rtm_id = "rtm-id-4567"
with patch("builtins.open", mock_open()), patch(
"os.path.isfile", Mock(return_value=False)
), patch.object(rtm.RememberTheMilkConfiguration, "save_config"):
config = rtm.RememberTheMilkConfiguration(hass)
assert config.get_rtm_id(PROFILE, hass_id) is None
config.set_rtm_id(PROFILE, hass_id, list_id, timeseries_id, rtm_id)
assert (list_id, timeseries_id, rtm_id) == config.get_rtm_id(PROFILE, hass_id)
config.delete_rtm_id(PROFILE, hass_id)
assert config.get_rtm_id(PROFILE, hass_id) is None
def test_load_key_map(hass):
"""Test loading an existing key map from the file."""
with patch("builtins.open", mock_open(read_data=JSON_STRING)), patch(
"os.path.isfile", Mock(return_value=True)
):
config = rtm.RememberTheMilkConfiguration(hass)
assert ("0", "1", "2") == config.get_rtm_id(PROFILE, "1234")
|
def get_api_url(method):
"""
Returns API URL for the given method.
:param method: Method name
:type method: str
:returns: API URL for the given method
:rtype: str
"""
return 'https://slack.com/api/{}'.format(method)
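# e.g. get_api_url('chat.postMessage') -> 'https://slack.com/api/chat.postMessage'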
def get_item_id_by_name(list_dict, key_name):
"""Return the 'id' of the first item in list_dict whose 'name' equals key_name, or None if there is no match."""
for d in list_dict:
if d['name'] == key_name:
return d['id']
return None
|
import argparse
import json
import logging
import multiprocessing
import re
import sys
from xml.etree import ElementTree
from functools import partial
from gensim.corpora.wikicorpus import IGNORED_NAMESPACES, WikiCorpus, filter_wiki, find_interlinks, get_namespace, utils
import gensim.utils
logger = logging.getLogger(__name__)
def segment_all_articles(file_path, min_article_character=200, workers=None, include_interlinks=False):
"""Extract article titles and sections from a MediaWiki bz2 database dump.
Parameters
----------
file_path : str
Path to MediaWiki dump, typical filename is <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2.
min_article_character : int, optional
Minimal number of characters for an article (excluding titles and leading gaps).
workers : int or None, optional
Number of parallel workers, max(1, multiprocessing.cpu_count() - 1) if None.
include_interlinks : bool, optional
Whether or not interlinks should be included in the output.
Yields
------
(str, list of (str, str), (Optionally) list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally) [(interlink_article, interlink_text), ...]).
"""
with gensim.utils.open(file_path, 'rb') as xml_fileobj:
wiki_sections_corpus = _WikiSectionsCorpus(
xml_fileobj, min_article_character=min_article_character, processes=workers,
include_interlinks=include_interlinks)
wiki_sections_corpus.metadata = True
wiki_sections_text = wiki_sections_corpus.get_texts_with_sections()
for article in wiki_sections_text:
yield article
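# Usage sketch (the dump path is illustrative):
#     for title, sections in segment_all_articles('enwiki-latest-pages-articles.xml.bz2'):
#         print(title, len(sections))
# With include_interlinks=True each yielded tuple carries a third element with the interlinks.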
def segment_and_write_all_articles(file_path, output_file, min_article_character=200, workers=None,
include_interlinks=False):
"""Write article title and sections to `output_file` (or stdout, if output_file is None).
The output format is one article per line, in json-line format with 4 fields::
'title' - title of article,
'section_titles' - list of titles of sections,
'section_texts' - list of content from sections,
(Optional) 'section_interlinks' - list of interlinks in the article.
Parameters
----------
file_path : str
Path to MediaWiki dump, typical filename is <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2.
output_file : str or None
Path to output file in json-lines format, or None for printing to stdout.
min_article_character : int, optional
Minimal number of characters for an article (excluding titles and leading gaps).
workers : int or None, optional
Number of parallel workers, max(1, multiprocessing.cpu_count() - 1) if None.
include_interlinks : bool, optional
Whether or not interlinks should be included in the output.
"""
if output_file is None:
outfile = getattr(sys.stdout, 'buffer', sys.stdout)  # we want to write bytes, so on Python 3 we use 'buffer'
else:
outfile = gensim.utils.open(output_file, 'wb')
try:
article_stream = segment_all_articles(file_path, min_article_character, workers=workers,
include_interlinks=include_interlinks)
for idx, article in enumerate(article_stream):
article_title, article_sections = article[0], article[1]
if include_interlinks:
interlinks = article[2]
output_data = {
"title": article_title,
"section_titles": [],
"section_texts": [],
}
if include_interlinks:
output_data["interlinks"] = interlinks
for section_heading, section_content in article_sections:
output_data["section_titles"].append(section_heading)
output_data["section_texts"].append(section_content)
if (idx + 1) % 100000 == 0:
logger.info("processed #%d articles (at %r now)", idx + 1, article_title)
outfile.write((json.dumps(output_data) + "\n").encode('utf-8'))
finally:
if output_file is not None:
outfile.close()
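# Illustrative output: each line written is one JSON object such as
#     {"title": "Some article", "section_titles": ["Introduction", "History"],
#      "section_texts": ["...", "..."]}
# plus an "interlinks" field when include_interlinks=True (values shown are made up).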
def extract_page_xmls(f):
"""Extract pages from a MediaWiki database dump.
Parameters
----------
f : file
File descriptor of MediaWiki dump.
Yields
------
str
XML strings for page tags.
"""
elems = (elem for _, elem in ElementTree.iterparse(f, events=("end",)))
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
yield ElementTree.tostring(elem)
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
def segment(page_xml, include_interlinks=False):
"""Parse the content inside a page tag
Parameters
----------
page_xml : str
Content from page tag.
include_interlinks : bool
Whether or not interlinks should be parsed.
Returns
-------
(str, list of (str, str), (Optionally) list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally) [(interlink_article, interlink_text), ...]).
"""
elem = ElementTree.fromstring(page_xml)
filter_namespaces = ('0',)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
lead_section_heading = "Introduction"
top_level_heading_regex = r"\n==[^=].*[^=]==\n"
top_level_heading_regex_capture = r"\n==([^=].*[^=])==\n"
title = elem.find(title_path).text
text = elem.find(text_path).text
ns = elem.find(ns_path).text
if ns not in filter_namespaces:
text = None
if text is not None:
if include_interlinks:
interlinks = find_interlinks(text)
section_contents = re.split(top_level_heading_regex, text)
section_headings = [lead_section_heading] + re.findall(top_level_heading_regex_capture, text)
section_headings = [heading.strip() for heading in section_headings]
assert len(section_contents) == len(section_headings)
else:
interlinks = []
section_contents = []
section_headings = []
section_contents = [filter_wiki(section_content) for section_content in section_contents]
sections = list(zip(section_headings, section_contents))
if include_interlinks:
return title, sections, interlinks
else:
return title, sections
class _WikiSectionsCorpus(WikiCorpus):
"""Treat a wikipedia articles dump (<LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk.
"""
def __init__(self, fileobj, min_article_character=200, processes=None,
lemmatize=utils.has_pattern(), filter_namespaces=('0',), include_interlinks=False):
"""
Parameters
----------
fileobj : file
File descriptor of MediaWiki dump.
min_article_character : int, optional
Minimal number of characters for an article (excluding titles and leading gaps).
processes : int, optional
Number of processes, max(1, multiprocessing.cpu_count() - 1) if None.
lemmatize : bool, optional
If `pattern` package is installed, use fancier shallow parsing to get token lemmas.
Otherwise, use simple regexp tokenization.
filter_namespaces : tuple of int, optional
Enumeration of namespaces that will be ignored.
include_interlinks : bool, optional
Whether or not interlinks should be included in the output.
"""
self.fileobj = fileobj
self.filter_namespaces = filter_namespaces
self.metadata = False
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.lemmatize = lemmatize
self.min_article_character = min_article_character
self.include_interlinks = include_interlinks
def get_texts_with_sections(self):
"""Iterate over the dump, returning titles and text versions of all sections of articles.
Notes
-----
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function:
.. sourcecode:: pycon
>>> for vec in wiki_corpus:
>>> print(vec)
Yields
------
(str, list of (str, str), list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally)[(interlink_article, interlink_text), ...]).
"""
skipped_namespace, skipped_length, skipped_redirect = 0, 0, 0
total_articles, total_sections = 0, 0
page_xmls = extract_page_xmls(self.fileobj)
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(page_xmls, chunksize=10 * self.processes, maxsize=1):
for article in pool.imap(partial(segment, include_interlinks=self.include_interlinks),
group):
article_title, sections = article[0], article[1]
# article redirects are pruned here
if any(article_title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES): # filter non-articles
skipped_namespace += 1
continue
if not sections or sections[0][1].lstrip().lower().startswith("#redirect"): # filter redirect
skipped_redirect += 1
continue
if sum(len(body.strip()) for (_, body) in sections) < self.min_article_character:
# filter stubs (incomplete, very short articles)
skipped_length += 1
continue
total_articles += 1
total_sections += len(sections)
if self.include_interlinks:
interlinks = article[2]
yield (article_title, sections, interlinks)
else:
yield (article_title, sections)
logger.info(
"finished processing %i articles with %i sections (skipped %i redirects, %i stubs, %i ignored namespaces)",
total_articles, total_sections, skipped_redirect, skipped_length, skipped_namespace)
pool.terminate()
self.length = total_articles # cache corpus length
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=__doc__[:-136])
default_workers = max(1, multiprocessing.cpu_count() - 1)
parser.add_argument('-f', '--file', help='Path to MediaWiki database dump (read-only).', required=True)
parser.add_argument(
'-o', '--output',
help='Path to output file (stdout if not specified). If ends in .gz or .bz2, '
'the output file will be automatically compressed (recommended!).')
parser.add_argument(
'-w', '--workers',
help='Number of parallel workers for multi-core systems. Default: %(default)s.',
type=int,
default=default_workers
)
parser.add_argument(
'-m', '--min-article-character',
help="Ignore articles with fewer characters than this (article stubs). Default: %(default)s.",
type=int,
default=200
)
parser.add_argument(
'-i', '--include-interlinks',
help='Include a mapping for interlinks to other articles in the dump. The mappings format is: '
'"interlinks": [("article_title_1", "interlink_text_1"), ("article_title_2", "interlink_text_2"), ...]',
action='store_true'
)
args = parser.parse_args()
logger.info("running %s", " ".join(sys.argv))
segment_and_write_all_articles(
args.file, args.output,
min_article_character=args.min_article_character,
workers=args.workers,
include_interlinks=args.include_interlinks
)
logger.info("finished running %s", sys.argv[0])
|
import unittest
import six
from trashcli.restore import parse_indexes, InvalidEntry, Range, Single, Sequences
class Test_parse_indexes(unittest.TestCase):
def test_non_numeric(self):
with six.assertRaisesRegex(self, InvalidEntry, "^not an index: a$"):
parse_indexes("a", 10)
    def test_out_of_range(self):
with six.assertRaisesRegex(self, InvalidEntry, "^out of range 0..9: 10$"):
parse_indexes("10", 10)
    def test_last_index(self):
self.assertEqual(Sequences([Single(9)]), parse_indexes("9", 10))
    def test_first_index(self):
self.assertEqual(Sequences([Single(0)]), parse_indexes("0", 10))
    def test_range(self):
assert Sequences([Range(1, 4)]) == parse_indexes("1-4", 10)
    def test_comma_separated_indexes(self):
self.assertEqual(Sequences([Single(1),
Single(2),
Single(3),
Single(4)]),
parse_indexes("1,2,3,4", 10))
def test_interval_without_start(self):
with six.assertRaisesRegex(self, InvalidEntry, "^open interval: -1$"):
parse_indexes("-1", 10)
def test_interval_without_end(self):
with six.assertRaisesRegex(self, InvalidEntry, "^open interval: 1-$"):
parse_indexes("1-", 10)
def test_complex(self):
indexes = parse_indexes("1-5,7", 10)
self.assertEqual(Sequences([Range(1, 5), Single(7)]), indexes)
class TestSequences(unittest.TestCase):
def test(self):
sequences = parse_indexes("1-5,7", 10)
result = [index for index in sequences.all_indexes()]
self.assertEqual([1, 2, 3, 4, 5, 7], result)
|
import numpy as np
import os.path as op
import datetime
import calendar
from .utils import _load_mne_locs, _read_pos
from ...utils import logger, warn, verbose
from ..utils import _read_segments_file
from ..base import BaseRaw
from ..meas_info import _empty_info
from .._digitization import _make_dig_points, DigPoint
from ..constants import FIFF
from ...transforms import get_ras_to_neuromag_trans, apply_trans, Transform
@verbose
def read_raw_artemis123(input_fname, preload=False, verbose=None,
pos_fname=None, add_head_trans=True):
"""Read Artemis123 data as raw object.
Parameters
----------
input_fname : str
Path to the data file (extension ``.bin``). The header file with the
same file name stem and an extension ``.txt`` is expected to be found
in the same directory.
%(preload)s
%(verbose)s
pos_fname : str or None (default None)
If not None, load digitized head points from this file.
add_head_trans : bool (default True)
        If True, attempt to perform initial head localization and compute the
        initial device-to-head coordinate transform using the HPI coils. If no
        HPI coils are in info['dig'], the HPI coils are assumed to be in the
        canonical order of fiducial points (nas, rpa, lpa).
Returns
-------
raw : instance of Raw
A Raw object containing the data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawArtemis123(input_fname, preload=preload, verbose=verbose,
pos_fname=pos_fname, add_head_trans=add_head_trans)
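# Example usage of the reader above (an illustrative sketch, not part of the
# module; the file name follows the scanner naming convention noted below but
# is hypothetical):
#
#     raw = read_raw_artemis123('Artemis_Data_2017-01-01-10h-00m_test.bin',
#                               add_head_trans=False)
#     print(raw.info['sfreq'], raw.n_times)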
def _get_artemis123_info(fname, pos_fname=None):
"""Generate info struct from artemis123 header file."""
fname = op.splitext(fname)[0]
header = fname + '.txt'
logger.info('Reading header...')
# key names for artemis channel info...
chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass',
'FLL_AutoReset', 'FLL_ResetLock']
header_info = dict()
header_info['filter_hist'] = []
header_info['comments'] = ''
header_info['channels'] = []
with open(header, 'r') as fid:
# section flag
# 0 - None
# 1 - main header
# 2 - channel header
# 3 - comments
# 4 - length
# 5 - filtering History
sectionFlag = 0
for line in fid:
# skip emptylines or header line for channel info
if ((not line.strip()) or
(sectionFlag == 2 and line.startswith('DAQ Map'))):
continue
# set sectionFlag
if line.startswith('<end'):
sectionFlag = 0
elif line.startswith("<start main header>"):
sectionFlag = 1
elif line.startswith("<start per channel header>"):
sectionFlag = 2
elif line.startswith("<start comments>"):
sectionFlag = 3
elif line.startswith("<start length>"):
sectionFlag = 4
elif line.startswith("<start filtering history>"):
sectionFlag = 5
else:
# parse header info lines
# part of main header - lines are name value pairs
if sectionFlag == 1:
values = line.strip().split('\t')
if len(values) == 1:
values.append('')
header_info[values[0]] = values[1]
# part of channel header - lines are Channel Info
elif sectionFlag == 2:
values = line.strip().split('\t')
if len(values) != 7:
raise IOError('Error parsing line \n\t:%s\n' % line +
'from file %s' % header)
tmp = dict()
for k, v in zip(chan_keys, values):
tmp[k] = v
header_info['channels'].append(tmp)
elif sectionFlag == 3:
header_info['comments'] = '%s%s' \
% (header_info['comments'], line.strip())
elif sectionFlag == 4:
header_info['num_samples'] = int(line.strip())
elif sectionFlag == 5:
header_info['filter_hist'].append(line.strip())
for k in ['Temporal Filter Active?', 'Decimation Active?',
'Spatial Filter Active?']:
        if header_info[k] != 'FALSE':
            warn('%s is set but is not supported' % k)
    if header_info['filter_hist']:
        warn('Non-empty filter history found, but it is not supported')
# build mne info struct
info = _empty_info(float(header_info['DAQ Sample Rate']))
# Attempt to get time/date from fname
# Artemis123 files saved from the scanner observe the following
# naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin'
try:
date = datetime.datetime.strptime(
op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm')
meas_date = (calendar.timegm(date.utctimetuple()), 0)
except Exception:
meas_date = None
    # build subject info; the subject ID must be an integer (as per FIFF)
try:
subject_info = {'id': int(header_info['Subject ID'])}
except ValueError:
subject_info = {'id': 0}
# build description
desc = ''
for k in ['Purpose', 'Notes']:
desc += '{} : {}\n'.format(k, header_info[k])
desc += 'Comments : {}'.format(header_info['comments'])
info.update({'meas_date': meas_date,
'description': desc,
'subject_info': subject_info,
'proj_name': header_info['Project Name']})
# Channel Names by type
ref_mag_names = ['REF_001', 'REF_002', 'REF_003',
'REF_004', 'REF_005', 'REF_006']
ref_grad_names = ['REF_007', 'REF_008', 'REF_009',
'REF_010', 'REF_011', 'REF_012']
# load mne loc dictionary
loc_dict = _load_mne_locs()
info['chs'] = []
info['bads'] = []
for i, chan in enumerate(header_info['channels']):
# build chs struct
t = {'cal': float(chan['scaling']), 'ch_name': chan['name'],
'logno': i + 1, 'scanno': i + 1, 'range': 1.0,
'unit_mul': FIFF.FIFF_UNITM_NONE,
'coord_frame': FIFF.FIFFV_COORD_DEVICE}
# REF_018 has a zero cal which can cause problems. Let's set it to
# a value of another ref channel to make writers/readers happy.
if t['cal'] == 0:
t['cal'] = 4.716e-10
info['bads'].append(t['ch_name'])
t['loc'] = loc_dict.get(chan['name'], np.zeros(12))
if (chan['name'].startswith('MEG')):
t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD
t['kind'] = FIFF.FIFFV_MEG_CH
# While gradiometer units are T/m, the meg sensors referred to as
# gradiometers report the field difference between 2 pick-up coils.
# Therefore the units of the measurements should be T
# *AND* the baseline (difference between pickup coils)
# should not be used in leadfield / forwardfield computations.
t['unit'] = FIFF.FIFF_UNIT_T
t['unit_mul'] = FIFF.FIFF_UNITM_F
        # 3-axis reference magnetometers
elif (chan['name'] in ref_mag_names):
t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG
t['kind'] = FIFF.FIFFV_REF_MEG_CH
t['unit'] = FIFF.FIFF_UNIT_T
t['unit_mul'] = FIFF.FIFF_UNITM_F
# reference gradiometers
elif (chan['name'] in ref_grad_names):
t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD
t['kind'] = FIFF.FIFFV_REF_MEG_CH
# While gradiometer units are T/m, the meg sensors referred to as
# gradiometers report the field difference between 2 pick-up coils.
# Therefore the units of the measurements should be T
# *AND* the baseline (difference between pickup coils)
# should not be used in leadfield / forwardfield computations.
t['unit'] = FIFF.FIFF_UNIT_T
t['unit_mul'] = FIFF.FIFF_UNITM_F
# other reference channels are unplugged and should be ignored.
elif (chan['name'].startswith('REF')):
t['coil_type'] = FIFF.FIFFV_COIL_NONE
t['kind'] = FIFF.FIFFV_MISC_CH
t['unit'] = FIFF.FIFF_UNIT_V
info['bads'].append(t['ch_name'])
elif (chan['name'].startswith(('AUX', 'TRG', 'MIO'))):
t['coil_type'] = FIFF.FIFFV_COIL_NONE
t['unit'] = FIFF.FIFF_UNIT_V
if (chan['name'].startswith('TRG')):
t['kind'] = FIFF.FIFFV_STIM_CH
else:
t['kind'] = FIFF.FIFFV_MISC_CH
else:
raise ValueError('Channel does not match expected' +
' channel Types:"%s"' % chan['name'])
# incorporate multiplier (unit_mul) into calibration
t['cal'] *= 10 ** t['unit_mul']
t['unit_mul'] = FIFF.FIFF_UNITM_NONE
# append this channel to the info
info['chs'].append(t)
if chan['FLL_ResetLock'] == 'TRUE':
info['bads'].append(t['ch_name'])
# reduce info['bads'] to unique set
info['bads'] = list(set(info['bads']))
# HPI information
# print header_info.keys()
hpi_sub = dict()
    # It is unclear what event_channel is; it does not seem to apply here,
    # since HPIs are either always on or always off.
# hpi_sub['event_channel'] = ???
hpi_sub['hpi_coils'] = [dict(), dict(), dict(), dict()]
hpi_coils = [dict(), dict(), dict(), dict()]
drive_channels = ['MIO_001', 'MIO_003', 'MIO_009', 'MIO_011']
key_base = 'Head Tracking %s %d'
# set default HPI frequencies
if info['sfreq'] == 1000:
default_freqs = [140, 150, 160, 40]
else:
default_freqs = [700, 750, 800, 40]
for i in range(4):
# build coil structure
hpi_coils[i]['number'] = i + 1
hpi_coils[i]['drive_chan'] = drive_channels[i]
this_freq = header_info.pop(key_base % ('Frequency', i + 1),
default_freqs[i])
hpi_coils[i]['coil_freq'] = this_freq
# check if coil is on
if header_info[key_base % ('Channel', i + 1)] == 'OFF':
hpi_sub['hpi_coils'][i]['event_bits'] = [0]
else:
hpi_sub['hpi_coils'][i]['event_bits'] = [256]
info['hpi_subsystem'] = hpi_sub
info['hpi_meas'] = [{'hpi_coils': hpi_coils}]
# read in digitized points if supplied
if pos_fname is not None:
info['dig'] = _read_pos(pos_fname)
else:
info['dig'] = []
info._update_redundant()
return info, header_info
class RawArtemis123(BaseRaw):
"""Raw object from Artemis123 file.
Parameters
----------
input_fname : str
Path to the Artemis123 data file (ending in ``'.bin'``).
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, preload=False, verbose=None,
pos_fname=None, add_head_trans=True): # noqa: D102
from scipy.spatial.distance import cdist
from ...chpi import (compute_chpi_amplitudes, compute_chpi_locs,
_fit_coil_order_dev_head_trans)
fname, ext = op.splitext(input_fname)
if ext == '.txt':
input_fname = fname + '.bin'
elif ext != '.bin':
            raise RuntimeError('Valid artemis123 files must end in ".txt"' +
                               ' or ".bin".')
if not op.exists(input_fname):
raise RuntimeError('%s - Not Found' % input_fname)
info, header_info = _get_artemis123_info(input_fname,
pos_fname=pos_fname)
last_samps = [header_info.get('num_samples', 1) - 1]
super(RawArtemis123, self).__init__(
info, preload, filenames=[input_fname], raw_extras=[header_info],
last_samps=last_samps, orig_format=np.float32,
verbose=verbose)
if add_head_trans:
n_hpis = 0
for d in info['hpi_subsystem']['hpi_coils']:
if d['event_bits'] == [256]:
n_hpis += 1
if n_hpis < 3:
                warn('%d HPIs active. At least 3 needed to perform ' % n_hpis +
                     'head localization\n *NO* head localization performed')
else:
                # Localize HPIs using the first 250 ms of data.
info['hpi_results'] = [
dict(
dig_points=[dict(
r=np.zeros(3), coord_frame=FIFF.FIFFV_COORD_DEVICE,
ident=ii + 1) for ii in range(n_hpis)],
coord_trans=Transform('meg', 'head'))]
coil_amplitudes = compute_chpi_amplitudes(
self, tmin=0, tmax=0.25, t_window=0.25, t_step_min=0.25)
assert len(coil_amplitudes['times']) == 1
coil_locs = compute_chpi_locs(self.info, coil_amplitudes)
info['hpi_results'] = None
hpi_g = coil_locs['gofs'][0]
hpi_dev = coil_locs['rrs'][0]
                # only use HPI coils with localization goodness of fit > 0.98
bad_idx = []
for i, g in enumerate(hpi_g):
msg = 'HPI coil %d - location goodness of fit (%0.3f)'
if g < 0.98:
bad_idx.append(i)
msg += ' *Removed from coregistration*'
logger.info(msg % (i + 1, g))
hpi_dev = np.delete(hpi_dev, bad_idx, axis=0)
hpi_g = np.delete(hpi_g, bad_idx, axis=0)
if pos_fname is not None:
# Digitized HPI points are needed.
hpi_head = np.array([d['r']
for d in self.info.get('dig', [])
if d['kind'] == FIFF.FIFFV_POINT_HPI])
if (len(hpi_head) != len(hpi_dev)):
mesg = ("number of digitized (%d) and " +
"active (%d) HPI coils are " +
"not the same.")
raise RuntimeError(mesg % (len(hpi_head),
len(hpi_dev)))
# compute initial head to dev transform and hpi ordering
head_to_dev_t, order, trans_g = \
_fit_coil_order_dev_head_trans(hpi_dev, hpi_head)
# set the device to head transform
self.info['dev_head_t'] = \
Transform(FIFF.FIFFV_COORD_DEVICE,
FIFF.FIFFV_COORD_HEAD, head_to_dev_t)
# add hpi_meg_dev to dig...
for idx, point in enumerate(hpi_dev):
d = {'r': point, 'ident': idx + 1,
'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': FIFF.FIFFV_COORD_DEVICE}
self.info['dig'].append(DigPoint(d))
dig_dists = cdist(hpi_head[order], hpi_head[order])
dev_dists = cdist(hpi_dev, hpi_dev)
tmp_dists = np.abs(dig_dists - dev_dists)
dist_limit = tmp_dists.max() * 1.1
                    msg = 'HPI-Dig coregistration\n'
msg += '\tGOF : %0.3f\n' % trans_g
msg += '\tMax Coil Error : %0.3f cm\n' % (100 *
tmp_dists.max())
logger.info(msg)
else:
logger.info('Assuming Cardinal HPIs')
nas = hpi_dev[0]
lpa = hpi_dev[2]
rpa = hpi_dev[1]
t = get_ras_to_neuromag_trans(nas, lpa, rpa)
self.info['dev_head_t'] = \
Transform(FIFF.FIFFV_COORD_DEVICE,
FIFF.FIFFV_COORD_HEAD, t)
# transform fiducial points
nas = apply_trans(t, nas)
lpa = apply_trans(t, lpa)
rpa = apply_trans(t, rpa)
hpi = apply_trans(self.info['dev_head_t'], hpi_dev)
self.info['dig'] = _make_dig_points(nasion=nas, lpa=lpa,
rpa=rpa, hpi=hpi)
order = np.array([0, 1, 2])
dist_limit = 0.005
# fill in hpi_results
hpi_result = dict()
# add HPI points in device coords...
dig = []
for idx, point in enumerate(hpi_dev):
dig.append({'r': point, 'ident': idx + 1,
'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': FIFF.FIFFV_COORD_DEVICE})
hpi_result['dig_points'] = dig
# attach Transform
hpi_result['coord_trans'] = self.info['dev_head_t']
# 1 based indexing
hpi_result['order'] = order + 1
hpi_result['used'] = np.arange(3) + 1
hpi_result['dist_limit'] = dist_limit
hpi_result['good_limit'] = 0.98
# Warn for large discrepancies between digitized and fit
# cHPI locations
if hpi_result['dist_limit'] > 0.005:
warn('Large difference between digitized geometry' +
' and HPI geometry. Max coil to coil difference' +
' is %0.2f cm\n' % (100. * tmp_dists.max()) +
'beware of *POOR* head localization')
# store it
self.info['hpi_results'] = [hpi_result]
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
_read_segments_file(
self, data, idx, fi, start, stop, cals, mult, dtype='>f4')
|
from requests.exceptions import ConnectTimeout
from homeassistant import config_entries, setup
from homeassistant.components.plum_lightpad.const import DOMAIN
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
), patch(
"homeassistant.components.plum_lightpad.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.plum_lightpad.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": "test-plum-username", "password": "test-plum-password"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "test-plum-username"
assert result2["data"] == {
"username": "test-plum-username",
"password": "test-plum-password",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData",
side_effect=ConnectTimeout,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": "test-plum-username", "password": "test-plum-password"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_one_entry_per_email_allowed(hass):
"""Test that only one entry allowed per Plum cloud email address."""
MockConfigEntry(
domain=DOMAIN,
unique_id="test-plum-username",
data={"username": "test-plum-username", "password": "test-plum-password"},
).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
), patch("homeassistant.components.plum_lightpad.async_setup") as mock_setup, patch(
"homeassistant.components.plum_lightpad.async_setup_entry"
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": "test-plum-username", "password": "test-plum-password"},
)
assert result2["type"] == "abort"
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
async def test_import(hass):
"""Test configuring the flow using configuration.yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
), patch(
"homeassistant.components.plum_lightpad.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.plum_lightpad.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"username": "test-plum-username", "password": "test-plum-password"},
)
assert result["type"] == "create_entry"
assert result["title"] == "test-plum-username"
assert result["data"] == {
"username": "test-plum-username",
"password": "test-plum-password",
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
|
import datetime
import json
import logging
import re
import sys
import redis
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger('metrics')
redis_opts = {}
redis_conn = None
cache_prefix = 'bandwidth_log:'
control_cache_key = 'last_line_parsed'
logging_period = 60 * 24  # 24 hours, expressed in minutes
logging_interval = 15  # 15 minutes
exp_time = 60 * 60 * 24  # keys expire after 24 hours, expressed in seconds
try:
with open('/home/docker/environment.json') as f:
env = json.load(f)
# Prod
redis_opts = {
'host': env['REDIS_HOST'],
'port': int(env['REDIS_PORT']),
'db': 1,
'password': env['REDIS_PASSWORD'],
}
except Exception:
# Dev
redis_opts = {
'host': 'localhost',
'port': 6380,
'db': 0,
'password': None,
}
def convert_str_to_datetime(date_str):
return datetime.datetime.strptime(date_str, '%d/%b/%Y:%H:%M:%S')
def raw_line_parser(str_line):
pattern = ("(?P<ip>\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) - - \["
"(?P<date>\d{2}/\w+/\d{4}:\d{2}:\d{2}:\d{2})?\] \""
"(?P<http_request>\w+)? /\w+/\w+/"
"(?P<id>\w+)?/(?P<type>\w+)?")
pattern_2 = ".*?(\d+)$"
results = re.match(pattern, str_line)
if results is None:
return results
results = re.match(pattern, str_line).groupdict()
temp_results = re.match(pattern_2, str_line)
if temp_results is None:
results['size'] = None
return results
results['size'] = re.match(pattern_2, str_line).group(1)
return results
def compute_bandwidth(str_end_time, str_start_time, str_layer_size):
bandwidth = 0.0
if str_start_time is None:
return bandwidth
if str_end_time is None:
return bandwidth
if str_layer_size is None:
return bandwidth
start_time = convert_str_to_datetime(str_start_time)
end_time = convert_str_to_datetime(str_end_time)
    layer_size = int(str_layer_size)
layer_size_kb = (layer_size * 8) / 1024 # Kilobits
delta = end_time - start_time
num_seconds = delta.total_seconds()
bandwidth = 0.0
if num_seconds and layer_size_kb > 100:
        bandwidth = layer_size_kb / num_seconds  # kilobits per second (Kb/s)
return bandwidth
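# Worked example for compute_bandwidth (illustrative figures): a 5 MB layer
# (5 * 1024 * 1024 bytes) downloaded in 4 seconds gives
# (5 * 1024 * 1024 * 8) / 1024 = 40960 kilobits, i.e. 40960 / 4 = 10240 Kb/s.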
def cache_key(key):
return cache_prefix + key
def set_cache(interval, bandwidth):
    global redis_conn, exp_time
if redis_conn is None:
logger.error('Failed to find a redis connection.')
return
key = cache_key('{0}'.format(interval))
redis_conn.setex(key, exp_time, bandwidth) # time in seconds
logger.info('Saved in Redis: key: {0} bandwidth: {1}'.format(
key, bandwidth))
def adjust_current_interval(current_interval, end_time, items):
global logging_interval, logging_period
    total_items = logging_period // logging_interval
logger.info('Skipping interval: {0}'.format(current_interval))
for i in range(items, total_items):
items = i + 1
current_interval -= datetime.timedelta(minutes=logging_interval)
if current_interval <= end_time:
break
logger.info('Skipping interval: {0}'.format(current_interval))
return current_interval, items
def save_bandwidth(bandwidth, key, items):
    # Save the average bandwidth of the given items
avg_bandwidth = round(bandwidth[key] / items[key], 2)
logger.info('Saving in Redis...')
set_cache(key, avg_bandwidth)
def save_last_line_parsed(time):
global redis_conn, cache_prefix
if redis_conn is None:
logger.error('Failed to find a redis connection.')
return
key = cache_key(control_cache_key)
redis_conn.set(key, time)
logger.info('Last time saved: {0}'.format(time))
def get_last_line_parsed():
global redis_conn, cache_prefix
if redis_conn is None:
logger.error('Failed to find a redis connection.')
return
key = cache_key(control_cache_key)
return redis_conn.get(key)
def update_current_interval(items, logging_interval, start_time):
items += 1
interval = logging_interval * items
current_interval = start_time - datetime.timedelta(minutes=interval)
logger.info('Updating interval to: {0}'.format(current_interval))
return current_interval, items
def parse_data(item):
str_start_time = None
str_end_time = None
str_layer_size = None
key = None
if item['http_request'] is not None and item['type'] is not None:
if 'GET' in item['http_request'] and 'layer' in item['type']:
str_end_time = item['date']
elif 'GET' in item['http_request'] and 'json' in item['type']:
str_start_time = item['date']
str_layer_size = item['size']
key = item['id']
return str_start_time, str_end_time, str_layer_size, key
def read_file(file_name):
logger.info('Reading file...')
parsed_data = []
try:
with open(file_name) as f:
for line in reversed(f.readlines()):
processed_line = raw_line_parser(line.rstrip())
if processed_line is not None:
parsed_data.append(processed_line)
except IOError as e:
logger.error('Failed to read the file. {0}'.format(e))
exit(1)
return parsed_data
def generate_bandwidth_data(start_time, min_time, time_interval):
global logging_interval, logging_period
end_times = {}
bandwidth_items = {}
num_items = {}
    total_items = logging_period // logging_interval
items = 1
parsed_data = read_file(sys.argv[1])
last_time_parsed = get_last_line_parsed()
most_recent_parsing = None
if last_time_parsed:
last_time_parsed = convert_str_to_datetime(last_time_parsed)
logger.info('Last time parsed: {0}'.format(last_time_parsed))
for item in parsed_data:
str_start_time, str_end_time, str_layer_size, key = parse_data(item)
if str_end_time:
end_times[key] = str_end_time
else:
str_end_time = end_times.get(key)
bandwidth = compute_bandwidth(str_end_time,
str_start_time,
str_layer_size)
if bandwidth:
end_time = convert_str_to_datetime(str_end_time)
if last_time_parsed:
if last_time_parsed >= end_time:
logger.info('Remaining data parsed already. Stopping...')
break
if end_time < min_time:
logger.info('Minimum date reached. Stopping...')
break
if items >= total_items:
logger.info('Maximum number of elements reached. Stopping...')
break
if time_interval > end_time:
if bandwidth_items.get(time_interval, 0):
save_bandwidth(bandwidth_items,
time_interval,
num_items)
if not most_recent_parsing:
most_recent_parsing = str_end_time
time_interval, items = (
update_current_interval(items,
logging_interval,
start_time)
)
else:
time_interval, items = (
adjust_current_interval(time_interval,
end_time,
items)
)
bandwidth_items[time_interval] = (
bandwidth_items.get(time_interval, 0.0) + bandwidth
)
num_items[time_interval] = num_items.get(time_interval, 0.0) + 1
end_times.pop(key, None)
if most_recent_parsing:
save_last_line_parsed(most_recent_parsing)
def run():
global redis_conn, redis_opts
redis_conn = redis.StrictRedis(host=redis_opts['host'],
port=int(redis_opts['port']),
db=int(redis_opts['db']),
password=redis_opts['password'])
logger.info('Redis config: {0}'.format(redis_opts))
start_time = datetime.datetime.utcnow()
min_time = start_time - datetime.timedelta(minutes=logging_period)
time_interval = start_time - datetime.timedelta(minutes=logging_interval)
logger.info('Starting...')
generate_bandwidth_data(start_time, min_time, time_interval)
if __name__ == "__main__":
if len(sys.argv) == 1:
logger.error('Please specify the logfile path.')
exit(1)
run()
|
import os
from babelfish import Language, language_converters
import pytest
import rarfile
from vcr import VCR
from subliminal.exceptions import ConfigurationError, AuthenticationError, ServiceUnavailable
from subliminal.providers.legendastv import LegendasTVSubtitle, LegendasTVProvider, LegendasTVArchive
USERNAME = 'python-subliminal'
PASSWORD = 'subliminal'
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
cassette_library_dir=os.path.join('tests', 'cassettes', 'legendastv'))
@pytest.mark.integration
@vcr.use_cassette()
def test_get_archives_logged_in_as_donor():
with LegendasTVProvider() as provider:
archive = provider.get_archives(34084, 2, 'movie', None, None)[0]
assert archive.id == '5515d27a72921'
assert archive.name == 'Interstellar.2014.1080p.BluRay.x264.DTS-RARBG.eng'
assert archive.link == ('http://legendas.tv/download/5515d27a72921/Interstellar/Interstellar_2014_1080p_BluRay_'
'x264_DTS_RARBG_eng')
@pytest.mark.integration
@vcr.use_cassette()
def test_get_archives_logged_in_as_not_donor():
with LegendasTVProvider() as provider:
archive = provider.get_archives(34084, 2, 'movie', None, None)[0]
assert archive.id == '5515d27a72921'
assert archive.name == 'Interstellar.2014.1080p.BluRay.x264.DTS-RARBG.eng'
assert archive.link == ('http://legendas.tv/download/5515d27a72921/Interstellar/Interstellar_2014_1080p_BluRay_'
'x264_DTS_RARBG_eng')
@pytest.mark.converter
def test_converter_convert_alpha3_country():
assert language_converters['legendastv'].convert('por', 'BR') == 1
@pytest.mark.converter
def test_converter_convert_alpha3():
assert language_converters['legendastv'].convert('eng') == 2
@pytest.mark.converter
def test_converter_convert_unsupported_alpha3():
with pytest.raises(ConfigurationError):
language_converters['legendastv'].convert('rus')
@pytest.mark.converter
def test_converter_reverse():
assert language_converters['legendastv'].reverse(10) == ('por',)
@pytest.mark.converter
def test_converter_reverse_name_converter():
assert language_converters['legendastv'].reverse(3) == ('spa',)
@pytest.mark.converter
def test_converter_reverse_unsupported_language_number():
with pytest.raises(ConfigurationError):
language_converters['legendastv'].reverse(20)
def test_get_matches(episodes):
archive = LegendasTVArchive('537a74584945b', 'The.Big.Bang.Theory.S07.HDTV.x264', True, False,
'http://legendas.tv/download/537a74584945b/The_Big_Bang_Theory/'
'The_Big_Bang_Theory_S07_HDTV_x264', 6915, 10)
subtitle = LegendasTVSubtitle(Language('por', 'BR'), 'episode', 'The Big Bang Theory', 2013, 'tt0898266', 7,
archive, 'TBBT S07 x264/The.Big.Bang.Theory.S07E05.HDTV.x264-LOL.srt')
matches = subtitle.get_matches(episodes['bbt_s07e05'])
assert matches == {'series', 'year', 'country', 'season', 'episode', 'release_group', 'source', 'video_codec',
'series_imdb_id'}
def test_get_matches_no_match(episodes):
archive = LegendasTVArchive('537a74584945b', 'The.Big.Bang.Theory.S07.HDTV.x264', True, False,
'http://legendas.tv/download/537a74584945b/The_Big_Bang_Theory/'
'The_Big_Bang_Theory_S07_HDTV_x264', 6915, 10)
subtitle = LegendasTVSubtitle(Language('por', 'BR'), 'episode', 'The Big Bang Theory', 2013, 'tt0898266', 7,
archive, 'TBBT S07 x264/The.Big.Bang.Theory.S07E05.HDTV.x264-LOL.srt')
matches = subtitle.get_matches(episodes['dallas_2012_s01e03'])
assert matches == set()
@pytest.mark.integration
@vcr.use_cassette
def test_login():
provider = LegendasTVProvider(USERNAME, PASSWORD)
assert provider.logged_in is False
provider.initialize()
assert provider.logged_in is True
@pytest.mark.integration
@vcr.use_cassette
def test_login_bad_password():
provider = LegendasTVProvider(USERNAME, 'wrong')
with pytest.raises(AuthenticationError):
provider.initialize()
@pytest.mark.integration
@vcr.use_cassette
def test_logout():
provider = LegendasTVProvider(USERNAME, PASSWORD)
provider.initialize()
provider.terminate()
assert provider.logged_in is False
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_episode(episodes):
video = episodes['bbt_s07e05']
with LegendasTVProvider() as provider:
titles = provider.search_titles(video.series, video.season, video.year)
assert len(titles) == 1
assert set(titles.keys()) == {30730}
assert {t['title'] for t in titles.values()} == {video.series}
assert {t['season'] for t in titles.values() if t['type'] == 'episode'} == set([video.season])
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_movie(movies):
with LegendasTVProvider() as provider:
titles = provider.search_titles(movies['interstellar'].title, None, movies['interstellar'].year)
assert len(titles) == 1
assert set(titles.keys()) == {34084}
assert {t['title'] for t in titles.values()} == {movies['interstellar'].title}
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_dots():
with LegendasTVProvider() as provider:
titles = provider.search_titles('11.22.63', 1, None)
assert len(titles) == 1
assert set(titles.keys()) == {40092}
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_quote():
with LegendasTVProvider() as provider:
titles = provider.search_titles('Marvel\'s Jessica Jones', 1, None)
assert len(titles) == 1
assert set(titles.keys()) == {39376}
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_with_season_information_in_english():
with LegendasTVProvider() as provider:
# Season 3 uses '3rd Season'
titles = provider.search_titles('Pretty Little Liars', 3, 2012)
assert len(titles) == 1
assert set(titles.keys()) == {27500}
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_containing_year_information():
with LegendasTVProvider() as provider:
titles = provider.search_titles('Bull', 1, 2016)
assert 42047 in titles.keys()
t = titles[42047]
        assert (t['title'], t['year']) == ('Bull', 2016)
@pytest.mark.integration
@vcr.use_cassette
def test_get_archives():
with LegendasTVProvider() as provider:
archives = provider.get_archives(34084, 2, 'movie', None, None)
assert len(archives) == 2
assert {a.id for a in archives} == {'5515d27a72921', '54a2e41d8cae4'}
assert {a.content for a in archives} == {None}
@pytest.mark.integration
@vcr.use_cassette
def test_get_archives_no_result():
with LegendasTVProvider() as provider:
archives = provider.get_archives(34084, 17, 'movie', None, None)
assert len(archives) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_download_archive():
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
archive = provider.get_archives(34084, 2, 'movie', None, None)[0]
provider.download_archive(archive)
assert archive.content is not None
@pytest.mark.integration
@vcr.use_cassette
def test_query_movie(movies):
video = movies['interstellar']
language = Language('eng')
expected_subtitles = {
('54a2e41d8cae4', 'Interstellar 2014 HDCAM NEW SOURCE READNFO XVID AC3 ACAB.srt'),
('5515d27a72921', 'Interstellar.2014.1080p.BluRay.x264.DTS-RARBG.eng.srt'),
}
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.query(language, video.title, year=video.year)
assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_query_episode(episodes):
video = episodes['colony_s01e09']
language = Language('por', 'BR')
expected_subtitles = {
('56ed8159e36ec', 'Colony.S01E09.HDTV.XviD-FUM.srt'),
('56ed8159e36ec', 'Colony.S01E09.HDTV.x264-FLEET.srt'),
('56ed8159e36ec', 'Colony.S01E09.1080p.WEB-DL.x265.HEVC.AAC.5.1.Condo.srt'),
('56ed8159e36ec', 'Colony.S01E09.720p.HDTV.HEVC.x265-RMTeam.srt'),
('56ed8159e36ec', 'Colony.S01E09.WEB-DL.x264-RARBG.srt'),
('56ed8159e36ec', 'Colony.S01E09.Zero.Day.1080p.WEB-DL.6CH.x265.HEVC-PSA.srt'),
('56ed8159e36ec', 'Colony.S01E09.720p.HDTV.x264-KILLERS.srt'),
('56ed8159e36ec', 'Colony.S01E09.720p.WEB-DL.HEVC.x265-RMTeam.srt'),
('56ed8159e36ec', 'Colony.S01E09.HDTV.XviD-AFG.srt'),
('56ed812f354f6', 'Colony.S01E09.HDTV.x264-FUM.srt'),
('56eb3817111be', 'Colony S01E09 1080p WEB DL DD5 1 H264 RARBG /'
'Colony S01E09 1080p WEB DL DD5 1 H264 RARBG .srt'),
('56ed8159e36ec', 'Colony.S01E09.Zero.Day.1080p.WEB-DL.DD5.1.H265-LGC.srt'),
('56ed8159e36ec', 'Colony.S01E09.Zero.Day.720p.WEB-DL.2CH.x265.HEVC-PSA.srt'),
('56ed8159e36ec', 'Colony.S01E09.1080p.WEB-DL.6CH.HEVC.x265-RMTeam.srt'),
('56ed8159e36ec', 'Colony.S01E09.720p.HDTV.2CH.x265.HEVC-PSA.srt'),
('56ed8159e36ec', 'Colony.S01E09.1080p.WEB-DL.DD5.1.H264-RARBG.srt'),
('56ed8159e36ec', 'Colony.S01E09.HDTV.x264-FUM.srt'),
('56ed8159e36ec', 'Colony.S01E09.720p.WEB-DL.DD5.1.H264-RARBG.srt'),
('56e442ddbb615', 'Colony.S01E09.720p.HDTV.x264-KILLERS.srt')
}
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.query(language, video.series, video.season, video.episodes, video.year)
assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode(episodes):
video = episodes['the_x_files_s10e02']
languages = {Language('eng')}
expected_subtitles = {('56a756935a76c', 'The.X-Files.S10E02.720p.HDTV.AVS.en.srt')}
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.list_subtitles(video, languages)
assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode_alternative_series(episodes):
video = episodes['turn_s04e03']
languages = {Language('por', 'BR')}
expected_subtitles = {
('5953101413fcc', 'Turn.S04E03.CONVERT.1080p.HEVC.x265-MeGusta.srt'),
('5953101413fcc', 'Turn.S04E03.CONVERT.720p.WEB.h264-TBS.srt'),
('5953101413fcc', 'Turn.S04E03.CONVERT.AAC.MP4-Mobile.srt'),
('5953101413fcc', 'Turn.S04E03.XviD-AFG.srt'),
('5953101413fcc', 'Turn.S04E03.CONVERT.XviD-AFG.srt'),
('5953101413fcc', 'Turn.S04E03.WEBRip.x264-RARBG.srt'),
('5953101413fcc', 'Turn.S04E03.Blood.for.Blood.720p.AMZN.WEBRip.DD5.1.x264-ViSUM.srt'),
('5953101413fcc', 'Turn.S04E03.1080p.HEVC.x265-MeGusta.srt'),
('5953101413fcc', 'Turn.S04E03.CONVERT.WEB.h264-TBS.srt'),
('5953101413fcc', 'Turn.S04E03.Blood.for.Blood.1080p.AMZN.WEBRip.DD5.1.x264-ViSUM.srt'),
('5953101413fcc', 'Turn.S04E03.480p.x264-mSD.srt'),
('5953101413fcc', 'Turn.S04E03.720p.HDTV.x264-SVA.srt'),
('5953101413fcc', 'TURN.Washingtons.Spies.S04E03.Blood.for.Blood.1080p.AMZN.WEBRip.DDP5.1.x264-ViSUM.srt'),
('5953101413fcc', 'Turn.S04E03.1080p.WEBRip.x264-MOROSE.srt'),
('5953101413fcc', 'Turn.S04E03.CONVERT.480p.x264-mSD.srt'),
('5953101413fcc', 'TURN.Washingtons.Spies.S04E03.Blood.for.Blood.720p.AMZN.WEBRip.DDP5.1.x264-ViSUM.srt'),
('5953101413fcc', 'Turn.S04E03.AAC.MP4-Mobile.srt'),
('5953101413fcc', 'Turn.S04E03.720p.HDTV.2CH.x265.HEVC-PSA.srt'),
('5953101413fcc', "TURN.Washington's.Spies.S04E03.720p.WEBRip.2CH.x265.HEVC-PSA.srt"),
('5953101413fcc', 'Turn.S04E03.HDTV.x264-SVA.srt'),
('5953101413fcc', 'Turn.S04E03.720p.HDTV.x264-AVS.srt'),
('5953101413fcc', 'Turn.S04E03.CONVERT.1080p.WEB.h264-TBS.srt')
}
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.list_subtitles(video, languages)
assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie(movies):
video = movies['man_of_steel']
languages = {Language('eng')}
expected_subtitles = {('525d8c2444851', 'Man.Of.Steel.2013.[BluRay.BRRip.BDRip].srt')}
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.list_subtitles(video, languages)
assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
video = movies['man_of_steel']
languages = {Language('eng')}
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.list_subtitles(video, languages)
provider.download_subtitle(subtitles[0])
assert subtitles[0].content is not None
assert subtitles[0].is_valid() is True
@pytest.mark.integration
@vcr.use_cassette
def test_under_maintenance(movies):
"""Tests when is under maintenance and http status code 200."""
video = movies['man_of_steel']
languages = {Language('eng')}
with LegendasTVProvider() as provider:
try:
provider.list_subtitles(video, languages)
except ServiceUnavailable:
pass
else:
pytest.fail()
@pytest.mark.integration
def test_unrar_not_available(monkeypatch):
monkeypatch.setattr(rarfile, 'UNRAR_TOOL', 'fake_unrar')
try:
LegendasTVProvider(USERNAME, PASSWORD)
except ConfigurationError as error:
assert 'UNRAR tool not available' == error.args[0]
else:
pytest.fail()
@pytest.mark.integration
@vcr.use_cassette
def test_get_archive_multi_episode(episodes):
video = episodes['fear_walking_dead_s03e10']
languages = {Language('por', 'BR')}
# Archive is multi-episode: Fear_the_Walking_Dead_S03E09E10_HDTV_x264_SVA_AVS_AFG_RARBG_DEFLATE
expected_subtitle = ('59b88ce286178', 'Fear.the.Walking.Dead.S03E10.1080p.WEB-DL.DD5.1.H264-RARBG.srt')
with LegendasTVProvider(USERNAME, PASSWORD) as provider:
subtitles = provider.list_subtitles(video, languages)
assert expected_subtitle in {(s.archive.id, s.name) for s in subtitles}
|
from django.core.exceptions import ImproperlyConfigured
from django.utils import timezone
from django.views.generic.base import TemplateResponseMixin
class EntryQuerysetTemplateResponseMixin(TemplateResponseMixin):
"""
Return a custom template name for views returning
a queryset of Entry filtered by another model.
"""
model_type = None
model_name = None
def get_model_type(self):
"""
Return the model type for templates.
"""
if self.model_type is None:
raise ImproperlyConfigured(
"%s requires either a definition of "
"'model_type' or an implementation of 'get_model_type()'" %
self.__class__.__name__)
return self.model_type
def get_model_name(self):
"""
Return the model name for templates.
"""
if self.model_name is None:
raise ImproperlyConfigured(
"%s requires either a definition of "
"'model_name' or an implementation of 'get_model_name()'" %
self.__class__.__name__)
return self.model_name
def get_template_names(self):
"""
Return a list of template names to be used for the view.
"""
model_type = self.get_model_type()
model_name = self.get_model_name()
templates = [
'zinnia/%s/%s/entry_list.html' % (model_type, model_name),
'zinnia/%s/%s_entry_list.html' % (model_type, model_name),
'zinnia/%s/entry_list.html' % model_type,
'zinnia/entry_list.html']
if self.template_name is not None:
templates.insert(0, self.template_name)
return templates
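# Illustration (hypothetical values): with model_type='tag', model_name='python'
# and no template_name set, get_template_names() above returns
#     ['zinnia/tag/python/entry_list.html',
#      'zinnia/tag/python_entry_list.html',
#      'zinnia/tag/entry_list.html',
#      'zinnia/entry_list.html']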
class EntryQuerysetArchiveTemplateResponseMixin(TemplateResponseMixin):
"""
Return a custom template name for the archive views based
on the type of the archives and the value of the date.
"""
template_name_suffix = '_archive'
def get_archive_part_value(self, part):
"""
        Method for accessing the value of the
        self.get_year(), self.get_month(), etc. methods,
        if they exist.
"""
try:
return getattr(self, 'get_%s' % part)()
except AttributeError:
return None
def get_default_base_template_names(self):
"""
Return a list of default base templates used
to build the full list of templates.
"""
return ['entry%s.html' % self.template_name_suffix]
def get_template_names(self):
"""
Return a list of template names to be used for the view.
"""
year = self.get_archive_part_value('year')
week = self.get_archive_part_value('week')
month = self.get_archive_part_value('month')
day = self.get_archive_part_value('day')
templates = []
path = 'zinnia/archives'
template_names = self.get_default_base_template_names()
for template_name in template_names:
templates.extend([template_name,
'zinnia/%s' % template_name,
'%s/%s' % (path, template_name)])
if year:
for template_name in template_names:
templates.append(
'%s/%s/%s' % (path, year, template_name))
if week:
for template_name in template_names:
templates.extend([
'%s/week/%s/%s' % (path, week, template_name),
'%s/%s/week/%s/%s' % (path, year, week, template_name)])
if month:
for template_name in template_names:
templates.extend([
'%s/month/%s/%s' % (path, month, template_name),
'%s/%s/month/%s/%s' % (path, year, month, template_name)])
if day:
for template_name in template_names:
templates.extend([
'%s/day/%s/%s' % (path, day, template_name),
'%s/%s/day/%s/%s' % (path, year, day, template_name),
'%s/month/%s/day/%s/%s' % (path, month, day,
template_name),
'%s/%s/%s/%s/%s' % (path, year, month, day,
template_name)])
if self.template_name is not None:
templates.append(self.template_name)
templates.reverse()
return templates
class EntryQuerysetArchiveTodayTemplateResponseMixin(
EntryQuerysetArchiveTemplateResponseMixin):
"""
    Same as EntryQuerysetArchiveTemplateResponseMixin
    but uses the current day's date when getting
    the archive part values.
"""
today = None
def get_archive_part_value(self, part):
"""Return archive part for today"""
parts_dict = {'year': '%Y',
'month': self.month_format,
'week': self.week_format,
'day': '%d'}
if self.today is None:
today = timezone.now()
if timezone.is_aware(today):
today = timezone.localtime(today)
self.today = today
return self.today.strftime(parts_dict[part])
class EntryArchiveTemplateResponseMixin(
EntryQuerysetArchiveTemplateResponseMixin):
"""
    Same as EntryQuerysetArchiveTemplateResponseMixin
    but uses the template defined in the Entry instance
    as the base template name.
"""
def get_default_base_template_names(self):
"""
        Return the list of base template names built from the Entry's detail template and slug.
"""
return [self.object.detail_template,
'%s.html' % self.object.slug,
'%s_%s' % (self.object.slug, self.object.detail_template)]
|
import httpobs.conf
from httpobs.scanner.analyzer import NUM_TESTS, tests
from httpobs.scanner.grader import get_grade_and_likelihood_for_score, get_score_description
from httpobs.scanner.retriever import retrieve_all
def scan(hostname, **kwargs):
"""Performs an Observatory scan, but doesn't require any database/redis
backing. Given the lowered security concerns due to not being a public
API, you can use this to scan arbitrary ports and paths.
Args:
hostname (str): domain name for host to be scanned. Must not include
protocol (http://, https://) or port number (:80).
Kwargs:
http_port (int): port to scan for HTTP, instead of 80
https_port (int): port to be scanned for HTTPS, instead of 443
path (str): path to scan, instead of "/"
        verify (bool): whether to enable or disable certificate verification,
            enabled by default. Disabling verification can allow tested sites
            to pass the HSTS and HPKP tests even with self-signed certificates.
cookies (dict): Cookies sent to the system being scanned. Matches the
requests cookie dict.
headers (dict): HTTP headers sent to the system being scanned. Format
matches the requests headers dict.
Returns:
A dict representing the analyze (scan) and getScanResults (test) API call. Example:
{
'scan': {
'grade': 'A'
...
},
'test': {
'content-security-policy': {
'pass': True
...
}
}
}
"""
# Always allow localhost scans when run in this way
httpobs.conf.SCANNER_ALLOW_LOCALHOST = True
# Attempt to retrieve all the resources, not capturing exceptions
reqs = retrieve_all(hostname, **kwargs)
# If we can't connect at all, let's abort the test
if reqs['responses']['auto'] is None:
return {'error': 'site down'}
# Get all the results
results = [test(reqs) for test in tests]
for result in results:
result['score_description'] = get_score_description(result['result'])
# Get the score, grade, etc.
grades = get_grade_and_likelihood_for_score(100 + sum([result.get('score_modifier', 0) for result in results]))
tests_passed = sum([1 if result.get('pass') else 0 for result in results])
# Return the results
return({
'scan': {
'grade': grades[1],
'likelihood_indicator': grades[2],
'response_headers': dict(reqs['responses']['auto'].headers),
'score': grades[0],
'tests_failed': NUM_TESTS - tests_passed,
'tests_passed': tests_passed,
'tests_quantity': NUM_TESTS,
},
'tests': {result.pop('name'): result for result in results}
})
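# Example usage (an illustrative sketch; the hostname is arbitrary and the
# import path assumes this module is importable as httpobs.scanner.local):
#
#     from httpobs.scanner.local import scan
#     result = scan('example.com', path='/', verify=True)
#     print(result['scan']['grade'], result['scan']['score'])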
|
import logging
import os
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import sysbench_benchmark
class MySQLServiceBenchmarkTestCase(unittest.TestCase,
test_util.SamplesTestMixin):
def setUp(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'sysbench-output-sample.txt')
with open(path) as fp:
self.contents = fp.read()
def testParseSysbenchResult(self):
results = []
metadata = {}
sysbench_benchmark.AddMetricsForSysbenchOutput(
self.contents, results, metadata)
    logging.info('results are: %s', results)
expected_results = [
sample.Sample('tps_array', -1, 'tps', {'tps': [
1012.86, 1006.64, 1022.3, 1016.16, 1009.03, 1016.99, 1010.0, 1018.0,
1002.01, 998.49, 959.52, 913.49, 936.98, 916.01, 957.96]}),
sample.Sample('latency_array', -1, 'ms', {'latency': [
28.67, 64.47, 38.94, 44.98, 89.16, 29.72, 106.75, 46.63, 116.8,
41.85, 27.17, 104.84, 58.92, 75.82, 73.13]}),
sample.Sample('qps_array', -1, 'qps', {'qps': [
20333.18, 20156.38, 20448.49, 20334.15, 20194.07, 20331.31,
20207.00, 20348.96, 20047.11, 19972.86, 19203.97, 18221.83,
18689.14, 18409.68, 19155.63]})]
self.assertSampleListsEqualUpToTimestamp(results, expected_results)
if __name__ == '__main__':
unittest.main()
|
import zigpy.zcl.clusters.lighting as lighting
from .. import registries, typing as zha_typing
from ..const import REPORT_CONFIG_DEFAULT
from .base import ClientChannel, ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Ballast.cluster_id)
class Ballast(ZigbeeChannel):
"""Ballast channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(lighting.Color.cluster_id)
class ColorClientChannel(ClientChannel):
"""Color client channel."""
@registries.BINDABLE_CLUSTERS.register(lighting.Color.cluster_id)
@registries.LIGHT_CLUSTERS.register(lighting.Color.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Color.cluster_id)
class ColorChannel(ZigbeeChannel):
"""Color channel."""
CAPABILITIES_COLOR_XY = 0x08
CAPABILITIES_COLOR_TEMP = 0x10
UNSUPPORTED_ATTRIBUTE = 0x86
REPORT_CONFIG = (
{"attr": "current_x", "config": REPORT_CONFIG_DEFAULT},
{"attr": "current_y", "config": REPORT_CONFIG_DEFAULT},
{"attr": "color_temperature", "config": REPORT_CONFIG_DEFAULT},
)
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize ColorChannel."""
super().__init__(cluster, ch_pool)
self._color_capabilities = None
self._min_mireds = 153
self._max_mireds = 500
@property
def min_mireds(self):
"""Return the coldest color_temp that this channel supports."""
return self._min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this channel supports."""
return self._max_mireds
def get_color_capabilities(self):
"""Return the color capabilities."""
return self._color_capabilities
async def async_configure(self):
"""Configure channel."""
await self.fetch_color_capabilities(False)
await super().async_configure()
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.fetch_color_capabilities(True)
attributes = ["color_temperature", "current_x", "current_y"]
await self.get_attributes(attributes, from_cache=from_cache)
async def fetch_color_capabilities(self, from_cache):
"""Get the color configuration."""
attributes = [
"color_temp_physical_min",
"color_temp_physical_max",
"color_capabilities",
]
results = await self.get_attributes(attributes, from_cache=from_cache)
capabilities = results.get("color_capabilities")
self._min_mireds = results.get("color_temp_physical_min", 153)
self._max_mireds = results.get("color_temp_physical_max", 500)
if capabilities is None:
# ZCL Version 4 devices don't support the color_capabilities
# attribute. In this version XY support is mandatory, but we
# need to probe to determine if the device supports color
# temperature.
capabilities = self.CAPABILITIES_COLOR_XY
result = await self.get_attribute_value(
"color_temperature", from_cache=from_cache
)
if result is not None and result is not self.UNSUPPORTED_ATTRIBUTE:
capabilities |= self.CAPABILITIES_COLOR_TEMP
self._color_capabilities = capabilities
await super().async_initialize(from_cache)
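# Note (illustrative): a device reporting color_capabilities == 0x18 supports
# both XY color and color temperature, since
# CAPABILITIES_COLOR_XY | CAPABILITIES_COLOR_TEMP == 0x08 | 0x10 == 0x18.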
|
from django import forms
from weblate.screenshots.models import Screenshot
from weblate.trans.forms import QueryField
from weblate.utils.forms import SortedSelect
class ScreenshotEditForm(forms.ModelForm):
"""Screenshot editing."""
class Meta:
model = Screenshot
fields = ("name", "image")
class LanguageChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.language
class ScreenshotForm(forms.ModelForm):
"""Screenshot upload."""
class Meta:
model = Screenshot
fields = ("name", "image", "translation")
widgets = {
"translation": SortedSelect,
}
field_classes = {
"translation": LanguageChoiceField,
}
def __init__(self, component, data=None, files=None, instance=None):
self.component = component
super().__init__(data=data, files=files, instance=instance)
self.fields[
"translation"
].queryset = component.translation_set.prefetch_related("language")
self.fields["translation"].initial = component.source_translation
class SearchForm(forms.Form):
q = QueryField(required=False)
|
import builtins
import sys
import pytest
import numpy as np
from tensornetwork import connect, contract, Node
from tensornetwork.backends.abstract_backend import AbstractBackend
from tensornetwork.backends import backend_factory
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
#never do this outside testing
clean_tensornetwork_modules()
yield # use as teardown
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
# pylint: disable=redefined-builtin
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
# Nuke the cache.
backend_factory._INSTANTIATED_BACKENDS = dict()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
#pylint: disable=import-outside-toplevel
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
#not sure why config is imported here?
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork.config
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
#pylint: disable=reimported
import tensornetwork
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.pytorch.pytorch_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.tensorflow.tensorflow_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.jax.jax_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
a = Node(np.ones((10,)), backend="numpy")
b = Node(np.ones((10,)), backend="numpy")
edge = connect(a[0], b[0])
final_node = contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
#pylint: disable=import-outside-toplevel
#pylint: disable=reimported
#pylint: disable=unused-variable
import tensornetwork
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="jax")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="tensorflow")
with pytest.raises(ImportError):
Node(np.ones((2, 2)), backend="pytorch")
def test_abstract_backend_name():
backend = AbstractBackend()
assert backend.name == "abstract backend"
def test_abstract_backend_tensordot_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.tensordot(np.ones((2, 2)), np.ones((2, 2)), axes=[[0], [0]])
def test_abstract_backend_reshape_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.reshape(np.ones((2, 2)), (4, 1))
def test_abstract_backend_transpose_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.transpose(np.ones((2, 2)), [0, 1])
def test_abstract_backend_slice_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.slice(np.ones((2, 2)), (0, 1), (1, 1))
def test_abstract_backend_svd_decomposition_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.svd(np.ones((2, 2)), 0)
def test_abstract_backend_qr_decomposition_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.qr(np.ones((2, 2)), 0)
def test_abstract_backend_cholesky_decomposition_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.cholesky(np.ones((2, 2)), 0)
def test_abstract_backend_rq_decomposition_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.rq(np.ones((2, 2)), 0)
def test_abstract_backend_shape_concat_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.shape_concat([np.ones((2, 2)), np.ones((2, 2))], 0)
def test_abstract_backend_shape_tensor_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.shape_tensor(np.ones((2, 2)))
def test_abstract_backend_shape_tuple_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.shape_tuple(np.ones((2, 2)))
def test_abstract_backend_shape_prod_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.shape_prod(np.ones((2, 2)))
def test_abstract_backend_sqrt_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.sqrt(np.ones((2, 2)))
def test_abstract_backend_convert_to_tensor_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.convert_to_tensor(np.ones((2, 2)))
def test_abstract_backend_trace_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.trace(np.ones((2, 2)))
def test_abstract_backend_outer_product_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.outer_product(np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_einsum_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.einsum("ii", np.ones((2, 2)), optimize=True)
def test_abstract_backend_norm_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.norm(np.ones((2, 2)))
def test_abstract_backend_eye_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.eye(2, dtype=np.float64)
def test_abstract_backend_ones_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.ones((2, 2), dtype=np.float64)
def test_abstract_backend_zeros_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.zeros((2, 2), dtype=np.float64)
def test_abstract_backend_randn_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.randn((2, 2))
def test_abstract_backend_random_uniform_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.random_uniform((2, 2))
def test_abstract_backend_conj_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.conj(np.ones((2, 2)))
def test_abstract_backend_eigh_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.eigh(np.ones((2, 2)))
def test_abstract_backend_eigs_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.eigs(np.ones((2, 2)))
def test_abstract_backend_eigsh_lanczos_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.eigsh_lanczos(lambda x: x, np.ones((2)))
def test_abstract_backend_gmres_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.gmres(lambda x: x, np.ones((2)))
def test_abstract_backend_addition_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.addition(np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_subtraction_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.subtraction(np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_multiply_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.multiply(np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_divide_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.divide(np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_index_update_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.index_update(np.ones((2, 2)), np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_inv_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.inv(np.ones((2, 2)))
def test_abstract_backend_sin_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.sin(np.ones((2, 2)))
def test_abstract_backend_cos_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.cos(np.ones((2, 2)))
def test_abstract_backend_exp_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.exp(np.ones((2, 2)))
def test_abstract_backend_log_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.log(np.ones((2, 2)))
def test_abstract_backend_expm_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.expm(np.ones((2, 2)))
def test_abstract_backend_sparse_shape_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.sparse_shape(np.ones((2, 2)))
def test_abstract_backend_broadcast_right_multiplication_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_right_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_abstract_backend_broadcast_left_multiplication_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.broadcast_left_multiplication(np.ones((2, 2)), np.ones((2, 2)))
def test_backend_instantiation(backend):
backend1 = backend_factory.get_backend(backend)
backend2 = backend_factory.get_backend(backend)
assert backend1 is backend2
def test_abstract_backend_diagonal_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.diagonal(np.ones((2, 2)))
def test_abstract_backend_diagflat_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.diagflat(np.ones((2, 2)))
def test_abstract_backend_sign_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.sign(np.ones((2, 2)))
def test_abstract_backend_abs_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.abs(np.ones((2, 2)))
def test_pivot_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.pivot(np.ones((2, 2)))
def test_power_not_implemented():
backend = AbstractBackend()
with pytest.raises(NotImplementedError):
backend.power(np.array([1, 2]), np.array([1, 2]))
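# A minimal sketch (not part of the TensorNetwork test suite) of how a concrete
# backend would override one of the AbstractBackend stubs exercised above; the
# class name and the choice of method are illustrative only.
class _NumpyTensordotOnlyBackend(AbstractBackend):

  def tensordot(self, a, b, axes):
    # Delegate to NumPy; every other method still raises NotImplementedError.
    return np.tensordot(a, b, axes=axes)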
|
import sys
import time
import contextlib
import datetime
import pytest
from PyQt5.QtCore import QProcess
from end2end.fixtures import testprocess
pytestmark = [pytest.mark.not_frozen]
@contextlib.contextmanager
def stopwatch(min_ms=None, max_ms=None):
if min_ms is None and max_ms is None:
raise ValueError("Using stopwatch with both min_ms/max_ms None does "
"nothing.")
start = datetime.datetime.now()
yield
stop = datetime.datetime.now()
delta_ms = (stop - start).total_seconds() * 1000
if min_ms is not None:
assert delta_ms >= min_ms
if max_ms is not None:
assert delta_ms <= max_ms
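# Illustrative use of the stopwatch() helper above (a sketch, not an actual
# test): the timed block must finish within the given bounds, otherwise the
# assertions inside stopwatch() fail.
def _stopwatch_usage_example():
    with stopwatch(min_ms=0, max_ms=10000):
        time.sleep(0.01)  # any timed operation goes here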
class PythonProcess(testprocess.Process):
"""A testprocess which runs the given Python code."""
def __init__(self, request):
super().__init__(request)
self.proc.setReadChannel(QProcess.StandardOutput)
self.code = None
def _parse_line(self, line):
print("LINE: {}".format(line))
if line.strip() == 'ready':
self.ready.emit()
return testprocess.Line(line)
def _executable_args(self):
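        # Build a ';'-joined one-liner for "python -c": print 'ready' so the
        # fixture knows the process started, run the test-specific self.code,
        # then sleep so the process stays alive until it is terminated.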
code = [
'import sys, time',
'print("ready")',
'sys.stdout.flush()',
self.code,
'sys.stdout.flush()',
'time.sleep(20)',
]
return (sys.executable, ['-c', ';'.join(code)])
def _default_args(self):
return []
class QuitPythonProcess(PythonProcess):
"""A testprocess which quits immediately."""
def _executable_args(self):
code = [
'import sys',
'print("ready")',
'sys.exit(0)',
]
return (sys.executable, ['-c', ';'.join(code)])
class NoReadyPythonProcess(PythonProcess):
"""A testprocess which never emits 'ready' and quits."""
def _executable_args(self):
code = [
'import sys',
'sys.exit(0)',
]
return (sys.executable, ['-c', ';'.join(code)])
@pytest.fixture
def pyproc(request):
proc = PythonProcess(request)
yield proc
proc.terminate()
@pytest.fixture
def quit_pyproc(request):
proc = QuitPythonProcess(request)
yield proc
proc.terminate()
@pytest.fixture
def noready_pyproc(request):
proc = NoReadyPythonProcess(request)
yield proc
proc.terminate()
def test_no_ready_python_process(noready_pyproc):
"""When a process quits immediately, waiting for start should interrupt."""
with pytest.raises(testprocess.ProcessExited):
with stopwatch(max_ms=5000):
noready_pyproc.start()
def test_quitting_process(qtbot, quit_pyproc):
with qtbot.waitSignal(quit_pyproc.proc.finished):
quit_pyproc.start()
with pytest.raises(testprocess.ProcessExited):
quit_pyproc.after_test()
def test_quitting_process_expected(qtbot, quit_pyproc):
quit_pyproc.exit_expected = True
with qtbot.waitSignal(quit_pyproc.proc.finished):
quit_pyproc.start()
quit_pyproc.after_test()
def test_process_never_started(qtbot, quit_pyproc):
"""Calling after_test without start should not fail."""
quit_pyproc.after_test()
def test_wait_signal_raising(request, qtbot):
"""testprocess._wait_signal should raise by default."""
proc = testprocess.Process(request)
with pytest.raises(qtbot.TimeoutError):
with proc._wait_signal(proc.proc.started, timeout=0):
pass
def test_custom_environment(pyproc):
pyproc.code = 'import os; print(os.environ["CUSTOM_ENV"])'
pyproc.start(env={'CUSTOM_ENV': 'blah'})
pyproc.wait_for(data='blah')
@pytest.mark.posix
def test_custom_environment_system_env(monkeypatch, pyproc):
"""When env=... is given, the system environment should be present."""
monkeypatch.setenv('QUTE_TEST_ENV', 'blubb')
pyproc.code = 'import os; print(os.environ["QUTE_TEST_ENV"])'
pyproc.start(env={})
pyproc.wait_for(data='blubb')
class TestWaitFor:
def test_successful(self, pyproc):
"""Using wait_for with the expected text."""
pyproc.code = "time.sleep(0.5); print('foobar')"
with stopwatch(min_ms=500):
pyproc.start()
pyproc.wait_for(data="foobar")
def test_other_text(self, pyproc):
"""Test wait_for when getting some unrelated text."""
pyproc.code = "time.sleep(0.1); print('blahblah')"
pyproc.start()
with pytest.raises(testprocess.WaitForTimeout):
pyproc.wait_for(data="foobar", timeout=500)
def test_no_text(self, pyproc):
"""Test wait_for when getting no text at all."""
pyproc.code = "pass"
pyproc.start()
with pytest.raises(testprocess.WaitForTimeout):
pyproc.wait_for(data="foobar", timeout=100)
@pytest.mark.parametrize('message', ['foobar', 'literal [x]'])
def test_existing_message(self, message, pyproc):
"""Test with a message which already passed when waiting."""
pyproc.code = "print('{}')".format(message)
pyproc.start()
time.sleep(0.5) # to make sure the message is printed
pyproc.wait_for(data=message)
def test_existing_message_previous_test(self, pyproc):
"""Make sure the message of a previous test gets ignored."""
pyproc.code = "print('foobar')"
pyproc.start()
line = pyproc.wait_for(data="foobar")
line.waited_for = False # so we don't test what the next test does
pyproc.after_test()
with pytest.raises(testprocess.WaitForTimeout):
pyproc.wait_for(data="foobar", timeout=100)
def test_existing_message_already_waited(self, pyproc):
"""Make sure an existing message doesn't stop waiting twice.
wait_for checks existing messages (see above), but we don't want it to
automatically proceed if we already *did* use wait_for on one of the
existing messages, as that makes it likely it's not what we actually
want.
"""
pyproc.code = "time.sleep(0.1); print('foobar')"
pyproc.start()
pyproc.wait_for(data="foobar")
with pytest.raises(testprocess.WaitForTimeout):
pyproc.wait_for(data="foobar", timeout=100)
def test_no_kwargs(self, pyproc):
"""Using wait_for without kwargs should raise an exception.
Otherwise it'd match automatically because of the all(matches).
"""
with pytest.raises(TypeError):
pyproc.wait_for()
def test_do_skip(self, pyproc):
"""Test wait_for when getting no text at all, with do_skip."""
pyproc.code = "pass"
pyproc.start()
with pytest.raises(pytest.skip.Exception):
pyproc.wait_for(data="foobar", timeout=100, do_skip=True)
class TestEnsureNotLogged:
@pytest.mark.parametrize('message, pattern', [
('blacklisted', 'blacklisted'),
('bl[a]cklisted', 'bl[a]cklisted'),
('blacklisted', 'black*'),
])
def test_existing_message(self, pyproc, message, pattern):
pyproc.code = "print('{}')".format(message)
pyproc.start()
with stopwatch(max_ms=1000):
with pytest.raises(testprocess.BlacklistedMessageError):
pyproc.ensure_not_logged(data=pattern, delay=2000)
def test_late_message(self, pyproc):
pyproc.code = "time.sleep(0.5); print('blacklisted')"
pyproc.start()
with pytest.raises(testprocess.BlacklistedMessageError):
pyproc.ensure_not_logged(data='blacklisted', delay=5000)
def test_no_matching_message(self, pyproc):
pyproc.code = "print('blacklisted... nope!')"
pyproc.start()
pyproc.ensure_not_logged(data='blacklisted', delay=100)
def test_wait_for_and_blacklist(self, pyproc):
pyproc.code = "print('blacklisted')"
pyproc.start()
pyproc.wait_for(data='blacklisted')
with pytest.raises(testprocess.BlacklistedMessageError):
pyproc.ensure_not_logged(data='blacklisted', delay=0)
|
import string
import functools
import itertools
import operator
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.utils import usertypes
import qutebrowser.browser.hints
@pytest.fixture(autouse=True)
def setup(win_registry, mode_manager):
pass
@pytest.fixture
def tabbed_browser(tabbed_browser_stubs, web_tab):
tb = tabbed_browser_stubs[0]
tb.widget.tabs = [web_tab]
tb.widget.current_index = 1
tb.widget.cur_url = QUrl('https://www.example.com/')
web_tab.container.expose() # No elements found if we don't do this.
return tb
def test_show_benchmark(benchmark, tabbed_browser, qtbot, mode_manager):
"""Benchmark showing/drawing of hint labels."""
tab = tabbed_browser.widget.tabs[0]
with qtbot.wait_signal(tab.load_finished):
tab.load_url(QUrl('qute://testdata/data/hints/benchmark.html'))
manager = qutebrowser.browser.hints.HintManager(win_id=0)
def bench():
with qtbot.wait_signal(mode_manager.entered):
manager.start()
with qtbot.wait_signal(mode_manager.left):
mode_manager.leave(usertypes.KeyMode.hint)
benchmark(bench)
def test_match_benchmark(benchmark, tabbed_browser, qtbot, mode_manager, qapp,
config_stub):
"""Benchmark matching of hint labels."""
tab = tabbed_browser.widget.tabs[0]
with qtbot.wait_signal(tab.load_finished):
tab.load_url(QUrl('qute://testdata/data/hints/benchmark.html'))
config_stub.val.hints.scatter = False
manager = qutebrowser.browser.hints.HintManager(win_id=0)
with qtbot.wait_signal(mode_manager.entered):
manager.start()
def bench():
manager.handle_partial_key('a')
qapp.processEvents()
manager.handle_partial_key('')
qapp.processEvents()
benchmark(bench)
with qtbot.wait_signal(mode_manager.left):
mode_manager.leave(usertypes.KeyMode.hint)
@pytest.mark.parametrize('min_len', [0, 3])
@pytest.mark.parametrize('num_chars', [5, 9])
@pytest.mark.parametrize('num_elements', itertools.chain(range(1, 26), [125]))
def test_scattered_hints_count(min_len, num_chars, num_elements):
"""Test scattered hints function.
Tests many properties from an invocation of _hint_scattered, including
1. Hints must be unique
2. There can only be two hint lengths, only 1 apart
3. There are no unique prefixes for long hints, such as 'la' with no 'l<x>'
"""
manager = qutebrowser.browser.hints.HintManager(win_id=0)
chars = string.ascii_lowercase[:num_chars]
hints = manager._hint_scattered(min_len, chars,
list(range(num_elements)))
# Check if hints are unique
assert len(hints) == len(set(hints))
# Check if any hints are shorter than min_len
assert not any(x for x in hints if len(x) < min_len)
# Check we don't have more than 2 link lengths
# Eg: 'a' 'bc' and 'def' cannot be in the same hint string
hint_lens = {len(h) for h in hints}
assert len(hint_lens) <= 2
if len(hint_lens) == 2:
# Check if hint_lens are more than 1 apart
# Eg: 'abc' and 'd' cannot be in the same hint sequence, but
# 'ab' and 'c' can
assert abs(functools.reduce(operator.sub, hint_lens)) <= 1
longest_hint_len = max(hint_lens)
shortest_hint_len = min(hint_lens)
longest_hints = [x for x in hints if len(x) == longest_hint_len]
if min_len < max(hint_lens) - 1:
# Check if we have any unique prefixes. For example, 'la'
# alone, with no other 'l<x>'
count_map = {}
for x in longest_hints:
prefix = x[:-1]
count_map[prefix] = count_map.get(prefix, 0) + 1
assert all(e != 1 for e in count_map.values())
# Check that the longest hint length isn't too long
if longest_hint_len > min_len and longest_hint_len > 1:
assert num_chars ** (longest_hint_len - 1) < num_elements
# Check longest hint is not too short
assert num_chars ** longest_hint_len >= num_elements
if shortest_hint_len == longest_hint_len:
# Check that we really couldn't use any short links
assert ((num_chars ** longest_hint_len) - num_elements <
len(chars) - 1)
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from filelock import Timeout
from weblate.trans.util import redirect_param
from weblate.utils import messages
from weblate.utils.errors import report_error
from weblate.utils.views import get_component, get_project, get_translation
def execute_locked(request, obj, message, call, *args, **kwargs):
"""Helper function to catch possible lock exception."""
try:
result = call(*args, **kwargs)
# With False the call is supposed to show errors on its own
if result is None or result:
messages.success(request, message)
except Timeout:
messages.error(
request,
_("Failed to lock the repository, another operation is in progress."),
)
report_error()
return redirect_param(obj, "#repository")
def perform_commit(request, obj):
"""Helper function to do the repository commmit."""
return execute_locked(
request,
obj,
_("All pending translations were committed."),
obj.commit_pending,
"commit",
request.user,
)
def perform_update(request, obj):
"""Helper function to do the repository update."""
return execute_locked(
request,
obj,
_("All repositories were updated."),
obj.do_update,
request,
method=request.GET.get("method"),
)
def perform_push(request, obj):
"""Helper function to do the repository push."""
return execute_locked(
request, obj, _("All repositories were pushed."), obj.do_push, request
)
def perform_reset(request, obj):
"""Helper function to do the repository reset."""
return execute_locked(
request, obj, _("All repositories have been reset."), obj.do_reset, request
)
def perform_cleanup(request, obj):
"""Helper function to do the repository cleanup."""
return execute_locked(
request,
obj,
_("All repositories have been cleaned up."),
obj.do_cleanup,
request,
)
@login_required
@require_POST
def commit_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("vcs.commit", obj):
raise PermissionDenied()
return perform_commit(request, obj)
@login_required
@require_POST
def commit_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("vcs.commit", obj):
raise PermissionDenied()
return perform_commit(request, obj)
@login_required
@require_POST
def commit_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
if not request.user.has_perm("vcs.commit", obj):
raise PermissionDenied()
return perform_commit(request, obj)
@login_required
@require_POST
def update_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("vcs.update", obj):
raise PermissionDenied()
return perform_update(request, obj)
@login_required
@require_POST
def update_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("vcs.update", obj):
raise PermissionDenied()
return perform_update(request, obj)
@login_required
@require_POST
def update_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
if not request.user.has_perm("vcs.update", obj):
raise PermissionDenied()
return perform_update(request, obj)
@login_required
@require_POST
def push_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("vcs.push", obj):
raise PermissionDenied()
return perform_push(request, obj)
@login_required
@require_POST
def push_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("vcs.push", obj):
raise PermissionDenied()
return perform_push(request, obj)
@login_required
@require_POST
def push_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
if not request.user.has_perm("vcs.push", obj):
raise PermissionDenied()
return perform_push(request, obj)
@login_required
@require_POST
def reset_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return perform_reset(request, obj)
@login_required
@require_POST
def reset_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return perform_reset(request, obj)
@login_required
@require_POST
def reset_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return perform_reset(request, obj)
@login_required
@require_POST
def cleanup_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return perform_cleanup(request, obj)
@login_required
@require_POST
def cleanup_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return perform_cleanup(request, obj)
@login_required
@require_POST
def cleanup_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
if not request.user.has_perm("vcs.reset", obj):
raise PermissionDenied()
return perform_cleanup(request, obj)
|
import getpass
import logging
import platform
import urllib.parse
import mechanicalsoup
import requests
from hangups import version
logger = logging.getLogger(__name__)
# Set the logging level for requests to at least INFO, since the DEBUG level
# will log sensitive data:
if logging.getLogger('requests').isEnabledFor(logging.DEBUG):
logging.getLogger('requests').setLevel(logging.INFO)
OAUTH2_CLIENT_ID = '936475272427.apps.googleusercontent.com'
OAUTH2_CLIENT_SECRET = 'KWsJlkaMn1jGLxQpWxMnOox-'
OAUTH2_SCOPES = [
'https://www.google.com/accounts/OAuthLogin',
'https://www.googleapis.com/auth/userinfo.email',
]
# Note that '+' separating scopes must not be escaped by urlencode
OAUTH2_LOGIN_URL = (
'https://accounts.google.com/o/oauth2/programmatic_auth?{}'.format(
urllib.parse.urlencode(dict(
scope='+'.join(OAUTH2_SCOPES),
client_id=OAUTH2_CLIENT_ID,
device_name='hangups',
), safe='+')
)
)
OAUTH2_TOKEN_REQUEST_URL = 'https://accounts.google.com/o/oauth2/token'
FORM_SELECTOR = '#gaia_loginform'
EMAIL_SELECTOR = '#Email'
PASSWORD_SELECTOR = '#Passwd'
VERIFICATION_FORM_SELECTOR = '#challenge'
TOTP_CHALLENGE_SELECTOR = '[action="/signin/challenge/totp/2"]'
PHONE_CHALLENGE_SELECTOR = '[action="/signin/challenge/ipp/4"]'
TOTP_CODE_SELECTOR = '#totpPin'
PHONE_CODE_SELECTOR = '#idvPreregisteredPhonePin'
USER_AGENT = 'hangups/{} ({} {})'.format(
version.__version__, platform.system(), platform.machine()
)
MANUAL_LOGIN_INSTRUCTIONS = '''
To sign in with your Google account:
1) Open the URL provided below in your browser.
2) Log into your Google account normally.
3) You should be redirected to a loading screen. Copy the `oauth_code`
cookie value set by this page and paste it here.
To obtain the cookie value using Chrome or Firefox:
1) Press F12 to open developer tools.
2) Select the "Application" (Chrome) or "Storage" (Firefox) tab.
3) In the sidebar, expand "Cookies" and select
`https://accounts.google.com`.
4) In the cookie list, double click on the value for the `oauth_code`
cookie to select it, and copy the value.
{}
'''.format(OAUTH2_LOGIN_URL)
class GoogleAuthError(Exception):
"""A Google authentication request failed."""
class CredentialsPrompt:
"""Callbacks for prompting user for their Google account credentials.
This implementation prompts the user in a terminal using standard in/out.
"""
@staticmethod
def get_email():
"""Prompt for email.
Returns:
str: Google account email address.
"""
print('Sign in with your Google account:')
return input('Email: ')
@staticmethod
def get_password():
"""Prompt for password.
Returns:
str: Google account password.
"""
return getpass.getpass()
@staticmethod
def get_verification_code():
"""Prompt for verification code.
Returns:
str: Google account verification code.
"""
return input('Verification code: ')
@staticmethod
def get_authorization_code():
"""Prompt for authorization code.
Returns:
str: Google account authorization code.
"""
print(MANUAL_LOGIN_INSTRUCTIONS)
return input('Authorization code: ')
class RefreshTokenCache:
"""File-based cache for refresh token.
Args:
filename (str): Path to file where refresh token will be cached.
"""
def __init__(self, filename):
self._filename = filename
def get(self):
"""Get cached refresh token.
Returns:
Cached refresh token, or ``None`` on failure.
"""
logger.info(
'Loading refresh_token from %s', repr(self._filename)
)
try:
with open(self._filename) as f:
return f.read()
except IOError as e:
logger.info('Failed to load refresh_token: %s', e)
def set(self, refresh_token):
"""Cache a refresh token, ignoring any failure.
Args:
refresh_token (str): Refresh token to cache.
"""
logger.info('Saving refresh_token to %s', repr(self._filename))
try:
with open(self._filename, 'w') as f:
f.write(refresh_token)
except IOError as e:
logger.warning('Failed to save refresh_token: %s', e)
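# Illustrative round trip through RefreshTokenCache (a sketch; the filename is
# just an example):
#
#     cache = RefreshTokenCache('refresh_token.txt')
#     cache.set('example-refresh-token')
#     cache.get()  # -> 'example-refresh-token', or None if the file is unreadable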
def get_auth(credentials_prompt, refresh_token_cache, manual_login=False):
"""Authenticate with Google.
Args:
refresh_token_cache (RefreshTokenCache): Cache to use so subsequent
logins may not require credentials.
credentials_prompt (CredentialsPrompt): Prompt to use if credentials
are required to log in.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Returns:
dict: Google session cookies.
Raises:
GoogleAuthError: If authentication with Google fails.
"""
with requests.Session() as session:
session.headers = {'user-agent': USER_AGENT}
try:
logger.info('Authenticating with refresh token')
refresh_token = refresh_token_cache.get()
if refresh_token is None:
raise GoogleAuthError("Refresh token not found")
access_token = _auth_with_refresh_token(session, refresh_token)
except GoogleAuthError as e:
logger.info('Failed to authenticate using refresh token: %s', e)
logger.info('Authenticating with credentials')
if manual_login:
authorization_code = (
credentials_prompt.get_authorization_code()
)
else:
authorization_code = _get_authorization_code(
session, credentials_prompt
)
access_token, refresh_token = _auth_with_code(
session, authorization_code
)
refresh_token_cache.set(refresh_token)
logger.info('Authentication successful')
return _get_session_cookies(session, access_token)
def get_auth_stdin(refresh_token_filename, manual_login=False):
"""Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
"""
refresh_token_cache = RefreshTokenCache(refresh_token_filename)
return get_auth(
CredentialsPrompt(), refresh_token_cache, manual_login=manual_login
)
class Browser:
"""Virtual browser for submitting forms and moving between pages.
Raises GoogleAuthError if URL fails to load.
"""
def __init__(self, session, url):
self._session = session
self._browser = mechanicalsoup.Browser(
soup_config=dict(features='html.parser'), session=self._session
)
try:
self._page = self._browser.get(url)
self._page.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('Failed to load form: {}'.format(e))
def has_selector(self, selector):
"""Return True if selector matches an element on the current page."""
return len(self._page.soup.select(selector)) > 0
def submit_form(self, form_selector, input_dict):
"""Populate and submit a form on the current page.
Raises GoogleAuthError if form can not be submitted.
"""
logger.info(
'Submitting form on page %r', self._page.url.split('?')[0]
)
logger.info(
'Page contains forms: %s',
[elem.get('id') for elem in self._page.soup.select('form')]
)
try:
form = self._page.soup.select(form_selector)[0]
except IndexError:
raise GoogleAuthError(
'Failed to find form {!r} in page'.format(form_selector)
)
logger.info(
'Page contains inputs: %s',
[elem.get('id') for elem in form.select('input')]
)
for selector, value in input_dict.items():
try:
form.select(selector)[0]['value'] = value
except IndexError:
raise GoogleAuthError(
'Failed to find input {!r} in form'.format(selector)
)
try:
self._page = self._browser.submit(form, self._page.url)
self._page.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('Failed to submit form: {}'.format(e))
def get_cookie(self, name):
"""Return cookie value from the browser session.
Raises KeyError if cookie is not found.
"""
return self._session.cookies[name]
def _get_authorization_code(session, credentials_prompt):
"""Get authorization code using Google account credentials.
Because hangups can't use a real embedded browser, it has to use the
Browser class to enter the user's credentials and retrieve the
authorization code, which is placed in a cookie. This is the most fragile
part of the authentication process, because a change to a login form or an
unexpected prompt could break it.
    Raises GoogleAuthError if authentication fails.
Returns authorization code string.
"""
browser = Browser(session, OAUTH2_LOGIN_URL)
email = credentials_prompt.get_email()
browser.submit_form(FORM_SELECTOR, {EMAIL_SELECTOR: email})
password = credentials_prompt.get_password()
browser.submit_form(FORM_SELECTOR, {PASSWORD_SELECTOR: password})
if browser.has_selector(TOTP_CHALLENGE_SELECTOR):
browser.submit_form(TOTP_CHALLENGE_SELECTOR, {})
elif browser.has_selector(PHONE_CHALLENGE_SELECTOR):
browser.submit_form(PHONE_CHALLENGE_SELECTOR, {})
if browser.has_selector(VERIFICATION_FORM_SELECTOR):
if browser.has_selector(TOTP_CODE_SELECTOR):
input_selector = TOTP_CODE_SELECTOR
elif browser.has_selector(PHONE_CODE_SELECTOR):
input_selector = PHONE_CODE_SELECTOR
else:
raise GoogleAuthError('Unknown verification code input')
        verification_code = credentials_prompt.get_verification_code()
        browser.submit_form(
            VERIFICATION_FORM_SELECTOR, {input_selector: verification_code}
        )
try:
return browser.get_cookie('oauth_code')
except KeyError:
raise GoogleAuthError('Authorization code cookie not found')
def _auth_with_refresh_token(session, refresh_token):
"""Authenticate using OAuth refresh token.
Raises GoogleAuthError if authentication fails.
Returns access token string.
"""
# Make a token request.
token_request_data = {
'client_id': OAUTH2_CLIENT_ID,
'client_secret': OAUTH2_CLIENT_SECRET,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
res = _make_token_request(session, token_request_data)
return res['access_token']
def _auth_with_code(session, authorization_code):
"""Authenticate using OAuth authorization code.
Raises GoogleAuthError if authentication fails.
Returns access token string and refresh token string.
"""
# Make a token request.
token_request_data = {
'client_id': OAUTH2_CLIENT_ID,
'client_secret': OAUTH2_CLIENT_SECRET,
'code': authorization_code,
'grant_type': 'authorization_code',
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
}
res = _make_token_request(session, token_request_data)
return res['access_token'], res['refresh_token']
def _make_token_request(session, token_request_data):
"""Make OAuth token request.
Raises GoogleAuthError if authentication fails.
Returns dict response.
"""
try:
r = session.post(OAUTH2_TOKEN_REQUEST_URL, data=token_request_data)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('Token request failed: {}'.format(e))
else:
res = r.json()
# If an error occurred, a key 'error' will contain an error code.
if 'error' in res:
raise GoogleAuthError(
'Token request error: {!r}'.format(res['error'])
)
return res
def _get_session_cookies(session, access_token):
"""Use the access token to get session cookies.
Raises GoogleAuthError if session cookies could not be loaded.
Returns dict of cookies.
"""
headers = {'Authorization': 'Bearer {}'.format(access_token)}
try:
r = session.get(('https://accounts.google.com/accounts/OAuthLogin'
'?source=hangups&issueuberauth=1'), headers=headers)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('OAuthLogin request failed: {}'.format(e))
uberauth = r.text
try:
r = session.get(('https://accounts.google.com/MergeSession?'
'service=mail&'
'continue=http://www.google.com&uberauth={}')
.format(uberauth), headers=headers)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('MergeSession request failed: {}'.format(e))
cookies = session.cookies.get_dict(domain='.google.com')
if cookies == {}:
raise GoogleAuthError('Failed to find session cookies')
return cookies
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
print(get_auth_stdin('refresh_token.txt'))
|
import glob
import os
import os.path
import re
from qutebrowser.utils import log, message, standarddir
_DICT_VERSION_RE = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
def version(filename):
"""Extract the version number from the dictionary file name."""
match = _DICT_VERSION_RE.fullmatch(filename)
if match is None:
message.warning(
"Found a dictionary with a malformed name: {}".format(filename))
return None
return tuple(int(n) for n in match.group('version').split('-'))
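# For example (illustrative): a file named "en-US-8-0.bdic" yields the version
# tuple (8, 0), while a filename without the trailing "-<major>-<minor>.bdic"
# pattern triggers the warning above and returns None.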
def dictionary_dir():
"""Return the path (str) to the QtWebEngine's dictionaries directory."""
return os.path.join(standarddir.data(), 'qtwebengine_dictionaries')
def local_files(code):
"""Return all installed dictionaries for the given code.
The returned dictionaries are sorted by version, therefore the latest will
be the first element. The list will be empty if no dictionaries are found.
"""
pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))
matching_dicts = glob.glob(pathname)
versioned_dicts = []
for matching_dict in matching_dicts:
parsed_version = version(matching_dict)
if parsed_version is not None:
filename = os.path.basename(matching_dict)
log.config.debug('Found file for dict {}: {}'
.format(code, filename))
versioned_dicts.append((parsed_version, filename))
return [filename for version, filename
in sorted(versioned_dicts, reverse=True)]
def local_filename(code):
"""Return the newest installed dictionary for the given code.
Return the filename of the installed dictionary with the highest version
number or None if the dictionary is not installed.
"""
all_installed = local_files(code)
return all_installed[0] if all_installed else None
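# For example (illustrative): with "en-US-8-0.bdic" and "en-US-7-1.bdic" both
# installed, local_files('en-US') lists them newest first and
# local_filename('en-US') returns 'en-US-8-0.bdic'.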
def init():
"""Initialize the dictionary path."""
dict_dir = dictionary_dir()
os.environ['QTWEBENGINE_DICTIONARIES_PATH'] = dict_dir
|
import os
import unittest
import pytest
from smart_open import open
skip_tests = "SMART_OPEN_TEST_MISSING_DEPS" not in os.environ
class PackageTests(unittest.TestCase):
@pytest.mark.skipif(skip_tests, reason="requires missing dependencies")
def test_azure_raises_helpful_error_with_missing_deps(self):
with pytest.raises(ImportError, match=r"pip install smart_open\[azure\]"):
open("azure://foo/bar")
@pytest.mark.skipif(skip_tests, reason="requires missing dependencies")
def test_aws_raises_helpful_error_with_missing_deps(self):
match = r"pip install smart_open\[s3\]"
with pytest.raises(ImportError, match=match):
open("s3://foo/bar")
@pytest.mark.skipif(skip_tests, reason="requires missing dependencies")
def test_gcs_raises_helpful_error_with_missing_deps(self):
with pytest.raises(ImportError, match=r"pip install smart_open\[gcs\]"):
open("gs://foo/bar")
|
import logging
from ... import storage
from ... import toolkit
from .. import config
from . import Index
import sqlalchemy
import sqlalchemy.exc
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import sqlalchemy.sql.functions
logger = logging.getLogger(__name__)
Base = sqlalchemy.ext.declarative.declarative_base()
class Version (Base):
"Schema version for the search-index database"
__tablename__ = 'version'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
def __repr__(self):
return '<{0}(id={1})>'.format(type(self).__name__, self.id)
class Repository (Base):
"Repository description"
__tablename__ = 'repository'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(
        sqlalchemy.String(length=30 + 1 + 64),  # namespace / repository
nullable=False, unique=True)
description = sqlalchemy.Column(
sqlalchemy.String(length=100))
def __repr__(self):
return "<{0}(name='{1}', description='{2}')>".format(
type(self).__name__, self.name, self.description)
def retry(f):
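    # Retry the wrapped call once after a DBAPIError, reconnecting to the
    # database in between; any further failure is re-raised.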
def _retry(self, *args, **kwargs):
retry_times = 1
i = 0
while True:
try:
return f(self, *args, **kwargs)
except sqlalchemy.exc.DBAPIError as e:
if i < retry_times:
logger.warn("DB is disconnected. Reconnect to it.")
self.reconnect_db()
i += 1
else:
raise e
return _retry
class SQLAlchemyIndex (Index):
"""Maintain an index of repository data
The index is a dictionary. The keys are
'{namespace}/{repository}' strings, and the values are description
strings. For example:
index['library/ubuntu'] = 'An ubuntu image...'
"""
def __init__(self, database=None):
if database is None:
cfg = config.load()
database = cfg.sqlalchemy_index_database
self._database = database
self._engine = sqlalchemy.create_engine(database)
self._session = sqlalchemy.orm.sessionmaker(bind=self._engine)
self.version = 1
self._setup_database()
super(SQLAlchemyIndex, self).__init__()
def reconnect_db(self):
self._engine = sqlalchemy.create_engine(self._database)
self._session = sqlalchemy.orm.sessionmaker(bind=self._engine)
@toolkit.exclusive_lock
def _setup_database(self):
session = self._session()
if self._engine.has_table(table_name=Version.__tablename__):
version = session.query(
sqlalchemy.sql.functions.max(Version.id)).first()[0]
else:
version = None
if version:
if version != self.version:
raise NotImplementedError(
'unrecognized search index version {0}'.format(version))
else:
self._generate_index(session=session)
session.close()
@retry
def _generate_index(self, session):
store = storage.load()
Base.metadata.create_all(self._engine)
session.add(Version(id=self.version))
for repository in self._walk_storage(store=store):
session.add(Repository(**repository))
session.commit()
@retry
def _handle_repository_created(
self, sender, namespace, repository, value):
name = '{0}/{1}'.format(namespace, repository)
description = '' # TODO(wking): store descriptions
session = self._session()
session.add(Repository(name=name, description=description))
session.commit()
session.close()
@retry
def _handle_repository_updated(
self, sender, namespace, repository, value):
name = '{0}/{1}'.format(namespace, repository)
description = '' # TODO(wking): store descriptions
session = self._session()
session.query(Repository).filter(
Repository.name == name
).update(
values={'description': description},
synchronize_session=False
)
session.commit()
session.close()
@retry
def _handle_repository_deleted(self, sender, namespace, repository):
name = '{0}/{1}'.format(namespace, repository)
session = self._session()
session.query(Repository).filter(Repository.name == name).delete()
session.commit()
session.close()
@retry
def results(self, search_term=None):
session = self._session()
repositories = session.query(Repository)
if search_term:
like_term = '%%%s%%' % search_term
repositories = repositories.filter(
sqlalchemy.sql.or_(
Repository.name.like(like_term),
Repository.description.like(like_term)))
results = [
{
'name': repo.name,
'description': repo.description,
}
for repo in repositories]
session.close()
return results
|
import random
import string
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_DOMAINS, CONF_ENTITIES, CONF_NAME, CONF_PORT
from homeassistant.core import callback, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
CONF_EXCLUDE_DOMAINS,
CONF_EXCLUDE_ENTITIES,
CONF_INCLUDE_DOMAINS,
CONF_INCLUDE_ENTITIES,
)
from .const import (
CONF_AUTO_START,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_HOMEKIT_MODE,
CONF_SAFE_MODE,
CONF_VIDEO_CODEC,
DEFAULT_AUTO_START,
DEFAULT_CONFIG_FLOW_PORT,
DEFAULT_HOMEKIT_MODE,
DEFAULT_SAFE_MODE,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODES,
SHORT_BRIDGE_NAME,
VIDEO_CODEC_COPY,
)
from .const import DOMAIN # pylint:disable=unused-import
from .util import find_next_available_port
CONF_CAMERA_COPY = "camera_copy"
CONF_INCLUDE_EXCLUDE_MODE = "include_exclude_mode"
MODE_INCLUDE = "include"
MODE_EXCLUDE = "exclude"
INCLUDE_EXCLUDE_MODES = [MODE_EXCLUDE, MODE_INCLUDE]
SUPPORTED_DOMAINS = [
"alarm_control_panel",
"automation",
"binary_sensor",
"camera",
"climate",
"cover",
"demo",
"device_tracker",
"fan",
"humidifier",
"input_boolean",
"light",
"lock",
"media_player",
"person",
"remote",
"scene",
"script",
"sensor",
"switch",
"vacuum",
"water_heater",
]
DEFAULT_DOMAINS = [
"alarm_control_panel",
"climate",
"cover",
"humidifier",
"fan",
"light",
"lock",
"media_player",
"switch",
"vacuum",
"water_heater",
]
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for HomeKit."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize config flow."""
self.homekit_data = {}
self.entry_title = None
async def async_step_pairing(self, user_input=None):
"""Pairing instructions."""
if user_input is not None:
return self.async_create_entry(
title=self.entry_title, data=self.homekit_data
)
return self.async_show_form(
step_id="pairing",
description_placeholders={CONF_NAME: self.homekit_data[CONF_NAME]},
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
port = await self._async_available_port()
name = self._async_available_name()
title = f"{name}:{port}"
self.homekit_data = user_input.copy()
self.homekit_data[CONF_NAME] = name
self.homekit_data[CONF_PORT] = port
self.homekit_data[CONF_FILTER] = {
CONF_INCLUDE_DOMAINS: user_input[CONF_INCLUDE_DOMAINS],
CONF_INCLUDE_ENTITIES: [],
CONF_EXCLUDE_DOMAINS: [],
CONF_EXCLUDE_ENTITIES: [],
}
del self.homekit_data[CONF_INCLUDE_DOMAINS]
self.entry_title = title
return await self.async_step_pairing()
default_domains = [] if self._async_current_names() else DEFAULT_DOMAINS
setup_schema = vol.Schema(
{
vol.Optional(CONF_AUTO_START, default=DEFAULT_AUTO_START): bool,
vol.Required(
CONF_INCLUDE_DOMAINS, default=default_domains
): cv.multi_select(SUPPORTED_DOMAINS),
}
)
return self.async_show_form(
step_id="user", data_schema=setup_schema, errors=errors
)
async def async_step_import(self, user_input=None):
"""Handle import from yaml."""
if not self._async_is_unique_name_port(user_input):
return self.async_abort(reason="port_name_in_use")
return self.async_create_entry(
title=f"{user_input[CONF_NAME]}:{user_input[CONF_PORT]}", data=user_input
)
async def _async_available_port(self):
"""Return an available port the bridge."""
return await self.hass.async_add_executor_job(
find_next_available_port, DEFAULT_CONFIG_FLOW_PORT
)
@callback
def _async_current_names(self):
"""Return a set of bridge names."""
current_entries = self._async_current_entries()
return {
entry.data[CONF_NAME]
for entry in current_entries
if CONF_NAME in entry.data
}
@callback
def _async_available_name(self):
"""Return an available for the bridge."""
# We always pick a RANDOM name to avoid Zeroconf
# name collisions. If the name has been seen before
# pairing will probably fail.
acceptable_chars = string.ascii_uppercase + string.digits
trailer = "".join(random.choices(acceptable_chars, k=4))
all_names = self._async_current_names()
suggested_name = f"{SHORT_BRIDGE_NAME} {trailer}"
while suggested_name in all_names:
trailer = "".join(random.choices(acceptable_chars, k=4))
suggested_name = f"{SHORT_BRIDGE_NAME} {trailer}"
return suggested_name
@callback
def _async_is_unique_name_port(self, user_input):
"""Determine is a name or port is already used."""
name = user_input[CONF_NAME]
port = user_input[CONF_PORT]
for entry in self._async_current_entries():
if entry.data[CONF_NAME] == name or entry.data[CONF_PORT] == port:
return False
return True
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for tado."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
self.homekit_options = {}
self.included_cameras = set()
async def async_step_yaml(self, user_input=None):
"""No options for yaml managed entries."""
if user_input is not None:
# Apparently not possible to abort an options flow
# at the moment
return self.async_create_entry(title="", data=self.config_entry.options)
return self.async_show_form(step_id="yaml")
async def async_step_advanced(self, user_input=None):
"""Choose advanced options."""
if user_input is not None:
self.homekit_options.update(user_input)
for key in (CONF_DOMAINS, CONF_ENTITIES):
if key in self.homekit_options:
del self.homekit_options[key]
return self.async_create_entry(title="", data=self.homekit_options)
schema_base = {}
if self.show_advanced_options:
schema_base[
vol.Optional(
CONF_AUTO_START,
default=self.homekit_options.get(
CONF_AUTO_START, DEFAULT_AUTO_START
),
)
] = bool
else:
self.homekit_options[CONF_AUTO_START] = self.homekit_options.get(
CONF_AUTO_START, DEFAULT_AUTO_START
)
schema_base.update(
{
vol.Optional(
CONF_SAFE_MODE,
default=self.homekit_options.get(CONF_SAFE_MODE, DEFAULT_SAFE_MODE),
): bool
}
)
return self.async_show_form(
step_id="advanced", data_schema=vol.Schema(schema_base)
)
async def async_step_cameras(self, user_input=None):
"""Choose camera config."""
if user_input is not None:
entity_config = self.homekit_options[CONF_ENTITY_CONFIG]
for entity_id in self.included_cameras:
if entity_id in user_input[CONF_CAMERA_COPY]:
entity_config.setdefault(entity_id, {})[
CONF_VIDEO_CODEC
] = VIDEO_CODEC_COPY
elif (
entity_id in entity_config
and CONF_VIDEO_CODEC in entity_config[entity_id]
):
del entity_config[entity_id][CONF_VIDEO_CODEC]
return await self.async_step_advanced()
cameras_with_copy = []
entity_config = self.homekit_options.setdefault(CONF_ENTITY_CONFIG, {})
for entity in self.included_cameras:
hk_entity_config = entity_config.get(entity, {})
if hk_entity_config.get(CONF_VIDEO_CODEC) == VIDEO_CODEC_COPY:
cameras_with_copy.append(entity)
data_schema = vol.Schema(
{
vol.Optional(
CONF_CAMERA_COPY,
default=cameras_with_copy,
): cv.multi_select(self.included_cameras),
}
)
return self.async_show_form(step_id="cameras", data_schema=data_schema)
async def async_step_include_exclude(self, user_input=None):
"""Choose entities to include or exclude from the domain."""
if user_input is not None:
entity_filter = {
CONF_INCLUDE_DOMAINS: [],
CONF_EXCLUDE_DOMAINS: [],
CONF_INCLUDE_ENTITIES: [],
CONF_EXCLUDE_ENTITIES: [],
}
if isinstance(user_input[CONF_ENTITIES], list):
entities = user_input[CONF_ENTITIES]
else:
entities = [user_input[CONF_ENTITIES]]
if (
self.homekit_options[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY
or user_input[CONF_INCLUDE_EXCLUDE_MODE] == MODE_INCLUDE
):
entity_filter[CONF_INCLUDE_ENTITIES] = entities
# Include all of the domain if there are no entities
# explicitly included as the user selected the domain
domains_with_entities_selected = _domains_set_from_entities(entities)
entity_filter[CONF_INCLUDE_DOMAINS] = [
domain
for domain in self.homekit_options[CONF_DOMAINS]
if domain not in domains_with_entities_selected
]
for entity_id in list(self.included_cameras):
if entity_id not in entities:
self.included_cameras.remove(entity_id)
else:
entity_filter[CONF_INCLUDE_DOMAINS] = self.homekit_options[CONF_DOMAINS]
entity_filter[CONF_EXCLUDE_ENTITIES] = entities
for entity_id in entities:
if entity_id in self.included_cameras:
self.included_cameras.remove(entity_id)
self.homekit_options[CONF_FILTER] = entity_filter
if self.included_cameras:
return await self.async_step_cameras()
return await self.async_step_advanced()
entity_filter = self.homekit_options.get(CONF_FILTER, {})
all_supported_entities = await self.hass.async_add_executor_job(
_get_entities_matching_domains,
self.hass,
self.homekit_options[CONF_DOMAINS],
)
self.included_cameras = {
entity_id
for entity_id in all_supported_entities
if entity_id.startswith("camera.")
}
data_schema = {}
entities = entity_filter.get(CONF_INCLUDE_ENTITIES, [])
if self.homekit_options[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY:
entity_schema = vol.In
else:
if entities:
include_exclude_mode = MODE_INCLUDE
else:
include_exclude_mode = MODE_EXCLUDE
entities = entity_filter.get(CONF_EXCLUDE_ENTITIES, [])
data_schema[
vol.Required(CONF_INCLUDE_EXCLUDE_MODE, default=include_exclude_mode)
] = vol.In(INCLUDE_EXCLUDE_MODES)
entity_schema = cv.multi_select
data_schema[vol.Optional(CONF_ENTITIES, default=entities)] = entity_schema(
all_supported_entities
)
return self.async_show_form(
step_id="include_exclude", data_schema=vol.Schema(data_schema)
)
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if self.config_entry.source == SOURCE_IMPORT:
return await self.async_step_yaml(user_input)
if user_input is not None:
self.homekit_options.update(user_input)
return await self.async_step_include_exclude()
self.homekit_options = dict(self.config_entry.options)
entity_filter = self.homekit_options.get(CONF_FILTER, {})
homekit_mode = self.homekit_options.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
domains = entity_filter.get(CONF_INCLUDE_DOMAINS, [])
include_entities = entity_filter.get(CONF_INCLUDE_ENTITIES)
if include_entities:
domains.extend(_domains_set_from_entities(include_entities))
data_schema = vol.Schema(
{
vol.Optional(CONF_HOMEKIT_MODE, default=homekit_mode): vol.In(
HOMEKIT_MODES
),
vol.Optional(
CONF_DOMAINS,
default=domains,
): cv.multi_select(SUPPORTED_DOMAINS),
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
def _get_entities_matching_domains(hass, domains):
"""List entities in the given domains."""
included_domains = set(domains)
entity_ids = [
state.entity_id
for state in hass.states.all()
if (split_entity_id(state.entity_id))[0] in included_domains
]
entity_ids.sort()
return entity_ids
def _domains_set_from_entities(entity_ids):
"""Build a set of domains for the given entity ids."""
domains = set()
for entity_id in entity_ids:
domains.add(split_entity_id(entity_id)[0])
return domains
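# For example (illustrative):
#     _domains_set_from_entities(["light.kitchen", "switch.porch"])
#     == {"light", "switch"}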
|
import posixpath
from absl import flags
from perfkitbenchmarker import linux_packages
TOMCAT_URL = ('https://archive.apache.org/dist/tomcat/tomcat-8/v8.0.28/bin/'
'apache-tomcat-8.0.28.tar.gz')
TOMCAT_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'tomcat')
TOMCAT_HTTP_PORT = 8080
flags.DEFINE_string('tomcat_url', TOMCAT_URL, 'Tomcat 8 download URL.')
FLAGS = flags.FLAGS
# Start / stop scripts
_TOMCAT_START = posixpath.join(TOMCAT_DIR, 'bin', 'startup.sh')
_TOMCAT_STOP = posixpath.join(TOMCAT_DIR, 'bin', 'shutdown.sh')
_TOMCAT_SERVER_CONF = posixpath.join(TOMCAT_DIR, 'conf', 'server.xml')
_TOMCAT_LOGGING_CONF = posixpath.join(TOMCAT_DIR, 'conf', 'logging.properties')
_TOMCAT_WEB_CONF = posixpath.join(TOMCAT_DIR, 'conf', 'web.xml')
_TOMCAT_PROTOCOL = 'org.apache.coyote.http11.Http11Nio2Protocol'
def _Install(vm):
vm.Install('openjdk')
vm.Install('curl')
vm.RemoteCommand(
('mkdir -p {0} && curl -L {1} | '
'tar -C {0} --strip-components 1 -xzf -').format(TOMCAT_DIR,
FLAGS.tomcat_url))
  # Use a non-blocking protocol, and disable access logging (which isn't very
# helpful during load tests).
vm.RemoteCommand(
("""sed -i.bak -e '/Connector port="8080"/ """
's/protocol="[^"]\\+"/protocol="{0}"/\' '
'-e "/org.apache.catalina.valves.AccessLogValve/,+3d" '
'{1}').format(
_TOMCAT_PROTOCOL, _TOMCAT_SERVER_CONF))
# Quiet down localhost logs.
vm.RemoteCommand(
("sed -i.bak "
r"-e 's/\(2localhost.org.apache.*.level\)\s\+=.*$/\1 = WARN/' "
' {0}').format(_TOMCAT_LOGGING_CONF))
# Expire sessions quickly.
vm.RemoteCommand(
("sed -i.bak "
r"-e 's,\(<session-timeout>\)30\(</session-timeout>\),\11\2,' "
" {0}").format(_TOMCAT_WEB_CONF))
def YumInstall(vm):
"""Installs the Tomcat package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the Tomcat package on the VM."""
_Install(vm)
def Start(vm):
"""Starts Tomcat on "vm"."""
# CentOS7 uses systemd as an init system
vm.RemoteCommand('bash ' + _TOMCAT_START)
def Stop(vm):
"""Stops Tomcat on "vm"."""
vm.RemoteCommand('bash ' + _TOMCAT_STOP)
|
import json
from pathlib import Path
import pytest
from redbot.pytest.data_manager import *
from redbot.core import data_manager
def test_no_basic(cog_instance):
with pytest.raises(RuntimeError):
data_manager.core_data_path()
with pytest.raises(RuntimeError):
data_manager.cog_data_path(cog_instance)
@pytest.mark.skip
def test_core_path(data_mgr_config, tmpdir):
conf_path = tmpdir.join("config.json")
conf_path.write(json.dumps(data_mgr_config))
data_manager.load_basic_configuration(Path(str(conf_path)))
assert data_manager.core_data_path().parent == Path(data_mgr_config["BASE_DIR"])
|
import logging
import xmlrpc.client
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_URL,
DATA_RATE_KILOBYTES_PER_SECOND,
STATE_IDLE,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPE_CURRENT_STATUS = "current_status"
SENSOR_TYPE_DOWNLOAD_SPEED = "download_speed"
SENSOR_TYPE_UPLOAD_SPEED = "upload_speed"
SENSOR_TYPE_ALL_TORRENTS = "all_torrents"
SENSOR_TYPE_STOPPED_TORRENTS = "stopped_torrents"
SENSOR_TYPE_COMPLETE_TORRENTS = "complete_torrents"
SENSOR_TYPE_UPLOADING_TORRENTS = "uploading_torrents"
SENSOR_TYPE_DOWNLOADING_TORRENTS = "downloading_torrents"
SENSOR_TYPE_ACTIVE_TORRENTS = "active_torrents"
DEFAULT_NAME = "rtorrent"
SENSOR_TYPES = {
SENSOR_TYPE_CURRENT_STATUS: ["Status", None],
SENSOR_TYPE_DOWNLOAD_SPEED: ["Down Speed", DATA_RATE_KILOBYTES_PER_SECOND],
SENSOR_TYPE_UPLOAD_SPEED: ["Up Speed", DATA_RATE_KILOBYTES_PER_SECOND],
SENSOR_TYPE_ALL_TORRENTS: ["All Torrents", None],
SENSOR_TYPE_STOPPED_TORRENTS: ["Stopped Torrents", None],
SENSOR_TYPE_COMPLETE_TORRENTS: ["Complete Torrents", None],
SENSOR_TYPE_UPLOADING_TORRENTS: ["Uploading Torrents", None],
SENSOR_TYPE_DOWNLOADING_TORRENTS: ["Downloading Torrents", None],
SENSOR_TYPE_ACTIVE_TORRENTS: ["Active Torrents", None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.url,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the rtorrent sensors."""
url = config[CONF_URL]
name = config[CONF_NAME]
try:
rtorrent = xmlrpc.client.ServerProxy(url)
except (xmlrpc.client.ProtocolError, ConnectionRefusedError) as ex:
_LOGGER.error("Connection to rtorrent daemon failed")
raise PlatformNotReady from ex
dev = []
for variable in config[CONF_MONITORED_VARIABLES]:
dev.append(RTorrentSensor(variable, rtorrent, name))
add_entities(dev)
def format_speed(speed):
"""Return a bytes/s measurement as a human readable string."""
kb_spd = float(speed) / 1024
return round(kb_spd, 2 if kb_spd < 0.1 else 1)
class RTorrentSensor(Entity):
"""Representation of an rtorrent sensor."""
def __init__(self, sensor_type, rtorrent_client, client_name):
"""Initialize the sensor."""
self._name = SENSOR_TYPES[sensor_type][0]
self.client = rtorrent_client
self.type = sensor_type
self.client_name = client_name
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.data = None
self._available = False
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from rtorrent and updates the state."""
multicall = xmlrpc.client.MultiCall(self.client)
multicall.throttle.global_up.rate()
multicall.throttle.global_down.rate()
multicall.d.multicall2("", "main")
multicall.d.multicall2("", "stopped")
multicall.d.multicall2("", "complete")
multicall.d.multicall2("", "seeding", "d.up.rate=")
multicall.d.multicall2("", "leeching", "d.down.rate=")
try:
self.data = multicall()
self._available = True
except (xmlrpc.client.ProtocolError, ConnectionRefusedError, OSError) as ex:
_LOGGER.error("Connection to rtorrent failed (%s)", ex)
self._available = False
return
upload = self.data[0]
download = self.data[1]
all_torrents = self.data[2]
stopped_torrents = self.data[3]
complete_torrents = self.data[4]
uploading_torrents = 0
for up_torrent in self.data[5]:
if up_torrent[0]:
uploading_torrents += 1
downloading_torrents = 0
for down_torrent in self.data[6]:
if down_torrent[0]:
downloading_torrents += 1
active_torrents = uploading_torrents + downloading_torrents
if self.type == SENSOR_TYPE_CURRENT_STATUS:
if self.data:
if upload > 0 and download > 0:
self._state = "up_down"
elif upload > 0 and download == 0:
self._state = "seeding"
elif upload == 0 and download > 0:
self._state = "downloading"
else:
self._state = STATE_IDLE
else:
self._state = None
if self.data:
if self.type == SENSOR_TYPE_DOWNLOAD_SPEED:
self._state = format_speed(download)
elif self.type == SENSOR_TYPE_UPLOAD_SPEED:
self._state = format_speed(upload)
elif self.type == SENSOR_TYPE_ALL_TORRENTS:
self._state = len(all_torrents)
elif self.type == SENSOR_TYPE_STOPPED_TORRENTS:
self._state = len(stopped_torrents)
elif self.type == SENSOR_TYPE_COMPLETE_TORRENTS:
self._state = len(complete_torrents)
elif self.type == SENSOR_TYPE_UPLOADING_TORRENTS:
self._state = uploading_torrents
elif self.type == SENSOR_TYPE_DOWNLOADING_TORRENTS:
self._state = downloading_torrents
elif self.type == SENSOR_TYPE_ACTIVE_TORRENTS:
self._state = active_torrents
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
from numpy.random import randn
import numpy as np
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
from numpy import array, asarray
DO_PLOT = False
SEED = 1124
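# Note (added annotation, not part of the original tests): both tests below
# use the standard continuous-white-noise acceleration model for a
# constant-velocity state [position, velocity], for which
#     Q = q_c * [[dt**3/3, dt**2/2],
#                [dt**2/2, dt     ]]
# The scalar factor 0.02 applied to Q in each test plays the role of the
# spectral density q_c.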
def single_measurement_test():
dt = 0.1
sigma = 2.
kf2 = KalmanFilter(dim_x=2, dim_z=1)
kf2.F = array ([[1., dt], [0., 1.]])
kf2.H = array ([[1., 0.]])
kf2.x = array ([[0.], [1.]])
kf2.Q = array ([[dt**3/3, dt**2/2],
[dt**2/2, dt]]) * 0.02
kf2.P *= 100
kf2.R[0,0] = sigma**2
random.seed(SEED)
xs = []
zs = []
nom = []
for i in range(1, 100):
m0 = i + randn()*sigma
z = array([[m0]])
kf2.predict()
kf2.update(z)
xs.append(kf2.x.T[0])
zs.append(z.T[0])
nom.append(i)
xs = asarray(xs)
zs = asarray(zs)
nom = asarray(nom)
res = nom-xs[:,0]
std_dev = np.std(res)
print('std: {:.3f}'.format (std_dev))
global DO_PLOT
if DO_PLOT:
plt.subplot(211)
plt.plot(xs[:,0])
#plt.plot(zs[:,0])
plt.subplot(212)
plt.plot(res)
plt.show()
return std_dev
def sensor_fusion_test(wheel_sigma=2., gps_sigma=4.):
dt = 0.1
kf2 = KalmanFilter(dim_x=2, dim_z=2)
kf2.F = array ([[1., dt], [0., 1.]])
kf2.H = array ([[1., 0.], [1., 0.]])
kf2.x = array ([[0.], [0.]])
kf2.Q = array ([[dt**3/3, dt**2/2],
[dt**2/2, dt]]) * 0.02
kf2.P *= 100
kf2.R[0,0] = wheel_sigma**2
kf2.R[1,1] = gps_sigma**2
random.seed(SEED)
xs = []
zs = []
nom = []
for i in range(1, 100):
m0 = i + randn()*wheel_sigma
m1 = i + randn()*gps_sigma
if gps_sigma > 1e40:
m1 = -1e40
z = array([[m0], [m1]])
kf2.predict()
kf2.update(z)
xs.append(kf2.x.T[0])
zs.append(z.T[0])
nom.append(i)
xs = asarray(xs)
zs = asarray(zs)
nom = asarray(nom)
res = nom-xs[:,0]
std_dev = np.std(res)
print('fusion std: {:.3f}'.format (np.std(res)))
if DO_PLOT:
plt.subplot(211)
plt.plot(xs[:,0])
#plt.plot(zs[:,0])
#plt.plot(zs[:,1])
plt.subplot(212)
plt.axhline(0)
plt.plot(res)
plt.show()
print(kf2.Q)
print(kf2.K)
return std_dev
def test_fusion():
std1 = sensor_fusion_test()
std2 = single_measurement_test()
assert (std1 < std2)
if __name__ == "__main__":
DO_PLOT = True
sensor_fusion_test(2, 4e100)
single_measurement_test()
test_fusion()
|
import rumps
rumps.debug_mode(True)
@rumps.clicked('Print Something')
def print_something(_):
rumps.alert(message='something', ok='YES!', cancel='NO!')
@rumps.clicked('On/Off Test')
def on_off_test(_):
print_button = app.menu['Print Something']
if print_button.callback is None:
print_button.set_callback(print_something)
else:
print_button.set_callback(None)
@rumps.clicked('Clean Quit')
def clean_up_before_quit(_):
print('execute clean up code')
rumps.quit_application()
app = rumps.App('Hallo Thar', menu=['Print Something', 'On/Off Test', 'Clean Quit'], quit_button=None)
app.run()
|
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO
from queue import Queue # noqa
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO
from Queue import Queue
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
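# Minimal usage sketch for with_metaclass (illustrative only; MyMeta and Base
# are hypothetical names, not part of this module):
#
#   class MyMeta(type):
#       def __new__(mcs, name, bases, d):
#           d.setdefault('registered', True)
#           return super(MyMeta, mcs).__new__(mcs, name, bases, d)
#
#   class Base(with_metaclass(MyMeta, object)):
#       pass
#
# Base ends up with MyMeta as its metaclass and object as its only real base;
# the 'temporary_class' placeholder never appears in Base.__mro__.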
# Certain versions of pypy have a bug where clearing the exception stack
# breaks the __exit__ function in a very peculiar way. This is currently
# true for pypy 2.2.1 for instance. The second level of exception blocks
# is necessary because pypy seems to forget to check if an exception
# happened until the next bytecode instruction?
BROKEN_PYPY_CTXMGR_EXIT = False
if hasattr(sys, 'pypy_version_info'):
class _Mgr(object):
def __enter__(self):
return self
def __exit__(self, *args):
sys.exc_clear()
try:
try:
with _Mgr():
raise AssertionError()
except:
raise
except TypeError:
BROKEN_PYPY_CTXMGR_EXIT = True
except AssertionError:
pass
# pylama:skip=1
|
import pytest
from qutebrowser.completion.models import listcategory
@pytest.mark.parametrize('pattern, before, after, after_nosort', [
('foo',
[('foo', ''), ('bar', '')],
[('foo', '')],
[('foo', '')]),
('foo',
[('foob', ''), ('fooc', ''), ('fooa', '')],
[('fooa', ''), ('foob', ''), ('fooc', '')],
[('foob', ''), ('fooc', ''), ('fooa', '')]),
# prefer foobar as it starts with the pattern
('foo',
[('barfoo', ''), ('foobaz', ''), ('foobar', '')],
[('foobar', ''), ('foobaz', ''), ('barfoo', '')],
[('foobaz', ''), ('foobar', ''), ('barfoo', '')]),
('foo',
[('foo', 'bar'), ('bar', 'foo'), ('bar', 'bar')],
[('foo', 'bar'), ('bar', 'foo')],
[('foo', 'bar'), ('bar', 'foo')]),
])
def test_set_pattern(pattern, before, after, after_nosort, model_validator):
"""Validate the filtering and sorting results of set_pattern."""
cat = listcategory.ListCategory('Foo', before)
model_validator.set_model(cat)
cat.set_pattern(pattern)
model_validator.validate(after)
cat = listcategory.ListCategory('Foo', before, sort=False)
model_validator.set_model(cat)
cat.set_pattern(pattern)
model_validator.validate(after_nosort)
|
import sys
import os
import time
import struct
import re
# Fix Path for locating the SNMPCollector
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../',
'snmp',
)))
from diamond.metric import Metric
from snmp import SNMPCollector as parent_SNMPCollector
class NetscalerSNMPCollector(parent_SNMPCollector):
"""
SNMPCollector for Netscaler Metrics
"""
"""
EntityProtocolType ::=
INTEGER{ http(0),
ftp(1),
tcp(2),
udp(3),
sslBridge(4),
monitor(5),
monitorUdp(6),
nntp(7),
httpserver(8),
httpclient(9),
rpcserver(10),
rpcclient(11),
nat(12),
any(13),
ssl(14),
dns(15),
adns(16),
snmp(17),
ha(18),
monitorPing(19),
sslOtherTcp(20),
aaa(21),
secureMonitor(23),
sslvpnUdp(24),
rip(25),
dnsClient(26),
rpcServer(27),
rpcClient(28),
dhcrpa(36),
sipudp(39),
dnstcp(44),
adnstcp(45),
rtsp(46),
push(48),
sslPush(49),
dhcpClient(50),
radius(51),
serviceUnknown(62) }
EntityState ::=
INTEGER{ down(1),
unknown(2),
busy(3),
outOfService(4),
transitionToOutOfService(5),
up(7),
transitionToOutOfServiceDown(8) }
"""
NETSCALER_SYSTEM_GUAGES = {
"cpuUsage": "1.3.6.1.4.1.5951.4.1.1.41.1.0",
"memUsage": "1.3.6.1.4.1.5951.4.1.1.41.2.0",
"surgeQueue": "1.3.6.1.4.1.5951.4.1.1.46.15.0",
"establishedServerConnections": "1.3.6.1.4.1.5951.4.1.1.46.10.0",
"establishedClientConnections": "1.3.6.1.4.1.5951.4.1.1.46.12.0"
}
NETSCALER_SYSTEM_COUNTERS = {
"httpTotRequests": "1.3.6.1.4.1.5951.4.1.1.48.67.0"
}
NETSCALER_VSERVER_NAMES = "1.3.6.1.4.1.5951.4.1.3.1.1.1"
NETSCALER_VSERVER_TYPE = "1.3.6.1.4.1.5951.4.1.3.1.1.4"
NETSCALER_VSERVER_STATE = "1.3.6.1.4.1.5951.4.1.3.1.1.5"
NETSCALER_VSERVER_GUAGES = {
"vsvrRequestRate": "1.3.6.1.4.1.5951.4.1.3.1.1.43",
"vsvrRxBytesRate": "1.3.6.1.4.1.5951.4.1.3.1.1.44",
"vsvrTxBytesRate": "1.3.6.1.4.1.5951.4.1.3.1.1.45",
"vsvrCurServicesUp": "1.3.6.1.4.1.5951.4.1.3.1.1.41",
"vsvrCurServicesDown": "1.3.6.1.4.1.5951.4.1.3.1.1.37",
"vsvrCurServicesUnknown": "1.3.6.1.4.1.5951.4.1.3.1.1.38",
"vsvrCurServicesTransToOutOfSvc": "1.3.6.1.4.1.5951.4.1.3.1.1.40"
}
NETSCALER_SERVICE_NAMES = "1.3.6.1.4.1.5951.4.1.2.1.1.1"
NETSCALER_SERVICE_TYPE = "1.3.6.1.4.1.5951.4.1.2.1.1.4"
NETSCALER_SERVICE_STATE = "1.3.6.1.4.1.5951.4.1.2.1.1.5"
NETSCALER_SERVICE_GUAGES = {
"svcRequestRate": "1.3.6.1.4.1.5951.4.1.2.1.1.42",
"svcSurgeCount": "1.3.6.1.4.1.5951.4.1.2.1.1.10",
"svcEstablishedConn": "1.3.6.1.4.1.5951.4.1.2.1.1.8",
"svcActiveConn": "1.3.6.1.4.1.5951.4.1.2.1.1.9",
"svcCurClntConnections": "1.3.6.1.4.1.5951.4.1.2.1.1.41"
}
MAX_VALUE = 18446744073709551615
def get_default_config_help(self):
config_help = super(NetscalerSNMPCollector,
self).get_default_config_help()
config_help.update({
'host': 'netscaler dns address',
'port': 'Netscaler port to collect snmp data',
'community': 'SNMP community',
'exclude_service_type': "list of service types to exclude" +
" (see MIB EntityProtocolType)",
'exclude_vserver_type': "list of vserver types to exclude" +
" (see MIB EntityProtocolType)"
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NetscalerSNMPCollector, self).get_default_config()
config.update({
'path': 'netscaler',
'timeout': 15,
'exclude_service_type': [],
'exclude_vserver_type': [],
'exclude_service_state': [],
'exclude_vserver_state': []
})
return config
def get_string_index_oid(self, s):
"""Turns a string into an oid format is length of name followed by
name chars in ascii"""
return (len(self.get_bytes(s)), ) + self.get_bytes(s)
def get_bytes(self, s):
"""Turns a string into a list of byte values"""
return struct.unpack('%sB' % len(s), s)
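# Illustrative example (hypothetical service name, not taken from a device):
# get_string_index_oid('web01') yields (5, 119, 101, 98, 48, 49) -- the name
# length followed by the ASCII value of each character -- which collect_snmp()
# passes through _convert_from_oid() and appends to the table OIDs below to
# address a single row of the SNMP table.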
def collect_snmp(self, device, host, port, community):
"""
Collect Netscaler SNMP stats from device
"""
# Log
self.log.info("Collecting Netscaler statistics from: %s", device)
# Set timestamp
timestamp = time.time()
# Collect Netscaler System OIDs
for k, v in self.NETSCALER_SYSTEM_GUAGES.items():
# Get Metric Name and Value
metricName = '.'.join([k])
metricValue = int(self.get(v, host, port, community)[v])
# Get Metric Path
metricPath = '.'.join(['devices', device, 'system', metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 0)
# Publish Metric
self.publish_metric(metric)
# Collect Netscaler System Counter OIDs
for k, v in self.NETSCALER_SYSTEM_COUNTERS.items():
# Get Metric Name and Value
metricName = '.'.join([k])
# Get Metric Path
metricPath = '.'.join(['devices', device, 'system', metricName])
# Get Metric Value
metricValue = self.derivative(metricPath, long(
self.get(v, host, port, community)[v]), self.MAX_VALUE)
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 0)
# Publish Metric
self.publish_metric(metric)
# Collect Netscaler Services
serviceNames = [v.strip("\'") for v in self.walk(
self.NETSCALER_SERVICE_NAMES, host, port, community).values()]
for serviceName in serviceNames:
# Get Service Name in OID form
serviceNameOid = self.get_string_index_oid(serviceName)
# Get Service Type
serviceTypeOid = ".".join([self.NETSCALER_SERVICE_TYPE,
self._convert_from_oid(serviceNameOid)])
serviceType = int(self.get(serviceTypeOid,
host,
port,
community)[serviceTypeOid].strip("\'"))
# Filter excluded service types
if serviceType in map(lambda v: int(v),
self.config.get('exclude_service_type')):
continue
# Get Service State
serviceStateOid = ".".join([self.NETSCALER_SERVICE_STATE,
self._convert_from_oid(serviceNameOid)])
serviceState = int(self.get(serviceStateOid,
host,
port,
community)[serviceStateOid].strip("\'"))
# Filter excluded service states
if serviceState in map(lambda v: int(v),
self.config.get('exclude_service_state')):
continue
for k, v in self.NETSCALER_SERVICE_GUAGES.items():
serviceGuageOid = ".".join(
[v, self._convert_from_oid(serviceNameOid)])
# Get Metric Name
metricName = '.'.join([re.sub(r'\.|\\', '_', serviceName), k])
# Get Metric Value
metricValue = int(self.get(serviceGuageOid,
host,
port,
community
)[serviceGuageOid].strip("\'"))
# Get Metric Path
metricPath = '.'.join(['devices',
device,
'service',
metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 0)
# Publish Metric
self.publish_metric(metric)
# Collect Netscaler Vservers
vserverNames = [v.strip("\'") for v in self.walk(
self.NETSCALER_VSERVER_NAMES, host, port, community).values()]
for vserverName in vserverNames:
# Get Vserver Name in OID form
vserverNameOid = self.get_string_index_oid(vserverName)
# Get Vserver Type
vserverTypeOid = ".".join([self.NETSCALER_VSERVER_TYPE,
self._convert_from_oid(vserverNameOid)])
vserverType = int(self.get(vserverTypeOid,
host,
port,
community)[vserverTypeOid].strip("\'"))
# filter excluded vserver types
if vserverType in map(lambda v: int(v),
self.config.get('exclude_vserver_type')):
continue
# Get Service State
vserverStateOid = ".".join([self.NETSCALER_VSERVER_STATE,
self._convert_from_oid(vserverNameOid)])
vserverState = int(self.get(vserverStateOid,
host,
port,
community)[vserverStateOid].strip("\'"))
# Filter excluded vserver state
if vserverState in map(lambda v: int(v),
self.config.get('exclude_vserver_state')):
continue
for k, v in self.NETSCALER_VSERVER_GUAGES.items():
vserverGuageOid = ".".join(
[v, self._convert_from_oid(vserverNameOid)])
# Get Metric Name
metricName = '.'.join([re.sub(r'\.|\\', '_', vserverName), k])
# Get Metric Value
metricValue = int(self.get(vserverGuageOid,
host,
port,
community
)[vserverGuageOid].strip("\'"))
# Get Metric Path
metricPath = '.'.join(['devices',
device,
'vserver',
metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 0)
# Publish Metric
self.publish_metric(metric)
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_WINDOW,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_ICON
from . import FIBARO_DEVICES, FibaroDevice
SENSOR_TYPES = {
"com.fibaro.floodSensor": ["Flood", "mdi:water", "flood"],
"com.fibaro.motionSensor": ["Motion", "mdi:run", DEVICE_CLASS_MOTION],
"com.fibaro.doorSensor": ["Door", "mdi:window-open", DEVICE_CLASS_DOOR],
"com.fibaro.windowSensor": ["Window", "mdi:window-open", DEVICE_CLASS_WINDOW],
"com.fibaro.smokeSensor": ["Smoke", "mdi:smoking", DEVICE_CLASS_SMOKE],
"com.fibaro.FGMS001": ["Motion", "mdi:run", DEVICE_CLASS_MOTION],
"com.fibaro.heatDetector": ["Heat", "mdi:fire", "heat"],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Perform the setup for Fibaro controller devices."""
if discovery_info is None:
return
add_entities(
[
FibaroBinarySensor(device)
for device in hass.data[FIBARO_DEVICES]["binary_sensor"]
],
True,
)
class FibaroBinarySensor(FibaroDevice, BinarySensorEntity):
"""Representation of a Fibaro Binary Sensor."""
def __init__(self, fibaro_device):
"""Initialize the binary_sensor."""
self._state = None
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
stype = None
devconf = fibaro_device.device_config
if fibaro_device.type in SENSOR_TYPES:
stype = fibaro_device.type
elif fibaro_device.baseType in SENSOR_TYPES:
stype = fibaro_device.baseType
if stype:
self._device_class = SENSOR_TYPES[stype][2]
self._icon = SENSOR_TYPES[stype][1]
else:
self._device_class = None
self._icon = None
# device_config overrides:
self._device_class = devconf.get(CONF_DEVICE_CLASS, self._device_class)
self._icon = devconf.get(CONF_ICON, self._icon)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
def update(self):
"""Get the latest data and update the state."""
self._state = self.current_binary_state
|
from datetime import datetime
import django
from django import forms
from django.conf import settings
from django.contrib import admin, messages
from django.contrib.admin.models import ADDITION, CHANGE, DELETION, LogEntry
from django.contrib.auth import get_permission_codename
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path, reverse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.translation import gettext_lazy as _
from django.views.decorators.http import require_POST
from .formats.base_formats import DEFAULT_FORMATS
from .forms import ConfirmImportForm, ExportForm, ImportForm, export_action_form_factory
from .resources import modelresource_factory
from .results import RowResult
from .signals import post_export, post_import
from .tmp_storages import TempFolderStorage
class ImportExportMixinBase:
def get_model_info(self):
app_label = self.model._meta.app_label
return (app_label, self.model._meta.model_name)
class ImportMixin(ImportExportMixinBase):
"""
Import mixin.
This is intended to be mixed with django.contrib.admin.ModelAdmin
https://docs.djangoproject.com/en/2.1/ref/contrib/admin/#modeladmin-objects
"""
#: template for change_list view
change_list_template = 'admin/import_export/change_list_import.html'
#: template for import view
import_template_name = 'admin/import_export/import.html'
#: resource class
resource_class = None
#: available import formats
formats = DEFAULT_FORMATS
#: import data encoding
from_encoding = "utf-8"
skip_admin_log = None
# storage class for saving temporary files
tmp_storage_class = None
def get_skip_admin_log(self):
if self.skip_admin_log is None:
return getattr(settings, 'IMPORT_EXPORT_SKIP_ADMIN_LOG', False)
else:
return self.skip_admin_log
def get_tmp_storage_class(self):
if self.tmp_storage_class is None:
tmp_storage_class = getattr(
settings, 'IMPORT_EXPORT_TMP_STORAGE_CLASS', TempFolderStorage,
)
else:
tmp_storage_class = self.tmp_storage_class
if isinstance(tmp_storage_class, str):
tmp_storage_class = import_string(tmp_storage_class)
return tmp_storage_class
def has_import_permission(self, request):
"""
Returns whether a request has import permission.
"""
IMPORT_PERMISSION_CODE = getattr(settings, 'IMPORT_EXPORT_IMPORT_PERMISSION_CODE', None)
if IMPORT_PERMISSION_CODE is None:
return True
opts = self.opts
codename = get_permission_codename(IMPORT_PERMISSION_CODE, opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def get_urls(self):
urls = super().get_urls()
info = self.get_model_info()
my_urls = [
path('process_import/',
self.admin_site.admin_view(self.process_import),
name='%s_%s_process_import' % info),
path('import/',
self.admin_site.admin_view(self.import_action),
name='%s_%s_import' % info),
]
return my_urls + urls
def get_resource_kwargs(self, request, *args, **kwargs):
return {}
def get_import_resource_kwargs(self, request, *args, **kwargs):
"""Prepares/returns kwargs used when initializing Resource"""
return self.get_resource_kwargs(request, *args, **kwargs)
def get_resource_class(self):
"""Returns ResourceClass"""
if not self.resource_class:
return modelresource_factory(self.model)
else:
return self.resource_class
def get_import_resource_class(self):
"""
Returns ResourceClass to use for import.
"""
return self.get_resource_class()
def get_import_formats(self):
"""
Returns available import formats.
"""
return [f for f in self.formats if f().can_import()]
@method_decorator(require_POST)
def process_import(self, request, *args, **kwargs):
"""
Perform the actual import action (after the user has confirmed the import)
"""
if not self.has_import_permission(request):
raise PermissionDenied
form_type = self.get_confirm_import_form()
confirm_form = form_type(request.POST)
if confirm_form.is_valid():
import_formats = self.get_import_formats()
input_format = import_formats[
int(confirm_form.cleaned_data['input_format'])
]()
tmp_storage = self.get_tmp_storage_class()(name=confirm_form.cleaned_data['import_file_name'])
data = tmp_storage.read(input_format.get_read_mode())
if not input_format.is_binary() and self.from_encoding:
data = force_str(data, self.from_encoding)
dataset = input_format.create_dataset(data)
result = self.process_dataset(dataset, confirm_form, request, *args, **kwargs)
tmp_storage.remove()
return self.process_result(result, request)
def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
res_kwargs = self.get_import_resource_kwargs(request, form=confirm_form, *args, **kwargs)
resource = self.get_import_resource_class()(**res_kwargs)
imp_kwargs = self.get_import_data_kwargs(request, form=confirm_form, *args, **kwargs)
return resource.import_data(dataset,
dry_run=False,
raise_errors=True,
file_name=confirm_form.cleaned_data['original_file_name'],
user=request.user,
**imp_kwargs)
def process_result(self, result, request):
self.generate_log_entries(result, request)
self.add_success_message(result, request)
post_import.send(sender=None, model=self.model)
url = reverse('admin:%s_%s_changelist' % self.get_model_info(),
current_app=self.admin_site.name)
return HttpResponseRedirect(url)
def generate_log_entries(self, result, request):
if not self.get_skip_admin_log():
# Add imported objects to LogEntry
logentry_map = {
RowResult.IMPORT_TYPE_NEW: ADDITION,
RowResult.IMPORT_TYPE_UPDATE: CHANGE,
RowResult.IMPORT_TYPE_DELETE: DELETION,
}
content_type_id = ContentType.objects.get_for_model(self.model).pk
for row in result:
if row.import_type != row.IMPORT_TYPE_ERROR and row.import_type != row.IMPORT_TYPE_SKIP:
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=content_type_id,
object_id=row.object_id,
object_repr=row.object_repr,
action_flag=logentry_map[row.import_type],
change_message=_("%s through import_export" % row.import_type),
)
def add_success_message(self, result, request):
opts = self.model._meta
success_message = _('Import finished, with {} new and ' \
'{} updated {}.').format(result.totals[RowResult.IMPORT_TYPE_NEW],
result.totals[RowResult.IMPORT_TYPE_UPDATE],
opts.verbose_name_plural)
messages.success(request, success_message)
def get_import_context_data(self, **kwargs):
return self.get_context_data(**kwargs)
def get_context_data(self, **kwargs):
return {}
def get_import_form(self):
"""
Get the form type used to read the import format and file.
"""
return ImportForm
def get_confirm_import_form(self):
"""
Get the form type (class) used to confirm the import.
"""
return ConfirmImportForm
def get_form_kwargs(self, form, *args, **kwargs):
"""
Prepare/returns kwargs for the import form.
To distinguish between import and confirm import forms,
the following approach may be used:
if isinstance(form, ImportForm):
# your code here for the import form kwargs
# e.g. kwargs.update({...})
elif isinstance(form, ConfirmImportForm):
# your code here for the confirm import form kwargs
# e.g. kwargs.update({...})
...
"""
return kwargs
def get_import_data_kwargs(self, request, *args, **kwargs):
"""
Prepare kwargs for import_data.
"""
form = kwargs.get('form')
if form:
kwargs.pop('form')
return kwargs
return {}
def write_to_tmp_storage(self, import_file, input_format):
tmp_storage = self.get_tmp_storage_class()()
data = bytes()
for chunk in import_file.chunks():
data += chunk
tmp_storage.save(data, input_format.get_read_mode())
return tmp_storage
def import_action(self, request, *args, **kwargs):
"""
Perform a dry_run of the import to make sure the import will not
result in errors. If there were no errors, save the user
uploaded file to a local temp file that will be used by
'process_import' for the actual import.
"""
if not self.has_import_permission(request):
raise PermissionDenied
context = self.get_import_context_data()
import_formats = self.get_import_formats()
form_type = self.get_import_form()
form_kwargs = self.get_form_kwargs(form_type, *args, **kwargs)
form = form_type(import_formats,
request.POST or None,
request.FILES or None,
**form_kwargs)
if request.POST and form.is_valid():
input_format = import_formats[
int(form.cleaned_data['input_format'])
]()
import_file = form.cleaned_data['import_file']
# first always write the uploaded file to disk, as it may be an in-memory
# file or otherwise depend on the configured upload handlers
tmp_storage = self.write_to_tmp_storage(import_file, input_format)
# then read the file, using the proper format-specific mode
# warning, big files may exceed memory
try:
data = tmp_storage.read(input_format.get_read_mode())
if not input_format.is_binary() and self.from_encoding:
data = force_str(data, self.from_encoding)
dataset = input_format.create_dataset(data)
except UnicodeDecodeError as e:
return HttpResponse(_(u"<h1>Imported file has a wrong encoding: %s</h1>" % e))
except Exception as e:
return HttpResponse(_(u"<h1>%s encountered while trying to read file: %s</h1>" % (type(e).__name__, import_file.name)))
# prepare kwargs for import data, if needed
res_kwargs = self.get_import_resource_kwargs(request, form=form, *args, **kwargs)
resource = self.get_import_resource_class()(**res_kwargs)
# prepare additional kwargs for import_data, if needed
imp_kwargs = self.get_import_data_kwargs(request, form=form, *args, **kwargs)
result = resource.import_data(dataset, dry_run=True,
raise_errors=False,
file_name=import_file.name,
user=request.user,
**imp_kwargs)
context['result'] = result
if not result.has_errors() and not result.has_validation_errors():
initial = {
'import_file_name': tmp_storage.name,
'original_file_name': import_file.name,
'input_format': form.cleaned_data['input_format'],
}
confirm_form = self.get_confirm_import_form()
initial = self.get_form_kwargs(form=form, **initial)
context['confirm_form'] = confirm_form(initial=initial)
else:
res_kwargs = self.get_import_resource_kwargs(request, form=form, *args, **kwargs)
resource = self.get_import_resource_class()(**res_kwargs)
context.update(self.admin_site.each_context(request))
context['title'] = _("Import")
context['form'] = form
context['opts'] = self.model._meta
context['fields'] = [f.column_name for f in resource.get_user_visible_fields()]
request.current_app = self.admin_site.name
return TemplateResponse(request, [self.import_template_name],
context)
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context['has_import_permission'] = self.has_import_permission(request)
return super().changelist_view(request, extra_context)
class ExportMixin(ImportExportMixinBase):
"""
Export mixin.
This is intended to be mixed with django.contrib.admin.ModelAdmin
https://docs.djangoproject.com/en/2.1/ref/contrib/admin/#modeladmin-objects
"""
#: resource class
resource_class = None
#: template for change_list view
change_list_template = 'admin/import_export/change_list_export.html'
#: template for export view
export_template_name = 'admin/import_export/export.html'
#: available export formats
formats = DEFAULT_FORMATS
#: export data encoding
to_encoding = "utf-8"
def get_urls(self):
urls = super().get_urls()
my_urls = [
path('export/',
self.admin_site.admin_view(self.export_action),
name='%s_%s_export' % self.get_model_info()),
]
return my_urls + urls
def has_export_permission(self, request):
"""
Returns whether a request has export permission.
"""
EXPORT_PERMISSION_CODE = getattr(settings, 'IMPORT_EXPORT_EXPORT_PERMISSION_CODE', None)
if EXPORT_PERMISSION_CODE is None:
return True
opts = self.opts
codename = get_permission_codename(EXPORT_PERMISSION_CODE, opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def get_resource_kwargs(self, request, *args, **kwargs):
return {}
def get_export_resource_kwargs(self, request, *args, **kwargs):
return self.get_resource_kwargs(request, *args, **kwargs)
def get_resource_class(self):
if not self.resource_class:
return modelresource_factory(self.model)
else:
return self.resource_class
def get_export_resource_class(self):
"""
Returns ResourceClass to use for export.
"""
return self.get_resource_class()
def get_export_formats(self):
"""
Returns available export formats.
"""
return [f for f in self.formats if f().can_export()]
def get_export_filename(self, request, queryset, file_format):
date_str = datetime.now().strftime('%Y-%m-%d')
filename = "%s-%s.%s" % (self.model.__name__,
date_str,
file_format.get_extension())
return filename
def get_export_queryset(self, request):
"""
Returns export queryset.
Default implementation respects applied search and filters.
"""
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
if self.get_actions(request):
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
changelist_kwargs = {
'request': request,
'model': self.model,
'list_display': list_display,
'list_display_links': list_display_links,
'list_filter': list_filter,
'date_hierarchy': self.date_hierarchy,
'search_fields': search_fields,
'list_select_related': self.list_select_related,
'list_per_page': self.list_per_page,
'list_max_show_all': self.list_max_show_all,
'list_editable': self.list_editable,
'model_admin': self,
}
if django.VERSION >= (2, 1):
changelist_kwargs['sortable_by'] = self.sortable_by
cl = ChangeList(**changelist_kwargs)
return cl.get_queryset(request)
def get_export_data(self, file_format, queryset, *args, **kwargs):
"""
Returns file_format representation for given queryset.
"""
request = kwargs.pop("request")
if not self.has_export_permission(request):
raise PermissionDenied
resource_class = self.get_export_resource_class()
data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)
export_data = file_format.export_data(data)
return export_data
def get_export_context_data(self, **kwargs):
return self.get_context_data(**kwargs)
def get_context_data(self, **kwargs):
return {}
def export_action(self, request, *args, **kwargs):
if not self.has_export_permission(request):
raise PermissionDenied
formats = self.get_export_formats()
form = ExportForm(formats, request.POST or None)
if form.is_valid():
file_format = formats[
int(form.cleaned_data['file_format'])
]()
queryset = self.get_export_queryset(request)
export_data = self.get_export_data(file_format, queryset, request=request)
content_type = file_format.get_content_type()
response = HttpResponse(export_data, content_type=content_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % (
self.get_export_filename(request, queryset, file_format),
)
post_export.send(sender=None, model=self.model)
return response
context = self.get_export_context_data()
context.update(self.admin_site.each_context(request))
context['title'] = _("Export")
context['form'] = form
context['opts'] = self.model._meta
request.current_app = self.admin_site.name
return TemplateResponse(request, [self.export_template_name],
context)
def changelist_view(self, request, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context['has_export_permission'] = self.has_export_permission(request)
return super().changelist_view(request, extra_context)
class ImportExportMixin(ImportMixin, ExportMixin):
"""
Import and export mixin.
"""
#: template for change_list view
change_list_template = 'admin/import_export/change_list_import_export.html'
class ImportExportModelAdmin(ImportExportMixin, admin.ModelAdmin):
"""
Subclass of ModelAdmin with import/export functionality.
"""
class ExportActionMixin(ExportMixin):
"""
Mixin with export functionality implemented as an admin action.
"""
# Don't use custom change list template.
change_list_template = None
def __init__(self, *args, **kwargs):
"""
Adds a custom action form initialized with the available export
formats.
"""
choices = []
formats = self.get_export_formats()
if formats:
choices.append(('', '---'))
for i, f in enumerate(formats):
choices.append((str(i), f().get_title()))
self.action_form = export_action_form_factory(choices)
super().__init__(*args, **kwargs)
def export_admin_action(self, request, queryset):
"""
Exports the selected rows using file_format.
"""
export_format = request.POST.get('file_format')
if not export_format:
messages.warning(request, _('You must select an export format.'))
else:
formats = self.get_export_formats()
file_format = formats[int(export_format)]()
export_data = self.get_export_data(file_format, queryset, request=request)
content_type = file_format.get_content_type()
response = HttpResponse(export_data, content_type=content_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % (
self.get_export_filename(request, queryset, file_format),
)
return response
export_admin_action.short_description = _(
'Export selected %(verbose_name_plural)s')
actions = admin.ModelAdmin.actions + [export_admin_action]
@property
def media(self):
super_media = super().media
return forms.Media(js=super_media._js + ['import_export/action_formats.js'], css=super_media._css)
class ExportActionModelAdmin(ExportActionMixin, admin.ModelAdmin):
"""
Subclass of ModelAdmin with export functionality implemented as an
admin action.
"""
class ImportExportActionModelAdmin(ImportMixin, ExportActionModelAdmin):
"""
Subclass of ExportActionModelAdmin with import/export functionality.
Export functionality is implemented as an admin action.
"""
|
import sys
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy._cptree import Application
from cherrypy.test import helper
script_names = ['', '/foo', '/users/fred/blog', '/corp/blog']
class ObjectMappingTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self, name='world'):
return name
@cherrypy.expose
def foobar(self):
return 'bar'
@cherrypy.expose
def default(self, *params, **kwargs):
return 'default:' + repr(params)
@cherrypy.expose
def other(self):
return 'other'
@cherrypy.expose
def extra(self, *p):
return repr(p)
@cherrypy.expose
def redirect(self):
raise cherrypy.HTTPRedirect('dir1/', 302)
def notExposed(self):
return 'not exposed'
@cherrypy.expose
def confvalue(self):
return cherrypy.request.config.get('user')
@cherrypy.expose
def redirect_via_url(self, path):
raise cherrypy.HTTPRedirect(cherrypy.url(path))
@cherrypy.expose
def translate_html(self):
return 'OK'
@cherrypy.expose
def mapped_func(self, ID=None):
return 'ID is %s' % ID
setattr(Root, 'Von B\xfclow', mapped_func)
class Exposing:
@cherrypy.expose
def base(self):
return 'expose works!'
cherrypy.expose(base, '1')
cherrypy.expose(base, '2')
class ExposingNewStyle(object):
@cherrypy.expose
def base(self):
return 'expose works!'
cherrypy.expose(base, '1')
cherrypy.expose(base, '2')
class Dir1:
@cherrypy.expose
def index(self):
return 'index for dir1'
@cherrypy.expose
@cherrypy.config(**{'tools.trailing_slash.extra': True})
def myMethod(self):
return 'myMethod from dir1, path_info is:' + repr(
cherrypy.request.path_info)
@cherrypy.expose
def default(self, *params):
return 'default for dir1, param is:' + repr(params)
class Dir2:
@cherrypy.expose
def index(self):
return 'index for dir2, path is:' + cherrypy.request.path_info
@cherrypy.expose
def script_name(self):
return cherrypy.tree.script_name()
@cherrypy.expose
def cherrypy_url(self):
return cherrypy.url('/extra')
@cherrypy.expose
def posparam(self, *vpath):
return '/'.join(vpath)
class Dir3:
def default(self):
return 'default for dir3, not exposed'
class Dir4:
def index(self):
return 'index for dir4, not exposed'
class DefNoIndex:
@cherrypy.expose
def default(self, *args):
raise cherrypy.HTTPRedirect('contact')
# MethodDispatcher code
@cherrypy.expose
class ByMethod:
def __init__(self, *things):
self.things = list(things)
def GET(self):
return repr(self.things)
def POST(self, thing):
self.things.append(thing)
class Collection:
default = ByMethod('a', 'bit')
Root.exposing = Exposing()
Root.exposingnew = ExposingNewStyle()
Root.dir1 = Dir1()
Root.dir1.dir2 = Dir2()
Root.dir1.dir2.dir3 = Dir3()
Root.dir1.dir2.dir3.dir4 = Dir4()
Root.defnoindex = DefNoIndex()
Root.bymethod = ByMethod('another')
Root.collection = Collection()
d = cherrypy.dispatch.MethodDispatcher()
for url in script_names:
conf = {'/': {'user': (url or '/').split('/')[-2]},
'/bymethod': {'request.dispatch': d},
'/collection': {'request.dispatch': d},
}
cherrypy.tree.mount(Root(), url, conf)
class Isolated:
@cherrypy.expose
def index(self):
return 'made it!'
cherrypy.tree.mount(Isolated(), '/isolated')
@cherrypy.expose
class AnotherApp:
def GET(self):
return 'milk'
cherrypy.tree.mount(AnotherApp(), '/app',
{'/': {'request.dispatch': d}})
def testObjectMapping(self):
for url in script_names:
self.script_name = url
self.getPage('/')
self.assertBody('world')
self.getPage('/dir1/myMethod')
self.assertBody(
"myMethod from dir1, path_info is:'/dir1/myMethod'")
self.getPage('/this/method/does/not/exist')
self.assertBody(
"default:('this', 'method', 'does', 'not', 'exist')")
self.getPage('/extra/too/much')
self.assertBody("('too', 'much')")
self.getPage('/other')
self.assertBody('other')
self.getPage('/notExposed')
self.assertBody("default:('notExposed',)")
self.getPage('/dir1/dir2/')
self.assertBody('index for dir2, path is:/dir1/dir2/')
# Test omitted trailing slash (should be redirected by default).
self.getPage('/dir1/dir2')
self.assertStatus(301)
self.assertHeader('Location', '%s/dir1/dir2/' % self.base())
# Test extra trailing slash (should be redirected if configured).
self.getPage('/dir1/myMethod/')
self.assertStatus(301)
self.assertHeader('Location', '%s/dir1/myMethod' % self.base())
# Test that default method must be exposed in order to match.
self.getPage('/dir1/dir2/dir3/dir4/index')
self.assertBody(
"default for dir1, param is:('dir2', 'dir3', 'dir4', 'index')")
# Test *vpath when default() is defined but not index()
# This also tests HTTPRedirect with default.
self.getPage('/defnoindex')
self.assertStatus((302, 303))
self.assertHeader('Location', '%s/contact' % self.base())
self.getPage('/defnoindex/')
self.assertStatus((302, 303))
self.assertHeader('Location', '%s/defnoindex/contact' %
self.base())
self.getPage('/defnoindex/page')
self.assertStatus((302, 303))
self.assertHeader('Location', '%s/defnoindex/contact' %
self.base())
self.getPage('/redirect')
self.assertStatus('302 Found')
self.assertHeader('Location', '%s/dir1/' % self.base())
if not getattr(cherrypy.server, 'using_apache', False):
# Test that we can use URL's which aren't all valid Python
# identifiers
# This should also test the %XX-unquoting of URL's.
self.getPage('/Von%20B%fclow?ID=14')
self.assertBody('ID is 14')
# Test that %2F in the path doesn't get unquoted too early;
# that is, it should not be used to separate path components.
# See ticket #393.
self.getPage('/page%2Fname')
self.assertBody("default:('page/name',)")
self.getPage('/dir1/dir2/script_name')
self.assertBody(url)
self.getPage('/dir1/dir2/cherrypy_url')
self.assertBody('%s/extra' % self.base())
# Test that configs don't overwrite each other from different apps
self.getPage('/confvalue')
self.assertBody((url or '/').split('/')[-2])
self.script_name = ''
# Test absoluteURI's in the Request-Line
self.getPage('http://%s:%s/' % (self.interface(), self.PORT))
self.assertBody('world')
self.getPage('http://%s:%s/abs/?service=http://192.168.0.1/x/y/z' %
(self.interface(), self.PORT))
self.assertBody("default:('abs',)")
self.getPage('/rel/?service=http://192.168.120.121:8000/x/y/z')
self.assertBody("default:('rel',)")
# Test that the "isolated" app doesn't leak url's into the root app.
# If it did leak, Root.default() would answer with
# "default:('isolated', 'doesnt', 'exist')".
self.getPage('/isolated/')
self.assertStatus('200 OK')
self.assertBody('made it!')
self.getPage('/isolated/doesnt/exist')
self.assertStatus('404 Not Found')
# Make sure /foobar maps to Root.foobar and not to the app
# mounted at /foo. See
# https://github.com/cherrypy/cherrypy/issues/573
self.getPage('/foobar')
self.assertBody('bar')
def test_translate(self):
self.getPage('/translate_html')
self.assertStatus('200 OK')
self.assertBody('OK')
self.getPage('/translate.html')
self.assertStatus('200 OK')
self.assertBody('OK')
self.getPage('/translate-html')
self.assertStatus('200 OK')
self.assertBody('OK')
def test_redir_using_url(self):
for url in script_names:
self.script_name = url
# Test the absolute path to the parent (leading slash)
self.getPage('/redirect_via_url?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
# Test the relative path to the parent (no leading slash)
self.getPage('/redirect_via_url?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
# Test the absolute path to the parent (leading slash)
self.getPage('/redirect_via_url/?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
# Test the relative path to the parent (no leading slash)
self.getPage('/redirect_via_url/?path=./')
self.assertStatus(('302 Found', '303 See Other'))
self.assertHeader('Location', '%s/' % self.base())
def testPositionalParams(self):
self.getPage('/dir1/dir2/posparam/18/24/hut/hike')
self.assertBody('18/24/hut/hike')
# intermediate index methods should not receive posparams;
# only the "final" index method should do so.
self.getPage('/dir1/dir2/5/3/sir')
self.assertBody("default for dir1, param is:('dir2', '5', '3', 'sir')")
# test that extra positional args raise a 404 Not Found
# See https://github.com/cherrypy/cherrypy/issues/733.
self.getPage('/dir1/dir2/script_name/extra/stuff')
self.assertStatus(404)
def testExpose(self):
# Test the cherrypy.expose function/decorator
self.getPage('/exposing/base')
self.assertBody('expose works!')
self.getPage('/exposing/1')
self.assertBody('expose works!')
self.getPage('/exposing/2')
self.assertBody('expose works!')
self.getPage('/exposingnew/base')
self.assertBody('expose works!')
self.getPage('/exposingnew/1')
self.assertBody('expose works!')
self.getPage('/exposingnew/2')
self.assertBody('expose works!')
def testMethodDispatch(self):
self.getPage('/bymethod')
self.assertBody("['another']")
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage('/bymethod', method='HEAD')
self.assertBody('')
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage('/bymethod', method='POST', body='thing=one')
self.assertBody('')
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage('/bymethod')
self.assertBody(repr(['another', ntou('one')]))
self.assertHeader('Allow', 'GET, HEAD, POST')
self.getPage('/bymethod', method='PUT')
self.assertErrorPage(405)
self.assertHeader('Allow', 'GET, HEAD, POST')
# Test default with posparams
self.getPage('/collection/silly', method='POST')
self.getPage('/collection', method='GET')
self.assertBody("['a', 'bit', 'silly']")
# Test custom dispatcher set on app root (see #737).
self.getPage('/app')
self.assertBody('milk')
def testTreeMounting(self):
class Root(object):
@cherrypy.expose
def hello(self):
return 'Hello world!'
# When mounting an application instance,
# we can't specify a different script name in the call to mount.
a = Application(Root(), '/somewhere')
self.assertRaises(ValueError, cherrypy.tree.mount, a, '/somewhereelse')
# When mounting an application instance...
a = Application(Root(), '/somewhere')
# ...we MUST allow an identical script name in the call to mount...
cherrypy.tree.mount(a, '/somewhere')
self.getPage('/somewhere/hello')
self.assertStatus(200)
# ...and MUST allow a missing script_name.
del cherrypy.tree.apps['/somewhere']
cherrypy.tree.mount(a)
self.getPage('/somewhere/hello')
self.assertStatus(200)
# In addition, we MUST be able to create an Application using
# script_name == None for access to the wsgi_environ.
a = Application(Root(), script_name=None)
# However, this does not apply to tree.mount
self.assertRaises(TypeError, cherrypy.tree.mount, a, None)
def testKeywords(self):
if sys.version_info < (3,):
return self.skip('skipped (Python 3 only)')
exec("""class Root(object):
@cherrypy.expose
def hello(self, *, name='world'):
return 'Hello %s!' % name
cherrypy.tree.mount(Application(Root(), '/keywords'))""")
self.getPage('/keywords/hello')
self.assertStatus(200)
self.getPage('/keywords/hello/extra')
self.assertStatus(404)
|
from __future__ import unicode_literals
import sys
import argparse
from lib.fun.fun import lengthchecker, cool
from lib.data.data import paths, pystrs, pyoptions
def parse_args():
parser = argparse.ArgumentParser(prog='pydictor',
formatter_class=argparse.RawTextHelpFormatter,
description=cool.green('*[+] A Useful Hacker Dictionary Builder. [+]*') +
pyoptions.CRLF +
cool.green(' [+] Built by LandGrey email:[email protected]') +
pyoptions.CRLF,
usage=cool.orange('''
pydictor.py [options]
-base [type]
-char [custom_char]
-chunk [chunk1] [chunk2] ...
-extend [string_or_file]
-plug [{plugin}]
--conf [expression_or_file]
--pattern [expression_or_file]
--sedb
-o,--output [directory]
-tool [{tool}]
--len [minlen] [maxlen]
--head [prefix_string]
--tail [suffix_string]
--encode [{encode}]
--occur [letter] [digital] [special]
--types [letter] [digital] [special]
--repeat [letter] [digital] [special]
--regex [regex]
--level [code]
--leet [code]
--dmy'''.format(plugin=",".join(pyoptions.plug_range), encode=",".join(pyoptions.encode_range),
tool=",".join(pyoptions.tool_range))))
parser.add_argument('-base', dest='base', choices=[pystrs.base_dic_type[0], pystrs.base_dic_type[1],
pystrs.base_dic_type[2], pystrs.base_dic_type[3],
pystrs.base_dic_type[4], pystrs.base_dic_type[5],
pystrs.base_dic_type[6]], metavar='Type',
default='', help=cool.yellow('''Choose from ({0}, {1}, {2}, {3}, {4}, {5}, {6})
{0} digital [0 - 9]
{1} lowercase letters [a - z]
{2} capital letters [A - Z]
{3} Mix {0} and {1} [0-9 a-z]
{4} Mix {0} and {2} [0-9 A-Z]
{5} Mix {1} and {2} [a-z A-Z]
{6} Mix {0}, {1} and {3} [0-9 a-z A-Z]'''.format(pystrs.base_dic_type[0], pystrs.base_dic_type[1],
pystrs.base_dic_type[2], pystrs.base_dic_type[3],
pystrs.base_dic_type[4], pystrs.base_dic_type[5],
pystrs.base_dic_type[6])))
parser.add_argument('-char', dest='char', metavar='character', default='',
help=cool.yellow('Use custom characters to build the dictionary'))
parser.add_argument('-chunk', dest='chunk', metavar='arg', nargs='+', type=str, default='',
help=cool.yellow('Use multiple chunks to build the dictionary'))
parser.add_argument('-extend', dest='extend', metavar='arg', nargs='+', type=str, default='',
help=cool.yellow('Extend the string list or file'))
parser.add_argument('-plug', dest='plug', metavar='arg', nargs='+', type=str, default='',
help=cool.yellow('''{plugins_desc}'''.format(plugins_desc=pyoptions.plugins_desc)))
parser.add_argument('--conf', dest='conf', nargs='?', metavar='file_path', default='default', const='const',
help=cool.yellow("Use the configuration string or file build the dictionary"))
parser.add_argument('--pattern', dest='pattern', nargs='?', metavar='file_path', default='default', const='const',
help=cool.yellow("Use pattern string build the dictionary"))
parser.add_argument('--sedb', dest='sedb', default='', action="store_true",
help=cool.yellow('Enter the Social Engineering Dictionary Builder'))
parser.add_argument('-o', '--output', dest='output', metavar='path', type=str, default=paths.results_path,
help=cool.yellow('''Set the output directory path
default: %s''' % paths.results_path))
parser.add_argument('-tool', dest='tool', metavar='arg', nargs='+', type=str, default='',
help=cool.yellow('''{tools_desc}'''.format(tools_desc=pyoptions.tools_desc)))
parser.add_argument('--len', dest='len', metavar=('minlen', 'maxlen'), nargs=2, type=int,
default=(pyoptions.minlen, pyoptions.maxlen),
help=cool.yellow('''Default: min=%s max=%s''' % (pyoptions.minlen, pyoptions.maxlen)))
parser.add_argument('--head', dest='head', metavar='prefix', type=str, default='',
help=cool.yellow('Add a prefix string to each item'))
parser.add_argument('--tail', dest='tail', metavar='suffix', type=str, default='',
help=cool.yellow('Add a suffix string to each item'))
parser.add_argument('--encode', dest='encode', metavar='encode', default='none',
choices=pyoptions.encode_range,
help=cool.yellow('''{encode_desc}'''.format(encode_desc=pyoptions.encode_desc)))
parser.add_argument('--occur', dest='occur', metavar=('letter', 'digital', 'special'), nargs=3, type=str,
default=(pyoptions.letter_occur, pyoptions.digital_occur, pyoptions.special_occur),
help=cool.yellow('''Default: letter "%s" digital "%s" special "%s"''' %
(pyoptions.letter_occur, pyoptions.digital_occur, pyoptions.special_occur)))
parser.add_argument('--types', dest='types', metavar=('letter', 'digital', 'special'), nargs=3, type=str,
default=(pyoptions.letter_types, pyoptions.digital_types, pyoptions.special_types),
help=cool.yellow('''Default: letter "%s" digital "%s" special "%s"''' %
(pyoptions.letter_types, pyoptions.digital_types, pyoptions.special_types)))
parser.add_argument('--repeat', dest='repeat', metavar=('letter', 'digital', 'special'), nargs=3, type=str,
default=(pyoptions.letter_repeat, pyoptions.digital_repeat, pyoptions.special_repeat),
help=cool.yellow('''Default: letter "%s" digital "%s" special "%s"''' %
(pyoptions.letter_repeat, pyoptions.digital_repeat, pyoptions.special_repeat)))
parser.add_argument('--regex', dest='regex', metavar='regex', nargs=1, type=str,
default=pyoptions.filter_regex, help=cool.yellow('''Filter by regex, Default: (%s)''' %
pyoptions.filter_regex))
parser.add_argument('--level', dest='level', metavar='code', default=pyoptions.level, type=int,
help=cool.yellow('''Use code [1-5] to filter results, default: {0}'''.format(pyoptions.level)))
parser.add_argument('--leet', dest='leet', metavar='code', nargs='+', type=int, default=pyoptions.leetmode_code,
help=cool.yellow('Choose leet mode code (0, 1, 2, 11-19, 21-29)'))
parser.add_argument('--more', dest='more', default='', action="store_true",
help=cool.yellow('Append a simple word list to the extend function results, default: false'))
parser.add_argument('--dmy', dest='dmy', default='', action="store_true",
help=cool.yellow('Use ddMMyyyy format date, default date format: yyyyMMdd'))
if len(sys.argv) == 1:
sys.argv.append('-h')
args = parser.parse_args()
check_args(args)
return args
def check_args(args):
lengthchecker(args.len[0], args.len[1])
|
import os
from behave import given
from behave import then
from behave import when
from path import Path
from paasta_tools.utils import _run
@given("a simple service to test")
def given_simple_service(context):
context.fake_service_name = "fake_simple_service"
assert os.path.isfile(os.path.join(context.fake_service_name, "Dockerfile"))
assert os.path.isfile(os.path.join(context.fake_service_name, "Makefile"))
@when(
"we run paasta local-run on a Marathon service in non-interactive mode "
'with environment variable "{var}" set to "{val}"'
)
def non_interactive_local_run(context, var, val):
with Path("fake_simple_service"):
# The local-run invocation here is designed to run and return a sentinel
# exit code that we can look out for. It also sleeps a few seconds
# because the local-run code currently crashes when the docker
# container dies before it gets a chance to look up the container id
# (which causes Jenkins flakes). The sleep can be removed once local-run
# understands that containers can die quickly.
localrun_cmd = (
"paasta local-run "
"--yelpsoa-config-root ../fake_soa_configs_local_run/ "
"--service fake_simple_service "
"--cluster test-cluster "
"--instance main "
"--build "
"""--cmd '/bin/sh -c "echo \\"%s=$%s\\" && sleep 2s && exit 42"' """
% (var, var)
)
context.return_code, context.output = _run(command=localrun_cmd, timeout=90)
@then(
'we should see the environment variable "{var}" with the value "{val}" in the output'
)
def env_var_in_output(context, var, val):
assert f"{var}={val}" in context.output
@when("we run paasta local-run on an interactive job")
def local_run_on_adhoc_job(context):
with Path("fake_simple_service"):
local_run_cmd = (
"paasta local-run "
"--yelpsoa-config-root ../fake_soa_configs_local_run/ "
"--service fake_simple_service "
"--cluster test-cluster "
"--instance sample_adhoc_job "
"--build "
)
context.return_code, context.output = _run(command=local_run_cmd, timeout=90)
@when("we run paasta local-run on a tron action")
def local_run_on_tron_action(context):
with Path("fake_simple_service"):
local_run_cmd = (
"paasta local-run "
"--yelpsoa-config-root ../fake_soa_configs_local_run/ "
"--service fake_simple_service "
"--cluster test-cluster "
"--instance sample_tron_job.action1 "
"--build "
)
context.return_code, context.output = _run(command=local_run_cmd, timeout=90)
|
import random
import string
import pytest
import arctic._compression as c
@pytest.mark.parametrize("compress",
[c.compress, c.compressHC],
ids=('arctic', 'arcticHC'))
def test_roundtrip(compress):
_str = b"hello world"
cstr = compress(_str)
assert _str == c.decompress(cstr)
@pytest.mark.parametrize("n", [1, 1e2, 1e3, 1e6])
def test_roundtrip_multi(n):
_str = random_string(n)
cstr = c.compress(_str)
assert _str == c.decompress(cstr)
@pytest.mark.parametrize("n, length", [(1, 10), (100, 10), (1000, 10)])
def test_roundtrip_arr(n, length):
_strarr = [random_string(length) for _ in range(n)]
cstr = c.compress_array(_strarr)
assert _strarr == c.decompress_array(cstr)
@pytest.mark.parametrize("n, length", [(1, 10), (100, 10), (1000, 10)])
def test_roundtrip_arrHC(n, length):
_strarr = [random_string(length) for _ in range(n)]
cstr = c.compressHC_array(_strarr)
assert _strarr == c.decompress_array(cstr)
def test_arr_zero():
assert [] == c.compressHC_array([])
assert [] == c.decompress_array([])
def random_string(N):
_str = ''.join(random.choice(string.printable) for _ in range(int(N)))
return _str.encode('ascii')
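# A minimal usage sketch outside pytest (assuming the same arctic._compression
# module imported above as `c`): single buffers and lists of buffers round-trip
# through compress/decompress and compress_array/decompress_array.
def _example_roundtrip():
    payload = b"some bytes"
    assert c.decompress(c.compress(payload)) == payload
    chunks = [b"a" * 10, b"b" * 10]
    assert c.decompress_array(c.compress_array(chunks)) == chunks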
|
import os
import sys
import unittest
# make sure we import test.py from the right place
script_path = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.insert(0, script_path)
test_base_path = os.path.join(script_path, 'src')
sys.path.insert(1, test_base_path)
import test
from DD import DD
cfg = test.Options()
cfg.verbosity = 0
cfg.basedir = test_base_path
cfg.unit_tests = True
def write(line, *args):
if args:
line = line % args
sys.stderr.write(line + '\n')
def find_tests():
test_files = test.get_test_files(cfg)
return test.get_test_cases(test_files, cfg)
class DDTester(DD):
def _test(self, test_cases):
if not test_cases:
return self.PASS
write('Running subset of %d tests %s',
len(test_cases), self.coerce(test_cases))
test_cases = [ item[-1] for item in test_cases ]
pid = os.fork()
if not pid:
# child executes tests
runner = test.CustomTestRunner(cfg, None)
suite = unittest.TestSuite()
suite.addTests(test_cases)
os._exit( not runner.run(suite).wasSuccessful() )
cid, retval = os.waitpid(pid, 0)
if retval:
            write('exit status: %d, signal: %d', retval >> 8, retval & 0xFF)
        if (retval & 0xFF) > 2:  # signal received?
return self.FAIL
return self.PASS
def coerce(self, test_cases):
if not test_cases:
return '[]'
test_cases = [ item[-1] for item in test_cases ]
return '[%s .. %s]' % (test_cases[0].id(), test_cases[-1].id())
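# A hedged side note: the manual shifting above can also be expressed with the
# os helpers for waitpid() statuses. This sketch is illustrative only and is
# not used by DDTester.
def describe_status(status):
    if os.WIFSIGNALED(status):
        return 'killed by signal %d' % os.WTERMSIG(status)
    return 'exited with status %d' % os.WEXITSTATUS(status)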
def dd_tests():
tests = find_tests()
write('Found %d tests', len(tests))
dd = DDTester()
min_tests = dd.ddmin( list(enumerate(tests)) )
return [ item[-1] for item in min_tests ]
if __name__ == '__main__':
write('Failing tests:\n%s', '\n'.join([test.id() for test in dd_tests()]))
|
from collections import OrderedDict
import logging
import os
from os import O_CREAT, O_TRUNC, O_WRONLY, stat_result
from typing import Dict, List, Optional, Union
import ruamel.yaml
from ruamel.yaml import YAML # type: ignore
from ruamel.yaml.compat import StringIO
from ruamel.yaml.constructor import SafeConstructor
from ruamel.yaml.error import YAMLError
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.yaml import secret_yaml
_LOGGER = logging.getLogger(__name__)
JSON_TYPE = Union[List, Dict, str] # pylint: disable=invalid-name
class ExtSafeConstructor(SafeConstructor):
"""Extended SafeConstructor."""
name: Optional[str] = None
class UnsupportedYamlError(HomeAssistantError):
"""Unsupported YAML."""
class WriteError(HomeAssistantError):
"""Error writing the data."""
def _include_yaml(
constructor: ExtSafeConstructor, node: ruamel.yaml.nodes.Node
) -> JSON_TYPE:
"""Load another YAML file and embeds it using the !include tag.
Example:
device_tracker: !include device_tracker.yaml
"""
if constructor.name is None:
raise HomeAssistantError(
"YAML include error: filename not set for %s" % node.value
)
fname = os.path.join(os.path.dirname(constructor.name), node.value)
return load_yaml(fname, False)
def _yaml_unsupported(
constructor: ExtSafeConstructor, node: ruamel.yaml.nodes.Node
) -> None:
raise UnsupportedYamlError(
f"Unsupported YAML, you can not use {node.tag} in "
f"{os.path.basename(constructor.name or '(None)')}"
)
def object_to_yaml(data: JSON_TYPE) -> str:
"""Create yaml string from object."""
yaml = YAML(typ="rt")
yaml.indent(sequence=4, offset=2)
stream = StringIO()
try:
yaml.dump(data, stream)
result: str = stream.getvalue()
return result
except YAMLError as exc:
_LOGGER.error("YAML error: %s", exc)
raise HomeAssistantError(exc) from exc
def yaml_to_object(data: str) -> JSON_TYPE:
"""Create object from yaml string."""
yaml = YAML(typ="rt")
try:
result: Union[List, Dict, str] = yaml.load(data)
return result
except YAMLError as exc:
_LOGGER.error("YAML error: %s", exc)
raise HomeAssistantError(exc) from exc
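# Illustrative round trip (a sketch, not part of the Home Assistant API):
# object_to_yaml and yaml_to_object are inverses for plain data structures.
def _example_roundtrip() -> None:
    data: JSON_TYPE = {"greeting": "hello", "items": [1, 2, 3]}
    assert yaml_to_object(object_to_yaml(data)) == data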
def load_yaml(fname: str, round_trip: bool = False) -> JSON_TYPE:
"""Load a YAML file."""
if round_trip:
yaml = YAML(typ="rt")
yaml.preserve_quotes = True
else:
if ExtSafeConstructor.name is None:
ExtSafeConstructor.name = fname
yaml = YAML(typ="safe")
yaml.Constructor = ExtSafeConstructor
try:
with open(fname, encoding="utf-8") as conf_file:
            # If the configuration file is empty, YAML returns None;
            # we convert that to an empty dict
return yaml.load(conf_file) or OrderedDict()
except YAMLError as exc:
_LOGGER.error("YAML error in %s: %s", fname, exc)
raise HomeAssistantError(exc) from exc
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc) from exc
def save_yaml(fname: str, data: JSON_TYPE) -> None:
"""Save a YAML file."""
yaml = YAML(typ="rt")
yaml.indent(sequence=4, offset=2)
tmp_fname = f"{fname}__TEMP__"
try:
try:
file_stat = os.stat(fname)
except OSError:
file_stat = stat_result((0o644, -1, -1, -1, -1, -1, -1, -1, -1, -1))
with open(
os.open(tmp_fname, O_WRONLY | O_CREAT | O_TRUNC, file_stat.st_mode),
"w",
encoding="utf-8",
) as temp_file:
yaml.dump(data, temp_file)
os.replace(tmp_fname, fname)
if hasattr(os, "chown") and file_stat.st_ctime > -1:
try:
os.chown(fname, file_stat.st_uid, file_stat.st_gid)
except OSError:
pass
except YAMLError as exc:
_LOGGER.error(str(exc))
raise HomeAssistantError(exc) from exc
except OSError as exc:
_LOGGER.exception("Saving YAML file %s failed: %s", fname, exc)
raise WriteError(exc) from exc
finally:
if os.path.exists(tmp_fname):
try:
os.remove(tmp_fname)
except OSError as exc:
# If we are cleaning up then something else went wrong, so
# we should suppress likely follow-on errors in the cleanup
_LOGGER.error("YAML replacement cleanup failed: %s", exc)
ExtSafeConstructor.add_constructor("!secret", secret_yaml)
ExtSafeConstructor.add_constructor("!include", _include_yaml)
ExtSafeConstructor.add_constructor(None, _yaml_unsupported)
|
import pytest
from homeassistant.config import async_process_ha_core_config
from homeassistant.setup import async_setup_component
@pytest.fixture
def mock_client(hass, hass_client):
"""Create http client for webhooks."""
hass.loop.run_until_complete(async_setup_component(hass, "webhook", {}))
return hass.loop.run_until_complete(hass_client())
async def test_unregistering_webhook(hass, mock_client):
"""Test unregistering a webhook."""
hooks = []
webhook_id = hass.components.webhook.async_generate_id()
async def handle(*args):
"""Handle webhook."""
hooks.append(args)
hass.components.webhook.async_register("test", "Test hook", webhook_id, handle)
resp = await mock_client.post(f"/api/webhook/{webhook_id}")
assert resp.status == 200
assert len(hooks) == 1
hass.components.webhook.async_unregister(webhook_id)
resp = await mock_client.post(f"/api/webhook/{webhook_id}")
assert resp.status == 200
assert len(hooks) == 1
async def test_generate_webhook_url(hass):
"""Test we generate a webhook url correctly."""
await async_process_ha_core_config(
hass,
{"external_url": "https://example.com"},
)
url = hass.components.webhook.async_generate_url("some_id")
assert url == "https://example.com/api/webhook/some_id"
async def test_async_generate_path(hass):
"""Test generating just the path component of the url correctly."""
path = hass.components.webhook.async_generate_path("some_id")
assert path == "/api/webhook/some_id"
async def test_posting_webhook_nonexisting(hass, mock_client):
"""Test posting to a nonexisting webhook."""
resp = await mock_client.post("/api/webhook/non-existing")
assert resp.status == 200
async def test_posting_webhook_invalid_json(hass, mock_client):
"""Test posting to a nonexisting webhook."""
hass.components.webhook.async_register("test", "Test hook", "hello", None)
resp = await mock_client.post("/api/webhook/hello", data="not-json")
assert resp.status == 200
async def test_posting_webhook_json(hass, mock_client):
"""Test posting a webhook with JSON data."""
hooks = []
webhook_id = hass.components.webhook.async_generate_id()
async def handle(*args):
"""Handle webhook."""
hooks.append((args[0], args[1], await args[2].text()))
hass.components.webhook.async_register("test", "Test hook", webhook_id, handle)
resp = await mock_client.post(f"/api/webhook/{webhook_id}", json={"data": True})
assert resp.status == 200
assert len(hooks) == 1
assert hooks[0][0] is hass
assert hooks[0][1] == webhook_id
assert hooks[0][2] == '{"data": true}'
async def test_posting_webhook_no_data(hass, mock_client):
"""Test posting a webhook with no data."""
hooks = []
webhook_id = hass.components.webhook.async_generate_id()
async def handle(*args):
"""Handle webhook."""
hooks.append(args)
hass.components.webhook.async_register("test", "Test hook", webhook_id, handle)
resp = await mock_client.post(f"/api/webhook/{webhook_id}")
assert resp.status == 200
assert len(hooks) == 1
assert hooks[0][0] is hass
assert hooks[0][1] == webhook_id
assert hooks[0][2].method == "POST"
assert await hooks[0][2].text() == ""
async def test_webhook_put(hass, mock_client):
"""Test sending a put request to a webhook."""
hooks = []
webhook_id = hass.components.webhook.async_generate_id()
async def handle(*args):
"""Handle webhook."""
hooks.append(args)
hass.components.webhook.async_register("test", "Test hook", webhook_id, handle)
resp = await mock_client.put(f"/api/webhook/{webhook_id}")
assert resp.status == 200
assert len(hooks) == 1
assert hooks[0][0] is hass
assert hooks[0][1] == webhook_id
assert hooks[0][2].method == "PUT"
async def test_webhook_head(hass, mock_client):
"""Test sending a head request to a webhook."""
hooks = []
webhook_id = hass.components.webhook.async_generate_id()
async def handle(*args):
"""Handle webhook."""
hooks.append(args)
hass.components.webhook.async_register("test", "Test hook", webhook_id, handle)
resp = await mock_client.head(f"/api/webhook/{webhook_id}")
assert resp.status == 200
assert len(hooks) == 1
assert hooks[0][0] is hass
assert hooks[0][1] == webhook_id
assert hooks[0][2].method == "HEAD"
async def test_listing_webhook(hass, hass_ws_client, hass_access_token):
"""Test unregistering a webhook."""
assert await async_setup_component(hass, "webhook", {})
client = await hass_ws_client(hass, hass_access_token)
hass.components.webhook.async_register("test", "Test hook", "my-id", None)
await client.send_json({"id": 5, "type": "webhook/list"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["success"]
assert msg["result"] == [
{"webhook_id": "my-id", "domain": "test", "name": "Test hook"}
]
|
import sys
from paasta_tools.kubernetes_tools import get_all_kubernetes_services_running_here
from paasta_tools.marathon_tools import marathon_services_running_here
from paasta_tools.mesos_tools import MesosSlaveConnectionError
from paasta_tools.tron_tools import tron_jobs_running_here
from paasta_tools.utils import _log
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_system_paasta_config
def broadcast_log_all_services_running_here(line: str, soa_dir=DEFAULT_SOA_DIR) -> None:
"""Log a line of text to paasta logs of all services running on this host.
:param line: text to log
"""
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
services = get_all_services_running_here(cluster, soa_dir)
for service, instance, _ in services:
_log(
line=line,
service=service,
instance=instance,
component="monitoring",
cluster=cluster,
)
def get_all_services_running_here(cluster, soa_dir):
try:
marathon_services = marathon_services_running_here()
except MesosSlaveConnectionError:
marathon_services = []
try:
tron_services = tron_jobs_running_here()
except MesosSlaveConnectionError:
tron_services = []
try:
kubernetes_services = get_all_kubernetes_services_running_here()
except Exception:
kubernetes_services = []
return marathon_services + tron_services + kubernetes_services
def main() -> None:
broadcast_log_all_services_running_here(sys.stdin.read().strip())
if __name__ == "__main__":
main()
|
import datetime
import os
import pathlib
import subprocess
import sys
from contextlib import suppress
import sphinx_autosummary_accessors
from jinja2.defaults import DEFAULT_FILTERS
import xarray
allowed_failures = set()
print("python exec:", sys.executable)
print("sys.path:", sys.path)
if "conda" in sys.executable:
print("conda environment:")
subprocess.run(["conda", "list"])
else:
print("pip environment:")
subprocess.run(["pip", "list"])
print(f"xarray: {xarray.__version__}, {xarray.__file__}")
with suppress(ImportError):
import matplotlib
matplotlib.use("Agg")
try:
import rasterio # noqa: F401
except ImportError:
allowed_failures.update(
["gallery/plot_rasterio_rgb.py", "gallery/plot_rasterio.py"]
)
try:
import cartopy # noqa: F401
except ImportError:
allowed_failures.update(
[
"gallery/plot_cartopy_facetgrid.py",
"gallery/plot_rasterio_rgb.py",
"gallery/plot_rasterio.py",
]
)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.extlinks",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"nbsphinx",
"sphinx_autosummary_accessors",
"scanpydoc.rtd_github_links",
]
extlinks = {
"issue": ("https://github.com/pydata/xarray/issues/%s", "GH"),
"pull": ("https://github.com/pydata/xarray/pull/%s", "PR"),
}
nbsphinx_timeout = 600
nbsphinx_execute = "always"
nbsphinx_prolog = """
{% set docname = env.doc2path(env.docname, base=None) %}
You can run this notebook in a `live session <https://mybinder.org/v2/gh/pydata/xarray/doc/examples/master?urlpath=lab/tree/doc/{{ docname }}>`_ |Binder| or view it `on Github <https://github.com/pydata/xarray/blob/master/doc/{{ docname }}>`_.
.. |Binder| image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/pydata/xarray/master?urlpath=lab/tree/doc/{{ docname }}
"""
autosummary_generate = True
# for scanpydoc's jinja filter
project_dir = pathlib.Path(__file__).parent.parent
html_context = {
"github_user": "pydata",
"github_repo": "xarray",
"github_version": "master",
}
autodoc_typehints = "none"
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = False
napoleon_use_rtype = False
napoleon_preprocess_types = True
napoleon_type_aliases = {
# general terms
"sequence": ":term:`sequence`",
"iterable": ":term:`iterable`",
"callable": ":py:func:`callable`",
"dict_like": ":term:`dict-like <mapping>`",
"dict-like": ":term:`dict-like <mapping>`",
"mapping": ":term:`mapping`",
"file-like": ":term:`file-like <file-like object>`",
# special terms
# "same type as caller": "*same type as caller*", # does not work, yet
# "same type as values": "*same type as values*", # does not work, yet
# stdlib type aliases
"MutableMapping": "~collections.abc.MutableMapping",
"sys.stdout": ":obj:`sys.stdout`",
"timedelta": "~datetime.timedelta",
"string": ":class:`string <str>`",
# numpy terms
"array_like": ":term:`array_like`",
"array-like": ":term:`array-like <array_like>`",
"scalar": ":term:`scalar`",
"array": ":term:`array`",
"hashable": ":term:`hashable <name>`",
# matplotlib terms
"color-like": ":py:func:`color-like <matplotlib.colors.is_color_like>`",
"matplotlib colormap name": ":doc:matplotlib colormap name <Colormap reference>",
"matplotlib axes object": ":py:class:`matplotlib axes object <matplotlib.axes.Axes>`",
"colormap": ":py:class:`colormap <matplotlib.colors.Colormap>`",
# objects without namespace
"DataArray": "~xarray.DataArray",
"Dataset": "~xarray.Dataset",
"Variable": "~xarray.Variable",
"ndarray": "~numpy.ndarray",
"MaskedArray": "~numpy.ma.MaskedArray",
"dtype": "~numpy.dtype",
"ComplexWarning": "~numpy.ComplexWarning",
"Index": "~pandas.Index",
"MultiIndex": "~pandas.MultiIndex",
"CategoricalIndex": "~pandas.CategoricalIndex",
"TimedeltaIndex": "~pandas.TimedeltaIndex",
"DatetimeIndex": "~pandas.DatetimeIndex",
"Series": "~pandas.Series",
"DataFrame": "~pandas.DataFrame",
"Categorical": "~pandas.Categorical",
"Path": "~~pathlib.Path",
# objects with abbreviated namespace (from pandas)
"pd.Index": "~pandas.Index",
"pd.NaT": "~pandas.NaT",
}
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates", sphinx_autosummary_accessors.templates_path]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "xarray"
copyright = "2014-%s, xarray Developers" % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = xarray.__version__.split("+")[0]
# The full version, including alpha/beta/rc tags.
release = xarray.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%Y-%m-%d"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"logo_only": True}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/dataset-diagram-logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Sometimes the savefig directory doesn't exist and needs to be created
# https://github.com/ipython/ipython/issues/8733
# becomes obsolete when we can pin ipython>=5.2; see ci/requirements/doc.yml
ipython_savefig_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "_build", "html", "_static"
)
if not os.path.exists(ipython_savefig_dir):
os.makedirs(ipython_savefig_dir)
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = today_fmt
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "xarraydoc"
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# # The paper size ('letterpaper' or 'a4paper').
# # 'papersize': 'letterpaper',
# # The font size ('10pt', '11pt' or '12pt').
# # 'pointsize': '10pt',
# # Additional stuff for the LaTeX preamble.
# # 'preamble': '',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# ("index", "xarray.tex", "xarray Documentation", "xarray Developers", "manual")
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [("index", "xarray", "xarray Documentation", ["xarray Developers"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# (
# "index",
# "xarray",
# "xarray Documentation",
# "xarray Developers",
# "xarray",
# "N-D labeled arrays and datasets in Python.",
# "Miscellaneous",
# )
# ]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"iris": ("https://scitools.org.uk/iris/docs/latest", None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"numba": ("https://numba.pydata.org/numba-doc/latest", None),
"matplotlib": ("https://matplotlib.org", None),
"dask": ("https://docs.dask.org/en/latest", None),
"cftime": ("https://unidata.github.io/cftime", None),
"rasterio": ("https://rasterio.readthedocs.io/en/latest", None),
"sparse": ("https://sparse.pydata.org/en/latest/", None),
}
def escape_underscores(string):
return string.replace("_", r"\_")
def setup(app):
DEFAULT_FILTERS["escape_underscores"] = escape_underscores
|
import paho.mqtt.client as mqtt
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
DOMAIN = "shiftr"
SHIFTR_BROKER = "broker.shiftr.io"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
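# Example configuration.yaml entry (illustrative; the values are placeholders
# and must satisfy CONFIG_SCHEMA above):
#
# shiftr:
#   username: YOUR_SHIFTR_USERNAME
#   password: YOUR_SHIFTR_PASSWORD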
def setup(hass, config):
"""Initialize the Shiftr.io MQTT consumer."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
client_id = "HomeAssistant"
port = 1883
keepalive = 600
mqttc = mqtt.Client(client_id, protocol=mqtt.MQTTv311)
mqttc.username_pw_set(username, password=password)
mqttc.connect(SHIFTR_BROKER, port=port, keepalive=keepalive)
def stop_shiftr(event):
"""Stop the Shiftr.io MQTT component."""
mqttc.disconnect()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_shiftr)
def shiftr_event_listener(event):
"""Listen for new messages on the bus and sends them to Shiftr.io."""
state = event.data.get("new_state")
topic = state.entity_id.replace(".", "/")
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
try:
mqttc.publish(topic, _state, qos=0, retain=False)
if state.attributes:
for attribute, data in state.attributes.items():
mqttc.publish(
f"/{topic}/{attribute}", str(data), qos=0, retain=False
)
except RuntimeError:
pass
hass.bus.listen(EVENT_STATE_CHANGED, shiftr_event_listener)
return True
|
from pylast import Track
import pytest
from homeassistant.components import sensor
from homeassistant.components.lastfm.sensor import STATE_NOT_SCROBBLING
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
class MockUser:
"""Mock user object for pylast."""
def __init__(self, now_playing_result):
"""Initialize the mock."""
self._now_playing_result = now_playing_result
def get_playcount(self):
"""Get mock play count."""
return 1
def get_image(self):
"""Get mock image."""
pass
def get_recent_tracks(self, limit):
"""Get mock recent tracks."""
return []
def get_top_tracks(self, limit):
"""Get mock top tracks."""
return []
def get_now_playing(self):
"""Get mock now playing."""
return self._now_playing_result
@pytest.fixture(name="lastfm_network")
def lastfm_network_fixture():
"""Create fixture for LastFMNetwork."""
with patch("pylast.LastFMNetwork") as lastfm_network:
yield lastfm_network
async def test_update_not_playing(hass, lastfm_network):
"""Test update when no playing song."""
lastfm_network.return_value.get_user.return_value = MockUser(None)
assert await async_setup_component(
hass,
sensor.DOMAIN,
{"sensor": {"platform": "lastfm", "api_key": "secret-key", "users": ["test"]}},
)
await hass.async_block_till_done()
entity_id = "sensor.test"
state = hass.states.get(entity_id)
assert state.state == STATE_NOT_SCROBBLING
async def test_update_playing(hass, lastfm_network):
"""Test update when song playing."""
lastfm_network.return_value.get_user.return_value = MockUser(
Track("artist", "title", None)
)
assert await async_setup_component(
hass,
sensor.DOMAIN,
{"sensor": {"platform": "lastfm", "api_key": "secret-key", "users": ["test"]}},
)
await hass.async_block_till_done()
entity_id = "sensor.test"
state = hass.states.get(entity_id)
assert state.state == "artist - title"
|
from rumps import *
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
def sayhello(sender):
print('hello {}'.format(sender))
def e(_):
print('EEEEEEE')
def adjust_f(sender):
if adjust_f.huh:
sender.add('$')
sender.add('%')
sender['zzz'] = 'zzz'
sender['separator'] = separator
sender['ppp'] = MenuItem('ppp')
else:
del sender['$']
del sender['%']
del sender['separator']
del sender['ppp']
adjust_f.huh = not adjust_f.huh
adjust_f.huh = True
def print_f(_):
print(f)
f = MenuItem('F', callback=adjust_f)
urlretrieve('http://upload.wikimedia.org/wikipedia/commons/thumb/c/'
'c4/Kiss_Logo.svg/200px-Kiss_Logo.svg.png', 'kiss.png')
app = App('lovegun', icon='kiss.png')
app.menu = [
MenuItem('A', callback=print_f, key='F'),
('B', ['1', 2, '3', [4, [5, (6, range(7, 14))]]]),
'C',
[MenuItem('D', callback=sayhello), (1, 11, 111)],
MenuItem('E', callback=e, key='e'),
f,
None,
{
'x': {'hello', 'hey'},
'y': ['what is up']
},
[1, [2]],
('update method', ['walking', 'back', 'to', 'you']),
'stuff',
None
]
@clicked('update method')
def dict_update(menu):
print(menu)
print(menu.setdefault('boo', MenuItem('boo',
callback=lambda _: add_separator(menu)))) # lambda gets THIS menu not submenu
def add_separator(menu):
menu.add(separator)
@clicked('C')
def change_main_menu(_):
print(app.menu)
print('goodbye C')
del app.menu['C'] # DELETE SELF!!!1
@clicked('stuff')
def stuff(sender):
print(sender)
if len(sender):
sender.insert_after('lets', 'go?')
sender['the'].insert_before('band', 'not')
sender['the'].insert_before('band', 'a')
else:
sender.update(['hey', ['ho', MenuItem('HOOOO')], 'lets', 'teenage'], the=['who', 'is', 'band'])
sender.add('waste land')
app.run()
|
import sqlite3
import sys
print_ = print
def print(*args, **kwargs):
    # Force output to stdout (unless overridden) and flush immediately so that
    # progress is visible; forward any other keyword arguments to the real print.
    kwargs.setdefault("file", sys.stdout)
    print_(*args, **kwargs)
    sys.stdout.flush()
with open('first_dump.py', 'r', encoding='utf-8') as fh:
first = eval(fh.read())
with open('second_dump.py', 'r', encoding='utf-8') as fh:
second = eval(fh.read())
if len(first) != len(second):
print(" [!] Databases differ in size.")
for k in first:
if k not in second:
print(" Item", k, "not found in second database.")
for k in second:
if k not in first:
print(" Item", k, "not found in first database.")
conn = sqlite3.connect("cc_debug.sqlite3")
def get_from_db(value):
cursor = conn.cursor()
try:
cursor.execute("SELECT json_data FROM hashes WHERE hash = ?", (value,))
return cursor.fetchone()[0]
except Exception:
print(" [!] Cannot find", value, "in database.")
return None
if first == second:
print("==> Both files are identical.")
exit(0)
VAL_KEY = '_values_:' # yes, ends with a colon
for k in first:
fk, sk = first[k], second[k]
try:
first_values, second_values = fk[VAL_KEY], sk[VAL_KEY]
except KeyError:
print(" [!] Values not found for,", k)
continue
if first_values != second_values:
print(" -> Difference:", k)
for vk in first_values:
fv, sv = first_values[vk], second_values[vk]
if fv != sv:
print(" first :", fv, get_from_db(fv))
print(" second:", sv, get_from_db(sv))
|
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.components.websocket_api.decorators import (
async_response,
require_admin,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
async def async_setup(hass):
"""Enable the Entity Registry views."""
hass.components.websocket_api.async_register_command(websocket_list_entities)
hass.components.websocket_api.async_register_command(websocket_get_entity)
hass.components.websocket_api.async_register_command(websocket_update_entity)
hass.components.websocket_api.async_register_command(websocket_remove_entity)
return True
@async_response
@websocket_api.websocket_command({vol.Required("type"): "config/entity_registry/list"})
async def websocket_list_entities(hass, connection, msg):
"""Handle list registry entries command.
Async friendly.
"""
registry = await async_get_registry(hass)
connection.send_message(
websocket_api.result_message(
msg["id"], [_entry_dict(entry) for entry in registry.entities.values()]
)
)
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/get",
vol.Required("entity_id"): cv.entity_id,
}
)
async def websocket_get_entity(hass, connection, msg):
"""Handle get entity registry entry command.
Async friendly.
"""
registry = await async_get_registry(hass)
entry = registry.entities.get(msg["entity_id"])
if entry is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
connection.send_message(
websocket_api.result_message(msg["id"], _entry_ext_dict(entry))
)
@require_admin
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/update",
vol.Required("entity_id"): cv.entity_id,
# If passed in, we update value. Passing None will remove old value.
vol.Optional("name"): vol.Any(str, None),
vol.Optional("icon"): vol.Any(str, None),
vol.Optional("area_id"): vol.Any(str, None),
vol.Optional("new_entity_id"): str,
# We only allow setting disabled_by user via API.
vol.Optional("disabled_by"): vol.Any("user", None),
}
)
async def websocket_update_entity(hass, connection, msg):
"""Handle update entity websocket command.
Async friendly.
"""
registry = await async_get_registry(hass)
if msg["entity_id"] not in registry.entities:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
changes = {}
for key in ("name", "icon", "area_id", "disabled_by"):
if key in msg:
changes[key] = msg[key]
if "new_entity_id" in msg and msg["new_entity_id"] != msg["entity_id"]:
changes["new_entity_id"] = msg["new_entity_id"]
if hass.states.get(msg["new_entity_id"]) is not None:
connection.send_message(
websocket_api.error_message(
msg["id"], "invalid_info", "Entity is already registered"
)
)
return
    # Default to the current entry so the response is valid when nothing changed.
    entry = registry.entities[msg["entity_id"]]
    try:
        if changes:
            entry = registry.async_update_entity(msg["entity_id"], **changes)
except ValueError as err:
connection.send_message(
websocket_api.error_message(msg["id"], "invalid_info", str(err))
)
else:
connection.send_message(
websocket_api.result_message(msg["id"], _entry_ext_dict(entry))
)
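# Illustrative websocket message for the update command above (field names are
# taken from the schema; the entity id and values are placeholders):
#
# {
#     "id": 7,
#     "type": "config/entity_registry/update",
#     "entity_id": "light.kitchen",
#     "name": "Kitchen light",
#     "icon": null
# }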
@require_admin
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/remove",
vol.Required("entity_id"): cv.entity_id,
}
)
async def websocket_remove_entity(hass, connection, msg):
"""Handle remove entity websocket command.
Async friendly.
"""
registry = await async_get_registry(hass)
if msg["entity_id"] not in registry.entities:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
registry.async_remove(msg["entity_id"])
connection.send_message(websocket_api.result_message(msg["id"]))
@callback
def _entry_dict(entry):
"""Convert entry to API format."""
return {
"config_entry_id": entry.config_entry_id,
"device_id": entry.device_id,
"area_id": entry.area_id,
"disabled_by": entry.disabled_by,
"entity_id": entry.entity_id,
"name": entry.name,
"icon": entry.icon,
"platform": entry.platform,
}
@callback
def _entry_ext_dict(entry):
"""Convert entry to API format."""
data = _entry_dict(entry)
data["original_name"] = entry.original_name
data["original_icon"] = entry.original_icon
data["unique_id"] = entry.unique_id
data["capabilities"] = entry.capabilities
return data
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
import sys
import logging
from os.path import basename
from logilab.common.configuration import Configuration
from logilab.common.logging_ext import init_log, get_threshold
from logilab.common.deprecation import deprecated
class BadCommandUsage(Exception):
"""Raised when an unknown command is used or when a command is not
    correctly used (bad options, too many / missing arguments...).
Trigger display of command usage.
"""
class CommandError(Exception):
"""Raised when a command can't be processed and we want to display it and
exit, without traceback nor usage displayed.
"""
# command line access point ####################################################
class CommandLine(dict):
"""Usage:
>>> LDI = cli.CommandLine('ldi', doc='Logilab debian installer',
version=version, rcfile=RCFILE)
>>> LDI.register(MyCommandClass)
>>> LDI.register(MyOtherCommandClass)
>>> LDI.run(sys.argv[1:])
Arguments:
* `pgm`, the program name, default to `basename(sys.argv[0])`
* `doc`, a short description of the command line tool
* `copyright`, additional doc string that will be appended to the generated
doc
    * `version`, version number or string of the tool. If specified, global
      --version option will be available.
    * `rcfile`, path to a configuration file. If specified, global -C/--rc-file
      option will be available.
    * `logger`, logger to propagate to commands, default to
      `logging.getLogger(self.pgm)`
"""
def __init__(self, pgm=None, doc=None, copyright=None, version=None,
rcfile=None, logthreshold=logging.ERROR,
check_duplicated_command=True):
if pgm is None:
pgm = basename(sys.argv[0])
self.pgm = pgm
self.doc = doc
self.copyright = copyright
self.version = version
self.rcfile = rcfile
self.logger = None
self.logthreshold = logthreshold
self.check_duplicated_command = check_duplicated_command
def register(self, cls, force=False):
"""register the given :class:`Command` subclass"""
assert not self.check_duplicated_command or force or not cls.name in self, \
'a command %s is already defined' % cls.name
self[cls.name] = cls
return cls
def run(self, args):
"""main command line access point:
* init logging
* handle global options (-h/--help, --version, -C/--rc-file)
* check command
* run command
Terminate by :exc:`SystemExit`
"""
init_log(debug=True, # so that we use StreamHandler
logthreshold=self.logthreshold,
logformat='%(levelname)s: %(message)s')
try:
arg = args.pop(0)
except IndexError:
self.usage_and_exit(1)
if arg in ('-h', '--help'):
self.usage_and_exit(0)
        if self.version is not None and arg in ('--version',):
print(self.version)
sys.exit(0)
rcfile = self.rcfile
if rcfile is not None and arg in ('-C', '--rc-file'):
try:
rcfile = args.pop(0)
arg = args.pop(0)
except IndexError:
self.usage_and_exit(1)
try:
command = self.get_command(arg)
except KeyError:
print('ERROR: no %s command' % arg)
print()
self.usage_and_exit(1)
try:
sys.exit(command.main_run(args, rcfile))
except KeyboardInterrupt as exc:
print('Interrupted', end=' ')
if str(exc):
print(': %s' % exc, end=' ')
print()
sys.exit(4)
except BadCommandUsage as err:
print('ERROR:', err)
print()
print(command.help())
sys.exit(1)
def create_logger(self, handler, logthreshold=None):
logger = logging.Logger(self.pgm)
logger.handlers = [handler]
if logthreshold is None:
logthreshold = get_threshold(self.logthreshold)
logger.setLevel(logthreshold)
return logger
def get_command(self, cmd, logger=None):
if logger is None:
logger = self.logger
if logger is None:
logger = self.logger = logging.getLogger(self.pgm)
logger.setLevel(get_threshold(self.logthreshold))
return self[cmd](logger)
def usage(self):
"""display usage for the main program (i.e. when no command supplied)
and exit
"""
print('usage:', self.pgm, end=' ')
if self.rcfile:
print('[--rc-file=<configuration file>]', end=' ')
print('<command> [options] <command argument>...')
if self.doc:
print('\n%s' % self.doc)
print('''
Type "%(pgm)s <command> --help" for more information about a specific
command. Available commands are :\n''' % self.__dict__)
max_len = max([len(cmd) for cmd in self])
padding = ' ' * max_len
for cmdname, cmd in sorted(self.items()):
if not cmd.hidden:
print(' ', (cmdname + padding)[:max_len], cmd.short_description())
if self.rcfile:
print('''
Use --rc-file=<configuration file> / -C <configuration file> before the command
to specify a configuration file. Default to %s.
''' % self.rcfile)
print('''%(pgm)s -h/--help
display this usage information and exit''' % self.__dict__)
if self.version:
print('''%(pgm)s -v/--version
display version configuration and exit''' % self.__dict__)
if self.copyright:
print('\n', self.copyright)
def usage_and_exit(self, status):
self.usage()
sys.exit(status)
# base command classes #########################################################
class Command(Configuration):
"""Base class for command line commands.
Class attributes:
* `name`, the name of the command
* `min_args`, minimum number of arguments, None if unspecified
* `max_args`, maximum number of arguments, None if unspecified
* `arguments`, string describing arguments, used in command usage
* `hidden`, boolean flag telling if the command should be hidden, e.g. does
not appear in help's commands list
* `options`, options list, as allowed by :mod:configuration
"""
arguments = ''
name = ''
# hidden from help ?
hidden = False
# max/min args, None meaning unspecified
min_args = None
max_args = None
@classmethod
def description(cls):
        return cls.__doc__.replace('    ', '')
@classmethod
def short_description(cls):
return cls.description().split('.')[0]
def __init__(self, logger):
usage = '%%prog %s %s\n\n%s' % (self.name, self.arguments,
self.description())
Configuration.__init__(self, usage=usage)
self.logger = logger
def check_args(self, args):
"""check command's arguments are provided"""
if self.min_args is not None and len(args) < self.min_args:
raise BadCommandUsage('missing argument')
if self.max_args is not None and len(args) > self.max_args:
raise BadCommandUsage('too many arguments')
def main_run(self, args, rcfile=None):
"""Run the command and return status 0 if everything went fine.
If :exc:`CommandError` is raised by the underlying command, simply log
the error and return status 2.
Any other exceptions, including :exc:`BadCommandUsage` will be
propagated.
"""
if rcfile:
self.load_file_configuration(rcfile)
args = self.load_command_line_configuration(args)
try:
self.check_args(args)
self.run(args)
except CommandError as err:
self.logger.error(err)
return 2
return 0
def run(self, args):
"""run the command with its specific arguments"""
raise NotImplementedError()
class ListCommandsCommand(Command):
"""list available commands, useful for bash completion."""
name = 'listcommands'
arguments = '[command]'
hidden = True
def run(self, args):
"""run the command with its specific arguments"""
if args:
command = args.pop()
cmd = _COMMANDS[command]
for optname, optdict in cmd.options:
print('--help')
print('--' + optname)
else:
commands = sorted(_COMMANDS.keys())
for command in commands:
cmd = _COMMANDS[command]
if not cmd.hidden:
print(command)
# deprecated stuff #############################################################
_COMMANDS = CommandLine()
DEFAULT_COPYRIGHT = '''\
Copyright (c) 2004-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
http://www.logilab.fr/ -- mailto:[email protected]'''
@deprecated('use cls.register(cli)')
def register_commands(commands):
"""register existing commands"""
for command_klass in commands:
_COMMANDS.register(command_klass)
@deprecated('use args.pop(0)')
def main_run(args, doc=None, copyright=None, version=None):
"""command line tool: run command specified by argument list (without the
program name). Raise SystemExit with status 0 if everything went fine.
>>> main_run(sys.argv[1:])
"""
_COMMANDS.doc = doc
_COMMANDS.copyright = copyright
_COMMANDS.version = version
_COMMANDS.run(args)
@deprecated('use args.pop(0)')
def pop_arg(args_list, expected_size_after=None, msg="Missing argument"):
"""helper function to get and check command line arguments"""
try:
value = args_list.pop(0)
except IndexError:
raise BadCommandUsage(msg)
if expected_size_after is not None and len(args_list) > expected_size_after:
raise BadCommandUsage('too many arguments')
return value
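# A minimal usage sketch of the classes above. The `hello` command and the
# `mytool` program name are hypothetical; this block only runs when the module
# is executed directly.
if __name__ == '__main__':
    class HelloCommand(Command):
        """say hello."""
        name = 'hello'
        max_args = 0

        def run(self, args):
            print('hello')

    CLI = CommandLine('mytool', doc='demo command line tool', version='1.0')
    CLI.register(HelloCommand)
    CLI.run(sys.argv[1:])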
|
from decimal import Decimal
from importlib import reload
from pkg_resources import Distribution, DistributionNotFound
from pytest import mark
from cerberus import validator_factory, TypeDefinition, Validator
from cerberus.base import UnconcernedValidator
from cerberus.tests import assert_fail, assert_success
def test_pkgresources_version(monkeypatch):
def create_fake_distribution(name):
return Distribution(project_name="cerberus", version="1.2.3")
with monkeypatch.context() as m:
cerberus = __import__("cerberus")
m.setattr("pkg_resources.get_distribution", create_fake_distribution)
reload(cerberus)
assert cerberus.__version__ == "1.2.3"
def test_version_not_found(monkeypatch):
def raise_distribution_not_found(name):
raise DistributionNotFound("pkg_resources cannot get distribution")
with monkeypatch.context() as m:
cerberus = __import__("cerberus")
m.setattr("pkg_resources.get_distribution", raise_distribution_not_found)
reload(cerberus)
assert cerberus.__version__ == "unknown"
def test_clear_cache(validator):
assert len(validator._valid_schemas) > 0
validator.clear_caches()
assert len(validator._valid_schemas) == 0
def test_docstring(validator):
assert validator.__doc__
# Test that testing with the sample schema works as expected
# as there might be rules with side-effects in it
@mark.parametrize(
"test,document",
((assert_fail, {"an_integer": 60}), (assert_success, {"an_integer": 110})),
)
def test_that_test_fails(test, document):
try:
test(document)
except AssertionError:
pass
else:
raise AssertionError("test didn't fail")
def test_dynamic_types():
decimal_type = TypeDefinition("decimal", (Decimal,), ())
document = {"measurement": Decimal(0)}
schema = {"measurement": {"type": "decimal"}}
validator = Validator()
validator.types_mapping["decimal"] = decimal_type
assert_success(document, schema, validator)
class MyValidator(Validator):
types_mapping = Validator.types_mapping.copy()
types_mapping["decimal"] = decimal_type
validator = MyValidator()
assert_success(document, schema, validator)
def test_mro():
assert Validator.__mro__ == (
Validator,
UnconcernedValidator,
object,
), Validator.__mro__
def test_mixin_init():
class Mixin(object):
def __init__(self, *args, **kwargs):
kwargs['test'] = True
super().__init__(*args, **kwargs)
MyValidator = validator_factory("MyValidator", Mixin)
validator = MyValidator()
assert validator._config["test"]
def test_sub_init():
class MyValidator(Validator):
def __init__(self, *args, **kwargs):
kwargs['test'] = True
super().__init__(*args, **kwargs)
validator = MyValidator()
assert validator._config["test"]
|
from json import JSONDecodeError
from os import PathLike
from pathlib import Path
from typing import Callable, Dict, Optional, Union
from box.box import Box
from box.box_list import BoxList
from box.converters import msgpack_available, toml_available, yaml_available
from box.exceptions import BoxError
try:
from ruamel.yaml import YAMLError
except ImportError:
try:
from yaml import YAMLError # type: ignore
except ImportError:
YAMLError = False # type: ignore
try:
from toml import TomlDecodeError
except ImportError:
TomlDecodeError = False # type: ignore
try:
from msgpack import UnpackException # type: ignore
except ImportError:
UnpackException = False # type: ignore
__all__ = ["box_from_file"]
def _to_json(file, encoding, errors, **kwargs):
try:
return Box.from_json(filename=file, encoding=encoding, errors=errors, **kwargs)
except JSONDecodeError:
raise BoxError("File is not JSON as expected")
except BoxError:
return BoxList.from_json(filename=file, encoding=encoding, errors=errors, **kwargs)
def _to_csv(file, encoding, errors, **kwargs):
return BoxList.from_csv(filename=file, encoding=encoding, errors=errors, **kwargs)
def _to_yaml(file, encoding, errors, **kwargs):
if not yaml_available:
raise BoxError(
f'File "{file}" is yaml but no package is available to open it. Please install "ruamel.yaml" or "PyYAML"'
)
try:
return Box.from_yaml(filename=file, encoding=encoding, errors=errors, **kwargs)
except YAMLError:
raise BoxError("File is not YAML as expected")
except BoxError:
return BoxList.from_yaml(filename=file, encoding=encoding, errors=errors, **kwargs)
def _to_toml(file, encoding, errors, **kwargs):
if not toml_available:
raise BoxError(f'File "{file}" is toml but no package is available to open it. Please install "toml"')
try:
return Box.from_toml(filename=file, encoding=encoding, errors=errors, **kwargs)
except TomlDecodeError:
raise BoxError("File is not TOML as expected")
def _to_msgpack(file, _, __, **kwargs):
if not msgpack_available:
raise BoxError(f'File "{file}" is msgpack but no package is available to open it. Please install "msgpack"')
try:
return Box.from_msgpack(filename=file, **kwargs)
except (UnpackException, ValueError):
raise BoxError("File is not msgpack as expected")
except BoxError:
return BoxList.from_msgpack(filename=file, **kwargs)
converters = {
"json": _to_json,
"jsn": _to_json,
"yaml": _to_yaml,
"yml": _to_yaml,
"toml": _to_toml,
"tml": _to_toml,
"msgpack": _to_msgpack,
"pack": _to_msgpack,
"csv": _to_csv,
} # type: Dict[str, Callable]
def box_from_file(
    file: Union[str, PathLike], file_type: Optional[str] = None, encoding: str = "utf-8", errors: str = "strict", **kwargs
) -> Union[Box, BoxList]:
"""
Loads the provided file and tries to parse it into a Box or BoxList object as appropriate.
:param file: Location of file
:param encoding: File encoding
:param errors: How to handle encoding errors
    :param file_type: manually specify file type: json, yaml, toml, msgpack or csv
:return: Box or BoxList
"""
if not isinstance(file, Path):
file = Path(file)
if not file.exists():
raise BoxError(f'file "{file}" does not exist')
file_type = file_type or file.suffix
file_type = file_type.lower().lstrip(".")
if file_type.lower() in converters:
return converters[file_type.lower()](file, encoding, errors, **kwargs) # type: ignore
raise BoxError(f'"{file_type}" is an unknown type. Please use either csv, toml, msgpack, yaml or json')
|
from flask import g
from marshmallow import fields
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.users.schemas import UserNestedOutputSchema, UserInputSchema
def current_user_id():
return {
"id": g.current_user.id,
"email": g.current_user.email,
"username": g.current_user.username,
}
class ApiKeyInputSchema(LemurInputSchema):
name = fields.String(required=False)
user = fields.Nested(
UserInputSchema, missing=current_user_id, default=current_user_id
)
ttl = fields.Integer()
class ApiKeyRevokeSchema(LemurInputSchema):
id = fields.Integer(required=True)
name = fields.String()
user = fields.Nested(UserInputSchema, required=True)
revoked = fields.Boolean()
ttl = fields.Integer()
issued_at = fields.Integer(required=False)
class UserApiKeyInputSchema(LemurInputSchema):
name = fields.String(required=False)
ttl = fields.Integer()
class ApiKeyOutputSchema(LemurOutputSchema):
jwt = fields.String()
class ApiKeyDescribedOutputSchema(LemurOutputSchema):
id = fields.Integer()
name = fields.String()
user = fields.Nested(UserNestedOutputSchema)
ttl = fields.Integer()
issued_at = fields.Integer()
revoked = fields.Boolean()
api_key_input_schema = ApiKeyInputSchema()
api_key_revoke_schema = ApiKeyRevokeSchema()
api_key_output_schema = ApiKeyOutputSchema()
api_keys_output_schema = ApiKeyDescribedOutputSchema(many=True)
api_key_described_output_schema = ApiKeyDescribedOutputSchema()
user_api_key_input_schema = UserApiKeyInputSchema()
|
import pytest
from io import BytesIO
from vine import promise
from unittest.mock import Mock
from kombu.asynchronous import http
from kombu.asynchronous.http.base import BaseClient, normalize_header
from kombu.exceptions import HttpError
from t.mocks import PromiseMock
import t.skip
class test_Headers:
def test_normalize(self):
assert normalize_header('accept-encoding') == 'Accept-Encoding'
@pytest.mark.usefixtures('hub')
class test_Request:
def test_init(self):
x = http.Request('http://foo', method='POST')
assert x.url == 'http://foo'
assert x.method == 'POST'
x = http.Request('x', max_redirects=100)
assert x.max_redirects == 100
assert isinstance(x.headers, http.Headers)
h = http.Headers()
x = http.Request('x', headers=h)
assert x.headers is h
assert isinstance(x.on_ready, promise)
def test_then(self):
callback = PromiseMock(name='callback')
x = http.Request('http://foo')
x.then(callback)
x.on_ready(1)
callback.assert_called_with(1)
@pytest.mark.usefixtures('hub')
class test_Response:
def test_init(self):
req = http.Request('http://foo')
r = http.Response(req, 200)
assert r.status == 'OK'
assert r.effective_url == 'http://foo'
r.raise_for_error()
def test_raise_for_error(self):
req = http.Request('http://foo')
r = http.Response(req, 404)
assert r.status == 'Not Found'
assert r.error
with pytest.raises(HttpError):
r.raise_for_error()
def test_get_body(self):
req = http.Request('http://foo')
req.buffer = BytesIO()
req.buffer.write(b'hello')
rn = http.Response(req, 200, buffer=None)
assert rn.body is None
r = http.Response(req, 200, buffer=req.buffer)
assert r._body is None
assert r.body == b'hello'
assert r._body == b'hello'
assert r.body == b'hello'
class test_BaseClient:
@pytest.fixture(autouse=True)
def setup_hub(self, hub):
self.hub = hub
def test_init(self):
c = BaseClient(Mock(name='hub'))
assert c.hub
assert c._header_parser
def test_perform(self):
c = BaseClient(Mock(name='hub'))
c.add_request = Mock(name='add_request')
c.perform('http://foo')
c.add_request.assert_called()
assert isinstance(c.add_request.call_args[0][0], http.Request)
req = http.Request('http://bar')
c.perform(req)
c.add_request.assert_called_with(req)
def test_add_request(self):
c = BaseClient(Mock(name='hub'))
with pytest.raises(NotImplementedError):
c.add_request(Mock(name='request'))
def test_header_parser(self):
c = BaseClient(Mock(name='hub'))
parser = c._header_parser
headers = http.Headers()
c.on_header(headers, 'HTTP/1.1')
c.on_header(headers, 'x-foo-bar: 123')
c.on_header(headers, 'People: George Costanza')
assert headers._prev_key == 'People'
c.on_header(headers, ' Jerry Seinfeld')
c.on_header(headers, ' Elaine Benes')
c.on_header(headers, ' Cosmo Kramer')
assert not headers.complete
c.on_header(headers, '')
assert headers.complete
with pytest.raises(KeyError):
parser.throw(KeyError('foo'))
c.on_header(headers, '')
assert headers['X-Foo-Bar'] == '123'
assert (headers['People'] ==
'George Costanza Jerry Seinfeld Elaine Benes Cosmo Kramer')
def test_close(self):
BaseClient(Mock(name='hub')).close()
def test_as_context(self):
c = BaseClient(Mock(name='hub'))
c.close = Mock(name='close')
with c:
pass
c.close.assert_called_with()
@t.skip.if_pypy
class test_Client:
def test_get_client(self, hub):
pytest.importorskip('pycurl')
client = http.get_client()
assert client.hub is hub
client2 = http.get_client(hub)
assert client2 is client
assert client2.hub is hub
|
import mock
import pytest
from paasta_tools.list_kubernetes_service_instances import main
from paasta_tools.list_kubernetes_service_instances import parse_args
def test_parse_args():
with mock.patch(
"paasta_tools.list_kubernetes_service_instances.argparse.ArgumentParser",
autospec=True,
) as mock_parser:
assert parse_args() == mock_parser.return_value.parse_args()
@mock.patch("paasta_tools.list_kubernetes_service_instances.parse_args", autospec=True)
@mock.patch(
"paasta_tools.list_kubernetes_service_instances.get_services_for_cluster",
autospec=True,
return_value=[("service_1", "instance1"), ("service_2", "instance1")],
)
@mock.patch("builtins.print", autospec=True)
@pytest.mark.parametrize(
"sanitise,expected",
[
(False, "service_1.instance1\nservice_2.instance1"),
(True, "service--1-instance1\nservice--2-instance1"),
],
)
def test_main(mock_print, mock_get_services, mock_parse_args, sanitise, expected):
mock_parse_args.return_value = mock.Mock(sanitise=sanitise)
with pytest.raises(SystemExit) as e:
main()
assert e.value.code == 0
assert mock_get_services.call_args_list == [
mock.call(
cluster=mock_parse_args.return_value.cluster,
instance_type="kubernetes",
soa_dir=mock_parse_args.return_value.soa_dir,
)
]
assert mock_print.call_args_list == [mock.call(expected)]
|
import numpy as np
import matchzoo as mz
def print_deprecation_warning(instance):
name = instance.__class__.__name__
print(f"WARNING: {name} will be deprecated in MatchZoo v2.2. "
"Use `DataGenerator` with callbacks instead.")
class HistogramDataGenerator(mz.DataGenerator):
def __init__(
self,
data_pack: mz.DataPack,
embedding_matrix: np.ndarray,
bin_size: int = 30,
hist_mode: str = 'CH',
batch_size: int = 32,
shuffle: bool = True
):
super().__init__(
data_pack=data_pack,
batch_size=batch_size,
shuffle=shuffle,
callbacks=[
mz.data_generator.callbacks.Histogram(
embedding_matrix=embedding_matrix,
bin_size=bin_size,
hist_mode=hist_mode
)
]
)
print_deprecation_warning(self)
class HistogramPairDataGenerator(mz.DataGenerator):
def __init__(
self,
data_pack: mz.DataPack,
embedding_matrix: np.ndarray,
bin_size: int = 30,
hist_mode: str = 'CH',
num_dup: int = 1,
num_neg: int = 1,
batch_size: int = 32,
shuffle: bool = True
):
super().__init__(
data_pack=data_pack,
mode='pair',
num_dup=num_dup,
num_neg=num_neg,
batch_size=batch_size,
shuffle=shuffle,
callbacks=[
mz.data_generator.callbacks.Histogram(
embedding_matrix=embedding_matrix,
bin_size=bin_size,
hist_mode=hist_mode
)
]
)
print_deprecation_warning(self)
class DPoolDataGenerator(mz.DataGenerator):
def __init__(
self,
data_pack: mz.DataPack,
fixed_length_left: int,
fixed_length_right: int,
compress_ratio_left: float = 1,
compress_ratio_right: float = 1,
batch_size: int = 32,
shuffle: bool = True
):
super().__init__(
data_pack=data_pack,
shuffle=shuffle,
batch_size=batch_size,
callbacks=[
mz.data_generator.callbacks.DynamicPooling(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
compress_ratio_left=compress_ratio_left,
compress_ratio_right=compress_ratio_right
)
]
)
print_deprecation_warning(self)
class DPoolPairDataGenerator(mz.DataGenerator):
def __init__(
self,
data_pack: mz.DataPack,
fixed_length_left: int,
fixed_length_right: int,
compress_ratio_left: float = 1,
compress_ratio_right: float = 1,
num_dup: int = 1,
num_neg: int = 1,
batch_size: int = 32,
shuffle: bool = True
):
super().__init__(
data_pack=data_pack,
mode='pair',
num_dup=num_dup,
num_neg=num_neg,
batch_size=batch_size,
shuffle=shuffle,
callbacks=[
mz.data_generator.callbacks.DynamicPooling(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
compress_ratio_left=compress_ratio_left,
compress_ratio_right=compress_ratio_right
)
]
)
print_deprecation_warning(self)
class PairDataGenerator(mz.DataGenerator):
def __init__(
self,
data_pack: mz.DataPack,
num_dup: int = 1,
num_neg: int = 1,
batch_size: int = 32,
shuffle: bool = True
):
super().__init__(
data_pack=data_pack,
mode='pair',
num_dup=num_dup,
num_neg=num_neg,
batch_size=batch_size,
shuffle=shuffle,
)
print_deprecation_warning(self)
class DynamicDataGenerator(mz.DataGenerator):
def __init__(self, func, *args, **kwargs):
super().__init__(*args, **kwargs)
callback = mz.data_generator.callbacks.LambdaCallback(
on_batch_data_pack=func)
self.callbacks.append(callback)
print_deprecation_warning(self)
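# A minimal sketch (not part of the original module) of the replacement pattern
# the deprecation warnings above point to: construct `DataGenerator` directly
# with callbacks. Argument names here are illustrative.
def _example_new_style_generator(data_pack, embedding_matrix):
    """Rough equivalent of HistogramPairDataGenerator using callbacks directly."""
    return mz.DataGenerator(
        data_pack=data_pack,
        mode='pair',
        num_dup=1,
        num_neg=1,
        batch_size=32,
        shuffle=True,
        callbacks=[
            mz.data_generator.callbacks.Histogram(
                embedding_matrix=embedding_matrix,
                bin_size=30,
                hist_mode='CH'
            )
        ]
    )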
|
from datetime import timedelta
from os import path
import unittest
from homeassistant import config as hass_config
from homeassistant.components.filter.sensor import (
DOMAIN,
LowPassFilter,
OutlierFilter,
RangeFilter,
ThrottleFilter,
TimeSMAFilter,
TimeThrottleFilter,
)
from homeassistant.const import SERVICE_RELOAD
import homeassistant.core as ha
from homeassistant.setup import async_setup_component, setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
assert_setup_component,
get_test_home_assistant,
init_recorder_component,
)
class TestFilterSensor(unittest.TestCase):
"""Test the Data Filter sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.components.add("history")
raw_values = [20, 19, 18, 21, 22, 0]
self.values = []
timestamp = dt_util.utcnow()
for val in raw_values:
self.values.append(
ha.State("sensor.test_monitored", val, last_updated=timestamp)
)
timestamp += timedelta(minutes=1)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
def test_setup_fail(self):
"""Test if filter doesn't exist."""
config = {
"sensor": {
"platform": "filter",
"entity_id": "sensor.test_monitored",
"filters": [{"filter": "nonexisting"}],
}
}
with assert_setup_component(0):
assert setup_component(self.hass, "sensor", config)
self.hass.block_till_done()
def test_chain(self):
"""Test if filter chaining works."""
config = {
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
{"filter": "lowpass", "time_constant": 10, "precision": 2},
{"filter": "throttle", "window_size": 1},
],
}
}
with assert_setup_component(1, "sensor"):
assert setup_component(self.hass, "sensor", config)
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(config["sensor"]["entity_id"], value.state)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert "18.05" == state.state
def test_chain_history(self, missing=False):
"""Test if filter chaining works."""
self.init_recorder()
config = {
"history": {},
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
{"filter": "lowpass", "time_constant": 10, "precision": 2},
{"filter": "throttle", "window_size": 1},
],
},
}
t_0 = dt_util.utcnow() - timedelta(minutes=1)
t_1 = dt_util.utcnow() - timedelta(minutes=2)
t_2 = dt_util.utcnow() - timedelta(minutes=3)
t_3 = dt_util.utcnow() - timedelta(minutes=4)
if missing:
fake_states = {}
else:
fake_states = {
"sensor.test_monitored": [
ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
ha.State("sensor.test_monitored", "unknown", last_changed=t_1),
ha.State("sensor.test_monitored", 19.0, last_changed=t_2),
ha.State("sensor.test_monitored", 18.2, last_changed=t_3),
]
}
with patch(
"homeassistant.components.history.state_changes_during_period",
return_value=fake_states,
):
with patch(
"homeassistant.components.history.get_last_state_changes",
return_value=fake_states,
):
with assert_setup_component(1, "sensor"):
assert setup_component(self.hass, "sensor", config)
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(config["sensor"]["entity_id"], value.state)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
if missing:
assert "18.05" == state.state
else:
assert "17.05" == state.state
def test_chain_history_missing(self):
"""Test if filter chaining works when recorder is enabled but the source is not recorded."""
return self.test_chain_history(missing=True)
def test_history_time(self):
"""Test loading from history based on a time window."""
self.init_recorder()
config = {
"history": {},
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [{"filter": "time_throttle", "window_size": "00:01"}],
},
}
t_0 = dt_util.utcnow() - timedelta(minutes=1)
t_1 = dt_util.utcnow() - timedelta(minutes=2)
t_2 = dt_util.utcnow() - timedelta(minutes=3)
fake_states = {
"sensor.test_monitored": [
ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
ha.State("sensor.test_monitored", 19.0, last_changed=t_1),
ha.State("sensor.test_monitored", 18.2, last_changed=t_2),
]
}
with patch(
"homeassistant.components.history.state_changes_during_period",
return_value=fake_states,
):
with patch(
"homeassistant.components.history.get_last_state_changes",
return_value=fake_states,
):
with assert_setup_component(1, "sensor"):
assert setup_component(self.hass, "sensor", config)
self.hass.block_till_done()
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert "18.0" == state.state
def test_outlier(self):
"""Test if outlier filter works."""
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
for state in self.values:
filtered = filt.filter_state(state)
assert 21 == filtered.state
def test_outlier_step(self):
"""
Test step-change handling in outlier.
Test if outlier filter handles long-running step-changes correctly.
It should converge to no longer filter once just over half the
window_size is occupied by the new post step-change values.
"""
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=1.1)
self.values[-1].state = 22
for state in self.values:
filtered = filt.filter_state(state)
assert 22 == filtered.state
def test_initial_outlier(self):
"""Test issue #13363."""
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
out = ha.State("sensor.test_monitored", 4000)
for state in [out] + self.values:
filtered = filt.filter_state(state)
assert 21 == filtered.state
def test_unknown_state_outlier(self):
"""Test issue #32395."""
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
out = ha.State("sensor.test_monitored", "unknown")
for state in [out] + self.values + [out]:
try:
filtered = filt.filter_state(state)
except ValueError:
assert state.state == "unknown"
assert 21 == filtered.state
def test_precision_zero(self):
"""Test if precision of zero returns an integer."""
filt = LowPassFilter(window_size=10, precision=0, entity=None, time_constant=10)
for state in self.values:
filtered = filt.filter_state(state)
assert isinstance(filtered.state, int)
def test_lowpass(self):
"""Test if lowpass filter works."""
filt = LowPassFilter(window_size=10, precision=2, entity=None, time_constant=10)
out = ha.State("sensor.test_monitored", "unknown")
for state in [out] + self.values + [out]:
try:
filtered = filt.filter_state(state)
except ValueError:
assert state.state == "unknown"
assert 18.05 == filtered.state
def test_range(self):
"""Test if range filter works."""
lower = 10
upper = 20
filt = RangeFilter(
entity=None, precision=2, lower_bound=lower, upper_bound=upper
)
for unf_state in self.values:
unf = float(unf_state.state)
filtered = filt.filter_state(unf_state)
if unf < lower:
assert lower == filtered.state
elif unf > upper:
assert upper == filtered.state
else:
assert unf == filtered.state
def test_range_zero(self):
"""Test if range filter works with zeroes as bounds."""
lower = 0
upper = 0
filt = RangeFilter(
entity=None, precision=2, lower_bound=lower, upper_bound=upper
)
for unf_state in self.values:
unf = float(unf_state.state)
filtered = filt.filter_state(unf_state)
if unf < lower:
assert lower == filtered.state
elif unf > upper:
assert upper == filtered.state
else:
assert unf == filtered.state
def test_throttle(self):
"""Test if lowpass filter works."""
filt = ThrottleFilter(window_size=3, precision=2, entity=None)
filtered = []
for state in self.values:
new_state = filt.filter_state(state)
if not filt.skip_processing:
filtered.append(new_state)
assert [20, 21] == [f.state for f in filtered]
def test_time_throttle(self):
"""Test if lowpass filter works."""
filt = TimeThrottleFilter(
window_size=timedelta(minutes=2), precision=2, entity=None
)
filtered = []
for state in self.values:
new_state = filt.filter_state(state)
if not filt.skip_processing:
filtered.append(new_state)
assert [20, 18, 22] == [f.state for f in filtered]
def test_time_sma(self):
"""Test if time_sma filter works."""
filt = TimeSMAFilter(
window_size=timedelta(minutes=2), precision=2, entity=None, type="last"
)
for state in self.values:
filtered = filt.filter_state(state)
assert 21.5 == filtered.state
async def test_reload(hass):
"""Verify we can reload filter sensors."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
hass.states.async_set("sensor.test_monitored", 12345)
await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
{"filter": "lowpass", "time_constant": 10, "precision": 2},
{"filter": "throttle", "window_size": 1},
],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("sensor.test")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"filter/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("sensor.test") is None
assert hass.states.get("sensor.filtered_realistic_humidity")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
from datetime import date
from django.db import transaction
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_COMPONENT_UPDATE, EVENT_DAILY
from weblate.addons.forms import AutoAddonForm
from weblate.trans.tasks import auto_translate
class AutoTranslateAddon(BaseAddon):
events = (EVENT_COMPONENT_UPDATE, EVENT_DAILY)
name = "weblate.autotranslate.autotranslate"
verbose = _("Automatic translation")
description = _(
"Automatically translates strings using machine translation or "
"other components."
)
settings_form = AutoAddonForm
multiple = True
icon = "language.svg"
def component_update(self, component):
for translation in component.translation_set.iterator():
if translation.is_source:
continue
def callback(pk=translation.pk):
auto_translate.delay(None, pk, **self.instance.configuration)
transaction.on_commit(callback)
def daily(self, component):
        # Translate every component once a week to reduce load
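        # (Illustrative: a component with id 15 only auto-translates on weekday 1,
        # i.e. Tuesday, because 15 % 7 == 1 and Monday is weekday 0.)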
if component.id % 7 == date.today().weekday():
self.component_update(component)
|
import argparse
from functools import wraps
import logging
import os
import sys
from flask import current_app
try:
from flask_script import Manager
except ImportError:
Manager = None
from alembic import __version__ as __alembic_version__
from alembic.config import Config as AlembicConfig
from alembic import command
from alembic.util import CommandError
alembic_version = tuple([int(v) for v in __alembic_version__.split('.')[0:3]])
log = logging.getLogger(__name__)
class _MigrateConfig(object):
def __init__(self, migrate, db, **kwargs):
self.migrate = migrate
self.db = db
self.directory = migrate.directory
self.configure_args = kwargs
@property
def metadata(self):
"""
Backwards compatibility, in old releases app.extensions['migrate']
was set to db, and env.py accessed app.extensions['migrate'].metadata
"""
return self.db.metadata
class Config(AlembicConfig):
def get_template_directory(self):
package_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(package_dir, 'templates')
class Migrate(object):
def __init__(self, app=None, db=None, directory='migrations', **kwargs):
self.configure_callbacks = []
self.db = db
self.directory = str(directory)
self.alembic_ctx_kwargs = kwargs
if app is not None and db is not None:
self.init_app(app, db, directory)
def init_app(self, app, db=None, directory=None, **kwargs):
self.db = db or self.db
self.directory = str(directory or self.directory)
self.alembic_ctx_kwargs.update(kwargs)
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['migrate'] = _MigrateConfig(
self, self.db, **self.alembic_ctx_kwargs)
def configure(self, f):
self.configure_callbacks.append(f)
return f
def call_configure_callbacks(self, config):
for f in self.configure_callbacks:
config = f(config)
return config
def get_config(self, directory=None, x_arg=None, opts=None):
if directory is None:
directory = self.directory
directory = str(directory)
config = Config(os.path.join(directory, 'alembic.ini'))
config.set_main_option('script_location', directory)
if config.cmd_opts is None:
config.cmd_opts = argparse.Namespace()
for opt in opts or []:
setattr(config.cmd_opts, opt, True)
if not hasattr(config.cmd_opts, 'x'):
if x_arg is not None:
setattr(config.cmd_opts, 'x', [])
if isinstance(x_arg, list) or isinstance(x_arg, tuple):
for x in x_arg:
config.cmd_opts.x.append(x)
else:
config.cmd_opts.x.append(x_arg)
else:
setattr(config.cmd_opts, 'x', None)
return self.call_configure_callbacks(config)
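# Hedged usage sketch (names illustrative, not executed here): the extension is
# typically wired up as
#
#     db = SQLAlchemy(app)
#     migrate = Migrate(app, db)    # or later: migrate.init_app(app, db)
#
# which stores a _MigrateConfig instance under app.extensions['migrate'], as the
# commands defined below expect.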
def catch_errors(f):
@wraps(f)
def wrapped(*args, **kwargs):
try:
f(*args, **kwargs)
except (CommandError, RuntimeError) as exc:
log.error('Error: ' + str(exc))
sys.exit(1)
return wrapped
if Manager is not None:
MigrateCommand = Manager(usage='Perform database migrations')
else:
class FakeCommand(object):
def option(self, *args, **kwargs):
def decorator(f):
return f
return decorator
MigrateCommand = FakeCommand()
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('--multidb', dest='multidb', action='store_true',
default=False,
help=("Multiple databases migraton (default is "
"False)"))
@catch_errors
def init(directory=None, multidb=False):
"""Creates a new migration repository"""
if directory is None:
directory = current_app.extensions['migrate'].directory
config = Config()
config.set_main_option('script_location', directory)
config.config_file_name = os.path.join(directory, 'alembic.ini')
config = current_app.extensions['migrate'].\
migrate.call_configure_callbacks(config)
if multidb:
command.init(config, directory, 'flask-multidb')
else:
command.init(config, directory, 'flask')
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--version-path', dest='version_path', default=None,
help=('Specify specific path from config for version '
'file'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('--splice', dest='splice', action='store_true',
default=False,
help=('Allow a non-head revision as the "head" to '
'splice onto'))
@MigrateCommand.option('--head', dest='head', default='head',
help=('Specify head revision or <branchname>@head to '
'base new revision on'))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('--autogenerate', dest='autogenerate',
action='store_true', default=False,
help=('Populate revision script with candidate '
'migration operations, based on comparison of '
'database to model'))
@MigrateCommand.option('-m', '--message', dest='message', default=None,
help='Revision message')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def revision(directory=None, message=None, autogenerate=False, sql=False,
head='head', splice=False, branch_label=None, version_path=None,
rev_id=None):
"""Create a new revision file."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.revision(config, message, autogenerate=autogenerate, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--version-path', dest='version_path', default=None,
help=('Specify specific path from config for version '
'file'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('--splice', dest='splice', action='store_true',
default=False,
help=('Allow a non-head revision as the "head" to '
'splice onto'))
@MigrateCommand.option('--head', dest='head', default='head',
help=('Specify head revision or <branchname>@head to '
'base new revision on'))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
action='append', help=("Additional arguments consumed "
"by custom env.py scripts"))
@catch_errors
def migrate(directory=None, message=None, sql=False, head='head', splice=False,
branch_label=None, version_path=None, rev_id=None, x_arg=None):
"""Alias for 'revision --autogenerate'"""
config = current_app.extensions['migrate'].migrate.get_config(
directory, opts=['autogenerate'], x_arg=x_arg)
command.revision(config, message, autogenerate=True, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
@MigrateCommand.option('revision', nargs='?', default='head',
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def edit(directory=None, revision='current'):
"""Edit current revision."""
if alembic_version >= (0, 8, 0):
config = current_app.extensions['migrate'].migrate.get_config(
directory)
command.edit(config, revision)
else:
raise RuntimeError('Alembic 0.8.0 or greater is required')
@MigrateCommand.option('--rev-id', dest='rev_id', default=None,
help=('Specify a hardcoded revision id instead of '
'generating one'))
@MigrateCommand.option('--branch-label', dest='branch_label', default=None,
help=('Specify a branch label to apply to the new '
'revision'))
@MigrateCommand.option('-m', '--message', dest='message', default=None)
@MigrateCommand.option('revisions', nargs='+',
help='one or more revisions, or "heads" for all heads')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def merge(directory=None, revisions='', message=None, branch_label=None,
rev_id=None):
"""Merge two revisions together. Creates a new migration file"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.merge(config, revisions, message=message,
branch_label=branch_label, rev_id=rev_id)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', nargs='?', default='head',
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
action='append', help=("Additional arguments consumed "
"by custom env.py scripts"))
@catch_errors
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
"""Upgrade to a later version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
command.upgrade(config, revision, sql=sql, tag=tag)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', nargs='?', default="-1",
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@MigrateCommand.option('-x', '--x-arg', dest='x_arg', default=None,
action='append', help=("Additional arguments consumed "
"by custom env.py scripts"))
@catch_errors
def downgrade(directory=None, revision='-1', sql=False, tag=None, x_arg=None):
"""Revert to a previous version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
if sql and revision == '-1':
revision = 'head:-1'
command.downgrade(config, revision, sql=sql, tag=tag)
@MigrateCommand.option('revision', nargs='?', default="head",
help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def show(directory=None, revision='head'):
"""Show the revision denoted by the given symbol."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.show(config, revision)
@MigrateCommand.option('-i', '--indicate-current', dest='indicate_current',
action='store_true', default=False,
help=('Indicate current version (Alembic 0.9.9 or '
'greater is required)'))
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-r', '--rev-range', dest='rev_range', default=None,
help=('Specify a revision range; format is '
'[start]:[end]'))
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def history(directory=None, rev_range=None, verbose=False,
indicate_current=False):
"""List changeset scripts in chronological order."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 9, 9):
command.history(config, rev_range, verbose=verbose,
indicate_current=indicate_current)
else:
command.history(config, rev_range, verbose=verbose)
@MigrateCommand.option('--resolve-dependencies', dest='resolve_dependencies',
action='store_true', default=False,
help='Treat dependency versions as down revisions')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def heads(directory=None, verbose=False, resolve_dependencies=False):
"""Show current available heads in the script directory"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.heads(config, verbose=verbose,
resolve_dependencies=resolve_dependencies)
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def branches(directory=None, verbose=False):
"""Show current branch points"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.branches(config, verbose=verbose)
@MigrateCommand.option('--head-only', dest='head_only', action='store_true',
default=False,
help='Deprecated. Use --verbose for additional output')
@MigrateCommand.option('-v', '--verbose', dest='verbose', action='store_true',
default=False, help='Use more verbose output')
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def current(directory=None, verbose=False, head_only=False):
"""Display the current revision for each database."""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.current(config, verbose=verbose, head_only=head_only)
@MigrateCommand.option('--tag', dest='tag', default=None,
help=("Arbitrary 'tag' name - can be used by custom "
"env.py scripts"))
@MigrateCommand.option('--sql', dest='sql', action='store_true', default=False,
help=("Don't emit SQL to database - dump to standard "
"output instead"))
@MigrateCommand.option('revision', default=None, help="revision identifier")
@MigrateCommand.option('-d', '--directory', dest='directory', default=None,
help=("Migration script directory (default is "
"'migrations')"))
@catch_errors
def stamp(directory=None, revision='head', sql=False, tag=None):
"""'stamp' the revision table with the given revision; don't run any
migrations"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
command.stamp(config, revision, sql=sql, tag=tag)
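# Hedged usage note: with Flask-Script installed, the commands defined above are
# usually exposed through an application manager, e.g.
#
#     manager = Manager(app)
#     manager.add_command('db', MigrateCommand)
#
# after which `python manage.py db migrate` / `python manage.py db upgrade`
# dispatch to the decorated functions in this module.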
|
import asyncio
import logging
from hass_nabucasa import cloud_api
from hass_nabucasa.google_report_state import ErrorResponse
from homeassistant.components.google_assistant.helpers import AbstractConfig
from homeassistant.const import (
CLOUD_NEVER_EXPOSED_ENTITIES,
EVENT_HOMEASSISTANT_STARTED,
HTTP_OK,
)
from homeassistant.core import CoreState, split_entity_id
from homeassistant.helpers import entity_registry
from .const import (
CONF_ENTITY_CONFIG,
DEFAULT_DISABLE_2FA,
PREF_DISABLE_2FA,
PREF_SHOULD_EXPOSE,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
class CloudGoogleConfig(AbstractConfig):
"""HA Cloud Configuration for Google Assistant."""
def __init__(self, hass, config, cloud_user, prefs: CloudPreferences, cloud):
"""Initialize the Google config."""
super().__init__(hass)
self._config = config
self._user = cloud_user
self._prefs = prefs
self._cloud = cloud
self._cur_entity_prefs = self._prefs.google_entity_configs
self._cur_default_expose = self._prefs.google_default_expose
self._sync_entities_lock = asyncio.Lock()
self._sync_on_started = False
@property
def enabled(self):
"""Return if Google is enabled."""
return self._cloud.is_logged_in and self._prefs.google_enabled
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._prefs.google_secure_devices_pin
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self._cloud.is_logged_in and self._prefs.google_report_state
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook.
Return None to disable the local SDK.
"""
return self._prefs.google_local_webhook_id
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
return self._user
@property
def cloud_user(self):
"""Return Cloud User account."""
return self._user
async def async_initialize(self):
"""Perform async initialization of config."""
await super().async_initialize()
# Remove bad data that was there until 0.103.6 - Jan 6, 2020
self._store.pop_agent_user_id(self._user)
self._prefs.async_listen_updates(self._async_prefs_updated)
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
def should_expose(self, state):
"""If a state object should be exposed."""
return self._should_expose_entity_id(state.entity_id)
def _should_expose_entity_id(self, entity_id):
"""If an entity ID should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config["filter"].empty_filter:
return self._config["filter"](entity_id)
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
default_expose = self._prefs.google_default_expose
# Backwards compat
if default_expose is None:
return True
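        # (Illustrative: with default_expose == ["light", "switch"], an entity id
        # such as "light.kitchen" is exposed because its domain is in the list.)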
return split_entity_id(entity_id)[0] in default_expose
@property
def agent_user_id(self):
"""Return Agent User Id to use for query responses."""
return self._cloud.username
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return self.agent_user_id
def should_2fa(self, state):
"""If an entity should be checked for 2FA."""
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(state.entity_id, {})
return not entity_config.get(PREF_DISABLE_2FA, DEFAULT_DISABLE_2FA)
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
try:
await self._cloud.google_report_state.async_send_message(message)
except ErrorResponse as err:
_LOGGER.warning("Error reporting state - %s: %s", err.code, err.message)
async def _async_request_sync_devices(self, agent_user_id: str):
"""Trigger a sync with Google."""
if self._sync_entities_lock.locked():
return HTTP_OK
async with self._sync_entities_lock:
resp = await cloud_api.async_google_actions_request_sync(self._cloud)
return resp.status
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if self.should_report_state != self.is_reporting_state:
if self.should_report_state:
self.async_enable_report_state()
else:
self.async_disable_report_state()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities_all()
# If entity prefs are the same or we have filter in config.yaml,
# don't sync.
elif (
self._cur_entity_prefs is not prefs.google_entity_configs
or self._cur_default_expose is not prefs.google_default_expose
) and self._config["filter"].empty_filter:
self.async_schedule_google_sync_all()
if self.enabled and not self.is_local_sdk_active:
self.async_enable_local_sdk()
elif not self.enabled and self.is_local_sdk_active:
self.async_disable_local_sdk()
self._cur_entity_prefs = prefs.google_entity_configs
self._cur_default_expose = prefs.google_default_expose
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
# Only consider entity registry updates if info relevant for Google has changed
if event.data["action"] == "update" and not bool(
set(event.data["changes"]) & entity_registry.ENTITY_DESCRIBING_ATTRIBUTES
):
return
entity_id = event.data["entity_id"]
if not self._should_expose_entity_id(entity_id):
return
if self.hass.state == CoreState.running:
self.async_schedule_google_sync_all()
return
if self._sync_on_started:
return
self._sync_on_started = True
async def sync_google(_):
"""Sync entities to Google."""
await self.async_sync_entities_all()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, sync_google)
|
import argparse
import asyncio
import collections
from contextlib import suppress
from datetime import datetime
import json
import logging
from timeit import default_timer as timer
from typing import Callable, Dict, TypeVar
from homeassistant import core
from homeassistant.components.websocket_api.const import JSON_DUMP
from homeassistant.const import ATTR_NOW, EVENT_STATE_CHANGED, EVENT_TIME_CHANGED
from homeassistant.helpers.entityfilter import convert_include_exclude_filter
from homeassistant.helpers.json import JSONEncoder
from homeassistant.util import dt as dt_util
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
# mypy: no-warn-return-any
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable) # pylint: disable=invalid-name
BENCHMARKS: Dict[str, Callable] = {}
def run(args):
"""Handle benchmark commandline script."""
# Disable logging
logging.getLogger("homeassistant.core").setLevel(logging.CRITICAL)
parser = argparse.ArgumentParser(description=("Run a Home Assistant benchmark."))
parser.add_argument("name", choices=BENCHMARKS)
parser.add_argument("--script", choices=["benchmark"])
args = parser.parse_args()
bench = BENCHMARKS[args.name]
print("Using event loop:", asyncio.get_event_loop_policy().loop_name)
with suppress(KeyboardInterrupt):
while True:
asyncio.run(run_benchmark(bench))
async def run_benchmark(bench):
"""Run a benchmark."""
hass = core.HomeAssistant()
runtime = await bench(hass)
print(f"Benchmark {bench.__name__} done in {runtime}s")
await hass.async_stop()
def benchmark(func: CALLABLE_T) -> CALLABLE_T:
"""Decorate to mark a benchmark."""
BENCHMARKS[func.__name__] = func
return func
@benchmark
async def fire_events(hass):
"""Fire a million events."""
count = 0
event_name = "benchmark_event"
event = asyncio.Event()
@core.callback
def listener(_):
"""Handle event."""
nonlocal count
count += 1
if count == 10 ** 6:
event.set()
hass.bus.async_listen(event_name, listener)
for _ in range(10 ** 6):
hass.bus.async_fire(event_name)
start = timer()
await event.wait()
return timer() - start
@benchmark
async def time_changed_helper(hass):
"""Run a million events through time changed helper."""
count = 0
event = asyncio.Event()
@core.callback
def listener(_):
"""Handle event."""
nonlocal count
count += 1
if count == 10 ** 6:
event.set()
hass.helpers.event.async_track_time_change(listener, minute=0, second=0)
event_data = {ATTR_NOW: datetime(2017, 10, 10, 15, 0, 0, tzinfo=dt_util.UTC)}
for _ in range(10 ** 6):
hass.bus.async_fire(EVENT_TIME_CHANGED, event_data)
start = timer()
await event.wait()
return timer() - start
@benchmark
async def state_changed_helper(hass):
"""Run a million events through state changed helper with 1000 entities."""
count = 0
entity_id = "light.kitchen"
event = asyncio.Event()
@core.callback
def listener(*args):
"""Handle event."""
nonlocal count
count += 1
if count == 10 ** 6:
event.set()
for idx in range(1000):
hass.helpers.event.async_track_state_change(
f"{entity_id}{idx}", listener, "off", "on"
)
event_data = {
"entity_id": f"{entity_id}0",
"old_state": core.State(entity_id, "off"),
"new_state": core.State(entity_id, "on"),
}
for _ in range(10 ** 6):
hass.bus.async_fire(EVENT_STATE_CHANGED, event_data)
start = timer()
await event.wait()
return timer() - start
@benchmark
async def state_changed_event_helper(hass):
"""Run a million events through state changed event helper with 1000 entities."""
count = 0
entity_id = "light.kitchen"
event = asyncio.Event()
@core.callback
def listener(*args):
"""Handle event."""
nonlocal count
count += 1
if count == 10 ** 6:
event.set()
hass.helpers.event.async_track_state_change_event(
[f"{entity_id}{idx}" for idx in range(1000)], listener
)
event_data = {
"entity_id": f"{entity_id}0",
"old_state": core.State(entity_id, "off"),
"new_state": core.State(entity_id, "on"),
}
for _ in range(10 ** 6):
hass.bus.async_fire(EVENT_STATE_CHANGED, event_data)
start = timer()
await event.wait()
return timer() - start
@benchmark
async def logbook_filtering_state(hass):
"""Filter state changes."""
return await _logbook_filtering(hass, 1, 1)
@benchmark
async def logbook_filtering_attributes(hass):
"""Filter attribute changes."""
return await _logbook_filtering(hass, 1, 2)
async def _logbook_filtering(hass, last_changed, last_updated):
# pylint: disable=import-outside-toplevel
from homeassistant.components import logbook
entity_id = "test.entity"
old_state = {"entity_id": entity_id, "state": "off"}
new_state = {
"entity_id": entity_id,
"state": "on",
"last_updated": last_updated,
"last_changed": last_changed,
}
event = _create_state_changed_event_from_old_new(
entity_id, dt_util.utcnow(), old_state, new_state
)
entity_attr_cache = logbook.EntityAttributeCache(hass)
entities_filter = convert_include_exclude_filter(
logbook.INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA({})
)
def yield_events(event):
for _ in range(10 ** 5):
# pylint: disable=protected-access
if logbook._keep_event(hass, event, entities_filter):
yield event
start = timer()
list(logbook.humanify(hass, yield_events(event), entity_attr_cache, {}))
return timer() - start
@benchmark
async def filtering_entity_id(hass):
"""Run a 100k state changes through entity filter."""
config = {
"include": {
"domains": [
"automation",
"script",
"group",
"media_player",
"custom_component",
],
"entity_globs": [
"binary_sensor.*_contact",
"binary_sensor.*_occupancy",
"binary_sensor.*_detected",
"binary_sensor.*_active",
"input_*",
"device_tracker.*_phone",
"switch.*_light",
"binary_sensor.*_charging",
"binary_sensor.*_lock",
"binary_sensor.*_connected",
],
"entities": [
"test.entity_1",
"test.entity_2",
"binary_sensor.garage_door_open",
"test.entity_3",
"test.entity_4",
],
},
"exclude": {
"domains": ["input_number"],
"entity_globs": ["media_player.google_*", "group.all_*"],
"entities": [],
},
}
entity_ids = [
"automation.home_arrival",
"script.shut_off_house",
"binary_sensor.garage_door_open",
"binary_sensor.front_door_lock",
"binary_sensor.kitchen_motion_sensor_occupancy",
"switch.desk_lamp",
"light.dining_room",
"input_boolean.guest_staying_over",
"person.eleanor_fant",
"alert.issue_at_home",
"calendar.eleanor_fant_s_calendar",
"sun.sun",
]
entities_filter = convert_include_exclude_filter(config)
size = len(entity_ids)
start = timer()
for i in range(10 ** 5):
entities_filter(entity_ids[i % size])
return timer() - start
@benchmark
async def valid_entity_id(hass):
"""Run valid entity ID a million times."""
start = timer()
for _ in range(10 ** 6):
core.valid_entity_id("light.kitchen")
return timer() - start
@benchmark
async def json_serialize_states(hass):
"""Serialize million states with websocket default encoder."""
states = [
core.State("light.kitchen", "on", {"friendly_name": "Kitchen Lights"})
for _ in range(10 ** 6)
]
start = timer()
JSON_DUMP(states)
return timer() - start
def _create_state_changed_event_from_old_new(
entity_id, event_time_fired, old_state, new_state
):
"""Create a state changed event from a old and new state."""
attributes = {}
if new_state is not None:
attributes = new_state.get("attributes")
attributes_json = json.dumps(attributes, cls=JSONEncoder)
if attributes_json == "null":
attributes_json = "{}"
row = collections.namedtuple(
"Row",
[
"event_type"
"event_data"
"time_fired"
"context_id"
"context_user_id"
"state"
"entity_id"
"domain"
"attributes"
"state_id",
"old_state_id",
],
)
row.event_type = EVENT_STATE_CHANGED
row.event_data = "{}"
row.attributes = attributes_json
row.time_fired = event_time_fired
row.state = new_state and new_state.get("state")
row.entity_id = entity_id
row.domain = entity_id and core.split_entity_id(entity_id)[0]
row.context_id = None
row.context_user_id = None
row.old_state_id = old_state and 1
row.state_id = new_state and 1
# pylint: disable=import-outside-toplevel
from homeassistant.components import logbook
return logbook.LazyEventPartialState(row)
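# Hedged usage note: this module is normally invoked through the Home Assistant
# CLI, e.g. `hass --script benchmark fire_events`, which calls run() above with
# the remaining command-line arguments.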
|
from typing import Optional
from PyQt5.QtSql import QSqlQueryModel
from PyQt5.QtWidgets import QWidget
from qutebrowser.misc import sql
from qutebrowser.utils import debug, message, log
from qutebrowser.config import config
from qutebrowser.completion.models import util
class HistoryCategory(QSqlQueryModel):
"""A completion category that queries the SQL history store."""
def __init__(self, *,
delete_func: util.DeleteFuncType = None,
parent: QWidget = None) -> None:
"""Create a new History completion category."""
super().__init__(parent=parent)
self.name = "History"
self._query: Optional[sql.Query] = None
# advertise that this model filters by URL and title
self.columns_to_filter = [0, 1]
self.delete_func = delete_func
self._empty_prefix: Optional[str] = None
def _atime_expr(self):
"""If max_items is set, return an expression to limit the query."""
max_items = config.val.completion.web_history.max_items
# HistoryCategory should not be added to the completion in that case.
assert max_items != 0
if max_items < 0:
return ''
min_atime = sql.Query(' '.join([
'SELECT min(last_atime) FROM',
'(SELECT last_atime FROM CompletionHistory',
'ORDER BY last_atime DESC LIMIT :limit)',
])).run(limit=max_items).value()
if not min_atime:
# if there are no history items, min_atime may be '' (issue #2849)
return ''
return "AND last_atime >= {}".format(min_atime)
def set_pattern(self, pattern):
"""Set the pattern used to filter results.
Args:
pattern: string pattern to filter by.
"""
raw_pattern = pattern
if (self._empty_prefix is not None and raw_pattern.startswith(
self._empty_prefix)):
log.sql.debug('Skipping query on {} due to '
'prefix {} returning nothing.'
.format(raw_pattern, self._empty_prefix))
return
self._empty_prefix = None
# escape to treat a user input % or _ as a literal, not a wildcard
pattern = pattern.replace('%', '\\%')
pattern = pattern.replace('_', '\\_')
words = ['%{}%'.format(w) for w in pattern.split(' ')]
# build a where clause to match all of the words in any order
# given the search term "a b", the WHERE clause would be:
# (url LIKE '%a%' OR title LIKE '%a%') AND
# (url LIKE '%b%' OR title LIKE '%b%')
where_clause = ' AND '.join(
"(url LIKE :{val} escape '\\' OR title LIKE :{val} escape '\\')"
.format(val=i) for i in range(len(words)))
# replace ' in timestamp-format to avoid breaking the query
timestamp_format = config.val.completion.timestamp_format or ''
timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
.format(timestamp_format.replace("'", "`")))
try:
if (not self._query or
len(words) != len(self._query.bound_values())):
                # if the number of words changed, we need to generate a new
                # query; otherwise, we can reuse the prepared query for
                # performance
self._query = sql.Query(' '.join([
"SELECT url, title, {}".format(timefmt),
"FROM CompletionHistory",
                    # the incoming pattern will have literal % and _ escaped;
                    # we need to tell SQL to treat '\' as an escape character
'WHERE ({})'.format(where_clause),
self._atime_expr(),
"ORDER BY last_atime DESC",
]), forward_only=False)
with debug.log_time('sql', 'Running completion query'):
self._query.run(**{
str(i): w for i, w in enumerate(words)})
except sql.KnownError as e:
# Sometimes, the query we built up was invalid, for example,
            # due to a large number of words.
# Also catches failures in the DB we can't solve.
message.error("Error with SQL query: {}".format(e.text()))
return
self.setQuery(self._query.query)
if not self.rowCount() and not self.canFetchMore():
self._empty_prefix = raw_pattern
def removeRows(self, row, _count, _parent=None):
"""Override QAbstractItemModel::removeRows to re-run SQL query."""
# re-run query to reload updated table
assert self._query is not None
with debug.log_time('sql', 'Re-running completion query post-delete'):
self._query.run()
self.setQuery(self._query.query)
while self.rowCount() < row:
self.fetchMore()
return True
|
import logging
import hikvision.api
from hikvision.error import HikvisionError, MissingParamError
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
# This is the last working version, please test before updating
_LOGGING = logging.getLogger(__name__)
DEFAULT_NAME = "Hikvision Camera Motion Detection"
DEFAULT_PASSWORD = "12345"
DEFAULT_PORT = 80
DEFAULT_USERNAME = "admin"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hikvision camera."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
hikvision_cam = hikvision.api.CreateDevice(
host, port=port, username=username, password=password, is_https=False
)
except MissingParamError as param_err:
_LOGGING.error("Missing required param: %s", param_err)
return False
except HikvisionError as conn_err:
_LOGGING.error("Unable to connect: %s", conn_err)
return False
add_entities([HikvisionMotionSwitch(name, hikvision_cam)])
class HikvisionMotionSwitch(SwitchEntity):
"""Representation of a switch to toggle on/off motion detection."""
def __init__(self, name, hikvision_cam):
"""Initialize the switch."""
self._name = name
self._hikvision_cam = hikvision_cam
self._state = STATE_OFF
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def state(self):
"""Return the state of the device if any."""
return self._state
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGING.info("Turning on Motion Detection ")
self._hikvision_cam.enable_motion_detection()
def turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGING.info("Turning off Motion Detection ")
self._hikvision_cam.disable_motion_detection()
def update(self):
"""Update Motion Detection state."""
enabled = self._hikvision_cam.is_motion_detection_enabled()
_LOGGING.info("enabled: %s", enabled)
self._state = STATE_ON if enabled else STATE_OFF
|
import cherrypy
from cherrypy import expose, tools
class ExposeExamples(object):
@expose
def no_call(self):
return 'Mr E. R. Bradshaw'
@expose()
def call_empty(self):
return 'Mrs. B.J. Smegma'
@expose('call_alias')
def nesbitt(self):
return 'Mr Nesbitt'
@expose(['alias1', 'alias2'])
def andrews(self):
return 'Mr Ken Andrews'
@expose(alias='alias3')
def watson(self):
return 'Mr. and Mrs. Watson'
class ToolExamples(object):
@expose
# This is here to demonstrate that using the config decorator
# does not overwrite other config attributes added by the Tool
# decorator (in this case response_headers).
@cherrypy.config(**{'response.stream': True})
@tools.response_headers(headers=[('Content-Type', 'application/data')])
def blah(self):
yield b'blah'
|
import os
from openrazer_daemon.dbus_services import endpoint
# Keyboard layout IDs
# There are more than listed here, but others are not known yet.
layoutids = {"01": "en_US",
"02": "el_GR",
"03": "de_DE",
"04": "fr_FR",
"05": "ru_RU",
"06": "en_GB",
"07": "Nordic",
"0A": "tr_TR",
"0C": "ja_JP",
"10": "es_ES",
"11": "it_IT",
"12": "pt_PT",
"81": "en_US_mac"}
@endpoint('razer.device.misc', 'getDriverVersion', out_sig='s')
def version(self):
"""
    Get the device's driver version
    :return: Driver version string like 1.0.7
:rtype: str
"""
self.logger.debug("DBus call version")
# Caching
if 'driver_version' in self.method_args:
return self.method_args['driver_version']
driver_path = self.get_driver_path('version')
driver_version = '0.0.0'
if os.path.exists(driver_path):
# Check it exists, as people might not have reloaded driver
with open(driver_path, 'r') as driver_file:
driver_version = driver_file.read().strip()
self.method_args['driver_version'] = driver_version
return driver_version
@endpoint('razer.device.misc', 'getFirmware', out_sig='s')
def get_firmware(self):
"""
    Get the device's firmware version
    :return: Firmware version string like v1.1
:rtype: str
"""
self.logger.debug("DBus call get_firmware")
driver_path = self.get_driver_path('firmware_version')
with open(driver_path, 'r') as driver_file:
return driver_file.read().strip()
@endpoint('razer.device.misc', 'getDeviceName', out_sig='s')
def get_device_name(self):
"""
Get the device's descriptive string
:return: Device string like 'BlackWidow Ultimate 2013'
:rtype: str
"""
self.logger.debug("DBus call get_device_name")
driver_path = self.get_driver_path('device_type')
with open(driver_path, 'r') as driver_file:
return driver_file.read().strip()
@endpoint('razer.device.misc', 'getKeyboardLayout', out_sig='s')
def get_keyboard_layout(self):
"""
Get the device's keyboard layout
:return: String like 'en_US', 'de_DE', 'en_GB' or 'unknown'
:rtype: str
"""
self.logger.debug("DBus call get_keyboard_layout")
driver_path = self.get_driver_path('kbd_layout')
with open(driver_path, 'r') as driver_file:
try:
return layoutids[driver_file.read().strip()]
except KeyError:
return "unknown"
# Functions to define a hardware class
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_keyboard(self):
"""
Get the device's type
:return: 'keyboard'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'keyboard'
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_mouse(self):
"""
Get the device's type
    :return: 'mouse'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'mouse'
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_mousemat(self):
"""
Get the device's type
    :return: 'mousemat'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'mousemat'
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_core(self):
"""
Get the device's type
    :return: 'core'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'core'
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_keypad(self):
"""
Get the device's type
    :return: 'keypad'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'keypad'
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_headset(self):
"""
Get the device's type
    :return: 'headset'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'headset'
@endpoint('razer.device.misc', 'getDeviceType', out_sig='s')
def get_device_type_accessory(self):
"""
Get the device's type
    :return: 'accessory'
:rtype: str
"""
self.logger.debug("DBus call get_device_type")
return 'accessory'
@endpoint('razer.device.misc', 'hasMatrix', out_sig='b')
def has_matrix(self):
"""
If the device has an LED matrix
"""
self.logger.debug("DBus call has_matrix")
return self.HAS_MATRIX
@endpoint('razer.device.misc', 'getMatrixDimensions', out_sig='ai')
def get_matrix_dims(self):
"""
    Get the dimensions of the device's LED matrix
    """
    self.logger.debug("DBus call get_matrix_dims")
return list(self.MATRIX_DIMS)
|
import logging
from greeneye import Monitors
import voluptuous as vol
from homeassistant.const import (
CONF_NAME,
CONF_PORT,
CONF_TEMPERATURE_UNIT,
EVENT_HOMEASSISTANT_STOP,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)
CONF_CHANNELS = "channels"
CONF_COUNTED_QUANTITY = "counted_quantity"
CONF_COUNTED_QUANTITY_PER_PULSE = "counted_quantity_per_pulse"
CONF_MONITOR_SERIAL_NUMBER = "monitor"
CONF_MONITORS = "monitors"
CONF_NET_METERING = "net_metering"
CONF_NUMBER = "number"
CONF_PULSE_COUNTERS = "pulse_counters"
CONF_SERIAL_NUMBER = "serial_number"
CONF_SENSORS = "sensors"
CONF_SENSOR_TYPE = "sensor_type"
CONF_TEMPERATURE_SENSORS = "temperature_sensors"
CONF_TIME_UNIT = "time_unit"
CONF_VOLTAGE_SENSORS = "voltage"
DATA_GREENEYE_MONITOR = "greeneye_monitor"
DOMAIN = "greeneye_monitor"
SENSOR_TYPE_CURRENT = "current_sensor"
SENSOR_TYPE_PULSE_COUNTER = "pulse_counter"
SENSOR_TYPE_TEMPERATURE = "temperature_sensor"
SENSOR_TYPE_VOLTAGE = "voltage_sensor"
TEMPERATURE_UNIT_CELSIUS = "C"
TEMPERATURE_SENSOR_SCHEMA = vol.Schema(
{vol.Required(CONF_NUMBER): vol.Range(1, 8), vol.Required(CONF_NAME): cv.string}
)
TEMPERATURE_SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_TEMPERATURE_UNIT): cv.temperature_unit,
vol.Required(CONF_SENSORS): vol.All(
cv.ensure_list, [TEMPERATURE_SENSOR_SCHEMA]
),
}
)
VOLTAGE_SENSOR_SCHEMA = vol.Schema(
{vol.Required(CONF_NUMBER): vol.Range(1, 48), vol.Required(CONF_NAME): cv.string}
)
VOLTAGE_SENSORS_SCHEMA = vol.All(cv.ensure_list, [VOLTAGE_SENSOR_SCHEMA])
PULSE_COUNTER_SCHEMA = vol.Schema(
{
vol.Required(CONF_NUMBER): vol.Range(1, 4),
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_COUNTED_QUANTITY): cv.string,
vol.Optional(CONF_COUNTED_QUANTITY_PER_PULSE, default=1.0): vol.Coerce(float),
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.Any(
TIME_SECONDS, TIME_MINUTES, TIME_HOURS
),
}
)
PULSE_COUNTERS_SCHEMA = vol.All(cv.ensure_list, [PULSE_COUNTER_SCHEMA])
CHANNEL_SCHEMA = vol.Schema(
{
vol.Required(CONF_NUMBER): vol.Range(1, 48),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_NET_METERING, default=False): cv.boolean,
}
)
CHANNELS_SCHEMA = vol.All(cv.ensure_list, [CHANNEL_SCHEMA])
MONITOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_SERIAL_NUMBER): vol.All(
cv.string,
vol.Length(
min=8,
max=8,
msg="GEM serial number must be specified as an 8-character "
"string (including leading zeroes).",
),
vol.Coerce(int),
),
vol.Optional(CONF_CHANNELS, default=[]): CHANNELS_SCHEMA,
vol.Optional(
CONF_TEMPERATURE_SENSORS,
default={CONF_TEMPERATURE_UNIT: TEMPERATURE_UNIT_CELSIUS, CONF_SENSORS: []},
): TEMPERATURE_SENSORS_SCHEMA,
vol.Optional(CONF_PULSE_COUNTERS, default=[]): PULSE_COUNTERS_SCHEMA,
vol.Optional(CONF_VOLTAGE_SENSORS, default=[]): VOLTAGE_SENSORS_SCHEMA,
}
)
MONITORS_SCHEMA = vol.All(cv.ensure_list, [MONITOR_SCHEMA])
COMPONENT_SCHEMA = vol.Schema(
{vol.Required(CONF_PORT): cv.port, vol.Required(CONF_MONITORS): MONITORS_SCHEMA}
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: COMPONENT_SCHEMA}, extra=vol.ALLOW_EXTRA)
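# Illustrative only (not part of the original module): a minimal configuration
# dict that CONFIG_SCHEMA above accepts. The port, serial number and channel
# name are made-up values; the serial number must be an 8-character string.
#
# _EXAMPLE_CONFIG = CONFIG_SCHEMA(
#     {
#         DOMAIN: {
#             CONF_PORT: 8000,
#             CONF_MONITORS: [
#                 {
#                     CONF_SERIAL_NUMBER: "00012345",
#                     CONF_CHANNELS: [{CONF_NUMBER: 1, CONF_NAME: "Main Panel"}],
#                 }
#             ],
#         }
#     }
# )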
async def async_setup(hass, config):
"""Set up the GreenEye Monitor component."""
monitors = Monitors()
hass.data[DATA_GREENEYE_MONITOR] = monitors
server_config = config[DOMAIN]
server = await monitors.start_server(server_config[CONF_PORT])
async def close_server(*args):
"""Close the monitoring server."""
await server.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_server)
all_sensors = []
for monitor_config in server_config[CONF_MONITORS]:
monitor_serial_number = {
CONF_MONITOR_SERIAL_NUMBER: monitor_config[CONF_SERIAL_NUMBER]
}
channel_configs = monitor_config[CONF_CHANNELS]
for channel_config in channel_configs:
all_sensors.append(
{
CONF_SENSOR_TYPE: SENSOR_TYPE_CURRENT,
**monitor_serial_number,
**channel_config,
}
)
voltage_configs = monitor_config[CONF_VOLTAGE_SENSORS]
for voltage_config in voltage_configs:
all_sensors.append(
{
CONF_SENSOR_TYPE: SENSOR_TYPE_VOLTAGE,
**monitor_serial_number,
**voltage_config,
}
)
sensor_configs = monitor_config[CONF_TEMPERATURE_SENSORS]
if sensor_configs:
temperature_unit = {
CONF_TEMPERATURE_UNIT: sensor_configs[CONF_TEMPERATURE_UNIT]
}
for sensor_config in sensor_configs[CONF_SENSORS]:
all_sensors.append(
{
CONF_SENSOR_TYPE: SENSOR_TYPE_TEMPERATURE,
**monitor_serial_number,
**temperature_unit,
**sensor_config,
}
)
counter_configs = monitor_config[CONF_PULSE_COUNTERS]
for counter_config in counter_configs:
all_sensors.append(
{
CONF_SENSOR_TYPE: SENSOR_TYPE_PULSE_COUNTER,
**monitor_serial_number,
**counter_config,
}
)
if not all_sensors:
_LOGGER.error(
"Configuration must specify at least one "
"channel, voltage, pulse counter or temperature sensor"
)
return False
hass.async_create_task(
async_load_platform(hass, "sensor", DOMAIN, all_sensors, config)
)
return True
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.lock import DOMAIN
from homeassistant.const import CONF_PLATFORM
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
async def test_get_actions_support_open(hass, device_reg, entity_reg):
"""Test we get the expected actions from a lock which supports open."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["support_open"].unique_id,
device_id=device_entry.id,
)
expected_actions = [
{
"domain": DOMAIN,
"type": "lock",
"device_id": device_entry.id,
"entity_id": "lock.support_open_lock",
},
{
"domain": DOMAIN,
"type": "unlock",
"device_id": device_entry.id,
"entity_id": "lock.support_open_lock",
},
{
"domain": DOMAIN,
"type": "open",
"device_id": device_entry.id,
"entity_id": "lock.support_open_lock",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_get_actions_not_support_open(hass, device_reg, entity_reg):
"""Test we get the expected actions from a lock which doesn't support open."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["no_support_open"].unique_id,
device_id=device_entry.id,
)
expected_actions = [
{
"domain": DOMAIN,
"type": "lock",
"device_id": device_entry.id,
"entity_id": "lock.no_support_open_lock",
},
{
"domain": DOMAIN,
"type": "unlock",
"device_id": device_entry.id,
"entity_id": "lock.no_support_open_lock",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_action(hass):
"""Test for lock actions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event_lock"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "lock.entity",
"type": "lock",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_unlock"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "lock.entity",
"type": "unlock",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_open"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "lock.entity",
"type": "open",
},
},
]
},
)
await hass.async_block_till_done()
lock_calls = async_mock_service(hass, "lock", "lock")
unlock_calls = async_mock_service(hass, "lock", "unlock")
open_calls = async_mock_service(hass, "lock", "open")
hass.bus.async_fire("test_event_lock")
await hass.async_block_till_done()
assert len(lock_calls) == 1
assert len(unlock_calls) == 0
assert len(open_calls) == 0
hass.bus.async_fire("test_event_unlock")
await hass.async_block_till_done()
assert len(lock_calls) == 1
assert len(unlock_calls) == 1
assert len(open_calls) == 0
hass.bus.async_fire("test_event_open")
await hass.async_block_till_done()
assert len(lock_calls) == 1
assert len(unlock_calls) == 1
assert len(open_calls) == 1
|
import copy
import datetime
import logging
import re
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
from homeassistant.const import (
CONF_ARMING_TIME,
CONF_CODE,
CONF_DELAY_TIME,
CONF_DISARM_AFTER_TRIGGER,
CONF_NAME,
CONF_PLATFORM,
CONF_TRIGGER_TIME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_time
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_CODE_TEMPLATE = "code_template"
CONF_CODE_ARM_REQUIRED = "code_arm_required"
DEFAULT_ALARM_NAME = "HA Alarm"
DEFAULT_DELAY_TIME = datetime.timedelta(seconds=60)
DEFAULT_ARMING_TIME = datetime.timedelta(seconds=60)
DEFAULT_TRIGGER_TIME = datetime.timedelta(seconds=120)
DEFAULT_DISARM_AFTER_TRIGGER = False
SUPPORTED_STATES = [
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_TRIGGERED,
]
SUPPORTED_PRETRIGGER_STATES = [
state for state in SUPPORTED_STATES if state != STATE_ALARM_TRIGGERED
]
SUPPORTED_ARMING_STATES = [
state
for state in SUPPORTED_STATES
if state not in (STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED)
]
ATTR_PREVIOUS_STATE = "previous_state"
ATTR_NEXT_STATE = "next_state"
def _state_validator(config):
"""Validate the state."""
config = copy.deepcopy(config)
for state in SUPPORTED_PRETRIGGER_STATES:
if CONF_DELAY_TIME not in config[state]:
config[state][CONF_DELAY_TIME] = config[CONF_DELAY_TIME]
if CONF_TRIGGER_TIME not in config[state]:
config[state][CONF_TRIGGER_TIME] = config[CONF_TRIGGER_TIME]
for state in SUPPORTED_ARMING_STATES:
if CONF_ARMING_TIME not in config[state]:
config[state][CONF_ARMING_TIME] = config[CONF_ARMING_TIME]
return config
def _state_schema(state):
"""Validate the state."""
schema = {}
if state in SUPPORTED_PRETRIGGER_STATES:
schema[vol.Optional(CONF_DELAY_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta
)
schema[vol.Optional(CONF_TRIGGER_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta
)
if state in SUPPORTED_ARMING_STATES:
schema[vol.Optional(CONF_ARMING_TIME)] = vol.All(
cv.time_period, cv.positive_timedelta
)
return vol.Schema(schema)
PLATFORM_SCHEMA = vol.Schema(
vol.All(
{
vol.Required(CONF_PLATFORM): "manual",
vol.Optional(CONF_NAME, default=DEFAULT_ALARM_NAME): cv.string,
vol.Exclusive(CONF_CODE, "code validation"): cv.string,
vol.Exclusive(CONF_CODE_TEMPLATE, "code validation"): cv.template,
vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
vol.Optional(CONF_DELAY_TIME, default=DEFAULT_DELAY_TIME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_ARMING_TIME, default=DEFAULT_ARMING_TIME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_TRIGGER_TIME, default=DEFAULT_TRIGGER_TIME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(
CONF_DISARM_AFTER_TRIGGER, default=DEFAULT_DISARM_AFTER_TRIGGER
): cv.boolean,
vol.Optional(STATE_ALARM_ARMED_AWAY, default={}): _state_schema(
STATE_ALARM_ARMED_AWAY
),
vol.Optional(STATE_ALARM_ARMED_HOME, default={}): _state_schema(
STATE_ALARM_ARMED_HOME
),
vol.Optional(STATE_ALARM_ARMED_NIGHT, default={}): _state_schema(
STATE_ALARM_ARMED_NIGHT
),
vol.Optional(STATE_ALARM_ARMED_CUSTOM_BYPASS, default={}): _state_schema(
STATE_ALARM_ARMED_CUSTOM_BYPASS
),
vol.Optional(STATE_ALARM_DISARMED, default={}): _state_schema(
STATE_ALARM_DISARMED
),
vol.Optional(STATE_ALARM_TRIGGERED, default={}): _state_schema(
STATE_ALARM_TRIGGERED
),
},
_state_validator,
)
)
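# Illustrative only (not part of the original module): a minimal platform
# config dict that PLATFORM_SCHEMA above accepts; the name, code and time
# values are made up for the example.
#
# _EXAMPLE_PLATFORM_CONFIG = PLATFORM_SCHEMA(
#     {
#         CONF_PLATFORM: "manual",
#         CONF_NAME: "Home Alarm",
#         CONF_CODE: "1234",
#         CONF_ARMING_TIME: 30,
#         CONF_TRIGGER_TIME: 300,
#     }
# )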
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the manual alarm platform."""
add_entities(
[
ManualAlarm(
hass,
config[CONF_NAME],
config.get(CONF_CODE),
config.get(CONF_CODE_TEMPLATE),
config.get(CONF_CODE_ARM_REQUIRED),
config.get(CONF_DISARM_AFTER_TRIGGER, DEFAULT_DISARM_AFTER_TRIGGER),
config,
)
]
)
class ManualAlarm(alarm.AlarmControlPanelEntity, RestoreEntity):
"""
Representation of an alarm status.
When armed, will be arming for 'arming_time', after that armed.
When triggered, will be pending for the triggering state's 'delay_time'.
After that will be triggered for 'trigger_time', after that we return to
the previous state or disarm if `disarm_after_trigger` is true.
A trigger_time of zero disables the alarm_trigger service.
"""
def __init__(
self,
hass,
name,
code,
code_template,
code_arm_required,
disarm_after_trigger,
config,
):
"""Init the manual alarm panel."""
self._state = STATE_ALARM_DISARMED
self._hass = hass
self._name = name
if code_template:
self._code = code_template
self._code.hass = hass
else:
self._code = code or None
self._code_arm_required = code_arm_required
self._disarm_after_trigger = disarm_after_trigger
self._previous_state = self._state
self._state_ts = None
self._delay_time_by_state = {
state: config[state][CONF_DELAY_TIME]
for state in SUPPORTED_PRETRIGGER_STATES
}
self._trigger_time_by_state = {
state: config[state][CONF_TRIGGER_TIME]
for state in SUPPORTED_PRETRIGGER_STATES
}
self._arming_time_by_state = {
state: config[state][CONF_ARMING_TIME] for state in SUPPORTED_ARMING_STATES
}
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._state == STATE_ALARM_TRIGGERED:
if self._within_pending_time(self._state):
return STATE_ALARM_PENDING
trigger_time = self._trigger_time_by_state[self._previous_state]
if (
self._state_ts + self._pending_time(self._state) + trigger_time
) < dt_util.utcnow():
if self._disarm_after_trigger:
return STATE_ALARM_DISARMED
self._state = self._previous_state
return self._state
if self._state in SUPPORTED_ARMING_STATES and self._within_arming_time(
self._state
):
return STATE_ALARM_ARMING
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return (
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_TRIGGER
| SUPPORT_ALARM_ARM_CUSTOM_BYPASS
)
@property
def _active_state(self):
"""Get the current state."""
if self.state in (STATE_ALARM_PENDING, STATE_ALARM_ARMING):
return self._previous_state
return self._state
def _arming_time(self, state):
"""Get the arming time."""
return self._arming_time_by_state[state]
def _pending_time(self, state):
"""Get the pending time."""
return self._delay_time_by_state[self._previous_state]
def _within_arming_time(self, state):
"""Get if the action is in the arming time window."""
return self._state_ts + self._arming_time(state) > dt_util.utcnow()
def _within_pending_time(self, state):
"""Get if the action is in the pending time window."""
return self._state_ts + self._pending_time(state) > dt_util.utcnow()
@property
def code_format(self):
"""Return one or more digits/characters."""
if self._code is None:
return None
if isinstance(self._code, str) and re.search("^\\d+$", self._code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return self._code_arm_required
def alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, STATE_ALARM_DISARMED):
return
self._state = STATE_ALARM_DISARMED
self._state_ts = dt_util.utcnow()
self.schedule_update_ha_state()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_HOME
):
return
self._update_state(STATE_ALARM_ARMED_HOME)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_AWAY
):
return
self._update_state(STATE_ALARM_ARMED_AWAY)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_NIGHT
):
return
self._update_state(STATE_ALARM_ARMED_NIGHT)
def alarm_arm_custom_bypass(self, code=None):
"""Send arm custom bypass command."""
if self._code_arm_required and not self._validate_code(
code, STATE_ALARM_ARMED_CUSTOM_BYPASS
):
return
self._update_state(STATE_ALARM_ARMED_CUSTOM_BYPASS)
def alarm_trigger(self, code=None):
"""
Send alarm trigger command.
No code needed, a trigger time of zero for the current state
disables the alarm.
"""
if not self._trigger_time_by_state[self._active_state]:
return
self._update_state(STATE_ALARM_TRIGGERED)
def _update_state(self, state):
"""Update the state."""
if self._state == state:
return
self._previous_state = self._state
self._state = state
self._state_ts = dt_util.utcnow()
self.schedule_update_ha_state()
if state == STATE_ALARM_TRIGGERED:
pending_time = self._pending_time(state)
track_point_in_time(
self._hass, self.async_scheduled_update, self._state_ts + pending_time
)
trigger_time = self._trigger_time_by_state[self._previous_state]
track_point_in_time(
self._hass,
self.async_scheduled_update,
self._state_ts + pending_time + trigger_time,
)
elif state in SUPPORTED_ARMING_STATES:
arming_time = self._arming_time(state)
if arming_time:
track_point_in_time(
self._hass,
self.async_scheduled_update,
self._state_ts + arming_time,
)
def _validate_code(self, code, state):
"""Validate given code."""
if self._code is None:
return True
if isinstance(self._code, str):
alarm_code = self._code
else:
alarm_code = self._code.render(
parse_result=False, from_state=self._state, to_state=state
)
check = not alarm_code or code == alarm_code
if not check:
_LOGGER.warning("Invalid code given for %s", state)
return check
@property
def device_state_attributes(self):
"""Return the state attributes."""
        if self.state in (STATE_ALARM_PENDING, STATE_ALARM_ARMING):
return {
ATTR_PREVIOUS_STATE: self._previous_state,
ATTR_NEXT_STATE: self._state,
}
return {}
@callback
def async_scheduled_update(self, now):
"""Update state at a scheduled point in time."""
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
if (
(
state.state == STATE_ALARM_PENDING
or state.state == STATE_ALARM_ARMING
)
and hasattr(state, "attributes")
and state.attributes[ATTR_PREVIOUS_STATE]
):
# If in arming or pending state, we return to the ATTR_PREVIOUS_STATE
self._state = state.attributes[ATTR_PREVIOUS_STATE]
self._state_ts = dt_util.utcnow()
else:
self._state = state.state
self._state_ts = state.last_updated
|
import numpy as np
import pandas as pd
from scattertext.AsianNLP import chinese_nlp
from scattertext.CSRMatrixTools import CSRMatrixFactory
from scattertext.TermDocMatrix import TermDocMatrix
from scattertext.TermDocMatrixFactory import TermDocMatrixFactory
from scattertext.TermDocMatrixWithoutCategories import TermDocMatrixWithoutCategories
from scattertext.indexstore.IndexStore import IndexStore
class ParsePipelineFactoryWithoutCategories(object):
def __init__(self,
nlp,
X_factory,
mX_factory,
term_idx_store,
metadata_idx_store,
term_doc_mat_fact):
if nlp == chinese_nlp:
raise Exception(
"Chinese NLP not yet supported. Preparse chinese documents, and use CorpusFromParsedDocuments or a similar class.")
self.X_factory, self.mX_factory, self.term_idx_store, self.metadata_idx_store, self.nlp \
= X_factory, mX_factory, term_idx_store, metadata_idx_store, nlp
self._term_doc_mat_fact = term_doc_mat_fact
self._text_col = self._term_doc_mat_fact._text_col
self._clean_function = self._term_doc_mat_fact._clean_function
self._verbose = self._term_doc_mat_fact._verbose
def parse(self, row):
cleaned_text = self._clean_function(self._get_raw_text_from_row(row))
parsed_text = self.nlp(cleaned_text)
        if self._verbose and row.name % 100 == 0:
print(row.name)
self._register_document(parsed_text, row)
def _get_raw_text_from_row(self, row):
return row[self._text_col]
def _register_document(self, parsed_text, row):
self._term_doc_mat_fact._register_doc(X_factory=self.X_factory,
mX_factory=self.mX_factory,
document_index=row.name,
parsed_text=parsed_text,
term_idx_store=self.term_idx_store,
metadata_idx_store=self.metadata_idx_store)
class ParsePipelineFactory(ParsePipelineFactoryWithoutCategories):
def __init__(self,
nlp,
X_factory,
mX_factory,
category_idx_store,
term_idx_store,
metadata_idx_store,
y,
term_doc_mat_fact):
ParsePipelineFactoryWithoutCategories.__init__(self, nlp, X_factory, mX_factory, term_idx_store,
metadata_idx_store, term_doc_mat_fact)
self._register_doc_and_category \
= self._term_doc_mat_fact._register_doc_and_category
self._category_col \
= self._term_doc_mat_fact._category_col
self.category_idx_store = category_idx_store
self.y = y
def _register_document(self, parsed_text, row):
self._register_doc_and_category(X_factory=self.X_factory,
mX_factory=self.mX_factory,
category=row[self._category_col],
category_idx_store=self.category_idx_store,
document_index=row.name,
parsed_text=parsed_text,
term_idx_store=self.term_idx_store,
metadata_idx_store=self.metadata_idx_store,
y=self.y)
def build_sparse_matrices(y, X_factory, mX_factory):
return build_sparse_matrices_with_num_docs(len(y), X_factory, mX_factory)
def build_sparse_matrices_with_num_docs(num_docs, X_factory, mX_factory):
return X_factory.set_last_row_idx(num_docs - 1).get_csr_matrix(), \
mX_factory.set_last_row_idx(num_docs - 1).get_csr_matrix()
class TermDocMatrixFromPandas(TermDocMatrixFactory):
def __init__(self,
data_frame,
category_col,
text_col,
clean_function=lambda x: x,
nlp=None,
feats_from_spacy_doc=None,
verbose=False):
'''Creates a TermDocMatrix from a pandas data frame.
Parameters
----------
data_frame : pd.DataFrame
The data frame that contains columns for the category of interest
and the document text.
text_col : str
The name of the column which contains each document's raw text.
category_col : str
The name of the column which contains the category of interest.
clean_function : function, optional
A function that strips invalid characters out of the document text string,
returning the new string.
nlp : function, optional
feats_from_spacy_doc : FeatsFromSpacyDoc or None
verbose : boolean, optional
If true, prints a message every time a document index % 100 is 0.
See Also
--------
TermDocMatrixFactory
'''
TermDocMatrixFactory.__init__(self,
clean_function=clean_function,
nlp=nlp,
feats_from_spacy_doc=feats_from_spacy_doc)
self.data_frame = data_frame.reset_index()
self._text_col = text_col
self._category_col = category_col
self._verbose = verbose
def build(self):
'''Constructs the term doc matrix.
Returns
-------
TermDocMatrix
'''
X_factory, mX_factory, category_idx_store, term_idx_store, metadata_idx_store, y \
= self._init_term_doc_matrix_variables()
parse_pipeline = ParsePipelineFactory(self.get_nlp(),
X_factory,
mX_factory,
category_idx_store,
term_idx_store,
metadata_idx_store,
y,
self)
df = self._clean_and_filter_nulls_and_empties_from_dataframe()
tdm = self._apply_pipeline_and_get_build_instance(X_factory,
mX_factory,
category_idx_store,
df,
parse_pipeline,
term_idx_store,
metadata_idx_store,
y)
return tdm
def _apply_pipeline_and_get_build_instance(self,
X_factory,
mX_factory,
category_idx_store,
df,
parse_pipeline,
term_idx_store,
metadata_idx_store,
y):
df.apply(parse_pipeline.parse, axis=1)
y = np.array(y)
X, mX = self._build_sparse_matrices(y, X_factory, mX_factory)
tdm = TermDocMatrix(X, mX, y, term_idx_store, category_idx_store, metadata_idx_store)
return tdm
def _build_sparse_matrices(self, y, X_factory, mX_factory):
return build_sparse_matrices(y, X_factory, mX_factory)
def _init_term_doc_matrix_variables(self):
return CorpusFactoryHelper.init_term_doc_matrix_variables()
def _clean_and_filter_nulls_and_empties_from_dataframe(self):
df = self.data_frame.loc[self.data_frame[[self._category_col, self._text_col]].dropna().index]
df = df[df[self._text_col] != ''].reset_index()
return df
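# Illustrative usage sketch (not part of the original module). The column
# names and spaCy model are assumptions for the example; any function that
# returns a parsed document can be passed as `nlp`.
#
#   import spacy
#   import pandas as pd
#   df = pd.DataFrame({'party': ['dem', 'rep'],
#                      'text': ['first speech text', 'second speech text']})
#   term_doc_matrix = TermDocMatrixFromPandas(
#       data_frame=df,
#       category_col='party',
#       text_col='text',
#       nlp=spacy.load('en_core_web_sm'),
#   ).build()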
class TermDocMatrixWithoutCategoriesFromPandas(TermDocMatrixFactory):
def __init__(self,
data_frame,
text_col,
clean_function=lambda x: x,
nlp=None,
feats_from_spacy_doc=None,
verbose=False):
        '''Creates a TermDocMatrixWithoutCategories from a pandas data frame.
Parameters
----------
data_frame : pd.DataFrame
            The data frame that contains the column with each document's raw text.
text_col : str
The name of the column which contains each document's raw text.
clean_function : function, optional
A function that strips invalid characters out of the document text string,
returning the new string.
nlp : function, optional
feats_from_spacy_doc : FeatsFromSpacyDoc or None
verbose : boolean, optional
If true, prints a message every time a document index % 100 is 0.
See Also
--------
TermDocMatrixFactory
'''
TermDocMatrixFactory.__init__(self,
clean_function=clean_function,
nlp=nlp,
feats_from_spacy_doc=feats_from_spacy_doc)
self.data_frame = data_frame.reset_index()
self._text_col = text_col
self._verbose = verbose
def build(self):
'''Constructs the term doc matrix.
Returns
-------
        TermDocMatrixWithoutCategories
'''
X_factory = CSRMatrixFactory()
mX_factory = CSRMatrixFactory()
term_idx_store = IndexStore()
metadata_idx_store = IndexStore()
parse_pipeline = ParsePipelineFactoryWithoutCategories(self.get_nlp(),
X_factory,
mX_factory,
term_idx_store,
metadata_idx_store,
self)
df = self._clean_and_filter_nulls_and_empties_from_dataframe()
tdm = self._apply_pipeline_and_get_build_instance(X_factory,
mX_factory,
df,
parse_pipeline,
term_idx_store,
metadata_idx_store)
return tdm
def _apply_pipeline_and_get_build_instance(self,
X_factory,
mX_factory,
df,
parse_pipeline,
term_idx_store,
metadata_idx_store):
df.apply(parse_pipeline.parse, axis=1)
X, mX = build_sparse_matrices_with_num_docs(len(df), X_factory, mX_factory)
tdm = TermDocMatrixWithoutCategories(X, mX, term_idx_store, metadata_idx_store)
return tdm
def _clean_and_filter_nulls_and_empties_from_dataframe(self):
df = self.data_frame.loc[self.data_frame[[self._text_col]].dropna().index]
df = df[df[self._text_col] != ''].reset_index()
return df
class CorpusFactoryHelper(object):
@staticmethod
def init_term_doc_matrix_variables():
y = []
X_factory = CSRMatrixFactory()
mX_factory = CSRMatrixFactory()
category_idx_store = IndexStore()
term_idx_store = IndexStore()
metadata_idx_store = IndexStore()
return X_factory, mX_factory, category_idx_store, \
term_idx_store, metadata_idx_store, y
|
import asyncio
from datetime import timedelta
from typing import Any, Dict
from directv import DIRECTV, DIRECTVError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
ATTR_VIA_DEVICE,
DOMAIN,
)
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.120")
PLATFORMS = ["media_player", "remote"]
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass: HomeAssistant, config: Dict) -> bool:
"""Set up the DirecTV component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up DirecTV from a config entry."""
dtv = DIRECTV(entry.data[CONF_HOST], session=async_get_clientsession(hass))
try:
await dtv.update()
except DIRECTVError as err:
raise ConfigEntryNotReady from err
hass.data[DOMAIN][entry.entry_id] = dtv
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class DIRECTVEntity(Entity):
"""Defines a base DirecTV entity."""
def __init__(self, *, dtv: DIRECTV, name: str, address: str = "0") -> None:
"""Initialize the DirecTV entity."""
self._address = address
self._device_id = address if address != "0" else dtv.device.info.receiver_id
self._is_client = address != "0"
self._name = name
self.dtv = dtv
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this DirecTV receiver."""
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},
ATTR_NAME: self.name,
ATTR_MANUFACTURER: self.dtv.device.info.brand,
ATTR_MODEL: None,
ATTR_SOFTWARE_VERSION: self.dtv.device.info.version,
ATTR_VIA_DEVICE: (DOMAIN, self.dtv.device.info.receiver_id),
}
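# Illustrative sketch (not part of the original module): platform entities
# derive from DIRECTVEntity and pass the shared DIRECTV client stored in
# hass.data[DOMAIN][entry.entry_id]. The subclass name below is made up.
#
#   class ExampleDirecTVEntity(DIRECTVEntity):
#       def __init__(self, dtv: DIRECTV) -> None:
#           super().__init__(dtv=dtv, name=f"{dtv.device.info.brand} Receiver")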
|
import pickle
import re
from traceback import format_exception
import pytest
from jinja2 import ChoiceLoader
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import TemplateSyntaxError
@pytest.fixture
def fs_env(filesystem_loader):
"""returns a new environment."""
return Environment(loader=filesystem_loader)
class TestDebug:
def assert_traceback_matches(self, callback, expected_tb):
with pytest.raises(Exception) as exc_info:
callback()
tb = format_exception(exc_info.type, exc_info.value, exc_info.tb)
m = re.search(expected_tb.strip(), "".join(tb))
assert (
m is not None
), "Traceback did not match:\n\n{''.join(tb)}\nexpected:\n{expected_tb}"
def test_runtime_error(self, fs_env):
def test():
tmpl.render(fail=lambda: 1 / 0)
tmpl = fs_env.get_template("broken.html")
self.assert_traceback_matches(
test,
r"""
File ".*?broken.html", line 2, in (top-level template code|<module>)
\{\{ fail\(\) \}\}
File ".*debug?.pyc?", line \d+, in <lambda>
tmpl\.render\(fail=lambda: 1 / 0\)
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
""",
)
def test_syntax_error(self, fs_env):
# The trailing .*? is for PyPy 2 and 3, which don't seem to
# clear the exception's original traceback, leaving the syntax
# error in the middle of other compiler frames.
self.assert_traceback_matches(
lambda: fs_env.get_template("syntaxerror.html"),
"""(?sm)
File ".*?syntaxerror.html", line 4, in (template|<module>)
\\{% endif %\\}.*?
(jinja2\\.exceptions\\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja \
was looking for the following tags: 'endfor' or 'else'. The innermost block that needs \
to be closed is 'for'.
""",
)
def test_regular_syntax_error(self, fs_env):
def test():
raise TemplateSyntaxError("wtf", 42)
self.assert_traceback_matches(
test,
r"""
File ".*debug.pyc?", line \d+, in test
raise TemplateSyntaxError\("wtf", 42\)
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
line 42""",
)
def test_pickleable_syntax_error(self, fs_env):
original = TemplateSyntaxError("bad template", 42, "test", "test.txt")
unpickled = pickle.loads(pickle.dumps(original))
assert str(original) == str(unpickled)
assert original.name == unpickled.name
def test_include_syntax_error_source(self, filesystem_loader):
e = Environment(
loader=ChoiceLoader(
[
filesystem_loader,
DictLoader({"inc": "a\n{% include 'syntaxerror.html' %}\nb"}),
]
)
)
t = e.get_template("inc")
with pytest.raises(TemplateSyntaxError) as exc_info:
t.render()
assert exc_info.value.source is not None
def test_local_extraction(self):
from jinja2.debug import get_template_locals
from jinja2.runtime import missing
locals = get_template_locals(
{
"l_0_foo": 42,
"l_1_foo": 23,
"l_2_foo": 13,
"l_0_bar": 99,
"l_1_bar": missing,
"l_0_baz": missing,
}
)
assert locals == {"foo": 13, "bar": 99}
def test_get_corresponding_lineno_traceback(self, fs_env):
tmpl = fs_env.get_template("test.html")
assert tmpl.get_corresponding_lineno(1) == 1
|
from sqlalchemy.orm import relationship
from sqlalchemy import Integer, String, Column, Boolean
from sqlalchemy.event import listen
from sqlalchemy_utils.types.arrow import ArrowType
from lemur.database import db
from lemur.models import roles_users
from lemur.extensions import bcrypt
def hash_password(mapper, connect, target):
"""
Helper function that is a listener and hashes passwords before
insertion into the database.
:param mapper:
:param connect:
:param target:
"""
target.hash_password()
class User(db.Model):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
password = Column(String(128))
active = Column(Boolean())
confirmed_at = Column(ArrowType())
username = Column(String(255), nullable=False, unique=True)
email = Column(String(128), unique=True)
profile_picture = Column(String(255))
roles = relationship(
"Role",
secondary=roles_users,
passive_deletes=True,
backref=db.backref("user"),
lazy="dynamic",
)
certificates = relationship(
"Certificate", backref=db.backref("user"), lazy="dynamic"
)
pending_certificates = relationship(
"PendingCertificate", backref=db.backref("user"), lazy="dynamic"
)
authorities = relationship("Authority", backref=db.backref("user"), lazy="dynamic")
keys = relationship("ApiKey", backref=db.backref("user"), lazy="dynamic")
logs = relationship("Log", backref=db.backref("user"), lazy="dynamic")
sensitive_fields = ("password",)
def check_password(self, password):
"""
Hash a given password and check it against the stored value
        to determine its validity.
:param password:
:return:
"""
if self.password:
return bcrypt.check_password_hash(self.password, password)
def hash_password(self):
"""
Generate the secure hash for the password.
:return:
"""
if self.password:
self.password = bcrypt.generate_password_hash(self.password).decode("utf-8")
@property
def is_admin(self):
"""
Determine if the current user has the 'admin' role associated
with it.
:return:
"""
for role in self.roles:
if role.name == "admin":
return True
def __repr__(self):
return "User(username={username})".format(username=self.username)
listen(User, "before_insert", hash_password)
|
import os.path
import coverage
class Plugin(coverage.CoveragePlugin):
"""A file tracer plugin to import, so that it isn't in the test's current directory."""
def file_tracer(self, filename):
"""Trace only files named xyz.py"""
if "xyz.py" in filename:
return FileTracer(filename)
return None
def file_reporter(self, filename):
return FileReporter(filename)
class FileTracer(coverage.FileTracer):
"""A FileTracer emulating a simple static plugin."""
def __init__(self, filename):
"""Claim that */*xyz.py was actually sourced from /src/*ABC.zz"""
self._filename = filename
self._source_filename = os.path.join(
"/src",
os.path.basename(filename.replace("xyz.py", "ABC.zz"))
)
def source_filename(self):
return self._source_filename
def line_number_range(self, frame):
"""Map the line number X to X05,X06,X07."""
lineno = frame.f_lineno
return lineno*100+5, lineno*100+7
class FileReporter(coverage.FileReporter):
"""Dead-simple FileReporter."""
def lines(self):
return set([105, 106, 107, 205, 206, 207])
def coverage_init(reg, options): # pylint: disable=unused-argument
"""Called by coverage to initialize the plugins here."""
reg.add_file_tracer(Plugin())
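# Illustrative only (not part of the original file): coverage.py discovers a
# plugin like this through its run configuration; the module name below stands
# in for whatever name this file is importable under, e.g. in .coveragerc:
#
#   [run]
#   plugins = plugin_module_name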
|
import pytest
from lemur.domains.views import * # noqa
from .vectors import (
VALID_ADMIN_API_TOKEN,
VALID_ADMIN_HEADER_TOKEN,
VALID_USER_HEADER_TOKEN,
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_domain_get(client, token, status):
assert (
client.get(api.url_for(Domains, domain_id=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_domain_post_(client, token, status):
assert (
client.post(
api.url_for(Domains, domain_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_domain_put(client, token, status):
assert (
client.put(
api.url_for(Domains, domain_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_domain_delete(client, token, status):
assert (
client.delete(api.url_for(Domains, domain_id=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_domain_patch(client, token, status):
assert (
client.patch(
api.url_for(Domains, domain_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_domain_list_post_(client, token, status):
assert (
client.post(api.url_for(DomainsList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_domain_list_get(client, token, status):
assert client.get(api.url_for(DomainsList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_domain_list_delete(client, token, status):
assert client.delete(api.url_for(DomainsList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_domain_list_patch(client, token, status):
assert (
client.patch(api.url_for(DomainsList), data={}, headers=token).status_code
== status
)
|