from pygal.adapters import positive
from pygal.graph.horizontal import HorizontalGraph
from pygal.graph.stackedbar import StackedBar
class VerticalPyramid(StackedBar):
"""Vertical Pyramid graph class"""
_adapters = [positive]
def _value_format(self, value):
"""Format value for dual value display."""
return super(VerticalPyramid, self)._value_format(value and abs(value))
def _get_separated_values(self, secondary=False):
"""Separate values between odd and even series stacked"""
series = self.secondary_series if secondary else self.series
positive_vals = map(
sum,
zip(
*[
serie.safe_values for index, serie in enumerate(series)
if index % 2
]
)
)
negative_vals = map(
sum,
zip(
*[
serie.safe_values for index, serie in enumerate(series)
if not index % 2
]
)
)
return list(positive_vals), list(negative_vals)
def _compute_box(self, positive_vals, negative_vals):
"""Compute Y min and max"""
max_ = max(
max(positive_vals or [self.zero]),
max(negative_vals or [self.zero])
)
if self.range and self.range[0] is not None:
self._box.ymin = self.range[0]
else:
self._box.ymin = -max_
if self.range and self.range[1] is not None:
self._box.ymax = self.range[1]
else:
self._box.ymax = max_
def _pre_compute_secondary(self, positive_vals, negative_vals):
"""Compute secondary y min and max"""
self._secondary_max = max(max(positive_vals), max(negative_vals))
self._secondary_min = -self._secondary_max
def _bar(self, serie, parent, x, y, i, zero, secondary=False):
"""Internal stacking bar drawing function"""
if serie.index % 2:
y = -y
return super(VerticalPyramid,
self)._bar(serie, parent, x, y, i, zero, secondary)
class Pyramid(HorizontalGraph, VerticalPyramid):
"""Horizontal Pyramid graph class like the one used by age pyramid"""
|
import logging
import os
from babelfish import Error as BabelfishError, Language
from enzyme import MKV
logger = logging.getLogger(__name__)
def refine(video, embedded_subtitles=True, **kwargs):
"""Refine a video by searching its metadata.
Several :class:`~subliminal.video.Video` attributes can be found:
* :attr:`~subliminal.video.Video.resolution`
* :attr:`~subliminal.video.Video.video_codec`
* :attr:`~subliminal.video.Video.audio_codec`
* :attr:`~subliminal.video.Video.subtitle_languages`
:param bool embedded_subtitles: search for embedded subtitles.
"""
    # skip non-existent videos
if not video.exists:
return
# check extensions
extension = os.path.splitext(video.name)[1]
if extension == '.mkv':
with open(video.name, 'rb') as f:
mkv = MKV(f)
# main video track
if mkv.video_tracks:
video_track = mkv.video_tracks[0]
# resolution
if video_track.height in (480, 720, 1080):
if video_track.interlaced:
video.resolution = '%di' % video_track.height
else:
video.resolution = '%dp' % video_track.height
logger.debug('Found resolution %s', video.resolution)
# video codec
if video_track.codec_id == 'V_MPEG4/ISO/AVC':
video.video_codec = 'H.264'
logger.debug('Found video_codec %s', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/SP':
video.video_codec = 'DivX'
logger.debug('Found video_codec %s', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/ASP':
video.video_codec = 'Xvid'
logger.debug('Found video_codec %s', video.video_codec)
else:
logger.warning('MKV has no video track')
# main audio track
if mkv.audio_tracks:
audio_track = mkv.audio_tracks[0]
# audio codec
if audio_track.codec_id == 'A_AC3':
video.audio_codec = 'Dolby Digital'
logger.debug('Found audio_codec %s', video.audio_codec)
elif audio_track.codec_id == 'A_DTS':
video.audio_codec = 'DTS'
logger.debug('Found audio_codec %s', video.audio_codec)
elif audio_track.codec_id == 'A_AAC':
video.audio_codec = 'AAC'
logger.debug('Found audio_codec %s', video.audio_codec)
else:
logger.warning('MKV has no audio track')
# subtitle tracks
if mkv.subtitle_tracks:
if embedded_subtitles:
embedded_subtitle_languages = set()
for st in mkv.subtitle_tracks:
if st.language:
try:
embedded_subtitle_languages.add(Language.fromalpha3b(st.language))
except BabelfishError:
logger.error('Embedded subtitle track language %r is not a valid language', st.language)
embedded_subtitle_languages.add(Language('und'))
elif st.name:
try:
embedded_subtitle_languages.add(Language.fromname(st.name))
except BabelfishError:
logger.debug('Embedded subtitle track name %r is not a valid language', st.name)
embedded_subtitle_languages.add(Language('und'))
else:
embedded_subtitle_languages.add(Language('und'))
logger.debug('Found embedded subtitle %r', embedded_subtitle_languages)
video.subtitle_languages |= embedded_subtitle_languages
else:
logger.debug('MKV has no subtitle track')
else:
logger.debug('Unsupported video extension %s', extension)
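# Minimal usage sketch (assumes subliminal's public Video.fromname helper; the
# file name below is hypothetical). refine() returns early when the file does
# not exist on disk.
if __name__ == '__main__':
    from subliminal import Video
    video = Video.fromname('Example.Movie.2016.1080p.x264.mkv')
    refine(video, embedded_subtitles=True)
    print(video.resolution, video.video_codec, video.audio_codec, video.subtitle_languages)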
|
from __future__ import division, print_function
import re
from urwid.compat import bytes, B, ord2, text_type
SAFE_ASCII_RE = re.compile(u"^[ -~]*$")
SAFE_ASCII_BYTES_RE = re.compile(B("^[ -~]*$"))
_byte_encoding = None
# GENERATED DATA
# generated from
# http://www.unicode.org/Public/4.0-Update/EastAsianWidth-4.0.0.txt
widths = [
(126, 1),
(159, 0),
(687, 1),
(710, 0),
(711, 1),
(727, 0),
(733, 1),
(879, 0),
(1154, 1),
(1161, 0),
(4347, 1),
(4447, 2),
(7467, 1),
(7521, 0),
(8369, 1),
(8426, 0),
(9000, 1),
(9002, 2),
(11021, 1),
(12350, 2),
(12351, 1),
(12438, 2),
(12442, 0),
(19893, 2),
(19967, 1),
(55203, 2),
(63743, 1),
(64106, 2),
(65039, 1),
(65059, 0),
(65131, 2),
(65279, 1),
(65376, 2),
(65500, 1),
(65510, 2),
(120831, 1),
(262141, 2),
(1114109, 1),
]
# ACCESSOR FUNCTIONS
def get_width( o ):
"""Return the screen column width for unicode ordinal o."""
global widths
if o == 0xe or o == 0xf:
return 0
for num, wid in widths:
if o <= num:
return wid
return 1
def decode_one( text, pos ):
"""
Return (ordinal at pos, next position) for UTF-8 encoded text.
"""
assert isinstance(text, bytes), text
b1 = ord2(text[pos])
if not b1 & 0x80:
return b1, pos+1
error = ord("?"), pos+1
lt = len(text)
lt = lt-pos
if lt < 2:
return error
if b1 & 0xe0 == 0xc0:
b2 = ord2(text[pos+1])
if b2 & 0xc0 != 0x80:
return error
o = ((b1&0x1f)<<6)|(b2&0x3f)
if o < 0x80:
return error
return o, pos+2
if lt < 3:
return error
if b1 & 0xf0 == 0xe0:
b2 = ord2(text[pos+1])
if b2 & 0xc0 != 0x80:
return error
b3 = ord2(text[pos+2])
if b3 & 0xc0 != 0x80:
return error
o = ((b1&0x0f)<<12)|((b2&0x3f)<<6)|(b3&0x3f)
if o < 0x800:
return error
return o, pos+3
if lt < 4:
return error
if b1 & 0xf8 == 0xf0:
b2 = ord2(text[pos+1])
if b2 & 0xc0 != 0x80:
return error
b3 = ord2(text[pos+2])
if b3 & 0xc0 != 0x80:
return error
        b4 = ord2(text[pos+3])
if b4 & 0xc0 != 0x80:
return error
o = ((b1&0x07)<<18)|((b2&0x3f)<<12)|((b3&0x3f)<<6)|(b4&0x3f)
if o < 0x10000:
return error
return o, pos+4
return error
def decode_one_uni(text, i):
"""
decode_one implementation for unicode strings
"""
return ord(text[i]), i+1
def decode_one_right(text, pos):
"""
Return (ordinal at pos, next position) for UTF-8 encoded text.
pos is assumed to be on the trailing byte of a utf-8 sequence.
"""
assert isinstance(text, bytes), text
error = ord("?"), pos-1
p = pos
while p >= 0:
if ord2(text[p])&0xc0 != 0x80:
            o, _ = decode_one(text, p)
            return o, p-1
        p -= 1
        if p == pos - 4:
return error
def set_byte_encoding(enc):
assert enc in ('utf8', 'narrow', 'wide')
global _byte_encoding
_byte_encoding = enc
def get_byte_encoding():
return _byte_encoding
def calc_text_pos(text, start_offs, end_offs, pref_col):
"""
Calculate the closest position to the screen column pref_col in text
where start_offs is the offset into text assumed to be screen column 0
and end_offs is the end of the range to search.
text may be unicode or a byte string in the target _byte_encoding
Returns (position, actual_col).
"""
assert start_offs <= end_offs, repr((start_offs, end_offs))
utfs = isinstance(text, bytes) and _byte_encoding == "utf8"
unis = not isinstance(text, bytes)
if unis or utfs:
decode = [decode_one, decode_one_uni][unis]
i = start_offs
sc = 0
n = 1 # number to advance by
while i < end_offs:
o, n = decode(text, i)
w = get_width(o)
if w+sc > pref_col:
return i, sc
i = n
sc += w
return i, sc
assert type(text) == bytes, repr(text)
# "wide" and "narrow"
i = start_offs+pref_col
if i >= end_offs:
return end_offs, end_offs-start_offs
if _byte_encoding == "wide":
if within_double_byte(text, start_offs, i) == 2:
i -= 1
return i, i-start_offs
def calc_width(text, start_offs, end_offs):
"""
Return the screen column width of text between start_offs and end_offs.
text may be unicode or a byte string in the target _byte_encoding
Some characters are wide (take two columns) and others affect the
previous character (take zero columns). Use the widths table above
to calculate the screen column width of text[start_offs:end_offs]
"""
assert start_offs <= end_offs, repr((start_offs, end_offs))
utfs = isinstance(text, bytes) and _byte_encoding == "utf8"
unis = not isinstance(text, bytes)
if (unis and not SAFE_ASCII_RE.match(text)
) or (utfs and not SAFE_ASCII_BYTES_RE.match(text)):
decode = [decode_one, decode_one_uni][unis]
i = start_offs
sc = 0
n = 1 # number to advance by
while i < end_offs:
o, n = decode(text, i)
w = get_width(o)
i = n
sc += w
return sc
# "wide", "narrow" or all printable ASCII, just return the character count
return end_offs - start_offs
def is_wide_char(text, offs):
"""
Test if the character at offs within text is wide.
text may be unicode or a byte string in the target _byte_encoding
"""
if isinstance(text, text_type):
o = ord(text[offs])
return get_width(o) == 2
assert isinstance(text, bytes)
if _byte_encoding == "utf8":
o, n = decode_one(text, offs)
return get_width(o) == 2
if _byte_encoding == "wide":
return within_double_byte(text, offs, offs) == 1
return False
def move_prev_char(text, start_offs, end_offs):
"""
Return the position of the character before end_offs.
"""
assert start_offs < end_offs
if isinstance(text, text_type):
return end_offs-1
assert isinstance(text, bytes)
if _byte_encoding == "utf8":
o = end_offs-1
while ord2(text[o])&0xc0 == 0x80:
o -= 1
return o
if _byte_encoding == "wide" and within_double_byte(text,
start_offs, end_offs-1) == 2:
return end_offs-2
return end_offs-1
def move_next_char(text, start_offs, end_offs):
"""
Return the position of the character after start_offs.
"""
assert start_offs < end_offs
if isinstance(text, text_type):
return start_offs+1
assert isinstance(text, bytes)
if _byte_encoding == "utf8":
o = start_offs+1
while o<end_offs and ord2(text[o])&0xc0 == 0x80:
o += 1
return o
if _byte_encoding == "wide" and within_double_byte(text,
start_offs, start_offs) == 1:
return start_offs +2
return start_offs+1
def within_double_byte(text, line_start, pos):
"""Return whether pos is within a double-byte encoded character.
text -- byte string in question
line_start -- offset of beginning of line (< pos)
pos -- offset in question
Return values:
0 -- not within dbe char, or double_byte_encoding == False
1 -- pos is on the 1st half of a dbe char
2 -- pos is on the 2nd half of a dbe char
"""
assert isinstance(text, bytes)
v = ord2(text[pos])
if v >= 0x40 and v < 0x7f:
# might be second half of big5, uhc or gbk encoding
if pos == line_start: return 0
if ord2(text[pos-1]) >= 0x81:
if within_double_byte(text, line_start, pos-1) == 1:
return 2
return 0
if v < 0x80: return 0
i = pos -1
while i >= line_start:
if ord2(text[i]) < 0x80:
break
i -= 1
if (pos - i) & 1:
return 1
return 2
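# Illustrative sketch (not part of urwid's public API): how the accessors above
# combine for a UTF-8 byte string containing wide CJK characters.
def _width_demo():
    set_byte_encoding("utf8")
    text = u"\u65e5\u672c\u8a9e abc".encode("utf8")  # "日本語 abc", 13 bytes
    print(calc_width(text, 0, len(text)))  # 3 wide chars + 4 narrow -> 10 columns
    print(decode_one(text, 0))             # (26085, 3): ordinal of U+65E5, next byte offset
    print(is_wide_char(text, 0))           # True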
# TABLE GENERATION CODE
def process_east_asian_width():
import sys
out = []
last = None
for line in sys.stdin.readlines():
if line[:1] == "#": continue
line = line.strip()
hex,rest = line.split(";",1)
wid,rest = rest.split(" # ",1)
word1 = rest.split(" ",1)[0]
if "." in hex:
hex = hex.split("..")[1]
num = int(hex, 16)
if word1 in ("COMBINING","MODIFIER","<control>"):
l = 0
elif wid in ("W", "F"):
l = 2
else:
l = 1
if last is None:
out.append((0, l))
last = l
if last == l:
out[-1] = (num, l)
else:
out.append( (num, l) )
last = l
print("widths = [")
for o in out[1:]: # treat control characters same as ascii
print("\t%r," % (o,))
print("]")
if __name__ == "__main__":
process_east_asian_width()
|
import re
def create_matcher(utterance):
"""Create a regex that matches the utterance."""
# Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL
# Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name}
parts = re.split(r"({\w+}|\[[\w\s]+\] *)", utterance)
# Pattern to extract name from GROUP part. Matches {name}
group_matcher = re.compile(r"{(\w+)}")
# Pattern to extract text from OPTIONAL part. Matches [the color]
optional_matcher = re.compile(r"\[([\w ]+)\] *")
pattern = ["^"]
for part in parts:
group_match = group_matcher.match(part)
optional_match = optional_matcher.match(part)
# Normal part
if group_match is None and optional_match is None:
pattern.append(part)
continue
# Group part
if group_match is not None:
pattern.append(r"(?P<{}>[\w ]+?)\s*".format(group_match.groups()[0]))
# Optional part
elif optional_match is not None:
pattern.append(r"(?:{} *)?".format(optional_match.groups()[0]))
pattern.append("$")
return re.compile("".join(pattern), re.I)
|
import sys
import unittest
import lxml.html
class SelectTest(unittest.TestCase):
@staticmethod
def _evaluate_select(options, multiple=False):
options = ''.join('<option' + (' selected="selected"' if selected else '') + '>' + option + '</option>'
for option, selected in options)
string = '<title>test</title><form><select%s>%s</select></form>' % \
(' multiple="multiple"' if multiple else '', options)
return lxml.html.fromstring(string).find('.//select').value
def test_single_select_value_no_options(self):
self.assertEqual(
self._evaluate_select([]),
None)
def test_single_select_value_no_selected_option(self):
# If no option is selected, the HTML5 specification requires the first option to get selected.
self.assertEqual(
self._evaluate_select([('a', False), ('b', False)]),
'a')
def test_single_select_value_multiple_selected_options(self):
# If multiple options are selected, the proposed HTML 5.1 specification
# requires all but the last selected options to get deselected.
self.assertEqual(
self._evaluate_select([('a', True), ('b', True)]),
'b')
def test_multiple_select_value_no_selected_option(self):
self.assertEqual(
self._evaluate_select([('a', False), ('b', False)], multiple=True),
set())
def test_multiple_select_value_multiple_selected_options(self):
self.assertEqual(
self._evaluate_select([('a', True), ('b', True)], multiple=True),
{'a', 'b'})
def test_suite():
loader = unittest.TestLoader()
return loader.loadTestsFromModule(sys.modules[__name__])
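# Hedged illustration of the behaviour exercised by the tests above: lxml's
# SelectElement.value follows the HTML5 selection rules checked here.
if __name__ == '__main__':
    select = lxml.html.fromstring(
        '<title>test</title><form><select>'
        '<option>a</option><option selected="selected">b</option>'
        '</select></form>'
    ).find('.//select')
    print(select.value)  # -> 'b'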
|
from typing import Sequence
from typing import Tuple
from paasta_tools import paastaapi
from paasta_tools.api.client import get_paasta_oapi_client
from paasta_tools.cli.utils import get_paasta_metastatus_cmd_args
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import SystemPaastaConfig
def add_subparser(subparsers,) -> None:
status_parser = subparsers.add_parser(
"metastatus",
help="Display the status for an entire PaaSTA cluster",
description=(
"'paasta metastatus' is used to get the vital statistics about a PaaSTA "
"cluster as a whole. This tool is helpful when answering the question: 'Is "
"it just my service or the whole cluster that is broken?'\n\n"
"metastatus operates by ssh'ing to a Mesos master of a remote cluster, and "
"querying the local APIs."
),
epilog=(
"The metastatus command may time out during heavy load. When that happens "
"users may execute the ssh command directly, in order to bypass the timeout."
),
)
status_parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbose",
default=0,
help="""Print out more output regarding the state of the cluster.
Multiple v options increase verbosity. Maximum is 3.""",
)
clusters_help = (
"A comma separated list of clusters to view. Defaults to view all clusters. "
"Try: --clusters norcal-prod,nova-prod"
)
status_parser.add_argument(
"-c", "--clusters", help=clusters_help
).completer = lazy_choices_completer(list_clusters)
status_parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
status_parser.add_argument(
"-a",
"--autoscaling-info",
action="store_true",
default=False,
dest="autoscaling_info",
help="Show cluster autoscaling info, implies -vv",
)
status_parser.add_argument(
"--use-mesos-cache",
action="store_true",
default=False,
dest="use_mesos_cache",
help="Use Mesos cache for state.json and frameworks",
)
status_parser.add_argument(
"-g",
"--groupings",
nargs="+",
default=["region"],
help=(
"Group resource information of slaves grouped by attribute."
"Note: This is only effective with -vv"
),
)
# The service and instance args default to None if not specified.
status_parser.add_argument(
"-s",
"--service",
help=(
"Show how many of a given service instance can be run on a cluster slave."
"Note: This is only effective with -vvv and --instance must also be specified"
),
).completer = lazy_choices_completer(list_services)
status_parser.add_argument(
"-i",
"--instance",
help=(
"Show how many of a given service instance can be run on a cluster slave."
"Note: This is only effective with -vvv and --service must also be specified"
),
)
status_parser.set_defaults(command=paasta_metastatus)
def paasta_metastatus_on_api_endpoint(
cluster: str,
system_paasta_config: SystemPaastaConfig,
groupings: Sequence[str],
verbose: int,
autoscaling_info: bool = False,
use_mesos_cache: bool = False,
) -> Tuple[int, str]:
client = get_paasta_oapi_client(cluster, system_paasta_config)
if not client:
print("Cannot get a paasta-api client")
exit(1)
try:
cmd_args, _ = get_paasta_metastatus_cmd_args(
groupings=groupings,
verbose=verbose,
autoscaling_info=autoscaling_info,
use_mesos_cache=use_mesos_cache,
)
res = client.default.metastatus(cmd_args=[str(arg) for arg in cmd_args])
output, exit_code = res.output, res.exit_code
except paastaapi.ApiException as exc:
output, exit_code = exc.body, exc.status
return exit_code, output
def print_cluster_status(
cluster: str,
system_paasta_config: SystemPaastaConfig,
groupings: Sequence[str],
verbose: int = 0,
autoscaling_info: bool = False,
use_mesos_cache: bool = False,
) -> int:
"""With a given cluster and verboseness, returns the status of the cluster
output is printed directly to provide dashboards even if the cluster is unavailable"""
return_code, output = paasta_metastatus_on_api_endpoint(
cluster=cluster,
system_paasta_config=system_paasta_config,
groupings=groupings,
verbose=verbose,
autoscaling_info=autoscaling_info,
use_mesos_cache=use_mesos_cache,
)
print("Cluster: %s" % cluster)
print(get_cluster_dashboards(cluster))
print(output)
print()
return return_code
def figure_out_clusters_to_inspect(args, all_clusters) -> Sequence[str]:
if args.clusters is not None:
clusters_to_inspect = args.clusters.split(",")
else:
clusters_to_inspect = all_clusters
return clusters_to_inspect
def get_cluster_dashboards(cluster: str,) -> str:
"""Returns the direct dashboards for humans to use for a given cluster"""
SPACER = " "
try:
dashboards = load_system_paasta_config().get_dashboard_links()[cluster]
except KeyError as e:
if e.args[0] == cluster:
output = [PaastaColors.red("No dashboards configured for %s!" % cluster)]
else:
output = [PaastaColors.red("No dashboards configured!")]
else:
output = ["Dashboards:"]
spacing = max((len(label) for label in dashboards.keys())) + 1
for label, urls in dashboards.items():
if isinstance(urls, list):
urls = "\n %s" % "\n ".join(urls)
output.append(
" {}:{}{}".format(
label, SPACER * (spacing - len(label)), PaastaColors.cyan(urls)
)
)
return "\n".join(output)
def paasta_metastatus(args,) -> int:
"""Print the status of a PaaSTA clusters"""
soa_dir = args.soa_dir
system_paasta_config = load_system_paasta_config()
all_clusters = list_clusters(soa_dir=soa_dir)
clusters_to_inspect = figure_out_clusters_to_inspect(args, all_clusters)
return_codes = []
for cluster in clusters_to_inspect:
if cluster in all_clusters:
return_codes.append(
print_cluster_status(
cluster=cluster,
system_paasta_config=system_paasta_config,
groupings=args.groupings,
verbose=args.verbose,
autoscaling_info=args.autoscaling_info,
use_mesos_cache=args.use_mesos_cache,
)
)
else:
print("Cluster %s doesn't look like a valid cluster?" % args.clusters)
print("Try using tab completion to help complete the cluster name")
return 0 if all([return_code == 0 for return_code in return_codes]) else 1
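# Example invocations (cluster names are illustrative, mirroring the help text above):
#   paasta metastatus                                        # status of every cluster
#   paasta metastatus --clusters norcal-prod,nova-prod -vv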
|
import logging
from pyinsteon import async_connect
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
# pylint: disable=unused-import
from .const import (
CONF_HOUSECODE,
CONF_HUB_VERSION,
CONF_OVERRIDE,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
SIGNAL_ADD_DEVICE_OVERRIDE,
SIGNAL_ADD_X10_DEVICE,
SIGNAL_REMOVE_DEVICE_OVERRIDE,
SIGNAL_REMOVE_X10_DEVICE,
)
from .schemas import (
add_device_override,
add_x10_device,
build_device_override_schema,
build_hub_schema,
build_plm_schema,
build_remove_override_schema,
build_remove_x10_schema,
build_x10_schema,
)
STEP_PLM = "plm"
STEP_HUB_V1 = "hubv1"
STEP_HUB_V2 = "hubv2"
STEP_CHANGE_HUB_CONFIG = "change_hub_config"
STEP_ADD_X10 = "add_x10"
STEP_ADD_OVERRIDE = "add_override"
STEP_REMOVE_OVERRIDE = "remove_override"
STEP_REMOVE_X10 = "remove_x10"
MODEM_TYPE = "modem_type"
PLM = "PowerLinc Modem (PLM)"
HUB1 = "Hub version 1 (pre-2014)"
HUB2 = "Hub version 2"
_LOGGER = logging.getLogger(__name__)
def _only_one_selected(*args):
"""Test if only one item is True."""
return sum(args) == 1
async def _async_connect(**kwargs):
"""Connect to the Insteon modem."""
try:
await async_connect(**kwargs)
_LOGGER.info("Connected to Insteon modem.")
return True
except ConnectionError:
_LOGGER.error("Could not connect to Insteon modem.")
return False
def _remove_override(address, options):
"""Remove a device override from config."""
new_options = {}
if options.get(CONF_X10):
new_options[CONF_X10] = options.get(CONF_X10)
new_overrides = []
for override in options[CONF_OVERRIDE]:
if override[CONF_ADDRESS] != address:
new_overrides.append(override)
if new_overrides:
new_options[CONF_OVERRIDE] = new_overrides
return new_options
def _remove_x10(device, options):
"""Remove an X10 device from the config."""
    # The device string comes from the remove-X10 form and is expected to look
    # like "Housecode: A, Unitcode: 1"; slice out the housecode and unitcode.
    housecode = device[11].lower()
    unitcode = int(device[24:])
new_options = {}
if options.get(CONF_OVERRIDE):
new_options[CONF_OVERRIDE] = options.get(CONF_OVERRIDE)
new_x10 = []
for existing_device in options[CONF_X10]:
if (
existing_device[CONF_HOUSECODE].lower() != housecode
or existing_device[CONF_UNITCODE] != unitcode
):
new_x10.append(existing_device)
if new_x10:
new_options[CONF_X10] = new_x10
return new_options, housecode, unitcode
class InsteonFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Insteon config flow handler."""
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Define the config flow to handle options."""
return InsteonOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Init the config flow."""
errors = {}
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is not None:
selection = user_input.get(MODEM_TYPE)
if selection == PLM:
return await self.async_step_plm()
if selection == HUB1:
return await self.async_step_hubv1()
return await self.async_step_hubv2()
modem_types = [PLM, HUB1, HUB2]
data_schema = vol.Schema({vol.Required(MODEM_TYPE): vol.In(modem_types)})
return self.async_show_form(
step_id="user", data_schema=data_schema, errors=errors
)
async def async_step_plm(self, user_input=None):
"""Set up the PLM modem type."""
errors = {}
if user_input is not None:
if await _async_connect(**user_input):
return self.async_create_entry(title="", data=user_input)
errors["base"] = "cannot_connect"
schema_defaults = user_input if user_input is not None else {}
data_schema = build_plm_schema(**schema_defaults)
return self.async_show_form(
step_id=STEP_PLM, data_schema=data_schema, errors=errors
)
async def async_step_hubv1(self, user_input=None):
"""Set up the Hub v1 modem type."""
return await self._async_setup_hub(hub_version=1, user_input=user_input)
async def async_step_hubv2(self, user_input=None):
"""Set up the Hub v2 modem type."""
return await self._async_setup_hub(hub_version=2, user_input=user_input)
async def _async_setup_hub(self, hub_version, user_input):
"""Set up the Hub versions 1 and 2."""
errors = {}
if user_input is not None:
user_input[CONF_HUB_VERSION] = hub_version
if await _async_connect(**user_input):
return self.async_create_entry(title="", data=user_input)
user_input.pop(CONF_HUB_VERSION)
errors["base"] = "cannot_connect"
schema_defaults = user_input if user_input is not None else {}
data_schema = build_hub_schema(hub_version=hub_version, **schema_defaults)
step_id = STEP_HUB_V2 if hub_version == 2 else STEP_HUB_V1
return self.async_show_form(
step_id=step_id, data_schema=data_schema, errors=errors
)
async def async_step_import(self, import_info):
"""Import a yaml entry as a config entry."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if not await _async_connect(**import_info):
return self.async_abort(reason="cannot_connect")
return self.async_create_entry(title="", data=import_info)
class InsteonOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle an Insteon options flow."""
def __init__(self, config_entry):
"""Init the InsteonOptionsFlowHandler class."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Init the options config flow."""
errors = {}
if user_input is not None:
change_hub_config = user_input.get(STEP_CHANGE_HUB_CONFIG, False)
device_override = user_input.get(STEP_ADD_OVERRIDE, False)
x10_device = user_input.get(STEP_ADD_X10, False)
remove_override = user_input.get(STEP_REMOVE_OVERRIDE, False)
remove_x10 = user_input.get(STEP_REMOVE_X10, False)
if _only_one_selected(
change_hub_config,
device_override,
x10_device,
remove_override,
remove_x10,
):
if change_hub_config:
return await self.async_step_change_hub_config()
if device_override:
return await self.async_step_add_override()
if x10_device:
return await self.async_step_add_x10()
if remove_override:
return await self.async_step_remove_override()
if remove_x10:
return await self.async_step_remove_x10()
errors["base"] = "select_single"
data_schema = {
vol.Optional(STEP_ADD_OVERRIDE): bool,
vol.Optional(STEP_ADD_X10): bool,
}
if self.config_entry.data.get(CONF_HOST):
data_schema[vol.Optional(STEP_CHANGE_HUB_CONFIG)] = bool
options = {**self.config_entry.options}
if options.get(CONF_OVERRIDE):
data_schema[vol.Optional(STEP_REMOVE_OVERRIDE)] = bool
if options.get(CONF_X10):
data_schema[vol.Optional(STEP_REMOVE_X10)] = bool
return self.async_show_form(
step_id="init", data_schema=vol.Schema(data_schema), errors=errors
)
async def async_step_change_hub_config(self, user_input=None):
"""Change the Hub configuration."""
if user_input is not None:
data = {
**self.config_entry.data,
CONF_HOST: user_input[CONF_HOST],
CONF_PORT: user_input[CONF_PORT],
}
if self.config_entry.data[CONF_HUB_VERSION] == 2:
data[CONF_USERNAME] = user_input[CONF_USERNAME]
data[CONF_PASSWORD] = user_input[CONF_PASSWORD]
self.hass.config_entries.async_update_entry(self.config_entry, data=data)
return self.async_create_entry(
title="",
data={**self.config_entry.options},
)
data_schema = build_hub_schema(**self.config_entry.data)
return self.async_show_form(
step_id=STEP_CHANGE_HUB_CONFIG, data_schema=data_schema
)
async def async_step_add_override(self, user_input=None):
"""Add a device override."""
errors = {}
if user_input is not None:
try:
data = add_device_override({**self.config_entry.options}, user_input)
async_dispatcher_send(self.hass, SIGNAL_ADD_DEVICE_OVERRIDE, user_input)
return self.async_create_entry(title="", data=data)
except ValueError:
errors["base"] = "input_error"
schema_defaults = user_input if user_input is not None else {}
data_schema = build_device_override_schema(**schema_defaults)
return self.async_show_form(
step_id=STEP_ADD_OVERRIDE, data_schema=data_schema, errors=errors
)
async def async_step_add_x10(self, user_input=None):
"""Add an X10 device."""
errors = {}
if user_input is not None:
options = add_x10_device({**self.config_entry.options}, user_input)
async_dispatcher_send(self.hass, SIGNAL_ADD_X10_DEVICE, user_input)
return self.async_create_entry(title="", data=options)
schema_defaults = user_input if user_input is not None else {}
data_schema = build_x10_schema(**schema_defaults)
return self.async_show_form(
step_id=STEP_ADD_X10, data_schema=data_schema, errors=errors
)
async def async_step_remove_override(self, user_input=None):
"""Remove a device override."""
errors = {}
options = self.config_entry.options
if user_input is not None:
options = _remove_override(user_input[CONF_ADDRESS], options)
async_dispatcher_send(
self.hass,
SIGNAL_REMOVE_DEVICE_OVERRIDE,
user_input[CONF_ADDRESS],
)
return self.async_create_entry(title="", data=options)
data_schema = build_remove_override_schema(options[CONF_OVERRIDE])
return self.async_show_form(
step_id=STEP_REMOVE_OVERRIDE, data_schema=data_schema, errors=errors
)
async def async_step_remove_x10(self, user_input=None):
"""Remove an X10 device."""
errors = {}
options = self.config_entry.options
if user_input is not None:
options, housecode, unitcode = _remove_x10(user_input[CONF_DEVICE], options)
async_dispatcher_send(
self.hass, SIGNAL_REMOVE_X10_DEVICE, housecode, unitcode
)
return self.async_create_entry(title="", data=options)
data_schema = build_remove_x10_schema(options[CONF_X10])
return self.async_show_form(
step_id=STEP_REMOVE_X10, data_schema=data_schema, errors=errors
)
|
import logging
import os
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import edw_benchmark_results_aggregator as results_aggregator
from perfkitbenchmarker import edw_service
BENCHMARK_NAME = 'edw_benchmark'
BENCHMARK_CONFIG = """
edw_benchmark:
description: Sample edw benchmark
edw_service:
type: redshift
cluster_identifier: _cluster_id_
endpoint: cluster.endpoint
db: _database_name_
user: _username_
password: _password_
node_type: dc1.large
node_count: 2
snapshot:
vm_groups:
client:
vm_spec: *default_single_core
"""
flags.DEFINE_string(
'local_query_dir', '',
'Optional local directory containing all query files. '
'Can be absolute or relative to the executable.')
FLAGS = flags.FLAGS
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Install script execution environment on the client vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.always_call_cleanup = True
edw_service_instance = benchmark_spec.edw_service
vm = benchmark_spec.vms[0]
edw_service_instance.GetClientInterface().SetProvisionedAttributes(
benchmark_spec)
edw_service_instance.GetClientInterface().Prepare('edw_common')
query_locations = [
os.path.join(FLAGS.local_query_dir, query)
for query in FLAGS.edw_power_queries.split(',')
]
    for query_loc in query_locations:
        vm.PushFile(query_loc)
def Run(benchmark_spec):
"""Run phase executes the sql scripts on edw cluster and collects duration."""
results = []
edw_service_instance = benchmark_spec.edw_service
client_interface = edw_service_instance.GetClientInterface()
# Run a warm up query in case there are cold start issues.
client_interface.WarmUpQuery()
# Default to executing just the sample query if no queries are provided.
all_queries = FLAGS.edw_power_queries.split(',') or [
os.path.basename(edw_service.SAMPLE_QUERY_PATH)
]
# Accumulator for the entire benchmark's performance
benchmark_performance = results_aggregator.EdwBenchmarkPerformance(
total_iterations=FLAGS.edw_suite_iterations, expected_queries=all_queries)
# Multiple iterations of the suite are performed to avoid cold start penalty
for i in range(1, FLAGS.edw_suite_iterations + 1):
iteration = str(i)
# Accumulator for the current suite's performance
iteration_performance = results_aggregator.EdwPowerIterationPerformance(
iteration_id=iteration, total_queries=len(all_queries))
for query in all_queries:
execution_time, metadata = client_interface.ExecuteQuery(query)
iteration_performance.add_query_performance(query, execution_time,
metadata)
benchmark_performance.add_iteration_performance(iteration_performance)
# Execution complete, generate results only if the benchmark was successful.
benchmark_metadata = {}
benchmark_metadata.update(edw_service_instance.GetMetadata())
if benchmark_performance.is_successful():
query_samples = benchmark_performance.get_all_query_performance_samples(
metadata=benchmark_metadata)
results.extend(query_samples)
geomean_samples = (
benchmark_performance.get_queries_geomean_performance_samples(
metadata=benchmark_metadata))
results.extend(geomean_samples)
else:
logging.error('At least one query failed, so not reporting any results.')
return results
def Cleanup(benchmark_spec):
benchmark_spec.edw_service.Cleanup()
|
import numpy as np
from scipy.sparse.linalg import lsqr
class OLSUngarStyle(object):
def get_scores_and_p_values(self, tdm, category):
'''
Parameters
----------
tdm: TermDocMatrix
category: str, category name
Returns
-------
pd.DataFrame(['coef', 'p-val'])
'''
X = tdm._X
y = self._make_response_variable_1_or_negative_1(category, tdm)
pX = X / X.sum(axis=1)
ansX = self._anscombe_transform(pX.copy())
B, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var\
= lsqr(A=ansX, b=y, calc_var=True)
def _make_response_variable_1_or_negative_1(self, category, tdm):
'''
Parameters
----------
category, str
tdm, TermDocMatrix
Returns
-------
np.array
'''
return (tdm.get_category_names_by_row() == category).astype(int) * 2 - 1
def _anscombe_transform(self, X):
'''
Parameters
----------
        X: term-document proportion matrix
        Returns
        -------
        np.array
'''
return 2 * np.sqrt(np.array(X) + 3. / 8)
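# Worked check (illustrative values) of the Anscombe transform above:
# y = 2 * sqrt(x + 3/8), a variance-stabilising transform for count-like data.
if __name__ == '__main__':
    print(OLSUngarStyle()._anscombe_transform(np.array([[0.0, 1.0]])))
    # -> approximately [[1.2247, 2.3452]]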
|
from io import open
import json
from .shcommon import ShEventNotFound
class ShHistory(object):
"""
This class is responsible for input history.
:param stash: the StaSh core
:type stash: StaSh
"""
ENCODING = "utf-8"
DEFAULT = "_default"
def __init__(self, stash):
self.stash = stash
self._histories = {}
self._current = self.DEFAULT
self.allow_double = self.stash.config.getboolean("history", "allow_double_lines")
self.hide_whitespace = self.stash.config.getboolean("history", "hide_whitespace_lines")
self.ipython_style_history_search = self.stash.config.getboolean('history', 'ipython_style_history_search')
self.maxsize = self.stash.config.getint("history", "maxsize")
self.templine = ""
self.idx = -1
@classmethod
def load(cls, path, stash):
"""
Load the history from a path.
:param path: path to load from.
:type path: str
        :param stash: the StaSh core
        :type stash: StaSh
:return: the history loaded from the file
:rtype: ShHistory
"""
shh = cls(stash)
try:
with open(path, "r", encoding=cls.ENCODING) as fin:
h = json.loads(u"" + fin.read())
except ValueError:
h = {"StaSh.runtime": cls.load_old_format(path)}
shh._histories = h
return shh
@classmethod
def load_old_format(cls, path):
"""
Load the content of an old-style history.
:param path: path to load from
:type path: str
:return: the lines loaded from the file
:rtype: list of str
"""
with open(path, "r", encoding=cls.ENCODING) as fin:
lines = [line.strip() for line in fin.readlines()]
return lines
def save(self, path):
"""
Save the history to a path.
:param path: path to save to.
:type path: str
"""
with open(path, "w", encoding=self.ENCODING) as fout:
s = json.dumps(self._histories)
fout.write(u"" + s) # ensure unicode
def clear(self, target=None):
"""
Clear the history
:param target: history to clear or None for current
        :type target: str or None
"""
if target is None:
target = self._current
if target in self._histories:
del self._histories[target]
def clear_all(self):
"""
Clear all histories.
"""
self._histories = {}
def swap(self, target):
"""
Swap the history
:param target: identifier to get the history for
:type target: str or None
"""
self._current = target
def add(self, line, always=False):
"""
Add a line to the history.
:param line: line to add to history
:type line: str
:param always: always add this line, regardless of config
:type always: bool
"""
if self._current not in self._histories:
self._histories[self._current] = []
stripped = line.strip()
last_line = (self._histories[self._current][-1] if len(self._histories[self._current]) > 0 else None)
if not always:
# check if this line should be added
if stripped == last_line and not self.allow_double:
# prevent double lines
return
if line.startswith(" ") and self.hide_whitespace:
# hide lines starting with a whitespace
return
self._histories[self._current].append(stripped)
# ensure maxsize
while len(self._histories[self._current]) > max(0, self.maxsize):
self._histories[self._current].pop(0)
# reset index
self.reset_idx()
def getlist(self):
"""
Return a list of the current history.
:return: list of current history entries
:rtype: list of str
"""
if self._current not in self._histories:
self._histories[self._current] = []
return self._histories[self._current][::-1]
def search(self, tok):
"""
Search the history.
        :param tok: history event token, e.g. '!!', '!3' or '!prefix'
        :type tok: str
:return: last entry in history matching the search
:rtype: str
"""
history = self.getlist()
search_string = tok[1:]
if search_string == '':
return ''
if search_string == '!':
return history[0]
try:
idx = int(search_string)
try:
return history[::-1][idx]
except IndexError:
raise ShEventNotFound(tok)
except ValueError:
for entry in history:
if entry.startswith(search_string):
return entry
raise ShEventNotFound(tok)
def reset_idx(self):
"""
Reset the index of the current position in the history
"""
self.idx = -1
def up(self):
"""
Move upwards in the history.
"""
        # Save the unfinished line the user is typing before showing entries from history
history = self.getlist()
if self.idx == -1:
self.templine = self.stash.mini_buffer.modifiable_string.rstrip()
self.idx += 1
if self.idx >= len(history):
self.idx = len(history) - 1
else:
entry = history[self.idx]
            # If we moved up away from an unfinished input line, search the history
            # for a line that starts with that unfinished line
if self.idx == 0 and self.ipython_style_history_search:
for idx, hs in enumerate(history):
if hs.startswith(self.templine):
entry = hs
self.idx = idx
break
self.stash.mini_buffer.feed(None, entry)
def down(self):
"""
        Move downwards in the history.
"""
history = self.getlist()
self.idx -= 1
if self.idx < -1:
self.idx = -1
else:
if self.idx == -1:
entry = self.templine
else:
entry = history[self.idx]
self.stash.mini_buffer.feed(None, entry)
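# Illustrative sketch with a hypothetical stand-in for the StaSh core (the real
# class expects the full StaSh object with its config), showing add/getlist/search.
if __name__ == "__main__":
    class _FakeConfig(object):
        def getboolean(self, section, option):
            return False
        def getint(self, section, option):
            return 50
    class _FakeStash(object):
        config = _FakeConfig()
    history = ShHistory(_FakeStash())
    history.add("ls")
    history.add("pwd")
    print(history.getlist())     # -> ['pwd', 'ls'] (most recent first)
    print(history.search("!l"))  # -> 'ls' (most recent entry starting with 'l')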
|
import os.path
from django.contrib.auth.models import Permission, User
from django.test.testcases import TestCase
from django.test.utils import override_settings
class ImportExportPermissionTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '[email protected]',
'password')
user.is_staff = True
user.is_superuser = False
user.save()
self.user = user
self.client.login(username='admin', password='password')
def set_user_book_model_permission(self, action):
permission = Permission.objects.get(codename="%s_book" % action)
self.user.user_permissions.add(permission)
@override_settings(IMPORT_EXPORT_IMPORT_PERMISSION_CODE='change')
def test_import(self):
# user has no permission to import
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 403)
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 403)
response = self.client.post('/admin/core/book/process_import/', {})
self.assertEqual(response.status_code, 403)
# user has sufficient permission to import
self.set_user_book_model_permission('change')
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data)
self.assertEqual(response.status_code, 302)
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE='change')
def test_import_with_permission_set(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 403)
data = {'file_format': '0'}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 403)
self.set_user_book_model_permission('change')
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
data = {'file_format': '0'}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE='add')
def test_check_export_button(self):
self.set_user_book_model_permission('change')
response = self.client.get('/admin/core/book/')
widget = "import_link"
self.assertIn(widget, response.content.decode())
widget = "export_link"
self.assertNotIn(widget, response.content.decode())
@override_settings(IMPORT_EXPORT_IMPORT_PERMISSION_CODE='add')
def test_check_import_button(self):
self.set_user_book_model_permission('change')
response = self.client.get('/admin/core/book/')
widget = "import_link"
self.assertNotIn(widget, response.content.decode())
widget = "export_link"
self.assertIn(widget, response.content.decode())
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from pathlib import Path
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db, directory=Path('temp_folder/temp_migrations'))
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
@manager.command
def add():
db.session.add(User(name='test'))
db.session.commit()
if __name__ == '__main__':
manager.run()
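# Typical invocation via the Flask-Script manager defined above (the script file
# name is hypothetical):
#   python app.py db init     # creates temp_folder/temp_migrations
#   python app.py db migrate  # autogenerates a migration for the User model
#   python app.py db upgrade  # applies it to sqlite:///app.db
#   python app.py add         # runs the custom command that inserts User(name='test')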
|
import os.path
from django.core.files.storage import DefaultStorage
from weblate.screenshots.models import Screenshot
from weblate.utils.celery import app
@app.task(trail=False)
def cleanup_screenshot_files():
"""Remove stale screenshots."""
storage = DefaultStorage()
try:
files = storage.listdir("screenshots")[1]
except OSError:
return
for name in files:
fullname = os.path.join("screenshots", name)
if not Screenshot.objects.filter(image=fullname).exists():
storage.delete(fullname)
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(
3600 * 24, cleanup_screenshot_files.s(), name="screenshot-files-cleanup"
)
|
import argparse
import chainer
from chainer import iterators
import chainermn
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from eval_semantic_segmentation import models
from eval_semantic_segmentation import setup
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--input-size', type=int, default=None)
args = parser.parse_args()
comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model,
args.batchsize, args.input_size)
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
    if comm.rank != 0:
apply_to_iterator(model.predict, None, comm=comm)
return
it = iterators.MultithreadIterator(
dataset, batchsize * comm.size, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, it, hook=ProgressHook(len(dataset)), comm=comm)
# Delete an iterator of images to save memory usage.
del in_values
eval_(out_values, rest_values)
if __name__ == '__main__':
main()
|
import pytest
import numpy as np
from elephas.utils import rdd_utils
pytest.mark.usefixtures("spark_context")
def test_to_simple_rdd(spark_context):
features = np.ones((5, 10))
labels = np.ones((5,))
rdd = rdd_utils.to_simple_rdd(spark_context, features, labels)
assert rdd.count() == 5
first = rdd.first()
assert first[0].shape == (10,)
assert first[1] == 1.0
def test_to_labeled_rdd_categorical(spark_context):
features = np.ones((2, 10))
labels = np.asarray([[0, 0, 1.0], [0, 1.0, 0]])
lp_rdd = rdd_utils.to_labeled_point(spark_context, features, labels, True)
assert lp_rdd.count() == 2
first = lp_rdd.first()
assert first.features.shape == (10,)
assert first.label == 2.0
def test_to_labeled_rdd_not_categorical(spark_context):
features = np.ones((2, 10))
labels = np.asarray([[2.0], [1.0]])
lp_rdd = rdd_utils.to_labeled_point(spark_context, features, labels, False)
assert lp_rdd.count() == 2
first = lp_rdd.first()
assert first.features.shape == (10,)
assert first.label == 2.0
def test_from_labeled_rdd(spark_context):
features = np.ones((2, 10))
labels = np.asarray([[2.0], [1.0]]).reshape((2,))
lp_rdd = rdd_utils.to_labeled_point(spark_context, features, labels, False)
x, y = rdd_utils.from_labeled_point(lp_rdd, False, None)
assert x.shape == features.shape
assert y.shape == labels.shape
def test_from_labeled_rdd_categorical(spark_context):
features = np.ones((2, 10))
labels = np.asarray([[0, 0, 1.0], [0, 1.0, 0]])
lp_rdd = rdd_utils.to_labeled_point(spark_context, features, labels, True)
x, y = rdd_utils.from_labeled_point(lp_rdd, True, 3)
assert x.shape == features.shape
assert y.shape == labels.shape
def test_encode_label():
label = 3
nb_classes = 10
encoded = rdd_utils.encode_label(label, nb_classes)
assert len(encoded) == nb_classes
for i in range(10):
if i == label:
assert encoded[i] == 1
else:
assert encoded[i] == 0
def test_lp_to_simple_rdd_categorical(spark_context):
features = np.ones((2, 10))
labels = np.asarray([[0, 0, 1.0], [0, 1.0, 0]])
lp_rdd = rdd_utils.to_labeled_point(spark_context, features, labels, True)
rdd = rdd_utils.lp_to_simple_rdd(lp_rdd, categorical=True, nb_classes=3)
assert rdd.first()[0].shape == (10,)
assert rdd.first()[1].shape == (3,)
def test_lp_to_simple_rdd_not_categorical(spark_context):
features = np.ones((2, 10))
labels = np.asarray([[2.0], [1.0]]).reshape((2,))
lp_rdd = rdd_utils.to_labeled_point(spark_context, features, labels, False)
rdd = rdd_utils.lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=3)
assert rdd.first()[0].shape == (10,)
assert rdd.first()[1] == 2.0
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.experimental.links import YOLOv2Tiny
@testing.parameterize(*testing.product({
'n_fg_class': [1, 5, 20],
}))
class TestYOLOv2Tiny(unittest.TestCase):
def setUp(self):
self.link = YOLOv2Tiny(n_fg_class=self.n_fg_class)
self.insize = 416
self.n_bbox = 13 * 13 * 5
def _check_call(self):
x = self.link.xp.array(
np.random.uniform(-1, 1, size=(1, 3, self.insize, self.insize)),
dtype=np.float32)
locs, objs, confs = self.link(x)
self.assertIsInstance(locs, chainer.Variable)
self.assertIsInstance(locs.array, self.link.xp.ndarray)
self.assertEqual(locs.shape, (1, self.n_bbox, 4))
self.assertIsInstance(objs, chainer.Variable)
self.assertIsInstance(objs.array, self.link.xp.ndarray)
self.assertEqual(objs.shape, (1, self.n_bbox))
self.assertIsInstance(confs, chainer.Variable)
self.assertIsInstance(confs.array, self.link.xp.ndarray)
self.assertEqual(confs.shape, (1, self.n_bbox, self.n_fg_class))
@attr.slow
def test_call_cpu(self):
self._check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
@testing.parameterize(*testing.product({
'n_fg_class': [None, 10, 20],
'pretrained_model': ['voc0712'],
}))
class TestYOLOv2TinyPretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_fg_class': self.n_fg_class,
'pretrained_model': self.pretrained_model,
}
if self.pretrained_model == 'voc0712':
valid = self.n_fg_class in {None, 20}
if valid:
YOLOv2Tiny(**kwargs)
else:
with self.assertRaises(ValueError):
YOLOv2Tiny(**kwargs)
testing.run_module(__name__, __file__)
|
import os
from mock import call
from mock import MagicMock
from mock import patch
from paasta_tools.cli.cmds.check import deploy_check
from paasta_tools.cli.cmds.check import deploy_has_security_check
from paasta_tools.cli.cmds.check import deployments_check
from paasta_tools.cli.cmds.check import docker_check
from paasta_tools.cli.cmds.check import get_deploy_groups_used_by_framework
from paasta_tools.cli.cmds.check import makefile_check
from paasta_tools.cli.cmds.check import makefile_has_a_tab
from paasta_tools.cli.cmds.check import makefile_has_docker_tag
from paasta_tools.cli.cmds.check import makefile_responds_to
from paasta_tools.cli.cmds.check import NoSuchService
from paasta_tools.cli.cmds.check import paasta_check
from paasta_tools.cli.cmds.check import sensu_check
from paasta_tools.cli.cmds.check import service_dir_check
from paasta_tools.cli.cmds.check import smartstack_check
from paasta_tools.cli.utils import PaastaCheckMessages
from paasta_tools.marathon_tools import MarathonServiceConfig
@patch("paasta_tools.cli.cmds.check.git_repo_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.service_dir_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.check.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.check.deploy_has_security_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.deploy_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.docker_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.makefile_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.deployments_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.sensu_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.smartstack_check", autospec=True)
@patch("paasta_tools.cli.cmds.check.paasta_validate_soa_configs", autospec=True)
def test_check_paasta_check_calls_everything(
mock_paasta_validate_soa_configs,
    mock_smartstack_check,
mock_sensu_check,
mock_deployments_check,
mock_makefile_check,
mock_docker_check,
mock_deploy_check,
mock_deploy_security_check,
mock_figure_out_service_name,
mock_validate_service_name,
mock_service_dir_check,
mock_git_repo_check,
):
# Ensure each check in 'paasta_check' is called
mock_figure_out_service_name.return_value = "servicedocs"
mock_validate_service_name.return_value = None
args = MagicMock()
args.yelpsoa_config_root = "/fake/path"
paasta_check(args)
assert mock_git_repo_check.called
assert mock_service_dir_check.called
assert mock_deploy_check.called
assert mock_deploy_security_check.called
assert mock_docker_check.called
assert mock_makefile_check.called
assert mock_sensu_check.called
    assert mock_smartstack_check.called
assert mock_paasta_validate_soa_configs.called
service_path = os.path.join(
args.yelpsoa_config_root, mock_figure_out_service_name.return_value
)
mock_deploy_check.assert_called_once_with(service_path)
@patch("paasta_tools.cli.cmds.check.validate_service_name", autospec=True)
def test_check_service_dir_check_pass(mock_validate_service_name, capfd):
mock_validate_service_name.return_value = None
service = "fake_service"
soa_dir = "/fake_yelpsoa_configs"
expected_output = "%s\n" % PaastaCheckMessages.service_dir_found(service, soa_dir)
service_dir_check(service, soa_dir)
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check.validate_service_name", autospec=True)
def test_check_service_dir_check_fail(mock_validate_service_name, capfd):
service = "fake_service"
soa_dir = "/fake_yelpsoa_configs"
mock_validate_service_name.side_effect = NoSuchService(service)
expected_output = "%s\n" % PaastaCheckMessages.service_dir_missing(service, soa_dir)
service_dir_check(service, soa_dir)
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_deploy_check_pass(mock_is_file_in_dir, capfd):
# Deploy check passes when file found in service path
mock_is_file_in_dir.return_value = True
deploy_check("service_path")
expected_output = "%s\n" % PaastaCheckMessages.DEPLOY_YAML_FOUND
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_deploy_check_fail(mock_is_file_in_dir, capfd):
# Deploy check fails when file not in service path
mock_is_file_in_dir.return_value = False
deploy_check("service_path")
expected_output = "%s\n" % PaastaCheckMessages.DEPLOY_YAML_MISSING
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_docker_exists_and_is_valid(mock_is_file_in_dir, capfd):
mock_is_file_in_dir.return_value = "/fake/path"
docker_check()
output, _ = capfd.readouterr()
assert PaastaCheckMessages.DOCKERFILE_FOUND in output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_docker_check_file_not_found(mock_is_file_in_dir, capfd):
mock_is_file_in_dir.return_value = False
docker_check()
output, _ = capfd.readouterr()
assert PaastaCheckMessages.DOCKERFILE_MISSING in output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
@patch("paasta_tools.cli.cmds.check.get_team", autospec=True)
def test_check_sensu_check_pass(mock_get_team, mock_is_file_in_dir, capfd):
# monitoring.yaml exists and team is found
mock_is_file_in_dir.return_value = "/fake/path"
team = "team-service-infra"
mock_get_team.return_value = team
expected_output = "{}\n{}\n".format(
PaastaCheckMessages.SENSU_MONITORING_FOUND,
PaastaCheckMessages.sensu_team_found(team),
)
sensu_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
mock_get_team.assert_called_once_with(
service="fake_service", overrides={}, soa_dir="path"
)
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
@patch("paasta_tools.cli.cmds.check.get_team", autospec=True)
def test_check_sensu_team_missing(mock_get_team, mock_is_file_in_dir, capfd):
# monitoring.yaml exists but team is not found
mock_is_file_in_dir.return_value = "/fake/path"
mock_get_team.return_value = None
expected_output = "{}\n{}\n".format(
PaastaCheckMessages.SENSU_MONITORING_FOUND,
PaastaCheckMessages.SENSU_TEAM_MISSING,
)
sensu_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_sensu_check_fail(mock_is_file_in_dir, capfd):
    # monitoring.yaml doesn't exist
mock_is_file_in_dir.return_value = False
expected_output = "%s\n" % PaastaCheckMessages.SENSU_MONITORING_MISSING
sensu_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
@patch("service_configuration_lib." "read_service_configuration", autospec=True)
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_smartstack_check_pass(
mock_is_file_in_dir, mock_read_service_info, capfd
):
# smartstack.yaml exists and port is found
mock_is_file_in_dir.return_value = True
port = 80
instance = "main"
smartstack_dict = {"smartstack": {instance: {"proxy_port": port}}}
mock_read_service_info.return_value = smartstack_dict
expected_output = "{}\n{}\n".format(
PaastaCheckMessages.SMARTSTACK_YAML_FOUND,
PaastaCheckMessages.smartstack_port_found(instance, port),
)
smartstack_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
@patch("service_configuration_lib." "read_service_configuration", autospec=True)
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_smartstack_check_missing_port(
mock_is_file_in_dir, mock_read_service_info, capfd
):
# smartstack.yaml, instance exists, but no ports found
mock_is_file_in_dir.return_value = True
instance = "main"
smartstack_dict = {instance: {"foo": 0}}
mock_read_service_info.return_value = smartstack_dict
expected_output = "{}\n{}\n".format(
PaastaCheckMessages.SMARTSTACK_YAML_FOUND,
PaastaCheckMessages.SMARTSTACK_PORT_MISSING,
)
smartstack_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.utils." "read_service_configuration", autospec=True)
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_smartstack_check_missing_instance(
mock_is_file_in_dir, mock_read_service_info, capfd
):
# smartstack.yaml exists, but no instances found
mock_is_file_in_dir.return_value = True
smartstack_dict = {}
mock_read_service_info.return_value = smartstack_dict
expected_output = "{}\n{}\n".format(
PaastaCheckMessages.SMARTSTACK_YAML_FOUND,
PaastaCheckMessages.SMARTSTACK_PORT_MISSING,
)
smartstack_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check.is_file_in_dir", autospec=True)
def test_check_smartstack_check_is_ok_when_no_smartstack(mock_is_file_in_dir, capfd):
mock_is_file_in_dir.return_value = False
expected_output = ""
smartstack_check(service="fake_service", service_path="path", soa_dir="path")
output, _ = capfd.readouterr()
assert output == expected_output
@patch("paasta_tools.cli.cmds.check._run", autospec=True)
def test_makefile_responds_to_good(mock_run):
mock_run.return_value = (0, "Output")
actual = makefile_responds_to("present-target")
assert actual is True
@patch("paasta_tools.cli.cmds.check._run", autospec=True)
def test_makefile_responds_to_run(mock_run):
mock_run.return_value = (2, "Output")
actual = makefile_responds_to("non-present-target")
assert actual is False
def test_makefile_has_a_tab_true():
fake_makefile_path = "UNUSED"
fake_contents = "target:\n\tcommand"
with patch(
"paasta_tools.cli.cmds.check.get_file_contents",
autospec=True,
return_value=fake_contents,
):
assert makefile_has_a_tab(fake_makefile_path) is True
def test_makefile_has_a_tab_false():
fake_makefile_path = "UNUSED"
fake_contents = "target:\n command"
with patch(
"paasta_tools.cli.cmds.check.get_file_contents",
autospec=True,
return_value=fake_contents,
):
assert makefile_has_a_tab(fake_makefile_path) is False
def test_makefile_has_docker_tag_true():
fake_makefile_path = "UNUSED"
fake_contents = "Blah\nDOCKER_TAG ?= something:\ntarget:\n command"
with patch(
"paasta_tools.cli.cmds.check.get_file_contents",
autospec=True,
return_value=fake_contents,
):
assert makefile_has_docker_tag(fake_makefile_path) is True
def test_makefile_has_docker_tag_false():
fake_makefile_path = "UNUSED"
fake_contents = "target:\n command"
with patch(
"paasta_tools.cli.cmds.check.get_file_contents",
autospec=True,
return_value=fake_contents,
):
assert makefile_has_docker_tag(fake_makefile_path) is False
@patch("paasta_tools.cli.cmds.check.get_pipeline_config", autospec=True)
def test_deploy_has_security_check_false(mock_pipeline_config, capfd):
mock_pipeline_config.return_value = [
{"step": "itest"},
{"step": "push-to-registry"},
{"step": "hab.canary", "trigger_next_step_manually": True},
{"step": "hab.main"},
]
actual = deploy_has_security_check(service="fake_service", soa_dir="/fake/path")
assert actual is False
@patch("paasta_tools.cli.cmds.check.get_pipeline_config", autospec=True)
def test_deploy_has_security_check_true(mock_pipeline_config, capfd):
mock_pipeline_config.return_value = [
{"step": "itest"},
{"step": "security-check"},
{"step": "push-to-registry"},
{"step": "hab.canary", "trigger_next_step_manually": True},
{"step": "hab.main"},
]
actual = deploy_has_security_check(service="fake_service", soa_dir="/fake/path")
assert actual is True
@patch("paasta_tools.cli.cmds.check.get_instance_config", autospec=True)
@patch("paasta_tools.cli.cmds.check.list_clusters", autospec=True)
@patch("paasta_tools.cli.cmds.check.get_service_instance_list", autospec=True)
def test_get_deploy_groups_used_by_framework(
mock_get_service_instance_list, mock_list_clusters, mock_get_instance_config
):
mock_list_clusters.return_value = ["cluster1"]
mock_get_service_instance_list.return_value = [
("unused", "instance1"),
("unused", "instance2"),
]
mock_get_instance_config.side_effect = lambda service, instance, cluster, soa_dir, load_deployments, instance_type: MarathonServiceConfig(
service=service,
instance=instance,
cluster=cluster,
config_dict={},
branch_dict=None,
)
expected = {"cluster1.instance1", "cluster1.instance2"}
actual = get_deploy_groups_used_by_framework(
"marathon", service="unused", soa_dir="/fake/path"
)
assert actual == expected
@patch("paasta_tools.cli.cmds.check.get_pipeline_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.check.get_deploy_groups_used_by_framework", autospec=True)
def test_marathon_deployments_check_good(
mock_get_deploy_groups_used_by_framework, mock_get_pipeline_deploy_groups, capfd
):
mock_get_pipeline_deploy_groups.return_value = ["hab.canary", "hab.main"]
mock_get_deploy_groups_used_by_framework.return_value = ["hab.canary", "hab.main"]
actual = deployments_check(service="fake_service", soa_dir="/fake/path")
assert actual is True
@patch("paasta_tools.cli.cmds.check.get_pipeline_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.check.get_deploy_groups_used_by_framework", autospec=True)
def test_marathon_deployments_deploy_but_not_marathon(
mock_get_deploy_groups_used_by_framework, mock_get_pipeline_deploy_groups, capfd
):
mock_get_pipeline_deploy_groups.return_value = [
"hab.canary",
"hab.main",
"hab.EXTRA",
]
mock_get_deploy_groups_used_by_framework.return_value = ["hab.canary", "hab.main"]
actual = deployments_check(service="fake_service", soa_dir="/fake/service")
assert actual is False
assert "EXTRA" in capfd.readouterr()[0]
@patch("paasta_tools.cli.cmds.check.get_pipeline_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.check.get_deploy_groups_used_by_framework", autospec=True)
def test_marathon_deployments_marathon_but_not_deploy(
mock_get_deploy_groups_used_by_framework, mock_get_pipeline_deploy_groups, capfd
):
mock_get_pipeline_deploy_groups.return_value = ["hab.canary", "hab.main"]
mock_get_deploy_groups_used_by_framework.return_value = [
"hab.canary",
"hab.main",
"hab.BOGUS",
]
actual = deployments_check(service="fake_service", soa_dir="/fake/path")
assert actual is False
assert "BOGUS" in capfd.readouterr()[0]
def test_makefile_check():
fake_makefile_path = "UNUSED"
fake_contents = "DOCKER_TAG ?= something\ntest:\n\tsomething\nitest:\n\tsomething"
with patch(
"paasta_tools.cli.cmds.check.get_file_contents",
autospec=True,
return_value=fake_contents,
), patch(
"paasta_tools.cli.cmds.check.makefile_has_a_tab", autospec=True
) as mock_makefile_has_a_tab, patch(
"paasta_tools.cli.cmds.check.makefile_responds_to", autospec=True
) as mock_makefile_responds_to, patch(
"paasta_tools.cli.cmds.check.makefile_has_docker_tag", autospec=True
) as mock_makefile_has_docker_tag, patch(
"paasta_tools.cli.cmds.check.is_file_in_dir",
autospec=True,
return_value=fake_makefile_path,
):
makefile_check()
assert mock_makefile_has_a_tab.call_count == 1
calls = [call("test"), call("itest"), call("cook-image")]
mock_makefile_responds_to.assert_has_calls(calls, any_order=True)
assert mock_makefile_has_docker_tag.call_count == 1
|
import asyncio
from datetime import timedelta
import logging
import time
from typing import Optional
from aiohttp import ClientError, ClientSession
import async_timeout
from pyalmond import AbstractAlmondWebAuth, AlmondLocalAuth, WebAlmondAPI
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components import conversation
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_HOST,
CONF_TYPE,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.core import Context, CoreState, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_entry_oauth2_flow,
config_validation as cv,
event,
intent,
network,
storage,
)
from . import config_flow
from .const import DOMAIN, TYPE_LOCAL, TYPE_OAUTH2
STORAGE_VERSION = 1
STORAGE_KEY = DOMAIN
ALMOND_SETUP_DELAY = 30
DEFAULT_OAUTH2_HOST = "https://almond.stanford.edu"
DEFAULT_LOCAL_HOST = "http://localhost:3000"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Any(
vol.Schema(
{
vol.Required(CONF_TYPE): TYPE_OAUTH2,
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_OAUTH2_HOST): cv.url,
}
),
vol.Schema(
{vol.Required(CONF_TYPE): TYPE_LOCAL, vol.Required(CONF_HOST): cv.url}
),
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up the Almond component."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
host = conf[CONF_HOST]
if conf[CONF_TYPE] == TYPE_OAUTH2:
config_flow.AlmondFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
conf[CONF_CLIENT_ID],
conf[CONF_CLIENT_SECRET],
f"{host}/me/api/oauth2/authorize",
f"{host}/me/api/oauth2/token",
),
)
return True
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"type": TYPE_LOCAL, "host": conf[CONF_HOST]},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Set up Almond config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
if entry.data["type"] == TYPE_LOCAL:
auth = AlmondLocalAuth(entry.data["host"], websession)
else:
# OAuth2
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
oauth_session = config_entry_oauth2_flow.OAuth2Session(
hass, entry, implementation
)
auth = AlmondOAuth(entry.data["host"], websession, oauth_session)
api = WebAlmondAPI(auth)
agent = AlmondAgent(hass, api, entry)
# Hass.io does its own configuration.
if not entry.data.get("is_hassio"):
# If we're not starting or local, set up Almond right away
if hass.state != CoreState.not_running or entry.data["type"] == TYPE_LOCAL:
await _configure_almond_for_ha(hass, entry, api)
else:
# OAuth2 implementations can potentially rely on the HA Cloud url.
# This url may not be available until 30 seconds after boot.
async def configure_almond(_now):
try:
await _configure_almond_for_ha(hass, entry, api)
except ConfigEntryNotReady:
_LOGGER.warning(
"Unable to configure Almond to connect to Home Assistant"
)
async def almond_hass_start(_event):
event.async_call_later(hass, ALMOND_SETUP_DELAY, configure_almond)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, almond_hass_start)
conversation.async_set_agent(hass, agent)
return True
async def _configure_almond_for_ha(
hass: HomeAssistant, entry: config_entries.ConfigEntry, api: WebAlmondAPI
):
"""Configure Almond to connect to HA."""
try:
if entry.data["type"] == TYPE_OAUTH2:
# If we're connecting over OAuth2, we will only set up connection
# with Home Assistant if we're remotely accessible.
hass_url = network.get_url(hass, allow_internal=False, prefer_cloud=True)
else:
hass_url = network.get_url(hass)
except network.NoURLAvailableError:
# If no URL is available, we're not going to configure Almond to connect to HA.
return
_LOGGER.debug("Configuring Almond to connect to Home Assistant at %s", hass_url)
store = storage.Store(hass, STORAGE_VERSION, STORAGE_KEY)
data = await store.async_load()
if data is None:
data = {}
user = None
if "almond_user" in data:
user = await hass.auth.async_get_user(data["almond_user"])
if user is None:
user = await hass.auth.async_create_system_user("Almond", [GROUP_ID_ADMIN])
data["almond_user"] = user.id
await store.async_save(data)
refresh_token = await hass.auth.async_create_refresh_token(
user,
# Almond will be fine as long as we restart once every 5 years
access_token_expiration=timedelta(days=365 * 5),
)
# Create long lived access token
access_token = hass.auth.async_create_access_token(refresh_token)
# Store token in Almond
try:
with async_timeout.timeout(30):
await api.async_create_device(
{
"kind": "io.home-assistant",
"hassUrl": hass_url,
"accessToken": access_token,
"refreshToken": "",
# 5 years from now in ms.
"accessTokenExpires": (time.time() + 60 * 60 * 24 * 365 * 5) * 1000,
}
)
except (asyncio.TimeoutError, ClientError) as err:
if isinstance(err, asyncio.TimeoutError):
msg = "Request timeout"
else:
msg = err
_LOGGER.warning("Unable to configure Almond: %s", msg)
await hass.auth.async_remove_refresh_token(refresh_token)
raise ConfigEntryNotReady from err
# Clear all other refresh tokens
for token in list(user.refresh_tokens.values()):
if token.id != refresh_token.id:
await hass.auth.async_remove_refresh_token(token)
async def async_unload_entry(hass, entry):
"""Unload Almond."""
conversation.async_set_agent(hass, None)
return True
class AlmondOAuth(AbstractAlmondWebAuth):
"""Almond Authentication using OAuth2."""
def __init__(
self,
host: str,
websession: ClientSession,
oauth_session: config_entry_oauth2_flow.OAuth2Session,
):
"""Initialize Almond auth."""
super().__init__(host, websession)
self._oauth_session = oauth_session
async def async_get_access_token(self):
"""Return a valid access token."""
if not self._oauth_session.valid_token:
await self._oauth_session.async_ensure_token_valid()
return self._oauth_session.token["access_token"]
class AlmondAgent(conversation.AbstractConversationAgent):
"""Almond conversation agent."""
def __init__(
self, hass: HomeAssistant, api: WebAlmondAPI, entry: config_entries.ConfigEntry
):
"""Initialize the agent."""
self.hass = hass
self.api = api
self.entry = entry
@property
def attribution(self):
"""Return the attribution."""
return {"name": "Powered by Almond", "url": "https://almond.stanford.edu/"}
async def async_get_onboarding(self):
"""Get onboard url if not onboarded."""
if self.entry.data.get("onboarded"):
return None
host = self.entry.data["host"]
if self.entry.data.get("is_hassio"):
host = "/core_almond"
return {
"text": "Would you like to opt-in to share your anonymized commands with Stanford to improve Almond's responses?",
"url": f"{host}/conversation",
}
async def async_set_onboarding(self, shown):
"""Set onboarding status."""
self.hass.config_entries.async_update_entry(
self.entry, data={**self.entry.data, "onboarded": shown}
)
return True
async def async_process(
self, text: str, context: Context, conversation_id: Optional[str] = None
) -> intent.IntentResponse:
"""Process a sentence."""
response = await self.api.async_converse_text(text, conversation_id)
first_choice = True
buffer = ""
for message in response["messages"]:
if message["type"] == "text":
buffer += f"\n{message['text']}"
elif message["type"] == "picture":
buffer += f"\n Picture: {message['url']}"
elif message["type"] == "rdl":
buffer += (
f"\n Link: {message['rdl']['displayTitle']} "
f"{message['rdl']['webCallback']}"
)
elif message["type"] == "choice":
if first_choice:
first_choice = False
else:
buffer += ","
buffer += f" {message['title']}"
intent_result = intent.IntentResponse()
intent_result.async_set_speech(buffer.strip())
return intent_result
|
import logging
import blebox_uniapi
import pytest
from homeassistant.components.switch import DEVICE_CLASS_SWITCH
from homeassistant.const import (
ATTR_DEVICE_CLASS,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from .conftest import (
async_setup_entities,
async_setup_entity,
mock_feature,
mock_only_feature,
setup_product_mock,
)
from tests.async_mock import AsyncMock, PropertyMock
@pytest.fixture(name="switchbox")
def switchbox_fixture():
"""Return a default switchBox switch entity mock."""
feature = mock_feature(
"switches",
blebox_uniapi.switch.Switch,
unique_id="BleBox-switchBox-1afe34e750b8-0.relay",
full_name="switchBox-0.relay",
device_class="relay",
is_on=False,
)
feature.async_update = AsyncMock()
product = feature.product
type(product).name = PropertyMock(return_value="My switch box")
type(product).model = PropertyMock(return_value="switchBox")
return (feature, "switch.switchbox_0_relay")
async def test_switchbox_init(switchbox, hass, config):
"""Test switch default state."""
feature_mock, entity_id = switchbox
feature_mock.async_update = AsyncMock()
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-switchBox-1afe34e750b8-0.relay"
state = hass.states.get(entity_id)
assert state.name == "switchBox-0.relay"
assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_SWITCH
assert state.state == STATE_OFF
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My switch box"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "switchBox"
assert device.sw_version == "1.23"
async def test_switchbox_update_when_off(switchbox, hass, config):
"""Test switch updating when off."""
feature_mock, entity_id = switchbox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
async def test_switchbox_update_when_on(switchbox, hass, config):
"""Test switch updating when on."""
feature_mock, entity_id = switchbox
def initial_update():
feature_mock.is_on = True
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ON
async def test_switchbox_on(switchbox, hass, config):
"""Test turning switch on."""
feature_mock, entity_id = switchbox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
def turn_on():
feature_mock.is_on = True
feature_mock.async_turn_on = AsyncMock(side_effect=turn_on)
await hass.services.async_call(
"switch",
SERVICE_TURN_ON,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == STATE_ON
async def test_switchbox_off(switchbox, hass, config):
"""Test turning switch off."""
feature_mock, entity_id = switchbox
def initial_update():
feature_mock.is_on = True
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
def turn_off():
feature_mock.is_on = False
feature_mock.async_turn_off = AsyncMock(side_effect=turn_off)
await hass.services.async_call(
"switch",
SERVICE_TURN_OFF,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def relay_mock(relay_id=0):
"""Return a default switchBoxD switch entity mock."""
return mock_only_feature(
blebox_uniapi.switch.Switch,
unique_id=f"BleBox-switchBoxD-1afe34e750b8-{relay_id}.relay",
full_name=f"switchBoxD-{relay_id}.relay",
device_class="relay",
is_on=None,
)
@pytest.fixture(name="switchbox_d")
def switchbox_d_fixture():
"""Set up two mocked Switch features representing a switchBoxD."""
relay1 = relay_mock(0)
relay2 = relay_mock(1)
features = [relay1, relay2]
product = setup_product_mock("switches", features)
type(product).name = PropertyMock(return_value="My relays")
type(product).model = PropertyMock(return_value="switchBoxD")
type(product).brand = PropertyMock(return_value="BleBox")
type(product).firmware_version = PropertyMock(return_value="1.23")
type(product).unique_id = PropertyMock(return_value="abcd0123ef5678")
type(relay1).product = product
type(relay2).product = product
return (features, ["switch.switchboxd_0_relay", "switch.switchboxd_1_relay"])
async def test_switchbox_d_init(switchbox_d, hass, config):
"""Test switch default state."""
feature_mocks, entity_ids = switchbox_d
feature_mocks[0].async_update = AsyncMock()
feature_mocks[1].async_update = AsyncMock()
entries = await async_setup_entities(hass, config, entity_ids)
entry = entries[0]
assert entry.unique_id == "BleBox-switchBoxD-1afe34e750b8-0.relay"
state = hass.states.get(entity_ids[0])
assert state.name == "switchBoxD-0.relay"
assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_SWITCH
assert state.state == STATE_OFF # NOTE: should instead be STATE_UNKNOWN?
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My relays"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "switchBoxD"
assert device.sw_version == "1.23"
entry = entries[1]
assert entry.unique_id == "BleBox-switchBoxD-1afe34e750b8-1.relay"
state = hass.states.get(entity_ids[1])
assert state.name == "switchBoxD-1.relay"
assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_SWITCH
assert state.state == STATE_OFF # NOTE: should instead be STATE_UNKNOWN?
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My relays"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "switchBoxD"
assert device.sw_version == "1.23"
async def test_switchbox_d_update_when_off(switchbox_d, hass, config):
"""Test switch updating when off."""
feature_mocks, entity_ids = switchbox_d
def initial_update0():
feature_mocks[0].is_on = False
feature_mocks[1].is_on = False
feature_mocks[0].async_update = AsyncMock(side_effect=initial_update0)
feature_mocks[1].async_update = AsyncMock()
await async_setup_entities(hass, config, entity_ids)
assert hass.states.get(entity_ids[0]).state == STATE_OFF
assert hass.states.get(entity_ids[1]).state == STATE_OFF
async def test_switchbox_d_update_when_second_off(switchbox_d, hass, config):
"""Test switch updating when off."""
feature_mocks, entity_ids = switchbox_d
def initial_update0():
feature_mocks[0].is_on = True
feature_mocks[1].is_on = False
feature_mocks[0].async_update = AsyncMock(side_effect=initial_update0)
feature_mocks[1].async_update = AsyncMock()
await async_setup_entities(hass, config, entity_ids)
assert hass.states.get(entity_ids[0]).state == STATE_ON
assert hass.states.get(entity_ids[1]).state == STATE_OFF
async def test_switchbox_d_turn_first_on(switchbox_d, hass, config):
"""Test turning switch on."""
feature_mocks, entity_ids = switchbox_d
def initial_update0():
feature_mocks[0].is_on = False
feature_mocks[1].is_on = False
feature_mocks[0].async_update = AsyncMock(side_effect=initial_update0)
feature_mocks[1].async_update = AsyncMock()
await async_setup_entities(hass, config, entity_ids)
feature_mocks[0].async_update = AsyncMock()
def turn_on0():
feature_mocks[0].is_on = True
feature_mocks[0].async_turn_on = AsyncMock(side_effect=turn_on0)
await hass.services.async_call(
"switch",
SERVICE_TURN_ON,
{"entity_id": entity_ids[0]},
blocking=True,
)
assert hass.states.get(entity_ids[0]).state == STATE_ON
assert hass.states.get(entity_ids[1]).state == STATE_OFF
async def test_switchbox_d_second_on(switchbox_d, hass, config):
"""Test turning switch on."""
feature_mocks, entity_ids = switchbox_d
def initial_update0():
feature_mocks[0].is_on = False
feature_mocks[1].is_on = False
feature_mocks[0].async_update = AsyncMock(side_effect=initial_update0)
feature_mocks[1].async_update = AsyncMock()
await async_setup_entities(hass, config, entity_ids)
feature_mocks[0].async_update = AsyncMock()
def turn_on1():
feature_mocks[1].is_on = True
feature_mocks[1].async_turn_on = AsyncMock(side_effect=turn_on1)
await hass.services.async_call(
"switch",
SERVICE_TURN_ON,
{"entity_id": entity_ids[1]},
blocking=True,
)
assert hass.states.get(entity_ids[0]).state == STATE_OFF
assert hass.states.get(entity_ids[1]).state == STATE_ON
async def test_switchbox_d_first_off(switchbox_d, hass, config):
"""Test turning switch on."""
feature_mocks, entity_ids = switchbox_d
def initial_update_any():
feature_mocks[0].is_on = True
feature_mocks[1].is_on = True
feature_mocks[0].async_update = AsyncMock(side_effect=initial_update_any)
feature_mocks[1].async_update = AsyncMock()
await async_setup_entities(hass, config, entity_ids)
feature_mocks[0].async_update = AsyncMock()
def turn_off0():
feature_mocks[0].is_on = False
feature_mocks[0].async_turn_off = AsyncMock(side_effect=turn_off0)
await hass.services.async_call(
"switch",
SERVICE_TURN_OFF,
{"entity_id": entity_ids[0]},
blocking=True,
)
assert hass.states.get(entity_ids[0]).state == STATE_OFF
assert hass.states.get(entity_ids[1]).state == STATE_ON
async def test_switchbox_d_second_off(switchbox_d, hass, config):
"""Test turning switch on."""
feature_mocks, entity_ids = switchbox_d
def initial_update_any():
feature_mocks[0].is_on = True
feature_mocks[1].is_on = True
feature_mocks[0].async_update = AsyncMock(side_effect=initial_update_any)
feature_mocks[1].async_update = AsyncMock()
await async_setup_entities(hass, config, entity_ids)
feature_mocks[0].async_update = AsyncMock()
def turn_off1():
feature_mocks[1].is_on = False
feature_mocks[1].async_turn_off = AsyncMock(side_effect=turn_off1)
await hass.services.async_call(
"switch",
SERVICE_TURN_OFF,
{"entity_id": entity_ids[1]},
blocking=True,
)
assert hass.states.get(entity_ids[0]).state == STATE_ON
assert hass.states.get(entity_ids[1]).state == STATE_OFF
ALL_SWITCH_FIXTURES = ["switchbox", "switchbox_d"]
@pytest.mark.parametrize("feature", ALL_SWITCH_FIXTURES, indirect=["feature"])
async def test_update_failure(feature, hass, config, caplog):
"""Test that update failures are logged."""
caplog.set_level(logging.ERROR)
feature_mock, entity_id = feature
if isinstance(feature_mock, list):
feature_mock[0].async_update = AsyncMock()
feature_mock[1].async_update = AsyncMock()
feature_mock = feature_mock[0]
entity_id = entity_id[0]
feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError)
await async_setup_entity(hass, config, entity_id)
assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text
|
import asyncio
import logging
from blebox_uniapi.error import Error
from blebox_uniapi.products import Products
from blebox_uniapi.session import ApiHost
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from .const import DEFAULT_SETUP_TIMEOUT, DOMAIN, PRODUCT
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["cover", "sensor", "switch", "air_quality", "light", "climate"]
PARALLEL_UPDATES = 0
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the BleBox devices component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up BleBox devices from a config entry."""
websession = async_get_clientsession(hass)
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
timeout = DEFAULT_SETUP_TIMEOUT
api_host = ApiHost(host, port, timeout, websession, hass.loop)
try:
product = await Products.async_from_host(api_host)
except Error as ex:
_LOGGER.error("Identify failed at %s:%d (%s)", api_host.host, api_host.port, ex)
raise ConfigEntryNotReady from ex
domain = hass.data.setdefault(DOMAIN, {})
domain_entry = domain.setdefault(entry.entry_id, {})
product = domain_entry.setdefault(PRODUCT, product)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
@callback
def create_blebox_entities(
hass, config_entry, async_add_entities, entity_klass, entity_type
):
"""Create entities from a BleBox product's features."""
product = hass.data[DOMAIN][config_entry.entry_id][PRODUCT]
entities = []
if entity_type in product.features:
for feature in product.features[entity_type]:
entities.append(entity_klass(feature))
async_add_entities(entities, True)
class BleBoxEntity(Entity):
"""Implements a common class for entities representing a BleBox feature."""
def __init__(self, feature):
"""Initialize a BleBox entity."""
self._feature = feature
@property
def name(self):
"""Return the internal entity name."""
return self._feature.full_name
@property
def unique_id(self):
"""Return a unique id."""
return self._feature.unique_id
async def async_update(self):
"""Update the entity state."""
try:
await self._feature.async_update()
except Error as ex:
_LOGGER.error("Updating '%s' failed: %s", self.name, ex)
@property
def device_info(self):
"""Return device information for this entity."""
product = self._feature.product
return {
"identifiers": {(DOMAIN, product.unique_id)},
"name": product.name,
"manufacturer": product.brand,
"model": product.model,
"sw_version": product.firmware_version,
}
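# A minimal sketch (not part of the integration) of how a platform module
# would consume the helper above. Real platforms pass their own BleBoxEntity
# subclass; the base class is used here only to keep the example self-contained,
# and "switches" is just one of the feature types exposed by the product.
async def _example_platform_setup_entry(hass, config_entry, async_add_entities):
    """Illustrative async_setup_entry for a "switches"-based platform."""
    create_blebox_entities(
        hass, config_entry, async_add_entities, BleBoxEntity, "switches"
    )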
|
import inspect
import logging
from typing import Any, Mapping, MutableMapping, Optional, Tuple
class KeywordMessage:
"""
Represents a logging message with keyword arguments.
Adapted from: https://stackoverflow.com/a/24683360/2267718
"""
def __init__(self, fmt: Any, args: Any, kwargs: Mapping[str, Any]) -> None:
"""Initialize a new KeywordMessage object."""
self._fmt = fmt
self._args = args
self._kwargs = kwargs
def __str__(self) -> str:
"""Convert the object to a string for logging."""
return str(self._fmt).format(*self._args, **self._kwargs)
class KeywordStyleAdapter(logging.LoggerAdapter):
"""Represents an adapter wrapping the logger allowing KeywordMessages."""
def __init__(
self, logger: logging.Logger, extra: Optional[Mapping[str, Any]] = None
) -> None:
"""Initialize a new StyleAdapter for the provided logger."""
super().__init__(logger, extra or {})
def log(self, level: int, msg: Any, *args: Any, **kwargs: Any) -> None:
"""Log the message provided at the appropriate level."""
if self.isEnabledFor(level):
msg, log_kwargs = self.process(msg, kwargs)
self.logger._log( # pylint: disable=protected-access
level, KeywordMessage(msg, args, kwargs), (), **log_kwargs
)
def process(
self, msg: Any, kwargs: MutableMapping[str, Any]
) -> Tuple[Any, MutableMapping[str, Any]]:
"""Process the keyword args in preparation for logging."""
return (
msg,
{
k: kwargs[k]
for k in inspect.getfullargspec(
self.logger._log # pylint: disable=protected-access
).args[1:]
if k in kwargs
},
)
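# Minimal usage sketch (not part of the adapter above): wrap any stdlib logger
# in KeywordStyleAdapter to log with str.format-style keyword placeholders.
# The logger name "demo" and the message are arbitrary.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log = KeywordStyleAdapter(logging.getLogger("demo"))
    # Formatting is deferred until the record is emitted, mirroring the lazy
    # behaviour of %-style logging.
    log.info("Processed {count} items for {user}", count=3, user="alice")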
|
from flexx import flx
class TreeWithControls(flx.TreeWidget):
""" Adds a key press handler to allow controlling the TreeWidget with
the arrow keys, space, and enter.
"""
@flx.emitter
def key_down(self, e):
"""Overload key_down emitter to prevent browser scroll."""
ev = self._create_key_event(e)
if ev.key.startswith('Arrow'):
e.preventDefault()
return ev
@flx.reaction('key_down')
def _handle_highlighting(self, *events):
for ev in events:
if ev.modifiers:
continue
if ev.key == 'Escape':
self.highlight_hide()
elif ev.key == ' ':
if self.max_selected == 0: # space also checks if no selection
self.highlight_toggle_checked()
else:
self.highlight_toggle_selected()
elif ev.key == 'Enter':
self.highlight_toggle_checked()
elif ev.key == 'ArrowRight':
item = self.highlight_get()
if item and item.items:
item.collapsed = None
elif ev.key == 'ArrowLeft':
item = self.highlight_get()
if item and item.items:
item.collapsed = True
elif ev.key == 'ArrowDown':
self.highlight_show(1)
elif ev.key == 'ArrowUp':
self.highlight_show(-1)
class KeyboardControlsTester(flx.Widget):
def init(self):
combo_options = ['Paris', 'New York', 'Enschede', 'Tokio']
with flx.HBox():
self.tree = TreeWithControls(flex=1, max_selected=1)
with flx.VBox(flex=1):
self.combo = flx.ComboBox(options=combo_options, editable=True)
flx.Widget(flex=1) # combobox needs space below it to show dropdown
with self.tree:
for cat in ('foo', 'bar', 'spam'):
with flx.TreeItem(text=cat):
for name in ('Martin', 'Kees', 'Hans'):
item = flx.TreeItem(title=name)
item.set_checked(cat == 'foo' or None)
@flx.reaction('combo.text')
def _combo_text_changed(self, *events):
for ev in events:
print('combo text is now', ev.new_value)
if __name__ == '__main__':
m = flx.launch(KeyboardControlsTester)
flx.run()
|
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.browser.webkit import http
@pytest.mark.parametrize('url, expected', [
# Filename in the URL
('http://example.com/path', 'path'),
('http://example.com/foo/path', 'path'),
# No filename at all
('http://example.com', 'qutebrowser-download'),
('http://example.com/', 'qutebrowser-download'),
])
def test_no_content_disposition(stubs, url, expected):
reply = stubs.FakeNetworkReply(url=QUrl(url))
inline, filename = http.parse_content_disposition(reply)
assert inline
assert filename == expected
@pytest.mark.parametrize('content_type, expected_mimetype, expected_rest', [
(None, None, None),
('image/example', 'image/example', None),
('', '', None),
('image/example; encoding=UTF-8', 'image/example', ' encoding=UTF-8'),
])
def test_parse_content_type(stubs, content_type, expected_mimetype,
expected_rest):
if content_type is None:
reply = stubs.FakeNetworkReply()
else:
reply = stubs.FakeNetworkReply(headers={'Content-Type': content_type})
mimetype, rest = http.parse_content_type(reply)
assert mimetype == expected_mimetype
assert rest == expected_rest
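# Standalone sketch of the mimetype/parameter split that the parametrized
# cases above expect; http.parse_content_type presumably applies similar
# logic to the reply's Content-Type header. This helper is illustrative only.
def _example_split_content_type(header):
    """Split 'image/example; encoding=UTF-8' into ('image/example', ' encoding=UTF-8')."""
    if header is None:
        return None, None
    if ';' not in header:
        return header, None
    mimetype, rest = header.split(';', 1)
    return mimetype, rest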
|
import asyncio
import base64
from collections import OrderedDict
import logging
from typing import Any, Dict, List, Optional, Set, cast
import bcrypt
import voluptuous as vol
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
STORAGE_VERSION = 1
STORAGE_KEY = "auth_provider.homeassistant"
def _disallow_id(conf: Dict[str, Any]) -> Dict[str, Any]:
"""Disallow ID in config."""
if CONF_ID in conf:
raise vol.Invalid("ID is not allowed for the homeassistant auth provider.")
return conf
CONFIG_SCHEMA = vol.All(AUTH_PROVIDER_SCHEMA, _disallow_id)
@callback
def async_get_provider(hass: HomeAssistant) -> "HassAuthProvider":
"""Get the provider."""
for prv in hass.auth.auth_providers:
if prv.type == "homeassistant":
return cast(HassAuthProvider, prv)
raise RuntimeError("Provider not found")
class InvalidAuth(HomeAssistantError):
"""Raised when we encounter invalid authentication."""
class InvalidUser(HomeAssistantError):
"""Raised when invalid user is specified.
Will not be raised when validating authentication.
"""
class Data:
"""Hold the user data."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the user data store."""
self.hass = hass
self._store = hass.helpers.storage.Store(
STORAGE_VERSION, STORAGE_KEY, private=True
)
self._data: Optional[Dict[str, Any]] = None
# Legacy mode will allow usernames to start/end with whitespace
# and will compare usernames case-insensitive.
# Remove in 2020 or when we launch 1.0.
self.is_legacy = False
@callback
def normalize_username(self, username: str) -> str:
"""Normalize a username based on the mode."""
if self.is_legacy:
return username
return username.strip().casefold()
async def async_load(self) -> None:
"""Load stored data."""
data = await self._store.async_load()
if data is None:
data = {"users": []}
seen: Set[str] = set()
for user in data["users"]:
username = user["username"]
# check if we have duplicates
folded = username.casefold()
if folded in seen:
self.is_legacy = True
logging.getLogger(__name__).warning(
"Home Assistant auth provider is running in legacy mode "
"because we detected usernames that are case-insensitive"
"equivalent. Please change the username: '%s'.",
username,
)
break
seen.add(folded)
# check if we have unstripped usernames
if username != username.strip():
self.is_legacy = True
logging.getLogger(__name__).warning(
"Home Assistant auth provider is running in legacy mode "
"because we detected usernames that start or end in a "
"space. Please change the username: '%s'.",
username,
)
break
self._data = data
@property
def users(self) -> List[Dict[str, str]]:
"""Return users."""
return self._data["users"] # type: ignore
def validate_login(self, username: str, password: str) -> None:
"""Validate a username and password.
Raises InvalidAuth if auth invalid.
"""
username = self.normalize_username(username)
dummy = b"$2b$12$CiuFGszHx9eNHxPuQcwBWez4CwDTOcLTX5CbOpV6gef2nYuXkY7BO"
found = None
# Compare all users to avoid timing attacks.
for user in self.users:
if self.normalize_username(user["username"]) == username:
found = user
if found is None:
# check a hash to make timing the same as if user was found
bcrypt.checkpw(b"foo", dummy)
raise InvalidAuth
user_hash = base64.b64decode(found["password"])
# bcrypt.checkpw is timing-safe
if not bcrypt.checkpw(password.encode(), user_hash):
raise InvalidAuth
def hash_password( # pylint: disable=no-self-use
self, password: str, for_storage: bool = False
) -> bytes:
"""Encode a password."""
hashed: bytes = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12))
if for_storage:
hashed = base64.b64encode(hashed)
return hashed
def add_auth(self, username: str, password: str) -> None:
"""Add a new authenticated user/pass."""
username = self.normalize_username(username)
if any(
self.normalize_username(user["username"]) == username for user in self.users
):
raise InvalidUser
self.users.append(
{
"username": username,
"password": self.hash_password(password, True).decode(),
}
)
@callback
def async_remove_auth(self, username: str) -> None:
"""Remove authentication."""
username = self.normalize_username(username)
index = None
for i, user in enumerate(self.users):
if self.normalize_username(user["username"]) == username:
index = i
break
if index is None:
raise InvalidUser
self.users.pop(index)
def change_password(self, username: str, new_password: str) -> None:
"""Update the password.
Raises InvalidUser if user cannot be found.
"""
username = self.normalize_username(username)
for user in self.users:
if self.normalize_username(user["username"]) == username:
user["password"] = self.hash_password(new_password, True).decode()
break
else:
raise InvalidUser
async def async_save(self) -> None:
"""Save data."""
await self._store.async_save(self._data)
@AUTH_PROVIDERS.register("homeassistant")
class HassAuthProvider(AuthProvider):
"""Auth provider based on a local storage of users in Home Assistant config dir."""
DEFAULT_TITLE = "Home Assistant Local"
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize an Home Assistant auth provider."""
super().__init__(*args, **kwargs)
self.data: Optional[Data] = None
self._init_lock = asyncio.Lock()
async def async_initialize(self) -> None:
"""Initialize the auth provider."""
async with self._init_lock:
if self.data is not None:
return
data = Data(self.hass)
await data.async_load()
self.data = data
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
"""Return a flow to login."""
return HassLoginFlow(self)
async def async_validate_login(self, username: str, password: str) -> None:
"""Validate a username and password."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
await self.hass.async_add_executor_job(
self.data.validate_login, username, password
)
async def async_add_auth(self, username: str, password: str) -> None:
"""Call add_auth on data."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
await self.hass.async_add_executor_job(self.data.add_auth, username, password)
await self.data.async_save()
async def async_remove_auth(self, username: str) -> None:
"""Call remove_auth on data."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
self.data.async_remove_auth(username)
await self.data.async_save()
async def async_change_password(self, username: str, new_password: str) -> None:
"""Call change_password on data."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
await self.hass.async_add_executor_job(
self.data.change_password, username, new_password
)
await self.data.async_save()
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
norm_username = self.data.normalize_username
username = norm_username(flow_result["username"])
for credential in await self.async_credentials():
if norm_username(credential.data["username"]) == username:
return credential
# Create new credentials.
return self.async_create_credentials({"username": username})
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Get extra info for this credential."""
return UserMeta(name=credentials.data["username"], is_active=True)
async def async_will_remove_credentials(self, credentials: Credentials) -> None:
"""When credentials get removed, also remove the auth."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
try:
self.data.async_remove_auth(credentials.data["username"])
await self.data.async_save()
except InvalidUser:
# Can happen if somehow we didn't clean up a credential
pass
class HassLoginFlow(LoginFlow):
"""Handler for the login flow."""
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the step of the form."""
errors = {}
if user_input is not None:
try:
await cast(HassAuthProvider, self._auth_provider).async_validate_login(
user_input["username"], user_input["password"]
)
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
user_input.pop("password")
return await self.async_finish(user_input)
schema: Dict[str, type] = OrderedDict()
schema["username"] = str
schema["password"] = str
return self.async_show_form(
step_id="init", data_schema=vol.Schema(schema), errors=errors
)
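# Standalone sketch of the timing-equalized check used by Data.validate_login
# above: every attempt performs exactly one bcrypt comparison, whether or not
# the username exists. The user table and helper below are illustrative only.
def _example_validate(username: str, password: str, users: Dict[str, bytes]) -> bool:
    """Return True if the password matches the stored bcrypt hash for username."""
    dummy = bcrypt.hashpw(b"dummy", bcrypt.gensalt(rounds=12))
    stored = users.get(username)
    if stored is None:
        # Burn comparable time so unknown usernames are not distinguishable.
        bcrypt.checkpw(password.encode(), dummy)
        return False
    return bcrypt.checkpw(password.encode(), stored)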
|
import unittest
from absl import flags
from perfkitbenchmarker.linux_benchmarks import tensorflow_benchmark
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class TensorflowBenchmarkBatchSizesTestCase(
pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(TensorflowBenchmarkBatchSizesTestCase, self).setUp()
FLAGS.tf_batch_sizes = [99]
def testFlagOverridesDefaultBatchSize(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'alexnet', tensorflow_benchmark.CPU)
self.assertEqual([99], batch_size)
class TensorflowBenchmarkDefaultBatchSizesTestCase(
pkb_common_test_case.PkbCommonTestCase):
def testUnknownGpuTypeReturnsDefaultBatchSize(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'alexnet', 'unknown_gpu_type')
self.assertEqual([64], batch_size)
def testUnknownModelReturnsDefaultBatchSize(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'unknown_model', tensorflow_benchmark.CPU)
self.assertEqual([64], batch_size)
def testCpuAlexnetDefault(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'alexnet', tensorflow_benchmark.CPU)
self.assertEqual([512], batch_size)
def testCpuInception3Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'inception3', tensorflow_benchmark.CPU)
self.assertEqual([64], batch_size)
def testCpuResnet50Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet50', tensorflow_benchmark.CPU)
self.assertEqual([64], batch_size)
def testCpuResnet152Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet152', tensorflow_benchmark.CPU)
self.assertEqual([32], batch_size)
def testCpuVgg16Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'vgg16', tensorflow_benchmark.CPU)
self.assertEqual([32], batch_size)
def testK80AlexnetDefault(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'alexnet', tensorflow_benchmark.NVIDIA_TESLA_K80)
self.assertEqual([512], batch_size)
def testK80Inception3Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'inception3', tensorflow_benchmark.NVIDIA_TESLA_K80)
self.assertEqual([64], batch_size)
def testK80Resnet50Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet50', tensorflow_benchmark.NVIDIA_TESLA_K80)
self.assertEqual([64], batch_size)
def testK80Resnet152Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet152', tensorflow_benchmark.NVIDIA_TESLA_K80)
self.assertEqual([32], batch_size)
def testK80Vgg16Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'vgg16', tensorflow_benchmark.NVIDIA_TESLA_K80)
self.assertEqual([32], batch_size)
def testP100AlexnetDefault(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'alexnet', tensorflow_benchmark.NVIDIA_TESLA_P100)
self.assertEqual([512], batch_size)
def testP100Inception3Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'inception3', tensorflow_benchmark.NVIDIA_TESLA_P100)
self.assertEqual([256], batch_size)
def testP100Resnet50Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet50', tensorflow_benchmark.NVIDIA_TESLA_P100)
self.assertEqual([256], batch_size)
def testP100Resnet152Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet152', tensorflow_benchmark.NVIDIA_TESLA_P100)
self.assertEqual([128], batch_size)
def testP100Vgg16Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'vgg16', tensorflow_benchmark.NVIDIA_TESLA_P100)
self.assertEqual([128], batch_size)
def testV100AlexnetDefault(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'alexnet', tensorflow_benchmark.NVIDIA_TESLA_V100)
self.assertEqual([512], batch_size)
def testV100Inception3Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'inception3', tensorflow_benchmark.NVIDIA_TESLA_V100)
self.assertEqual([256], batch_size)
def testV100Resnet50Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet50', tensorflow_benchmark.NVIDIA_TESLA_V100)
self.assertEqual([256], batch_size)
def testV100Resnet152Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'resnet152', tensorflow_benchmark.NVIDIA_TESLA_V100)
self.assertEqual([128], batch_size)
def testV100Vgg16Default(self):
batch_size = tensorflow_benchmark._GetBatchSizes(
'vgg16', tensorflow_benchmark.NVIDIA_TESLA_V100)
self.assertEqual([128], batch_size)
if __name__ == '__main__':
unittest.main()
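# Minimal sketch of the kind of (model, accelerator) -> batch-size lookup the
# tests above exercise. The dict, keys, and function below are illustrative
# only; they are not the actual tensorflow_benchmark implementation.
_EXAMPLE_DEFAULT_BATCH_SIZES = [64]
_EXAMPLE_BATCH_SIZES = {
    ('alexnet', 'cpu'): [512],
    ('resnet152', 'cpu'): [32],
    ('inception3', 'nvidia-tesla-v100'): [256],
}

def _example_get_batch_sizes(model, accelerator):
  """Return tuned batch sizes for the pair, falling back to the default."""
  return _EXAMPLE_BATCH_SIZES.get((model, accelerator),
                                  _EXAMPLE_DEFAULT_BATCH_SIZES)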
|
import asyncio
from typing import Optional, Sequence
from pysmartthings import Capability
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
import homeassistant.util.color as color_util
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add lights for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsLight(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "light")
],
True,
)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
supported = [
Capability.switch,
Capability.switch_level,
Capability.color_control,
Capability.color_temperature,
]
# Must be able to be turned on/off.
if Capability.switch not in capabilities:
return None
# Must have one of these
light_capabilities = [
Capability.color_control,
Capability.color_temperature,
Capability.switch_level,
]
if any(capability in capabilities for capability in light_capabilities):
return supported
return None
def convert_scale(value, value_scale, target_scale, round_digits=4):
"""Convert a value to a different scale."""
return round(value * target_scale / value_scale, round_digits)
class SmartThingsLight(SmartThingsEntity, LightEntity):
"""Define a SmartThings Light."""
def __init__(self, device):
"""Initialize a SmartThingsLight."""
super().__init__(device)
self._brightness = None
self._color_temp = None
self._hs_color = None
self._supported_features = self._determine_features()
def _determine_features(self):
"""Get features supported by the device."""
features = 0
# Brightness and transition
if Capability.switch_level in self._device.capabilities:
features |= SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
# Color Temperature
if Capability.color_temperature in self._device.capabilities:
features |= SUPPORT_COLOR_TEMP
# Color
if Capability.color_control in self._device.capabilities:
features |= SUPPORT_COLOR
return features
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
tasks = []
# Color temperature
if self._supported_features & SUPPORT_COLOR_TEMP and ATTR_COLOR_TEMP in kwargs:
tasks.append(self.async_set_color_temp(kwargs[ATTR_COLOR_TEMP]))
# Color
if self._supported_features & SUPPORT_COLOR and ATTR_HS_COLOR in kwargs:
tasks.append(self.async_set_color(kwargs[ATTR_HS_COLOR]))
if tasks:
# Set temp/color first
await asyncio.gather(*tasks)
# Switch/brightness/transition
if self._supported_features & SUPPORT_BRIGHTNESS and ATTR_BRIGHTNESS in kwargs:
await self.async_set_level(
kwargs[ATTR_BRIGHTNESS], kwargs.get(ATTR_TRANSITION, 0)
)
else:
await self._device.switch_on(set_status=True)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
# Switch/transition
if self._supported_features & SUPPORT_TRANSITION and ATTR_TRANSITION in kwargs:
await self.async_set_level(0, int(kwargs[ATTR_TRANSITION]))
else:
await self._device.switch_off(set_status=True)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_update(self):
"""Update entity attributes when the device status has changed."""
# Brightness and transition
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = int(
convert_scale(self._device.status.level, 100, 255, 0)
)
# Color Temperature
if self._supported_features & SUPPORT_COLOR_TEMP:
self._color_temp = color_util.color_temperature_kelvin_to_mired(
self._device.status.color_temperature
)
# Color
if self._supported_features & SUPPORT_COLOR:
self._hs_color = (
convert_scale(self._device.status.hue, 100, 360),
self._device.status.saturation,
)
async def async_set_color(self, hs_color):
"""Set the color of the device."""
hue = convert_scale(float(hs_color[0]), 360, 100)
hue = max(min(hue, 100.0), 0.0)
saturation = max(min(float(hs_color[1]), 100.0), 0.0)
await self._device.set_color(hue, saturation, set_status=True)
async def async_set_color_temp(self, value: float):
"""Set the color temperature of the device."""
kelvin = color_util.color_temperature_mired_to_kelvin(value)
kelvin = max(min(kelvin, 30000.0), 1.0)
await self._device.set_color_temperature(kelvin, set_status=True)
async def async_set_level(self, brightness: int, transition: int):
"""Set the brightness of the light over transition."""
level = int(convert_scale(brightness, 255, 100, 0))
# Due to rounding, set level to 1 (one) so we don't inadvertently
# turn off the light when a low brightness is set.
level = 1 if level == 0 and brightness > 0 else level
level = max(min(level, 100), 0)
duration = int(transition)
await self._device.set_level(level, duration, set_status=True)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._color_temp
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return self._hs_color
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# SmartThings does not expose this attribute, instead it's
# implemented within each device-type handler. This value is the
# lowest kelvin found supported across 20+ handlers.
return 500 # 2000K
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# SmartThings does not expose this attribute, instead it's
# implemented within each device-type handler. This value is the
# highest kelvin found supported across 20+ handlers.
return 111 # 9000K
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
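# Quick, self-contained checks (illustrative only) for the conversions used
# above: convert_scale maps Home Assistant's 0-255 brightness onto the
# device's 0-100 level, and mireds relate to kelvin as 1,000,000 / K.
if __name__ == "__main__":
    assert int(convert_scale(255, 255, 100, 0)) == 100
    assert int(convert_scale(128, 255, 100, 0)) == 50
    # 500 mireds ~= 2000 K and 111 mireds ~= 9009 K, matching the
    # max_mireds/min_mireds bounds chosen above.
    assert round(1_000_000 / 500) == 2000
    assert round(1_000_000 / 111) == 9009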
|
import datetime
import logging
import os
import sys
import cherrypy
from cherrypy import _cperror
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
logging.Logger.manager.emittedNoHandlerWarning = 1
logfmt = logging.Formatter('%(message)s')
class NullHandler(logging.Handler):
"""A no-op logging handler to silence the logging.lastResort handler."""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
class LogManager(object):
"""An object to assist both simple and advanced logging.
``cherrypy.log`` is an instance of this class.
"""
appid = None
"""The id() of the Application object which owns this log manager. If this
is a global log manager, appid is None."""
error_log = None
"""The actual :class:`logging.Logger` instance for error messages."""
access_log = None
"""The actual :class:`logging.Logger` instance for access messages."""
access_log_format = '{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}"'
logger_root = None
"""The "top-level" logger name.
This string will be used as the first segment in the Logger names.
The default is "cherrypy", for example, in which case the Logger names
will be of the form::
cherrypy.error.<appid>
cherrypy.access.<appid>
"""
def __init__(self, appid=None, logger_root='cherrypy'):
self.logger_root = logger_root
self.appid = appid
if appid is None:
self.error_log = logging.getLogger('%s.error' % logger_root)
self.access_log = logging.getLogger('%s.access' % logger_root)
else:
self.error_log = logging.getLogger(
'%s.error.%s' % (logger_root, appid))
self.access_log = logging.getLogger(
'%s.access.%s' % (logger_root, appid))
self.error_log.setLevel(logging.INFO)
self.access_log.setLevel(logging.INFO)
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
self.error_log.addHandler(NullHandler())
self.access_log.addHandler(NullHandler())
cherrypy.engine.subscribe('graceful', self.reopen_files)
def reopen_files(self):
"""Close and reopen all file handlers."""
for log in (self.error_log, self.access_log):
for h in log.handlers:
if isinstance(h, logging.FileHandler):
h.acquire()
h.stream.close()
h.stream = open(h.baseFilename, h.mode)
h.release()
def error(self, msg='', context='', severity=logging.INFO,
traceback=False):
"""Write the given ``msg`` to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
If ``traceback`` is True, the traceback of the current exception
(if any) will be appended to ``msg``.
"""
exc_info = None
if traceback:
exc_info = _cperror._exc_info()
self.error_log.log(
severity,
' '.join((self.time(), context, msg)),
exc_info=exc_info,
)
def __call__(self, *args, **kwargs):
"""An alias for ``error``."""
return self.error(*args, **kwargs)
def access(self):
"""Write to the access log (in Apache/NCSA Combined Log format).
See the
`apache documentation
<http://httpd.apache.org/docs/current/logs.html#combined>`_
for format details.
CherryPy calls this automatically for you. Note there are no arguments;
it collects the data itself from
:class:`cherrypy.request<cherrypy._cprequest.Request>`.
Like Apache started doing in 2.0.46, non-printable and other special
characters in %r (and we expand that to all parts) are escaped using
\\xhh sequences, where hh stands for the hexadecimal representation
of the raw byte. Exceptions from this rule are " and \\, which are
escaped by prepending a backslash, and all whitespace characters,
which are written in their C-style notation (\\n, \\t, etc).
"""
request = cherrypy.serving.request
remote = request.remote
response = cherrypy.serving.response
outheaders = response.headers
inheaders = request.headers
if response.output_status is None:
status = '-'
else:
status = response.output_status.split(b' ', 1)[0]
status = status.decode('ISO-8859-1')
atoms = {'h': remote.name or remote.ip,
'l': '-',
'u': getattr(request, 'login', None) or '-',
't': self.time(),
'r': request.request_line,
's': status,
'b': dict.get(outheaders, 'Content-Length', '') or '-',
'f': dict.get(inheaders, 'Referer', ''),
'a': dict.get(inheaders, 'User-Agent', ''),
'o': dict.get(inheaders, 'Host', '-'),
'i': request.unique_id,
'z': LazyRfc3339UtcTime(),
}
for k, v in atoms.items():
if not isinstance(v, str):
v = str(v)
v = v.replace('"', '\\"').encode('utf8')
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[2:-1]
            # In Python 3 the repr of bytes (as returned by encode) uses
            # double backslashes. But then the logger escapes them yet again,
            # resulting in quadruple slashes. Remove the extra one here.
            v = v.replace('\\\\', '\\')
            atoms[k] = v
try:
self.access_log.log(
logging.INFO, self.access_log_format.format(**atoms))
except Exception:
self(traceback=True)
def time(self):
"""Return now() in Apache Common Log Format (no timezone)."""
now = datetime.datetime.now()
monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
month = monthnames[now.month - 1].capitalize()
return ('[%02d/%s/%04d:%02d:%02d:%02d]' %
(now.day, month, now.year, now.hour, now.minute, now.second))
def _get_builtin_handler(self, log, key):
for h in log.handlers:
if getattr(h, '_cpbuiltin', None) == key:
return h
# ------------------------- Screen handlers ------------------------- #
def _set_screen_handler(self, log, enable, stream=None):
h = self._get_builtin_handler(log, 'screen')
if enable:
if not h:
if stream is None:
stream = sys.stderr
h = logging.StreamHandler(stream)
h.setFormatter(logfmt)
h._cpbuiltin = 'screen'
log.addHandler(h)
elif h:
log.handlers.remove(h)
@property
def screen(self):
"""Turn stderr/stdout logging on or off.
If you set this to True, it'll add the appropriate StreamHandler for
you. If you set it to False, it will remove the handler.
"""
h = self._get_builtin_handler
has_h = h(self.error_log, 'screen') or h(self.access_log, 'screen')
return bool(has_h)
@screen.setter
def screen(self, newvalue):
self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr)
self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout)
# -------------------------- File handlers -------------------------- #
def _add_builtin_file_handler(self, log, fname):
h = logging.FileHandler(fname)
h.setFormatter(logfmt)
h._cpbuiltin = 'file'
log.addHandler(h)
def _set_file_handler(self, log, filename):
h = self._get_builtin_handler(log, 'file')
if filename:
if h:
if h.baseFilename != os.path.abspath(filename):
h.close()
log.handlers.remove(h)
self._add_builtin_file_handler(log, filename)
else:
self._add_builtin_file_handler(log, filename)
else:
if h:
h.close()
log.handlers.remove(h)
@property
def error_file(self):
"""The filename for self.error_log.
If you set this to a string, it'll add the appropriate FileHandler for
you. If you set it to ``None`` or ``''``, it will remove the handler.
"""
h = self._get_builtin_handler(self.error_log, 'file')
if h:
return h.baseFilename
return ''
@error_file.setter
def error_file(self, newvalue):
self._set_file_handler(self.error_log, newvalue)
@property
def access_file(self):
"""The filename for self.access_log.
If you set this to a string, it'll add the appropriate FileHandler for
you. If you set it to ``None`` or ``''``, it will remove the handler.
"""
h = self._get_builtin_handler(self.access_log, 'file')
if h:
return h.baseFilename
return ''
@access_file.setter
def access_file(self, newvalue):
self._set_file_handler(self.access_log, newvalue)
# ------------------------- WSGI handlers ------------------------- #
def _set_wsgi_handler(self, log, enable):
h = self._get_builtin_handler(log, 'wsgi')
if enable:
if not h:
h = WSGIErrorHandler()
h.setFormatter(logfmt)
h._cpbuiltin = 'wsgi'
log.addHandler(h)
elif h:
log.handlers.remove(h)
@property
def wsgi(self):
"""Write errors to wsgi.errors.
If you set this to True, it'll add the appropriate
:class:`WSGIErrorHandler<cherrypy._cplogging.WSGIErrorHandler>` for you
(which writes errors to ``wsgi.errors``).
If you set it to False, it will remove the handler.
"""
return bool(self._get_builtin_handler(self.error_log, 'wsgi'))
@wsgi.setter
def wsgi(self, newvalue):
self._set_wsgi_handler(self.error_log, newvalue)
class WSGIErrorHandler(logging.Handler):
"A handler class which writes logging records to environ['wsgi.errors']."
def flush(self):
"""Flushes the stream."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
stream.flush()
def emit(self, record):
"""Emit a record."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
try:
msg = self.format(record)
fs = '%s\n'
import types
# if no unicode support...
if not hasattr(types, 'UnicodeType'):
stream.write(fs % msg)
else:
try:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode('UTF-8'))
self.flush()
except Exception:
self.handleError(record)
class LazyRfc3339UtcTime(object):
def __str__(self):
"""Return now() in RFC3339 UTC Format."""
        now = datetime.datetime.utcnow()
        return now.isoformat('T') + 'Z'
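# Illustrative use of the LogManager above through the global ``cherrypy.log``
# instance (a sketch, not part of this module; file paths are placeholders):
#
#     cherrypy.log.screen = True                     # add stderr/stdout handlers
#     cherrypy.log.error_file = '/var/log/app.err'   # add a FileHandler
#     cherrypy.log.access_file = '/var/log/app.log'
#     cherrypy.log('Mounting application', context='ENGINE')  # alias for error()
#     cherrypy.log('Request failed', severity=logging.ERROR, traceback=True)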
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
from perfkitbenchmarker import context
from perfkitbenchmarker import vm_util
from tests import pkb_common_test_case
from six.moves import range
class ThreadLocalBenchmarkSpecTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
# Reset the current benchmark spec.
self.addCleanup(context.SetThreadBenchmarkSpec, None)
def testSetGet(self):
benchmark_spec = mock.MagicMock()
context.SetThreadBenchmarkSpec(benchmark_spec)
self.assertEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
def testPropagation(self):
benchmark_spec = mock.MagicMock()
context.SetThreadBenchmarkSpec(benchmark_spec)
def _DoWork(_):
self.assertEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
new_benchmark_spec = mock.MagicMock()
context.SetThreadBenchmarkSpec(new_benchmark_spec)
self.assertNotEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
self.assertEqual(new_benchmark_spec,
context.GetThreadBenchmarkSpec())
vm_util.RunThreaded(_DoWork, list(range(10)))
if __name__ == '__main__':
unittest.main()
|
import argparse
from src.single_layer_network import train_on_cached_data
def create_parser():
"""Create the argparse parser."""
parser = argparse.ArgumentParser()
parser.add_argument("--neural-net",
default='one_layer_relu',
choices=['one_layer_relu', 'one_layer_relu_conv', 'two_layer_relu_conv'],
help="the neural network architecture to use")
parser.add_argument("--number-of-epochs",
default=5,
type=int,
help="the number of epochs to batch the training data into")
parser.add_argument("--render-results",
action='store_true',
help="output data/predictions to JPEG, in addition to normal JSON")
return parser
def main():
"""Use local data to train the neural net, probably made by bin/create_training_data.py."""
parser = create_parser()
args = parser.parse_args()
train_on_cached_data(args.neural_net, args.number_of_epochs)
if __name__ == "__main__":
main()
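# Illustrative invocation of this script (the script file name is a
# placeholder; the flags are the ones defined in create_parser() above):
#     python train.py --neural-net one_layer_relu_conv --number-of-epochs 10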
|
from typing import Any, Dict
import voluptuous as vol
from homeassistant import config_entries
from .const import (
_LOGGER,
DOMAIN as HMIPC_DOMAIN,
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
HMIPC_PIN,
)
from .hap import HomematicipAuth
@config_entries.HANDLERS.register(HMIPC_DOMAIN)
class HomematicipCloudFlowHandler(config_entries.ConfigFlow):
"""Config flow for the HomematicIP Cloud component."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
def __init__(self) -> None:
"""Initialize HomematicIP Cloud config flow."""
self.auth = None
async def async_step_user(self, user_input=None) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
return await self.async_step_init(user_input)
async def async_step_init(self, user_input=None) -> Dict[str, Any]:
"""Handle a flow start."""
errors = {}
if user_input is not None:
user_input[HMIPC_HAPID] = user_input[HMIPC_HAPID].replace("-", "").upper()
await self.async_set_unique_id(user_input[HMIPC_HAPID])
self._abort_if_unique_id_configured()
self.auth = HomematicipAuth(self.hass, user_input)
connected = await self.auth.async_setup()
if connected:
_LOGGER.info("Connection to HomematicIP Cloud established")
return await self.async_step_link()
_LOGGER.info("Connection to HomematicIP Cloud failed")
errors["base"] = "invalid_sgtin_or_pin"
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(HMIPC_HAPID): str,
vol.Optional(HMIPC_NAME): str,
vol.Optional(HMIPC_PIN): str,
}
),
errors=errors,
)
async def async_step_link(self, user_input=None) -> Dict[str, Any]:
"""Attempt to link with the HomematicIP Cloud access point."""
errors = {}
pressed = await self.auth.async_checkbutton()
if pressed:
authtoken = await self.auth.async_register()
if authtoken:
_LOGGER.info("Write config entry for HomematicIP Cloud")
return self.async_create_entry(
title=self.auth.config.get(HMIPC_HAPID),
data={
HMIPC_HAPID: self.auth.config.get(HMIPC_HAPID),
HMIPC_AUTHTOKEN: authtoken,
HMIPC_NAME: self.auth.config.get(HMIPC_NAME),
},
)
return self.async_abort(reason="connection_aborted")
errors["base"] = "press_the_button"
return self.async_show_form(step_id="link", errors=errors)
async def async_step_import(self, import_info) -> Dict[str, Any]:
"""Import a new access point as a config entry."""
hapid = import_info[HMIPC_HAPID].replace("-", "").upper()
authtoken = import_info[HMIPC_AUTHTOKEN]
name = import_info[HMIPC_NAME]
await self.async_set_unique_id(hapid)
self._abort_if_unique_id_configured()
_LOGGER.info("Imported authentication for %s", hapid)
return self.async_create_entry(
title=hapid,
data={HMIPC_AUTHTOKEN: authtoken, HMIPC_HAPID: hapid, HMIPC_NAME: name},
)
|
import math
import functools
def dec_logger(func):
"""One liner."""
@functools.wraps(func)
def wrapper(*arg, **kargs):
"""Imperative one liner."""
result = func(*arg, **kargs)
print(result)
return result
return wrapper
def n1(x): # noqa
"""Multi line
Docstring.
"""
a = x + 1
def n2(y):
"""Single line docstring."""
@dec_logger
def n3(z):
"""Have multiline.
Docstring
As
Well
"""
# Leave some blank spaces
return str(z) + 'expanded'
b = y + 1
n3(b)
return b
n2(a)
if __name__ == '__main__':
n1(math.pi)
|
from homeassistant.components.media_player import (
DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV,
DOMAIN as MP_DOMAIN,
)
from homeassistant.components.vizio.const import (
CONF_ADDITIONAL_CONFIGS,
CONF_APP_ID,
CONF_APPS,
CONF_APPS_TO_INCLUDE_OR_EXCLUDE,
CONF_CONFIG,
CONF_INCLUDE_OR_EXCLUDE,
CONF_MESSAGE,
CONF_NAME_SPACE,
CONF_VOLUME_STEP,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_HOST,
CONF_INCLUDE,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_TYPE,
)
from homeassistant.util import slugify
NAME = "Vizio"
NAME2 = "Vizio2"
HOST = "192.168.1.1:9000"
HOST2 = "192.168.1.2:9000"
ACCESS_TOKEN = "deadbeef"
VOLUME_STEP = 2
UNIQUE_ID = "testid"
MODEL = "model"
VERSION = "version"
CH_TYPE = 1
RESPONSE_TOKEN = 1234
PIN = "abcd"
class MockStartPairingResponse:
"""Mock Vizio start pairing response."""
def __init__(self, ch_type: int, token: int) -> None:
"""Initialize mock start pairing response."""
self.ch_type = ch_type
self.token = token
class MockCompletePairingResponse:
"""Mock Vizio complete pairing response."""
def __init__(self, auth_token: str) -> None:
"""Initialize mock complete pairing response."""
self.auth_token = auth_token
CURRENT_EQ = "Music"
EQ_LIST = ["Music", "Movie"]
CURRENT_INPUT = "HDMI"
INPUT_LIST = ["HDMI", "USB", "Bluetooth", "AUX"]
CURRENT_APP = "Hulu"
CURRENT_APP_CONFIG = {CONF_APP_ID: "3", CONF_NAME_SPACE: 4, CONF_MESSAGE: None}
APP_LIST = [
{
"name": "Hulu",
"country": ["*"],
"id": ["1"],
"config": [{"NAME_SPACE": 4, "APP_ID": "3", "MESSAGE": None}],
},
{
"name": "Netflix",
"country": ["*"],
"id": ["2"],
"config": [{"NAME_SPACE": 1, "APP_ID": "2", "MESSAGE": None}],
},
]
APP_NAME_LIST = [app["name"] for app in APP_LIST]
INPUT_LIST_WITH_APPS = INPUT_LIST + ["CAST"]
CUSTOM_CONFIG = {CONF_APP_ID: "test", CONF_MESSAGE: None, CONF_NAME_SPACE: 10}
ADDITIONAL_APP_CONFIG = {
"name": CURRENT_APP,
CONF_CONFIG: CUSTOM_CONFIG,
}
UNKNOWN_APP_CONFIG = {
"APP_ID": "UNKNOWN",
"NAME_SPACE": 10,
"MESSAGE": None,
}
ENTITY_ID = f"{MP_DOMAIN}.{slugify(NAME)}"
MOCK_PIN_CONFIG = {CONF_PIN: PIN}
MOCK_USER_VALID_TV_CONFIG = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
}
MOCK_OPTIONS = {
CONF_VOLUME_STEP: VOLUME_STEP,
}
MOCK_IMPORT_VALID_TV_CONFIG = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_VOLUME_STEP: VOLUME_STEP,
}
MOCK_TV_WITH_INCLUDE_CONFIG = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_VOLUME_STEP: VOLUME_STEP,
CONF_APPS: {CONF_INCLUDE: [CURRENT_APP]},
}
MOCK_TV_WITH_EXCLUDE_CONFIG = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_VOLUME_STEP: VOLUME_STEP,
CONF_APPS: {CONF_EXCLUDE: ["Netflix"]},
}
MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_VOLUME_STEP: VOLUME_STEP,
CONF_APPS: {CONF_ADDITIONAL_CONFIGS: [ADDITIONAL_APP_CONFIG]},
}
MOCK_SPEAKER_APPS_FAILURE = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_SPEAKER,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_VOLUME_STEP: VOLUME_STEP,
CONF_APPS: {CONF_ADDITIONAL_CONFIGS: [ADDITIONAL_APP_CONFIG]},
}
MOCK_TV_APPS_FAILURE = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_VOLUME_STEP: VOLUME_STEP,
CONF_APPS: None,
}
MOCK_TV_APPS_WITH_VALID_APPS_CONFIG = {
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
CONF_ACCESS_TOKEN: ACCESS_TOKEN,
CONF_APPS: {CONF_INCLUDE: [CURRENT_APP]},
}
MOCK_TV_CONFIG_NO_TOKEN = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_TV,
}
MOCK_SPEAKER_CONFIG = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_DEVICE_CLASS: DEVICE_CLASS_SPEAKER,
}
MOCK_INCLUDE_APPS = {
CONF_INCLUDE_OR_EXCLUDE: CONF_INCLUDE.title(),
CONF_APPS_TO_INCLUDE_OR_EXCLUDE: [CURRENT_APP],
}
MOCK_INCLUDE_NO_APPS = {
CONF_INCLUDE_OR_EXCLUDE: CONF_INCLUDE.title(),
CONF_APPS_TO_INCLUDE_OR_EXCLUDE: [],
}
VIZIO_ZEROCONF_SERVICE_TYPE = "_viziocast._tcp.local."
ZEROCONF_NAME = f"{NAME}.{VIZIO_ZEROCONF_SERVICE_TYPE}"
ZEROCONF_HOST = HOST.split(":")[0]
ZEROCONF_PORT = HOST.split(":")[1]
MOCK_ZEROCONF_SERVICE_INFO = {
CONF_TYPE: VIZIO_ZEROCONF_SERVICE_TYPE,
CONF_NAME: ZEROCONF_NAME,
CONF_HOST: ZEROCONF_HOST,
CONF_PORT: ZEROCONF_PORT,
"properties": {"name": "SB4031-D5"},
}
|
from __future__ import absolute_import
import optparse
import sys
import re
import os
from .diff import htmldiff
description = """\
"""
parser = optparse.OptionParser(
usage="%prog [OPTIONS] FILE1 FILE2\n"
"%prog --annotate [OPTIONS] INFO1 FILE1 INFO2 FILE2 ...",
description=description,
)
parser.add_option(
'-o', '--output',
metavar="FILE",
dest="output",
default="-",
help="File to write the difference to",
)
parser.add_option(
'-a', '--annotation',
action="store_true",
dest="annotation",
help="Do an annotation")
def main(args=None):
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
if options.annotation:
return annotate(options, args)
if len(args) != 2:
print('Error: you must give two files')
parser.print_help()
sys.exit(1)
file1, file2 = args
input1 = read_file(file1)
input2 = read_file(file2)
body1 = split_body(input1)[1]
pre, body2, post = split_body(input2)
result = htmldiff(body1, body2)
result = pre + result + post
if options.output == '-':
if not result.endswith('\n'):
result += '\n'
sys.stdout.write(result)
else:
with open(options.output, 'wb') as f:
f.write(result)
def read_file(filename):
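    """Return the contents of filename, reading from stdin if filename is '-'."""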
if filename == '-':
c = sys.stdin.read()
elif not os.path.exists(filename):
raise OSError(
"Input file %s does not exist" % filename)
else:
with open(filename, 'rb') as f:
c = f.read()
return c
body_start_re = re.compile(
r"<body.*?>", re.I|re.S)
body_end_re = re.compile(
r"</body.*?>", re.I|re.S)
def split_body(html):
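    """Split an HTML string into (pre, body, post) around its <body> tags."""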
pre = post = ''
match = body_start_re.search(html)
if match:
pre = html[:match.end()]
html = html[match.end():]
match = body_end_re.search(html)
if match:
post = html[match.start():]
html = html[:match.start()]
return pre, html, post
def annotate(options, args):
print("Not yet implemented")
sys.exit(1)
|
from typing import Optional
from pymodbus.exceptions import ConnectionException, ModbusException
from pymodbus.pdu import ExceptionResponse
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME, CONF_SLAVE
from homeassistant.helpers import config_validation as cv
from .const import (
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CONF_ADDRESS,
CONF_COILS,
CONF_HUB,
CONF_INPUT_TYPE,
CONF_INPUTS,
DEFAULT_HUB,
MODBUS_DOMAIN,
)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_COILS, CONF_INPUTS),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_INPUTS): [
vol.All(
cv.deprecated(CALL_TYPE_COIL, CONF_ADDRESS),
vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(
CONF_INPUT_TYPE, default=CALL_TYPE_COIL
): vol.In([CALL_TYPE_COIL, CALL_TYPE_DISCRETE]),
}
),
)
]
}
),
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus binary sensors."""
sensors = []
for entry in config[CONF_INPUTS]:
hub = hass.data[MODBUS_DOMAIN][entry[CONF_HUB]]
sensors.append(
ModbusBinarySensor(
hub,
entry[CONF_NAME],
entry.get(CONF_SLAVE),
entry[CONF_ADDRESS],
entry.get(CONF_DEVICE_CLASS),
entry[CONF_INPUT_TYPE],
)
)
add_entities(sensors)
class ModbusBinarySensor(BinarySensorEntity):
"""Modbus binary sensor."""
def __init__(self, hub, name, slave, address, device_class, input_type):
"""Initialize the Modbus binary sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._address = int(address)
self._device_class = device_class
self._input_type = input_type
self._value = None
self._available = True
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._value
@property
def device_class(self) -> Optional[str]:
"""Return the device class of the sensor."""
return self._device_class
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def update(self):
"""Update the state of the sensor."""
try:
if self._input_type == CALL_TYPE_COIL:
result = self._hub.read_coils(self._slave, self._address, 1)
else:
result = self._hub.read_discrete_inputs(self._slave, self._address, 1)
except ConnectionException:
self._available = False
return
if isinstance(result, (ModbusException, ExceptionResponse)):
self._available = False
return
self._value = result.bits[0] & 1
self._available = True
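# Illustrative platform entry accepted by PLATFORM_SCHEMA above, written as a
# Python dict keyed by the imported constants (in configuration.yaml the keys
# are the constants' string values; the values below are placeholders):
#
#     {
#         CONF_INPUTS: [
#             {
#                 CONF_NAME: "Pump running",
#                 CONF_ADDRESS: 100,
#                 CONF_SLAVE: 1,
#                 CONF_INPUT_TYPE: CALL_TYPE_COIL,
#             },
#         ],
#     }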
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import time
from absl import app
from absl import flags
import grpc
from grpc.framework.interfaces.face.face import ExpirationError
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
ILSVRC_VALIDATION_IMAGES = 'ILSVRC2012_img_val'
MODEL_NAME = 'resnet'
RANDOM_SEED = 98103
DEFAULT_TIMEOUT = 3600 # one hour "infinite" timeout
FLAGS = flags.FLAGS
flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port')
flags.DEFINE_string(
'image_directory', ILSVRC_VALIDATION_IMAGES,
'Path to a directory containing images to be classified. '
'A random image from the directory will be chosen for '
'every classification request.')
flags.DEFINE_integer('runtime', 60, 'Runtime in seconds.')
flags.DEFINE_integer('num_threads', 16,
'Number of concurrent worker threads to launch.')
flags.DEFINE_integer('rpc_timeout', DEFAULT_TIMEOUT,
'Number of seconds to set the rpc timeout to.')
def get_files_in_directory_sorted(directory):
"""Returns a list of files in directory, sorted alphabetically."""
return sorted([
os.path.join(directory, name)
for name in os.listdir(directory)
if os.path.isfile(os.path.join(directory, name))
])
class TfServingClientWorkload(object):
"""Tensorflow Serving client workload generator.
See module-level docstring for more details.
"""
def __init__(self):
self.thread_lock = threading.Lock()
self.num_completed_requests = 0
self.num_failed_requests = 0
self.latencies = []
self.file_list = get_files_in_directory_sorted(FLAGS.image_directory)
self.num_images = len(self.file_list)
channel = grpc.insecure_channel(FLAGS.server)
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
# Fix random seed so that sequence of images sent to server is
# deterministic.
random.seed(RANDOM_SEED)
def get_random_image(self):
"""Returns a random image from self.file_list."""
random_index = random.randint(0, self.num_images - 1)
return self.file_list[random_index]
def classify_random_image(self):
"""Chooses a random image and sends a prediction request to the server.
    If a response is received before the request times out, its latency is
    saved, and the request is counted as successful. If the request times out
    or otherwise errors, its latency is discarded, and it is counted as a
    failed request.
"""
image = self.get_random_image()
with open(image, 'rb') as f:
data = f.read()
request = predict_pb2.PredictRequest()
request.model_spec.name = MODEL_NAME
request.model_spec.signature_name = 'serving_default'
request.inputs['image_bytes'].CopyFrom(
        # Build the TensorProto via the TensorFlow 1.x compat API
tf.compat.v1.make_tensor_proto(data, shape=[1]))
try:
start_time = time.time()
self.stub.Predict(request, FLAGS.rpc_timeout)
end_time = time.time()
with self.thread_lock:
self.num_completed_requests += 1
self.latencies.append(end_time - start_time)
except ExpirationError:
with self.thread_lock:
self.num_failed_requests += 1
def run_worker_thread(self):
"""Continuously calls classify_random_image until time is up."""
while (datetime.now() - self.start_time).seconds < FLAGS.runtime:
self.classify_random_image()
def start(self):
"""Creates and launches worker threads and waits for them to finish."""
threads = []
for _ in range(FLAGS.num_threads):
threads.append(threading.Thread(target=self.run_worker_thread))
self.start_time = datetime.now()
for t in threads:
t.start()
for t in threads:
t.join()
self.end_time = datetime.now()
def print_results(self, out=sys.stdout):
"""Prints tests results, to stdout by default."""
actual_runtime = (self.end_time - self.start_time).total_seconds()
req_per_second = self.num_completed_requests / actual_runtime
out.write('Completed requests: %s\n' % self.num_completed_requests)
out.write('Failed requests: %s\n' % self.num_failed_requests)
out.write('Runtime: %s\n' % actual_runtime)
out.write('Number of threads: %s\n' % FLAGS.num_threads)
out.write('Throughput: %s\n' % req_per_second)
out.write('Latency:\n')
for latency in self.latencies:
out.write(str(latency) + '\n')
def main(argv):
"""Runs the test and prints results to stdout."""
del argv
load_test = TfServingClientWorkload()
load_test.start()
load_test.print_results()
if __name__ == '__main__':
app.run(main)
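# Illustrative invocation (the script file name is a placeholder; the flags
# are the ones defined above):
#     python load_test_client.py --server=localhost:8500 \
#         --image_directory=ILSVRC2012_img_val --runtime=60 --num_threads=16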
|
from gitless import core
from . import helpers, pprint
def parser(subparsers, _):
"""Adds the branch parser to the given subparsers object."""
desc = 'list, create, delete, or edit branches'
branch_parser = subparsers.add_parser(
'branch', help=desc, description=desc.capitalize(), aliases=['br'])
list_group = branch_parser.add_argument_group('list branches')
list_group.add_argument(
'-r', '--remote',
help='list remote branches in addition to local branches',
action='store_true')
list_group.add_argument(
'-v', '--verbose', help='be verbose, will output the head of each branch',
action='store_true')
create_group = branch_parser.add_argument_group('create branches')
create_group.add_argument(
'-c', '--create', nargs='+', help='create branch(es)', dest='create_b',
metavar='branch')
create_group.add_argument(
'-dp', '--divergent-point',
help='the commit from where to \'branch out\' (only relevant if a new '
'branch is created; defaults to HEAD)', dest='dp')
delete_group = branch_parser.add_argument_group('delete branches')
delete_group.add_argument(
'-d', '--delete', nargs='+', help='delete branch(es)', dest='delete_b',
metavar='branch')
edit_current_branch_group = branch_parser.add_argument_group('edit the current branch')
edit_current_branch_group.add_argument(
'-sh', '--set-head', help='set the head of the current branch',
dest='new_head', metavar='commit_id')
edit_current_branch_group.add_argument(
'-su', '--set-upstream',
help='set the upstream branch of the current branch',
dest='upstream_b', metavar='branch')
edit_current_branch_group.add_argument(
'-uu', '--unset-upstream',
help='unset the upstream branch of the current branch',
action='store_true')
edit_group = branch_parser.add_argument_group('edit branches')
edit_group.add_argument(
'-rn', '--rename-branch', nargs='+',
help='renames the current branch (gl branch -rn new_name) '
'or another specified branch (gl branch -rn branch_name new_name)',
dest='rename_b'
)
branch_parser.set_defaults(func=main)
def main(args, repo):
is_list = bool(args.verbose or args.remote)
is_create = bool(args.create_b or args.dp)
is_delete = bool(args.delete_b)
is_edit = bool(args.new_head or args.upstream_b or args.unset_upstream or args.rename_b)
if is_list + is_create + is_delete + is_edit > 1:
pprint.err('Invalid flag combination')
pprint.err_exp(
'Can only do one of list, create, delete, or edit branches at a time')
return False
ret = True
if args.create_b:
ret = _do_create(args.create_b, args.dp or 'HEAD', repo)
elif args.delete_b:
ret = _do_delete(args.delete_b, repo)
elif args.upstream_b:
ret = _do_set_upstream(args.upstream_b, repo)
elif args.unset_upstream:
ret = _do_unset_upstream(repo)
elif args.new_head:
ret = _do_set_head(args.new_head, repo)
elif args.rename_b:
ret = _do_rename(args.rename_b, repo)
else:
_do_list(repo, args.remote, v=args.verbose)
return ret
def _do_list(repo, list_remote, v=False):
pprint.msg('List of branches:')
pprint.exp('do gl branch -c b to create branch b')
pprint.exp('do gl branch -d b to delete branch b')
pprint.exp('do gl switch b to switch to branch b')
pprint.exp('* = current branch')
pprint.blank()
for b in (repo.lookup_branch(n) for n in sorted(repo.listall_branches())):
current_str = '*' if b.is_current else ' '
upstream_str = '(upstream is {0})'.format(b.upstream) if b.upstream else ''
color = pprint.green if b.is_current else pprint.yellow
pprint.item(
'{0} {1} {2}'.format(current_str, color(b.branch_name), upstream_str))
if v:
pprint.item(' ➜ head is {0}'.format(pprint.commit_str(b.head)))
if list_remote:
for r in sorted(repo.remotes, key=lambda r: r.name):
branches = r.lookupall_branches() if v else r.listall_branches()
b_remote = '' if v else r.name + '/'
for b in branches:
pprint.item(' {0}'.format(pprint.yellow(b_remote + str(b))))
if v:
pprint.item(' ➜ head is {0}'.format(pprint.commit_str(b.head)))
def _do_create(create_b, dp, repo):
errors_found = False
try:
target = repo.revparse_single(dp)
except KeyError:
raise ValueError('Invalid divergent point {0}'.format(dp))
for b_name in create_b:
r = repo
remote_str = ''
if '/' in b_name: # might want to create a remote branch
maybe_remote, maybe_remote_branch = b_name.split('/', 1)
if maybe_remote in repo.remotes:
r = repo.remotes[maybe_remote]
b_name = maybe_remote_branch
conf_msg = 'Branch {0} will be created in remote repository {1}'.format(
b_name, maybe_remote)
if not pprint.conf_dialog(conf_msg):
pprint.msg(
'Aborted: creation of branch {0} in remote repository {1}'.format(
b_name, maybe_remote))
continue
remote_str = ' in remote repository {0}'.format(maybe_remote)
try:
new_branch = r.create_branch(b_name, target)
pprint.ok('Created new branch {0}{1}'.format(b_name, remote_str))
try:
new_branch.upstream = helpers.get_branch(dp, repo)
pprint.ok('Upstream of {0} set to {1}'.format(b_name, dp))
except:
# Not a branch
continue
except ValueError as e:
pprint.err(e)
errors_found = True
return not errors_found
def _do_delete(delete_b, repo):
errors_found = False
for b_name in delete_b:
try:
b = helpers.get_branch(b_name, repo)
branch_str = 'Branch {0} will be removed'.format(b.branch_name)
remote_str = ''
if isinstance(b, core.RemoteBranch):
remote_str = ' from remote repository {0}'.format(b.remote_name)
if not pprint.conf_dialog('{0}{1}'.format(branch_str, remote_str)):
pprint.msg('Aborted: removal of branch {0}'.format(b))
continue
b.delete()
pprint.ok('Branch {0} removed successfully'.format(b))
except ValueError as e:
pprint.err(e)
errors_found = True
except core.BranchIsCurrentError as e:
pprint.err(e)
pprint.err_exp(
'do gl branch b to create or switch to another branch b and then '
'gl branch -d {0} to remove branch {0}'.format(b))
errors_found = True
return not errors_found
def _do_set_upstream(upstream, repo):
curr_b = repo.current_branch
curr_b.upstream = helpers.get_branch(upstream, repo)
pprint.ok('Current branch {0} set to track {1}'.format(curr_b, upstream))
return True
def _do_unset_upstream(repo):
curr_b = repo.current_branch
curr_b.upstream = None
pprint.ok('Upstream unset for current branch {0}'.format(curr_b))
return True
def _do_set_head(commit_id, repo):
try:
commit = repo.revparse_single(commit_id)
except KeyError:
raise ValueError('Invalid head {0}'.format(commit_id))
curr_b = repo.current_branch
curr_b.head = commit.id
pprint.ok(
'Head of current branch {0} is now {1}'.format(curr_b, pprint.commit_str(commit)))
return True
def _do_rename(rename_b, repo):
ret = True
  if len(rename_b) == 1:
# Renaming the current branch
curr_b = repo.current_branch
curr_b.rename(rename_b[0])
pprint.ok('Renamed this branch to {0}'.format(rename_b[0]))
elif len(rename_b) == 2:
# Renaming a specified branch to a new name
b = helpers.get_branch(rename_b[0], repo)
b.rename(rename_b[1])
pprint.ok('Renamed branch {0} to {1}'.format(rename_b[0], rename_b[1]))
  else:
# Gave more than 2 arguments
pprint.err(
'Too many arguments given. Expected 1 or 2 arguments.')
ret = False
return ret
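# Illustrative command lines handled by the parser above (branch names and
# commit ids are placeholders):
#     gl branch -v -r                     # list local and remote branches
#     gl branch -c topic -dp HEAD~2       # create 'topic' branching out from HEAD~2
#     gl branch -su origin/master         # set the upstream of the current branch
#     gl branch -rn topic topic-renamed   # rename branch 'topic'
#     gl branch -d topic-renamed          # delete a branch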
|
from unittest.mock import patch
import pytest
from transmissionrpc.error import TransmissionError
from homeassistant.components import transmission
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, mock_coro
MOCK_ENTRY = MockConfigEntry(
domain=transmission.DOMAIN,
data={
transmission.CONF_NAME: "Transmission",
transmission.CONF_HOST: "0.0.0.0",
transmission.CONF_USERNAME: "user",
transmission.CONF_PASSWORD: "pass",
transmission.CONF_PORT: 9091,
},
)
@pytest.fixture(name="api")
def mock_transmission_api():
"""Mock an api."""
with patch("transmissionrpc.Client"):
yield
@pytest.fixture(name="auth_error")
def mock_api_authentication_error():
"""Mock an api."""
with patch(
"transmissionrpc.Client", side_effect=TransmissionError("401: Unauthorized")
):
yield
@pytest.fixture(name="unknown_error")
def mock_api_unknown_error():
"""Mock an api."""
with patch("transmissionrpc.Client", side_effect=TransmissionError):
yield
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a Transmission client."""
assert await async_setup_component(hass, transmission.DOMAIN, {}) is True
assert transmission.DOMAIN not in hass.data
async def test_setup_with_config(hass, api):
"""Test that we import the config and setup the client."""
config = {
transmission.DOMAIN: {
transmission.CONF_NAME: "Transmission",
transmission.CONF_HOST: "0.0.0.0",
transmission.CONF_USERNAME: "user",
transmission.CONF_PASSWORD: "pass",
transmission.CONF_PORT: 9091,
},
transmission.DOMAIN: {
transmission.CONF_NAME: "Transmission2",
transmission.CONF_HOST: "0.0.0.1",
transmission.CONF_USERNAME: "user",
transmission.CONF_PASSWORD: "pass",
transmission.CONF_PORT: 9091,
},
}
assert await async_setup_component(hass, transmission.DOMAIN, config) is True
async def test_successful_config_entry(hass, api):
"""Test that configured transmission is configured successfully."""
entry = MOCK_ENTRY
entry.add_to_hass(hass)
assert await transmission.async_setup_entry(hass, entry) is True
assert entry.options == {
transmission.CONF_SCAN_INTERVAL: transmission.DEFAULT_SCAN_INTERVAL,
transmission.CONF_LIMIT: transmission.DEFAULT_LIMIT,
transmission.CONF_ORDER: transmission.DEFAULT_ORDER,
}
async def test_setup_failed(hass):
"""Test transmission failed due to an error."""
entry = MOCK_ENTRY
entry.add_to_hass(hass)
# test connection error raising ConfigEntryNotReady
with patch(
"transmissionrpc.Client",
side_effect=TransmissionError("111: Connection refused"),
), pytest.raises(ConfigEntryNotReady):
await transmission.async_setup_entry(hass, entry)
# test Authentication error returning false
with patch(
"transmissionrpc.Client", side_effect=TransmissionError("401: Unauthorized")
):
assert await transmission.async_setup_entry(hass, entry) is False
async def test_unload_entry(hass, api):
"""Test removing transmission client."""
entry = MOCK_ENTRY
entry.add_to_hass(hass)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=mock_coro(True)
) as unload_entry:
assert await transmission.async_setup_entry(hass, entry)
assert await transmission.async_unload_entry(hass, entry)
assert unload_entry.call_count == 2
assert entry.entry_id not in hass.data[transmission.DOMAIN]
|
import operator
import re
from collections import abc
from numbers import Number
from .runtime import Undefined
number_re = re.compile(r"^-?\d+(\.\d+)?$")
regex_type = type(number_re)
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_boolean(value):
"""Return true if the object is a boolean value.
.. versionadded:: 2.11
"""
return value is True or value is False
def test_false(value):
"""Return true if the object is False.
.. versionadded:: 2.11
"""
return value is False
def test_true(value):
"""Return true if the object is True.
.. versionadded:: 2.11
"""
return value is True
# NOTE: The existing 'number' test matches booleans and floats
def test_integer(value):
"""Return true if the object is an integer.
.. versionadded:: 2.11
"""
return isinstance(value, int) and value is not True and value is not False
# NOTE: The existing 'number' test matches booleans and integers
def test_float(value):
"""Return true if the object is a float.
.. versionadded:: 2.11
"""
return isinstance(value, float)
def test_lower(value):
"""Return true if the variable is lowercased."""
return str(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return str(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, str)
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, abc.Mapping)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, Number)
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except Exception:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, "__html__")
def test_in(value, seq):
"""Check if value is in seq.
.. versionadded:: 2.10
"""
return value in seq
TESTS = {
"odd": test_odd,
"even": test_even,
"divisibleby": test_divisibleby,
"defined": test_defined,
"undefined": test_undefined,
"none": test_none,
"boolean": test_boolean,
"false": test_false,
"true": test_true,
"integer": test_integer,
"float": test_float,
"lower": test_lower,
"upper": test_upper,
"string": test_string,
"mapping": test_mapping,
"number": test_number,
"sequence": test_sequence,
"iterable": test_iterable,
"callable": test_callable,
"sameas": test_sameas,
"escaped": test_escaped,
"in": test_in,
"==": operator.eq,
"eq": operator.eq,
"equalto": operator.eq,
"!=": operator.ne,
"ne": operator.ne,
">": operator.gt,
"gt": operator.gt,
"greaterthan": operator.gt,
"ge": operator.ge,
">=": operator.ge,
"<": operator.lt,
"lt": operator.lt,
"lessthan": operator.lt,
"<=": operator.le,
"le": operator.le,
}
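# Illustrative template usage of a few of the tests registered above (a
# sketch, assuming the TESTS mapping is installed in a standard jinja2
# Environment):
#
#     {% if items is defined and items|length is divisibleby 3 %} ... {% endif %}
#     {{ 42 is even }}          {# renders as True #}
#     {{ value is sameas none }}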
|
import os
from urllib.parse import urlunsplit
import pygments
from nikola.plugin_categories import ShortcodePlugin
class Plugin(ShortcodePlugin):
"""Plugin for listing shortcode."""
name = "listing"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
Plugin.folders = site.config['LISTINGS_FOLDERS']
return super().set_site(site)
def handler(self, fname, language='text', linenumbers=False, filename=None, site=None, data=None, lang=None, post=None):
"""Create HTML for a listing."""
fname = fname.replace('/', os.sep)
if len(self.folders) == 1:
listings_folder = next(iter(self.folders.keys()))
if fname.startswith(listings_folder):
fpath = os.path.join(fname) # new syntax: specify folder name
else:
# old syntax: don't specify folder name
fpath = os.path.join(listings_folder, fname)
else:
# must be new syntax: specify folder name
fpath = os.path.join(fname)
linenumbers = 'table' if linenumbers else False
deps = [fpath]
with open(fpath, 'r') as inf:
target = urlunsplit(
("link", 'listing', fpath.replace('\\', '/'), '', ''))
src_target = urlunsplit(
("link", 'listing_source', fpath.replace('\\', '/'), '', ''))
src_label = self.site.MESSAGES('Source')
data = inf.read()
lexer = pygments.lexers.get_lexer_by_name(language)
formatter = pygments.formatters.get_formatter_by_name(
'html', linenos=linenumbers)
output = '<a href="{1}">{0}</a> <a href="{3}">({2})</a>' .format(
fname, target, src_label, src_target) + pygments.highlight(data, lexer, formatter)
return output, deps
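# Illustrative shortcode invocation handled by handler() above (a sketch; the
# listed file name is a placeholder and the syntax assumes Nikola's usual
# {{% ... %}} shortcode form):
#     {{% listing hello.py python linenumbers=True %}}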
|
from typing import (Any, Dict, Iterable, List, Mapping, MutableMapping, Optional,
Sequence, Tuple, Union, cast)
import functools
import attr
from qutebrowser.config import configtypes
from qutebrowser.utils import usertypes, qtutils, utils
from qutebrowser.misc import debugcachestats
DATA = cast(Mapping[str, 'Option'], None)
MIGRATIONS = cast('Migrations', None)
_BackendDict = Mapping[str, Union[str, bool]]
@attr.s
class Option:
"""Description of an Option in the config.
Note that this is just an option which exists, with no value associated.
"""
name: str = attr.ib()
typ: configtypes.BaseType = attr.ib()
default: Any = attr.ib()
backends: Iterable[usertypes.Backend] = attr.ib()
raw_backends: Optional[Mapping[str, bool]] = attr.ib()
description: str = attr.ib()
supports_pattern: bool = attr.ib(default=False)
restart: bool = attr.ib(default=False)
no_autoconfig: bool = attr.ib(default=False)
@attr.s
class Migrations:
"""Migrated options in configdata.yml.
Attributes:
renamed: A dict mapping old option names to new names.
deleted: A list of option names which have been removed.
"""
renamed: Dict[str, str] = attr.ib(default=attr.Factory(dict))
deleted: List[str] = attr.ib(default=attr.Factory(list))
def _raise_invalid_node(name: str, what: str, node: Any) -> None:
"""Raise an exception for an invalid configdata YAML node.
Args:
name: The name of the setting being parsed.
what: The name of the thing being parsed.
node: The invalid node.
"""
raise ValueError("Invalid node for {} while reading {}: {!r}".format(
name, what, node))
def _parse_yaml_type(
name: str,
node: Union[str, Mapping[str, Any]],
) -> configtypes.BaseType:
if isinstance(node, str):
# e.g:
# > type: Bool
# -> create the type object without any arguments
type_name = node
kwargs: MutableMapping[str, Any] = {}
elif isinstance(node, dict):
# e.g:
# > type:
# > name: String
# > none_ok: true
# -> create the type object and pass arguments
type_name = node.pop('name')
kwargs = node
valid_values = kwargs.get('valid_values', None)
if valid_values is not None:
kwargs['valid_values'] = configtypes.ValidValues(*valid_values)
else:
_raise_invalid_node(name, 'type', node)
try:
typ = getattr(configtypes, type_name)
except AttributeError:
raise AttributeError("Did not find type {} for {}".format(
type_name, name))
# Parse sub-types
try:
if typ is configtypes.Dict:
kwargs['keytype'] = _parse_yaml_type(name, kwargs['keytype'])
kwargs['valtype'] = _parse_yaml_type(name, kwargs['valtype'])
elif typ is configtypes.List or typ is configtypes.ListOrValue:
kwargs['valtype'] = _parse_yaml_type(name, kwargs['valtype'])
except KeyError as e:
_raise_invalid_node(name, str(e), node)
try:
return typ(**kwargs)
except TypeError as e:
raise TypeError("Error while creating {} with {}: {}".format(
type_name, node, e))
def _parse_yaml_backends_dict(
name: str,
node: _BackendDict,
) -> Sequence[usertypes.Backend]:
"""Parse a dict definition for backends.
Example:
backends:
QtWebKit: true
QtWebEngine: Qt 5.15
"""
str_to_backend = {
'QtWebKit': usertypes.Backend.QtWebKit,
'QtWebEngine': usertypes.Backend.QtWebEngine,
}
if node.keys() != str_to_backend.keys():
_raise_invalid_node(name, 'backends', node)
backends = []
# The value associated to the key, and whether we should add that backend
# or not.
conditionals = {
True: True,
False: False,
'Qt 5.13': qtutils.version_check('5.13'),
'Qt 5.14': qtutils.version_check('5.14'),
'Qt 5.15': qtutils.version_check('5.15'),
}
for key in sorted(node.keys()):
if conditionals[node[key]]:
backends.append(str_to_backend[key])
return backends
def _parse_yaml_backends(
name: str,
node: Union[None, str, _BackendDict],
) -> Sequence[usertypes.Backend]:
"""Parse a backend node in the yaml.
It can have one of those four forms:
- Not present -> setting applies to both backends.
- backend: QtWebKit -> setting only available with QtWebKit
- backend: QtWebEngine -> setting only available with QtWebEngine
- backend:
QtWebKit: true
QtWebEngine: Qt 5.15
-> setting available based on the given conditionals.
Return:
A list of backends.
"""
if node is None:
return [usertypes.Backend.QtWebKit, usertypes.Backend.QtWebEngine]
elif node == 'QtWebKit':
return [usertypes.Backend.QtWebKit]
elif node == 'QtWebEngine':
return [usertypes.Backend.QtWebEngine]
elif isinstance(node, dict):
return _parse_yaml_backends_dict(name, node)
_raise_invalid_node(name, 'backends', node)
raise utils.Unreachable
def _read_yaml(
yaml_data: str,
) -> Tuple[Mapping[str, Option], Migrations]:
"""Read config data from a YAML file.
Args:
yaml_data: The YAML string to parse.
Return:
A tuple with two elements:
- A dict mapping option names to Option elements.
- A Migrations object.
"""
parsed = {}
migrations = Migrations()
data = utils.yaml_load(yaml_data)
keys = {'type', 'default', 'desc', 'backend', 'restart',
'supports_pattern', 'no_autoconfig'}
for name, option in data.items():
if set(option.keys()) == {'renamed'}:
migrations.renamed[name] = option['renamed']
continue
if set(option.keys()) == {'deleted'}:
value = option['deleted']
if value is not True:
raise ValueError("Invalid deleted value: {}".format(value))
migrations.deleted.append(name)
continue
if not set(option.keys()).issubset(keys):
raise ValueError("Invalid keys {} for {}".format(
option.keys(), name))
backends = option.get('backend', None)
parsed[name] = Option(
name=name,
typ=_parse_yaml_type(name, option['type']),
default=option['default'],
backends=_parse_yaml_backends(name, backends),
raw_backends=backends if isinstance(backends, dict) else None,
description=option['desc'],
restart=option.get('restart', False),
supports_pattern=option.get('supports_pattern', False),
no_autoconfig=option.get('no_autoconfig', False),
)
# Make sure no key shadows another.
for key1 in parsed:
for key2 in parsed:
if key2.startswith(key1 + '.'):
raise ValueError("Shadowing keys {} and {}".format(key1, key2))
# Make sure rename targets actually exist.
for old, new in migrations.renamed.items():
if new not in parsed:
raise ValueError("Renaming {} to unknown {}".format(old, new))
return parsed, migrations
@debugcachestats.register()
@functools.lru_cache(maxsize=256)
def is_valid_prefix(prefix: str) -> bool:
"""Check whether the given prefix is a valid prefix for some option."""
return any(key.startswith(prefix + '.') for key in DATA)
def init() -> None:
"""Initialize configdata from the YAML file."""
global DATA, MIGRATIONS
DATA, MIGRATIONS = _read_yaml(utils.read_file('config/configdata.yml'))
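# Illustrative configdata.yml entry accepted by _read_yaml() above (the option
# name and values are made up):
#
#     content.example_option:
#       type:
#         name: Bool
#       default: true
#       backend:
#         QtWebKit: true
#         QtWebEngine: Qt 5.15
#       desc: An example option description.
#       restart: false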
|
from pyfamilyhublocal import FamilyHubCam
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import CONF_IP_ADDRESS, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "FamilyHub Camera"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Family Hub Camera."""
address = config.get(CONF_IP_ADDRESS)
name = config.get(CONF_NAME)
session = async_get_clientsession(hass)
family_hub_cam = FamilyHubCam(address, hass.loop, session)
async_add_entities([FamilyHubCamera(name, family_hub_cam)], True)
class FamilyHubCamera(Camera):
"""The representation of a Family Hub camera."""
def __init__(self, name, family_hub_cam):
"""Initialize camera component."""
super().__init__()
self._name = name
self.family_hub_cam = family_hub_cam
async def async_camera_image(self):
"""Return a still image response."""
return await self.family_hub_cam.async_get_cam_image()
@property
def name(self):
"""Return the name of this camera."""
return self._name
|
import os
import unittest
from absl import flags as flgs
import contextlib2
import mock
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.gcp import gce_network
from perfkitbenchmarker.providers.gcp import google_kubernetes_engine
from perfkitbenchmarker.providers.gcp import util
from tests import pkb_common_test_case
from six.moves import builtins
FLAGS = flgs.FLAGS
_COMPONENT = 'test_component'
_RUN_URI = 'fake-urn-uri'
_NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
_NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET = 'nvidia_unrestricted_permissions_daemonset.yml'
_INSTANCE_GROUPS_LIST_OUTPUT = (
'../../../tests/data/gcloud_compute_instance_groups_list_instances.json')
_NODE_POOLS_LIST_OUTPUT = (
'../../../tests/data/gcloud_container_node_pools_list.json')
@contextlib2.contextmanager
def patch_critical_objects(stdout='', stderr='', return_code=0, flags=FLAGS):
with contextlib2.ExitStack() as stack:
flags.gcloud_path = 'gcloud'
flags.run_uri = _RUN_URI
flags.data_search_paths = ''
stack.enter_context(mock.patch(builtins.__name__ + '.open'))
stack.enter_context(mock.patch(vm_util.__name__ + '.PrependTempDir'))
stack.enter_context(mock.patch(vm_util.__name__ + '.NamedTemporaryFile'))
stack.enter_context(
mock.patch(
util.__name__ + '.GetDefaultProject', return_value='fakeproject'))
stack.enter_context(
mock.patch(
util.__name__ + '.GetDefaultUser', return_value='fakeuser'))
stack.enter_context(
mock.patch(
gce_network.__name__ + '.GceFirewall.GetFirewall',
return_value='fakefirewall'))
stack.enter_context(
mock.patch(
gce_network.__name__ + '.GceNetwork.GetNetwork',
return_value='fakenetwork'))
retval = (stdout, stderr, return_code)
issue_command = stack.enter_context(
mock.patch(vm_util.__name__ + '.IssueCommand', return_value=retval))
yield issue_command
class GoogleKubernetesEngineCustomMachineTypeTestCase(
pkb_common_test_case.PkbCommonTestCase):
@staticmethod
def create_kubernetes_engine_spec():
kubernetes_engine_spec = benchmark_config_spec._ContainerClusterSpec(
'NAME', **{
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': {
'cpus': 4,
'memory': '1024MiB',
},
},
},
})
return kubernetes_engine_spec
def testCreate(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Create()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('gcloud container clusters create', command_string)
self.assertIn('--machine-type custom-4-1024', command_string)
class GoogleKubernetesEngineTestCase(pkb_common_test_case.PkbCommonTestCase):
@staticmethod
def create_kubernetes_engine_spec():
kubernetes_engine_spec = benchmark_config_spec._ContainerClusterSpec(
'NAME', **{
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'fake-machine-type',
'zone': 'us-central1-a',
'min_cpu_platform': 'skylake',
'boot_disk_type': 'foo',
'boot_disk_size': 200,
'num_local_ssds': 2,
},
},
'vm_count': 2,
})
return kubernetes_engine_spec
def testCreate(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Create()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('gcloud container clusters create', command_string)
self.assertIn('--num-nodes 2', command_string)
self.assertIn('--machine-type fake-machine-type', command_string)
self.assertIn('--zone us-central1-a', command_string)
self.assertIn('--min-cpu-platform skylake', command_string)
self.assertIn('--disk-size 200', command_string)
self.assertIn('--disk-type foo', command_string)
self.assertIn('--local-ssd-count 2', command_string)
def testCreateQuotaExceeded(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects(
stderr="""
message=Insufficient regional quota to satisfy request: resource "CPUS":
request requires '6400.0' and is short '5820.0'""",
return_code=1) as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
with self.assertRaises(
errors.Benchmarks.QuotaFailure):
cluster._Create()
self.assertEqual(issue_command.call_count, 1)
def testCreateResourcesExhausted(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects(
stderr="""
[ZONE_RESOURCE_POOL_EXHAUSTED_WITH_DETAILS]:
Instance 'test' creation failed: The zone
'projects/artemis-prod/zones/us-central1-a' does not have enough
resources available to fulfill the request.""",
return_code=1) as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
with self.assertRaises(
errors.Benchmarks.InsufficientCapacityCloudFailure):
cluster._Create()
self.assertEqual(issue_command.call_count, 1)
def testPostCreate(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._PostCreate()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn(
'gcloud container clusters get-credentials pkb-{0}'.format(_RUN_URI),
command_string)
self.assertIn('KUBECONFIG', issue_command.call_args[1]['env'])
def testDelete(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Delete()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('gcloud container clusters delete pkb-{0}'.format(_RUN_URI),
command_string)
self.assertIn('--zone us-central1-a', command_string)
def testExists(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Exists()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn(
'gcloud container clusters describe pkb-{0}'.format(_RUN_URI),
command_string)
def testGetResourceMetadata(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
metadata = cluster.GetResourceMetadata()
self.assertEqual(issue_command.call_count, 0)
self.assertContainsSubset(
{
'project': 'fakeproject',
'gce_local_ssd_count': 2,
'gce_local_ssd_interface': 'SCSI',
'machine_type': 'fake-machine-type',
'boot_disk_type': 'foo',
'boot_disk_size': 200,
'cloud': 'GCP',
'cluster_type': 'Kubernetes',
'zone': 'us-central1-a',
'size': 2,
'container_cluster_version': 'latest'
}, metadata)
class GoogleKubernetesEngineAutoscalingTestCase(
pkb_common_test_case.PkbCommonTestCase):
@staticmethod
def create_kubernetes_engine_spec():
kubernetes_engine_spec = benchmark_config_spec._ContainerClusterSpec(
'NAME', **{
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'fake-machine-type',
'zone': 'us-central1-a',
},
},
'min_vm_count': 1,
'vm_count': 2,
'max_vm_count': 3,
})
return kubernetes_engine_spec
def testCreate(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Create()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('gcloud container clusters create', command_string)
self.assertIn('--enable-autoscaling', command_string)
self.assertIn('--min-nodes 1', command_string)
self.assertIn('--num-nodes 2', command_string)
self.assertIn('--max-nodes 3', command_string)
def testGetResourceMetadata(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
metadata = cluster.GetResourceMetadata()
self.assertEqual(issue_command.call_count, 0)
self.assertContainsSubset(
{
'project': 'fakeproject',
'cloud': 'GCP',
'cluster_type': 'Kubernetes',
'min_size': 1,
'size': 2,
'max_size': 3
}, metadata)
class GoogleKubernetesEngineVersionFlagTestCase(
pkb_common_test_case.PkbCommonTestCase):
@staticmethod
def create_kubernetes_engine_spec():
kubernetes_engine_spec = benchmark_config_spec._ContainerClusterSpec(
'NAME', **{
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'fake-machine-type',
},
},
})
return kubernetes_engine_spec
def testCreateCustomVersion(self):
spec = self.create_kubernetes_engine_spec()
FLAGS.container_cluster_version = 'fake-version'
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Create()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('--cluster-version fake-version', command_string)
def testCreateDefaultVersion(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Create()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('--cluster-version latest', command_string)
class GoogleKubernetesEngineWithGpusTestCase(
pkb_common_test_case.PkbCommonTestCase):
@staticmethod
def create_kubernetes_engine_spec():
kubernetes_engine_spec = benchmark_config_spec._ContainerClusterSpec(
'NAME', **{
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'fake-machine-type',
'gpu_type': 'k80',
'gpu_count': 2,
},
},
'vm_count': 2,
})
return kubernetes_engine_spec
def testCreate(self):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._Create()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('gcloud container clusters create', command_string)
self.assertIn('--num-nodes 2', command_string)
self.assertIn('--machine-type fake-machine-type', command_string)
self.assertIn('--accelerator type=nvidia-tesla-k80,count=2',
command_string)
@mock.patch('perfkitbenchmarker.kubernetes_helper.CreateFromFile')
def testPostCreate(self, create_from_file_patch):
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects() as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
cluster._PostCreate()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn(
'gcloud container clusters get-credentials pkb-{0}'.format(_RUN_URI),
command_string)
self.assertIn('KUBECONFIG', issue_command.call_args[1]['env'])
expected_args_to_create_from_file = (
_NVIDIA_DRIVER_SETUP_DAEMON_SET_SCRIPT,
data.ResourcePath(
_NVIDIA_UNRESTRICTED_PERMISSIONS_DAEMON_SET)
)
expected_calls = [mock.call(arg)
for arg in expected_args_to_create_from_file]
      # Assert that create_from_file was called twice,
      # and that the args were as expected (the NVIDIA driver setup
      # daemon set, followed by the NVIDIA unrestricted permissions
      # daemon set).
create_from_file_patch.assert_has_calls(expected_calls)
class GoogleKubernetesEngineGetNodesTestCase(GoogleKubernetesEngineTestCase):
def testGetInstancesFromInstanceGroups(self):
instance_group_name = 'gke-pkb-0c47e6fa-default-pool-167d73ee-grp'
path = os.path.join(os.path.dirname(__file__), _INSTANCE_GROUPS_LIST_OUTPUT)
output = open(path).read()
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects(stdout=output) as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
instances = cluster._GetInstancesFromInstanceGroup(instance_group_name)
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn(
'gcloud compute instance-groups list-instances '
'gke-pkb-0c47e6fa-default-pool-167d73ee-grp', command_string)
expected = set([
'gke-pkb-0c47e6fa-default-pool-167d73ee-hmwk',
'gke-pkb-0c47e6fa-default-pool-167d73ee-t854'
])
self.assertEqual(expected, set(instances)) # order doesn't matter
def testGetInstanceGroups(self):
path = os.path.join(os.path.dirname(__file__), _NODE_POOLS_LIST_OUTPUT)
output = open(path).read()
spec = self.create_kubernetes_engine_spec()
with patch_critical_objects(stdout=output) as issue_command:
cluster = google_kubernetes_engine.GkeCluster(spec)
instance_groups = cluster._GetInstanceGroups()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertEqual(issue_command.call_count, 1)
self.assertIn('gcloud container node-pools list', command_string)
self.assertIn('--cluster', command_string)
expected = set([
'gke-pkb-0c47e6fa-default-pool-167d73ee-grp',
'gke-pkb-0c47e6fa-test-efea7796-grp'
])
self.assertEqual(expected, set(instance_groups)) # order doesn't matter
if __name__ == '__main__':
unittest.main()
|
import io
import json
import os
import threading
from nikola import shortcodes as sc
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, write_metadata, LocaleBorg, map_metadata
try:
from markdown import Markdown
except ImportError:
Markdown = None
class ThreadLocalMarkdown(threading.local):
"""Convert Markdown to HTML using per-thread Markdown objects.
See discussion in #2661.
"""
def __init__(self, extensions, extension_configs):
"""Create a Markdown instance."""
self.markdown = Markdown(extensions=extensions, extension_configs=extension_configs, output_format="html5")
def convert(self, data):
"""Convert data to HTML and reset internal state."""
result = self.markdown.convert(data)
try:
meta = {}
for k in self.markdown.Meta: # This reads everything as lists
meta[k.lower()] = ','.join(self.markdown.Meta[k])
except Exception:
meta = {}
self.markdown.reset()
return result, meta
class CompileMarkdown(PageCompiler):
"""Compile Markdown into HTML."""
name = "markdown"
friendly_name = "Markdown"
demote_headers = True
site = None
supports_metadata = False
def set_site(self, site):
"""Set Nikola site."""
super().set_site(site)
self.config_dependencies = []
extensions = []
for plugin_info in self.get_compiler_extensions():
self.config_dependencies.append(plugin_info.name)
extensions.append(plugin_info.plugin_object)
plugin_info.plugin_object.short_help = plugin_info.description
site_extensions = self.site.config.get("MARKDOWN_EXTENSIONS")
self.config_dependencies.append(str(sorted(site_extensions)))
extensions.extend(site_extensions)
site_extension_configs = self.site.config.get("MARKDOWN_EXTENSION_CONFIGS")
if site_extension_configs:
self.config_dependencies.append(json.dumps(site_extension_configs.values, sort_keys=True))
if Markdown is not None:
self.converters = {}
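            # Build one ThreadLocalMarkdown converter per configured
            # translation so per-language extension configs are honored.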
for lang in self.site.config['TRANSLATIONS']:
lang_extension_configs = site_extension_configs(lang) if site_extension_configs else {}
self.converters[lang] = ThreadLocalMarkdown(extensions, lang_extension_configs)
self.supports_metadata = 'markdown.extensions.meta' in extensions
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile Markdown into HTML strings."""
if lang is None:
lang = LocaleBorg().current_lang
if Markdown is None:
req_missing(['markdown'], 'build this site (compile Markdown)')
if not is_two_file:
_, data = self.split_metadata(data, post, lang)
new_data, shortcodes = sc.extract_shortcodes(data)
output, _ = self.converters[lang].convert(new_data)
output, shortcode_deps = self.site.apply_shortcodes_uuid(output, shortcodes, filename=source_path, extra_context={'post': post})
return output, shortcode_deps
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
if Markdown is None:
req_missing(['markdown'], 'build this site (compile Markdown)')
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf-8") as out_file:
with io.open(source, "r", encoding="utf-8-sig") as in_file:
data = in_file.read()
output, shortcode_deps = self.compile_string(data, source, is_two_file, post, lang)
out_file.write(output)
if post is None:
if shortcode_deps:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += shortcode_deps
def create_post(self, path, **kw):
"""Create a new post."""
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
# is_page is not used by create_post as of now.
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with io.open(path, "w+", encoding="utf-8") as fd:
if onefile:
fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self))
fd.write(content)
def read_metadata(self, post, lang=None):
"""Read the metadata from a post, and return a metadata dict."""
lang = lang or self.site.config['DEFAULT_LANG']
if not self.supports_metadata:
return {}
if Markdown is None:
req_missing(['markdown'], 'build this site (compile Markdown)')
if lang is None:
lang = LocaleBorg().current_lang
source = post.translated_source_path(lang)
with io.open(source, 'r', encoding='utf-8-sig') as inf:
# Note: markdown meta returns lowercase keys
data = inf.read()
# If the metadata starts with "---" it's actually YAML and
# we should not let markdown parse it, because it will do
# bad things like setting empty tags to "''"
if data.startswith('---\n'):
return {}
_, meta = self.converters[lang].convert(data)
# Map metadata from other platforms to names Nikola expects (Issue #2817)
map_metadata(meta, 'markdown_metadata', self.site.config)
return meta
|
import os
import shutil
import tempfile
import unittest
from integration_tests.files import make_unreadable_file, read_file
class Test_make_unreadable_file(unittest.TestCase):
def setUp(self):
self.tmp = tempfile.mkdtemp()
def test(self):
path = os.path.join(self.tmp, "unreadable")
make_unreadable_file(path)
with self.assertRaises(IOError):
read_file(path)
def tearDown(self):
shutil.rmtree(self.tmp)
|
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.test import helper
def setup_server():
class Root:
@cherrypy.expose
def multipart(self, parts):
return repr(parts)
@cherrypy.expose
def multipart_form_data(self, **kwargs):
return repr(list(sorted(kwargs.items())))
@cherrypy.expose
def flashupload(self, Filedata, Upload, Filename):
return ('Upload: %s, Filename: %s, Filedata: %r' %
(Upload, Filename, Filedata.file.read()))
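    # A max_request_body_size of 0 disables CherryPy's request body size
    # limit, so large multipart uploads are not rejected.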
cherrypy.config.update({'server.max_request_body_size': 0})
cherrypy.tree.mount(Root())
# Client-side code #
class MultipartTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_multipart(self):
text_part = ntou('This is the text version')
html_part = ntou(
"""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html;charset=ISO-8859-1" http-equiv="Content-Type">
</head>
<body bgcolor="#ffffff" text="#000000">
This is the <strong>HTML</strong> version
</body>
</html>
""")
body = '\r\n'.join([
'--123456789',
"Content-Type: text/plain; charset='ISO-8859-1'",
'Content-Transfer-Encoding: 7bit',
'',
text_part,
'--123456789',
"Content-Type: text/html; charset='ISO-8859-1'",
'',
html_part,
'--123456789--'])
headers = [
('Content-Type', 'multipart/mixed; boundary=123456789'),
('Content-Length', str(len(body))),
]
self.getPage('/multipart', headers, 'POST', body)
self.assertBody(repr([text_part, html_part]))
def test_multipart_form_data(self):
body = '\r\n'.join([
'--X',
'Content-Disposition: form-data; name="foo"',
'',
'bar',
'--X',
# Test a param with more than one value.
# See
# https://github.com/cherrypy/cherrypy/issues/1028
'Content-Disposition: form-data; name="baz"',
'',
'111',
'--X',
'Content-Disposition: form-data; name="baz"',
'',
'333',
'--X--'
])
self.getPage('/multipart_form_data', method='POST',
headers=[(
'Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
                     body=body)
self.assertBody(
repr([('baz', [ntou('111'), ntou('333')]), ('foo', ntou('bar'))]))
class SafeMultipartHandlingTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_Flash_Upload(self):
headers = [
('Accept', 'text/*'),
('Content-Type', 'multipart/form-data; '
'boundary=----------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6'),
('User-Agent', 'Shockwave Flash'),
('Host', 'www.example.com:54583'),
('Content-Length', '499'),
('Connection', 'Keep-Alive'),
('Cache-Control', 'no-cache'),
]
filedata = (b'<?xml version="1.0" encoding="UTF-8"?>\r\n'
b'<projectDescription>\r\n'
b'</projectDescription>\r\n')
body = (
b'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n'
b'Content-Disposition: form-data; name="Filename"\r\n'
b'\r\n'
b'.project\r\n'
b'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n'
b'Content-Disposition: form-data; '
b'name="Filedata"; filename=".project"\r\n'
b'Content-Type: application/octet-stream\r\n'
b'\r\n' +
filedata +
b'\r\n'
b'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n'
b'Content-Disposition: form-data; name="Upload"\r\n'
b'\r\n'
b'Submit Query\r\n'
# Flash apps omit the trailing \r\n on the last line:
b'------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6--'
)
self.getPage('/flashupload', headers, 'POST', body)
self.assertBody('Upload: Submit Query, Filename: .project, '
'Filedata: %r' % filedata)
|
import traceback
import re
import contextlib
from typing import TYPE_CHECKING, Callable, Dict, Iterator, Mapping, MutableMapping
import attr
from PyQt5.QtCore import pyqtSlot, QUrl, QObject
from qutebrowser.api import cmdutils
from qutebrowser.config import config
from qutebrowser.commands import cmdexc
from qutebrowser.utils import message, objreg, qtutils, usertypes, utils
from qutebrowser.misc import split, objects
from qutebrowser.keyinput import macros, modeman
if TYPE_CHECKING:
from qutebrowser.mainwindow import tabbedbrowser
_ReplacementFunction = Callable[['tabbedbrowser.TabbedBrowser'], str]
last_command = {}
@attr.s
class ParseResult:
"""The result of parsing a commandline."""
cmd = attr.ib()
args = attr.ib()
cmdline = attr.ib()
def _url(tabbed_browser):
"""Convenience method to get the current url."""
try:
return tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdutils.CommandError(msg)
def _init_variable_replacements() -> Mapping[str, _ReplacementFunction]:
"""Return a dict from variable replacements to fns processing them."""
replacements: Dict[str, _ReplacementFunction] = {
'url': lambda tb: _url(tb).toString(
QUrl.FullyEncoded | QUrl.RemovePassword),
'url:pretty': lambda tb: _url(tb).toString(
QUrl.DecodeReserved | QUrl.RemovePassword),
'url:domain': lambda tb: "{}://{}{}".format(
_url(tb).scheme(), _url(tb).host(),
":" + str(_url(tb).port()) if _url(tb).port() != -1 else ""),
'url:auth': lambda tb: "{}:{}@".format(
_url(tb).userName(),
_url(tb).password()) if _url(tb).userName() else "",
'url:scheme': lambda tb: _url(tb).scheme(),
'url:username': lambda tb: _url(tb).userName(),
'url:password': lambda tb: _url(tb).password(),
'url:host': lambda tb: _url(tb).host(),
'url:port': lambda tb: str(
_url(tb).port()) if _url(tb).port() != -1 else "",
'url:path': lambda tb: _url(tb).path(),
'url:query': lambda tb: _url(tb).query(),
'title': lambda tb: tb.widget.page_title(tb.widget.currentIndex()),
'clipboard': lambda _: utils.get_clipboard(),
'primary': lambda _: utils.get_clipboard(selection=True),
}
for key in list(replacements):
modified_key = '{' + key + '}'
# x = modified_key is to avoid binding x as a closure
replacements[modified_key] = (
lambda _, x=modified_key: x) # type: ignore[misc]
return replacements
VARIABLE_REPLACEMENTS = _init_variable_replacements()
# A regex matching all variable replacements
VARIABLE_REPLACEMENT_PATTERN = re.compile(
"{(?P<var>" + "|".join(VARIABLE_REPLACEMENTS.keys()) + ")}")
def replace_variables(win_id, arglist):
"""Utility function to replace variables like {url} in a list of args."""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
values: MutableMapping[str, str] = {}
args = []
def repl_cb(matchobj):
"""Return replacement for given match."""
var = matchobj.group("var")
if var not in values:
values[var] = VARIABLE_REPLACEMENTS[var](tabbed_browser)
return values[var]
try:
for arg in arglist:
# using re.sub with callback function replaces all variables in a
# single pass and avoids expansion of nested variables (e.g.
# "{url}" from clipboard is not expanded)
args.append(VARIABLE_REPLACEMENT_PATTERN.sub(repl_cb, arg))
except utils.ClipboardError as e:
raise cmdutils.CommandError(e)
return args
class CommandParser:
"""Parse qutebrowser commandline commands.
Attributes:
_partial_match: Whether to allow partial command matches.
"""
def __init__(self, partial_match=False):
self._partial_match = partial_match
def _get_alias(self, text, default=None):
"""Get an alias from the config.
Args:
text: The text to parse.
            default: Default value to return when the alias was not found.
Return:
The new command string if an alias was found. Default value
otherwise.
"""
parts = text.strip().split(maxsplit=1)
aliases = config.cache['aliases']
if parts[0] not in aliases:
return default
alias = aliases[parts[0]]
try:
new_cmd = '{} {}'.format(alias, parts[1])
except IndexError:
new_cmd = alias
if text.endswith(' '):
new_cmd += ' '
return new_cmd
def _parse_all_gen(self, text, *args, aliases=True, **kwargs):
"""Split a command on ;; and parse all parts.
If the first command in the commandline is a non-split one, it only
returns that.
Args:
text: Text to parse.
aliases: Whether to handle aliases.
*args/**kwargs: Passed to parse().
Yields:
ParseResult tuples.
"""
text = text.strip().lstrip(':').strip()
if not text:
raise cmdexc.NoSuchCommandError("No command given")
if aliases:
text = self._get_alias(text, text)
if ';;' in text:
# Get the first command and check if it doesn't want to have ;;
# split.
first = text.split(';;')[0]
result = self.parse(first, *args, **kwargs)
if result.cmd.no_cmd_split:
sub_texts = [text]
else:
sub_texts = [e.strip() for e in text.split(';;')]
else:
sub_texts = [text]
for sub in sub_texts:
yield self.parse(sub, *args, **kwargs)
def parse_all(self, *args, **kwargs):
"""Wrapper over _parse_all_gen."""
return list(self._parse_all_gen(*args, **kwargs))
def parse(self, text, *, fallback=False, keep=False):
"""Split the commandline text into command and arguments.
Args:
text: Text to parse.
fallback: Whether to do a fallback splitting when the command was
unknown.
keep: Whether to keep special chars and whitespace
Return:
A ParseResult tuple.
"""
cmdstr, sep, argstr = text.partition(' ')
if not cmdstr and not fallback:
raise cmdexc.NoSuchCommandError("No command given")
if self._partial_match:
cmdstr = self._completion_match(cmdstr)
try:
cmd = objects.commands[cmdstr]
except KeyError:
if not fallback:
raise cmdexc.NoSuchCommandError(
'{}: no such command'.format(cmdstr))
cmdline = split.split(text, keep=keep)
return ParseResult(cmd=None, args=None, cmdline=cmdline)
args = self._split_args(cmd, argstr, keep)
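        # With keep=True, rebuild cmdline so that joining its parts
        # reproduces the original text, separator and whitespace included;
        # otherwise store just the command name and its split arguments.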
if keep and args:
cmdline = [cmdstr, sep + args[0]] + args[1:]
elif keep:
cmdline = [cmdstr, sep]
else:
cmdline = [cmdstr] + args[:]
return ParseResult(cmd=cmd, args=args, cmdline=cmdline)
def _completion_match(self, cmdstr):
"""Replace cmdstr with a matching completion if there's only one match.
Args:
cmdstr: The string representing the entered command so far
Return:
cmdstr modified to the matching completion or unmodified
"""
matches = [cmd for cmd in sorted(objects.commands, key=len)
if cmdstr in cmd]
if len(matches) == 1:
cmdstr = matches[0]
elif len(matches) > 1 and config.val.completion.use_best_match:
cmdstr = matches[0]
return cmdstr
def _split_args(self, cmd, argstr, keep):
"""Split the arguments from an arg string.
Args:
cmd: The command we're currently handling.
argstr: An argument string.
keep: Whether to keep special chars and whitespace
Return:
A list containing the split strings.
"""
if not argstr:
return []
elif cmd.maxsplit is None:
return split.split(argstr, keep=keep)
else:
# If split=False, we still want to split the flags, but not
# everything after that.
# We first split the arg string and check the index of the first
# non-flag args, then we re-split again properly.
# example:
#
# input: "--foo -v bar baz"
# first split: ['--foo', '-v', 'bar', 'baz']
# 0 1 2 3
# second split: ['--foo', '-v', 'bar baz']
# (maxsplit=2)
split_args = split.simple_split(argstr, keep=keep)
flag_arg_count = 0
for i, arg in enumerate(split_args):
arg = arg.strip()
if arg.startswith('-'):
if arg in cmd.flags_with_args:
flag_arg_count += 1
else:
maxsplit = i + cmd.maxsplit + flag_arg_count
return split.simple_split(argstr, keep=keep,
maxsplit=maxsplit)
# If there are only flags, we got it right on the first try
# already.
return split_args
class AbstractCommandRunner(QObject):
"""Abstract base class for CommandRunner."""
def run(self, text, count=None, *, safely=False):
raise NotImplementedError
@pyqtSlot(str, int)
@pyqtSlot(str)
def run_safely(self, text, count=None):
"""Run a command and display exceptions in the statusbar."""
self.run(text, count, safely=True)
class CommandRunner(AbstractCommandRunner):
"""Parse and run qutebrowser commandline commands.
Attributes:
_win_id: The window this CommandRunner is associated with.
"""
def __init__(self, win_id, partial_match=False, parent=None):
super().__init__(parent)
self._parser = CommandParser(partial_match=partial_match)
self._win_id = win_id
@contextlib.contextmanager
def _handle_error(self, safely: bool) -> Iterator[None]:
"""Show exceptions as errors if safely=True is given."""
try:
yield
except cmdexc.Error as e:
if safely:
message.error(str(e), stack=traceback.format_exc())
else:
raise
def run(self, text, count=None, *, safely=False):
"""Parse a command from a line of text and run it.
Args:
text: The text to parse.
count: The count to pass to the command.
safely: Show CmdError exceptions as messages.
"""
record_last_command = True
record_macro = True
mode_manager = modeman.instance(self._win_id)
cur_mode = mode_manager.mode
parsed = None
with self._handle_error(safely):
parsed = self._parser.parse_all(text)
if parsed is None:
return
for result in parsed:
with self._handle_error(safely):
if result.cmd.no_replace_variables:
args = result.args
else:
args = replace_variables(self._win_id, result.args)
result.cmd.run(self._win_id, args, count=count)
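                # Running 'repeat-command' must not overwrite the last
                # command, and macro bookkeeping commands must not be
                # recorded as part of a macro.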
if result.cmdline[0] == 'repeat-command':
record_last_command = False
if result.cmdline[0] in ['record-macro', 'run-macro',
'set-cmd-text']:
record_macro = False
if record_last_command:
last_command[cur_mode] = (text, count)
if record_macro and cur_mode == usertypes.KeyMode.normal:
macros.macro_recorder.record_command(text, count)
|
from __future__ import division
import unittest
import numpy as np
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.utils import mask_iou
@testing.parameterize(
{'mask_a': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]]],
dtype=np.bool),
'mask_b': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]],
[[True, False], [True, True]],
[[True, True], [False, True]]],
dtype=np.bool),
'expected': np.array(
[[1., 0., 2 / 3, 1 / 4],
[0., 1., 1 / 4, 2 / 3]],
dtype=np.float32)
},
{'mask_a': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]],
[[True, True], [True, False]],
[[False, True], [True, True]]],
dtype=np.bool),
'mask_b': np.array(
[[[False, False], [True, True]],
[[True, True], [False, False]]],
dtype=np.bool),
'expected': np.array(
[[1., 0.], [0., 1.], [1 / 4, 2 / 3], [2 / 3, 1 / 4]],
dtype=np.float32)
},
{'mask_a': np.zeros((0, 2, 2), dtype=np.bool),
'mask_b': np.array([[[False, False], [False, False]]], dtype=np.bool),
'expected': np.zeros((0, 1), dtype=np.float32)
},
)
class TestMaskIou(unittest.TestCase):
def check(self, mask_a, mask_b, expected):
iou = mask_iou(mask_a, mask_b)
self.assertIsInstance(iou, type(expected))
np.testing.assert_equal(
cuda.to_cpu(iou),
cuda.to_cpu(expected))
def test_mask_iou_cpu(self):
self.check(self.mask_a, self.mask_b, self.expected)
@attr.gpu
def test_mask_iou_gpu(self):
self.check(
cuda.to_gpu(self.mask_a),
cuda.to_gpu(self.mask_b),
cuda.to_gpu(self.expected))
@testing.parameterize(
{'mask_a': np.array([[[False], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False, False], [True, True]]], dtype=np.bool)
},
{'mask_a': np.array([[[False, False, True], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False, False], [True, True]]], dtype=np.bool)
},
{'mask_a': np.array([[[False, False], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False], [True, True]]], dtype=np.bool)
},
{'mask_a': np.array([[[False, False], [True, True]]], dtype=np.bool),
'mask_b': np.array([[[False, False, True], [True, True]]], dtype=np.bool)
},
)
class TestMaskIouInvalidShape(unittest.TestCase):
def test_mask_iou_invalid(self):
with self.assertRaises(IndexError):
mask_iou(self.mask_a, self.mask_b)
testing.run_module(__name__, __file__)
|
from __future__ import print_function
import codecs
import os.path
import re
import sys
import time
import traceback
import unittest
try:
from cStringIO import StringIO
python2 = True
except ImportError:
from io import StringIO
python2 = False
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
def cdata(cdata_text):
return '<![CDATA[\n{}\n]]>'.format(cdata_text)
class _TestInfo(object):
"""Information about a particular test.
Used by _XMLTestResult."""
def __init__(self, test, time):
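        # test.id() has the form 'package.module.Class.method'; keep the
        # part before the last dot as the classname and the rest as the
        # method name for the XML attributes.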
(self._class, self._method) = test.id().rsplit('.', 1)
self._time = time
self._error = None
self._failure = None
@staticmethod
def create_success(test, time):
"""Create a _TestInfo instance for a successful test."""
return _TestInfo(test, time)
@staticmethod
def create_failure(test, time, failure):
"""Create a _TestInfo instance for a failed test."""
info = _TestInfo(test, time)
info._failure = failure
return info
@staticmethod
def create_error(test, time, error):
"""Create a _TestInfo instance for an erroneous test."""
info = _TestInfo(test, time)
info._error = error
return info
def xml(self):
"""Create an XML tag with information about this test case.
"""
testcase = ET.Element('testcase')
testcase.set('classname', self._class)
testcase.set('name', self._method)
testcase.set('time', '%.4f' % self._time)
if self._failure is not None:
self._print_error(testcase, 'failure', self._failure)
if self._error is not None:
self._print_error(testcase, 'error', self._error)
return testcase
def print_report(self, stream):
"""Print information about this test case in XML format to the
supplied stream.
"""
stream.write(ET.tostring(self.xml()))
def print_report_text(self, stream):
# stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \
# {
# "class": self._class,
# "method": self._method,
# "time": self._time,
# })
stream.write('[Testcase: ' + self._method + ']')
if self._failure is not None:
stream.write(' ... FAILURE!\n')
self._print_error_text(stream, 'failure', self._failure)
if self._error is not None:
stream.write(' ... ERROR!\n')
self._print_error_text(stream, 'error', self._error)
if self._failure is None and self._error is None:
stream.write(' ... ok\n')
def _print_error(self, testcase, tagname, error):
"""
Append an XML tag with information from a failure or error to the
supplied testcase.
"""
tag = ET.SubElement(testcase, tagname)
tag.set('type', str(error[0].__name__))
tb_stream = StringIO()
traceback.print_tb(error[2], None, tb_stream)
tag.text = '%s\n%s' % (str(error[1]), tb_stream.getvalue())
def _print_error_text(self, stream, tagname, error):
"""Print information from a failure or error to the supplied stream."""
text = escape(str(error[1]))
stream.write('%s: %s\n' % (tagname.upper(), text))
tb_stream = StringIO()
traceback.print_tb(error[2], None, tb_stream)
stream.write(escape(tb_stream.getvalue()))
stream.write('-' * 80 + '\n')
class _XMLTestResult(unittest.TestResult):
"""A test result class that stores result as XML.
Used by XMLTestRunner."""
def __init__(self, classname):
unittest.TestResult.__init__(self)
self._test_name = classname
self._start_time = None
self._tests = []
self._error = None
self._failure = None
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self._error = None
self._failure = None
self._start_time = time.time()
def stopTest(self, test):
time_taken = time.time() - self._start_time
unittest.TestResult.stopTest(self, test)
if self._error:
info = _TestInfo.create_error(test, time_taken, self._error)
elif self._failure:
info = _TestInfo.create_failure(test, time_taken, self._failure)
else:
info = _TestInfo.create_success(test, time_taken)
self._tests.append(info)
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self._error = err
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self._failure = err
def filter_nonprintable_text(self, text):
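        # Replace characters outside the allowed XML 1.0 ranges below with
        # numeric character references so raw control bytes never end up
        # in the generated report.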
pattern = r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\xFF\u0100-\uD7FF\uE000-\uFDCF\uFDE0-\uFFFD]'
if python2:
pattern = pattern.decode('unicode_escape')
else:
pattern = codecs.decode(pattern, 'unicode_escape')
invalid_chars = re.compile(pattern)
def invalid_char_replacer(m):
return '&#x' + ('%04X' % ord(m.group(0))) + ';'
return re.sub(invalid_chars, invalid_char_replacer, str(text))
def xml(self, time_taken, out, err):
"""
@return XML tag representing the object
@rtype: xml.etree.ElementTree.Element
"""
test_suite = ET.Element('testsuite')
test_suite.set('errors', str(len(self.errors)))
test_suite.set('failures', str(len(self.failures)))
test_suite.set('name', self._test_name)
test_suite.set('tests', str(self.testsRun))
test_suite.set('time', '%.3f' % time_taken)
for info in self._tests:
test_suite.append(info.xml())
system_out = ET.SubElement(test_suite, 'system-out')
system_out.text = cdata(self.filter_nonprintable_text(out))
system_err = ET.SubElement(test_suite, 'system-err')
system_err.text = cdata(self.filter_nonprintable_text(err))
return ET.ElementTree(test_suite)
def print_report(self, stream, time_taken, out, err):
"""Prints the XML report to the supplied stream.
The time the tests took to perform as well as the captured standard
        output and standard error streams must be passed in.
"""
root = self.xml(time_taken, out, err).getroot()
stream.write(ET.tostring(root, encoding='utf-8', method='xml').decode('utf-8'))
def print_report_text(self, stream, time_taken, out, err):
"""Prints the text report to the supplied stream.
The time the tests took to perform as well as the captured standard
        output and standard error streams must be passed in.
"""
# stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \
# { "e": len(self.errors), "f": len(self.failures) })
# stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \
# {
# "n": self._test_name,
# "t": self.testsRun,
# "time": time_taken,
# })
for info in self._tests:
info.print_report_text(stream)
class XMLTestRunner(object):
"""A test runner that stores results in XML format compatible with JUnit.
XMLTestRunner(stream=None) -> XML test runner
The XML file is written to the supplied stream. If stream is None, the
results are stored in a file called TEST-<module>.<class>.xml in the
current working directory (if not overridden with the path property),
where <module> and <class> are the module and class name of the test class."""
def __init__(self, stream=None):
self._stream = stream
self._path = '.'
def run(self, test):
"""Run the given test case or test suite."""
class_ = test.__class__
classname = class_.__module__ + '.' + class_.__name__
if self._stream is None:
filename = 'TEST-%s.xml' % classname
stream = open(os.path.join(self._path, filename), 'w')
stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
else:
stream = self._stream
result = _XMLTestResult(classname)
start_time = time.time()
# TODO: Python 2.5: Use the with statement
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
test(result)
try:
out_s = sys.stdout.getvalue()
except AttributeError:
out_s = ''
try:
err_s = sys.stderr.getvalue()
except AttributeError:
err_s = ''
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
time_taken = time.time() - start_time
result.print_report(stream, time_taken, out_s, err_s)
stream.flush()
result.print_report_text(sys.stdout, time_taken, out_s, err_s)
return result
def _set_path(self, path):
self._path = path
path = property(
lambda self: self._path, _set_path, None,
"""The path where the XML files are stored.
This property is ignored when the XML file is written to a file
stream.""")
|
import unittest
import boto3
import moto
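# moto's mock_s3 decorator intercepts boto3 S3 calls, so the module-level
# bucket setup/teardown and the test below never touch real AWS.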
@moto.mock_s3()
def setUpModule():
bucket = boto3.resource('s3').create_bucket(Bucket='mybucket')
bucket.wait_until_exists()
@moto.mock_s3()
def tearDownModule():
resource = boto3.resource('s3')
bucket = resource.Bucket('mybucket')
try:
bucket.delete()
except resource.meta.client.exceptions.NoSuchBucket:
pass
bucket.wait_until_not_exists()
@moto.mock_s3()
class Test(unittest.TestCase):
def test(self):
resource = boto3.resource('s3')
bucket = resource.Bucket('mybucket')
self.assertEqual(bucket.name, 'mybucket')
expected = b'hello'
resource.Object('mybucket', 'mykey').put(Body=expected)
actual = resource.Object('mybucket', 'mykey').get()['Body'].read()
self.assertEqual(expected, actual)
def tearDown(self):
boto3.resource('s3').Object('mybucket', 'mykey').delete()
|
import os
import attr
import pytest
import bs4
from PyQt5.QtCore import QUrl
from qutebrowser.utils import urlutils
from helpers import utils as testutils
pytestmark = pytest.mark.qtwebengine_skip("Title is empty when parsing for "
"some reason?")
class DirLayout:
"""Provide a fake directory layout to test dirbrowser."""
LAYOUT = [
'folder0/file00',
'folder0/file01',
'folder1/folder10/file100',
'folder1/file10',
'folder1/file11',
'file0',
'file1',
]
@classmethod
def layout_folders(cls):
"""Return all folders in the root directory of the layout."""
folders = set()
for path in cls.LAYOUT:
parts = path.split('/')
if len(parts) > 1:
folders.add(parts[0])
folders = list(folders)
folders.sort()
return folders
@classmethod
def get_folder_content(cls, name):
"""Return (folders, files) for the given folder in the root dir."""
folders = set()
files = set()
for path in cls.LAYOUT:
if not path.startswith(name + '/'):
continue
parts = path.split('/')
if len(parts) == 2:
files.add(parts[1])
else:
folders.add(parts[1])
folders = list(folders)
folders.sort()
files = list(files)
files.sort()
return (folders, files)
def __init__(self, factory):
self._factory = factory
self.base = factory.getbasetemp()
self.layout = factory.mktemp('layout')
self._mklayout()
def _mklayout(self):
for filename in self.LAYOUT:
self.layout.ensure(filename)
def file_url(self):
"""Return a file:// link to the directory."""
return urlutils.file_url(str(self.layout))
def path(self, *parts):
"""Return the path to the given file inside the layout folder."""
return os.path.normpath(str(self.layout.join(*parts)))
def base_path(self):
"""Return the path of the base temporary folder."""
return os.path.normpath(str(self.base))
@attr.s
class Parsed:
path = attr.ib()
parent = attr.ib()
folders = attr.ib()
files = attr.ib()
@attr.s
class Item:
path = attr.ib()
link = attr.ib()
text = attr.ib()
def parse(quteproc):
"""Parse the dirbrowser content from the given quteproc.
Args:
quteproc: The quteproc fixture.
"""
html = quteproc.get_content(plain=False)
soup = bs4.BeautifulSoup(html, 'html.parser')
with testutils.ignore_bs4_warning():
print(soup.prettify())
title_prefix = 'Browse directory: '
# Strip off the title prefix to obtain the path of the folder that
# we're browsing
path = soup.title.string[len(title_prefix):]
path = os.path.normpath(path)
container = soup('div', id='dirbrowserContainer')[0]
parent_elem = container('ul', class_='parent')
if not parent_elem:
parent = None
else:
parent = QUrl(parent_elem[0].li.a['href']).toLocalFile()
parent = os.path.normpath(parent)
folders = []
files = []
for css_class, list_ in [('folders', folders), ('files', files)]:
for li in container('ul', class_=css_class)[0]('li'):
item_path = QUrl(li.a['href']).toLocalFile()
item_path = os.path.normpath(item_path)
list_.append(Item(path=item_path, link=li.a['href'],
text=str(li.a.string)))
return Parsed(path=path, parent=parent, folders=folders, files=files)
@pytest.fixture(scope='module')
def dir_layout(tmpdir_factory):
return DirLayout(tmpdir_factory)
def test_parent_folder(dir_layout, quteproc):
quteproc.open_url(dir_layout.file_url())
page = parse(quteproc)
assert page.parent == dir_layout.base_path()
def test_parent_with_slash(dir_layout, quteproc):
"""Test the parent link with a URL that has a trailing slash."""
quteproc.open_url(dir_layout.file_url() + '/')
page = parse(quteproc)
assert page.parent == dir_layout.base_path()
def test_parent_in_root_dir(dir_layout, quteproc):
# This actually works on windows
root_path = os.path.realpath('/')
urlstr = QUrl.fromLocalFile(root_path).toString(QUrl.FullyEncoded)
quteproc.open_url(urlstr)
page = parse(quteproc)
assert page.parent is None
def test_enter_folder_smoke(dir_layout, quteproc):
quteproc.open_url(dir_layout.file_url())
quteproc.send_cmd(':hint all normal')
# a is the parent link, s is the first listed folder/file
quteproc.send_cmd(':follow-hint s')
expected_url = urlutils.file_url(dir_layout.path('folder0'))
quteproc.wait_for_load_finished_url(expected_url)
page = parse(quteproc)
assert page.path == dir_layout.path('folder0')
@pytest.mark.parametrize('folder', DirLayout.layout_folders())
def test_enter_folder(dir_layout, quteproc, folder):
quteproc.open_url(dir_layout.file_url())
quteproc.click_element_by_text(text=folder)
expected_url = urlutils.file_url(dir_layout.path(folder))
quteproc.wait_for_load_finished_url(expected_url)
page = parse(quteproc)
assert page.path == dir_layout.path(folder)
assert page.parent == dir_layout.path()
folders, files = DirLayout.get_folder_content(folder)
foldernames = [item.text for item in page.folders]
assert foldernames == folders
filenames = [item.text for item in page.files]
assert filenames == files
|
from collections import deque
import logging
import statistics
import voluptuous as vol
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import execute, session_scope
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
async_track_point_in_utc_time,
async_track_state_change_event,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.util import dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
ATTR_AVERAGE_CHANGE = "average_change"
ATTR_CHANGE = "change"
ATTR_CHANGE_RATE = "change_rate"
ATTR_COUNT = "count"
ATTR_MAX_AGE = "max_age"
ATTR_MAX_VALUE = "max_value"
ATTR_MEAN = "mean"
ATTR_MEDIAN = "median"
ATTR_MIN_AGE = "min_age"
ATTR_MIN_VALUE = "min_value"
ATTR_SAMPLING_SIZE = "sampling_size"
ATTR_STANDARD_DEVIATION = "standard_deviation"
ATTR_TOTAL = "total"
ATTR_VARIANCE = "variance"
CONF_SAMPLING_SIZE = "sampling_size"
CONF_MAX_AGE = "max_age"
CONF_PRECISION = "precision"
DEFAULT_NAME = "Stats"
DEFAULT_SIZE = 20
DEFAULT_PRECISION = 2
ICON = "mdi:calculator"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SAMPLING_SIZE, default=DEFAULT_SIZE): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_AGE): cv.time_period,
vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Statistics sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
entity_id = config.get(CONF_ENTITY_ID)
name = config.get(CONF_NAME)
sampling_size = config.get(CONF_SAMPLING_SIZE)
max_age = config.get(CONF_MAX_AGE)
precision = config.get(CONF_PRECISION)
async_add_entities(
[StatisticsSensor(entity_id, name, sampling_size, max_age, precision)], True
)
return True
class StatisticsSensor(Entity):
"""Representation of a Statistics sensor."""
def __init__(self, entity_id, name, sampling_size, max_age, precision):
"""Initialize the Statistics sensor."""
self._entity_id = entity_id
self.is_binary = self._entity_id.split(".")[0] == "binary_sensor"
self._name = name
self._sampling_size = sampling_size
self._max_age = max_age
self._precision = precision
self._unit_of_measurement = None
self.states = deque(maxlen=self._sampling_size)
self.ages = deque(maxlen=self._sampling_size)
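        # Both deques share maxlen=sampling_size, so appending a new sample
        # automatically evicts the oldest one once the window is full.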
self.count = 0
self.mean = self.median = self.stdev = self.variance = None
self.total = self.min = self.max = None
self.min_age = self.max_age = None
self.change = self.average_change = self.change_rate = None
self._update_listener = None
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def async_stats_sensor_state_listener(event):
"""Handle the sensor state changes."""
new_state = event.data.get("new_state")
if new_state is None:
return
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
self._add_state_to_queue(new_state)
self.async_schedule_update_ha_state(True)
@callback
def async_stats_sensor_startup(_):
"""Add listener and get recorded state."""
_LOGGER.debug("Startup for %s", self.entity_id)
self.async_on_remove(
async_track_state_change_event(
self.hass, [self._entity_id], async_stats_sensor_state_listener
)
)
if "recorder" in self.hass.config.components:
# Only use the database if it's configured
self.hass.async_create_task(self._async_initialize_from_database())
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_stats_sensor_startup
)
def _add_state_to_queue(self, new_state):
"""Add the state to the queue."""
if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
return
try:
if self.is_binary:
self.states.append(new_state.state)
else:
self.states.append(float(new_state.state))
self.ages.append(new_state.last_updated)
except ValueError:
_LOGGER.error(
"%s: parsing error, expected number and received %s",
self.entity_id,
new_state.state,
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self.mean if not self.is_binary else self.count
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement if not self.is_binary else None
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if not self.is_binary:
return {
ATTR_SAMPLING_SIZE: self._sampling_size,
ATTR_COUNT: self.count,
ATTR_MEAN: self.mean,
ATTR_MEDIAN: self.median,
ATTR_STANDARD_DEVIATION: self.stdev,
ATTR_VARIANCE: self.variance,
ATTR_TOTAL: self.total,
ATTR_MIN_VALUE: self.min,
ATTR_MAX_VALUE: self.max,
ATTR_MIN_AGE: self.min_age,
ATTR_MAX_AGE: self.max_age,
ATTR_CHANGE: self.change,
ATTR_AVERAGE_CHANGE: self.average_change,
ATTR_CHANGE_RATE: self.change_rate,
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def _purge_old(self):
"""Remove states which are older than self._max_age."""
now = dt_util.utcnow()
_LOGGER.debug(
"%s: purging records older then %s(%s)",
self.entity_id,
dt_util.as_local(now - self._max_age),
self._max_age,
)
while self.ages and (now - self.ages[0]) > self._max_age:
_LOGGER.debug(
"%s: purging record with datetime %s(%s)",
self.entity_id,
dt_util.as_local(self.ages[0]),
(now - self.ages[0]),
)
self.ages.popleft()
self.states.popleft()
def _next_to_purge_timestamp(self):
"""Find the timestamp when the next purge would occur."""
if self.ages and self._max_age:
# Take the oldest entry from the ages list and add the configured max_age.
# If executed after purging old states, the result is the next timestamp
# in the future when the oldest state will expire.
return self.ages[0] + self._max_age
return None
async def async_update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("%s: updating statistics", self.entity_id)
if self._max_age is not None:
self._purge_old()
self.count = len(self.states)
if not self.is_binary:
try: # require only one data point
self.mean = round(statistics.mean(self.states), self._precision)
self.median = round(statistics.median(self.states), self._precision)
except statistics.StatisticsError as err:
_LOGGER.debug("%s: %s", self.entity_id, err)
self.mean = self.median = STATE_UNKNOWN
try: # require at least two data points
self.stdev = round(statistics.stdev(self.states), self._precision)
self.variance = round(statistics.variance(self.states), self._precision)
except statistics.StatisticsError as err:
_LOGGER.debug("%s: %s", self.entity_id, err)
self.stdev = self.variance = STATE_UNKNOWN
if self.states:
self.total = round(sum(self.states), self._precision)
self.min = round(min(self.states), self._precision)
self.max = round(max(self.states), self._precision)
self.min_age = self.ages[0]
self.max_age = self.ages[-1]
self.change = self.states[-1] - self.states[0]
self.average_change = self.change
self.change_rate = 0
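                # average_change is the mean change between consecutive
                # samples; change_rate is the overall change per second
                # across the sampled time span.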
if len(self.states) > 1:
self.average_change /= len(self.states) - 1
time_diff = (self.max_age - self.min_age).total_seconds()
if time_diff > 0:
self.change_rate = self.change / time_diff
self.change = round(self.change, self._precision)
self.average_change = round(self.average_change, self._precision)
self.change_rate = round(self.change_rate, self._precision)
else:
self.total = self.min = self.max = STATE_UNKNOWN
self.min_age = self.max_age = dt_util.utcnow()
self.change = self.average_change = STATE_UNKNOWN
self.change_rate = STATE_UNKNOWN
# If max_age is set, ensure to update again after the defined interval.
next_to_purge_timestamp = self._next_to_purge_timestamp()
if next_to_purge_timestamp:
_LOGGER.debug(
"%s: scheduling update at %s", self.entity_id, next_to_purge_timestamp
)
if self._update_listener:
self._update_listener()
self._update_listener = None
@callback
def _scheduled_update(now):
"""Timer callback for sensor update."""
_LOGGER.debug("%s: executing scheduled update", self.entity_id)
self.async_schedule_update_ha_state(True)
self._update_listener = None
self._update_listener = async_track_point_in_utc_time(
self.hass, _scheduled_update, next_to_purge_timestamp
)
async def _async_initialize_from_database(self):
"""Initialize the list of states from the database.
        The query fetches states in DESCENDING order so that the result can
        be limited to self._sampling_size entries; the list is then reversed
        to restore chronological order.
        If MaxAge is provided, the query is restricted to entries younger
        than the current datetime minus MaxAge.
"""
_LOGGER.debug("%s: initializing values from the database", self.entity_id)
with session_scope(hass=self.hass) as session:
query = session.query(States).filter(
States.entity_id == self._entity_id.lower()
)
if self._max_age is not None:
                records_older_than = dt_util.utcnow() - self._max_age
                _LOGGER.debug(
                    "%s: retrieving records not older than %s",
                    self.entity_id,
                    records_older_than,
                )
                query = query.filter(States.last_updated >= records_older_than)
else:
_LOGGER.debug("%s: retrieving all records", self.entity_id)
query = query.order_by(States.last_updated.desc()).limit(
self._sampling_size
)
states = execute(query, to_native=True, validate_entity_ids=False)
for state in reversed(states):
self._add_state_to_queue(state)
self.async_schedule_update_ha_state(True)
_LOGGER.debug("%s: initializing from database completed", self.entity_id)
|
from typing import Optional, Union, cast
import voluptuous as vol
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
from . import commands, connection, const, decorators, http, messages # noqa
from .connection import ActiveConnection # noqa
from .const import ( # noqa
ERR_HOME_ASSISTANT_ERROR,
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_TEMPLATE_ERROR,
ERR_TIMEOUT,
ERR_UNAUTHORIZED,
ERR_UNKNOWN_COMMAND,
ERR_UNKNOWN_ERROR,
)
from .decorators import ( # noqa
async_response,
require_admin,
websocket_command,
ws_require_user,
)
from .messages import ( # noqa
BASE_COMMAND_MESSAGE_SCHEMA,
error_message,
event_message,
result_message,
)
# mypy: allow-untyped-calls, allow-untyped-defs
DOMAIN = const.DOMAIN
DEPENDENCIES = ("http",)
@bind_hass
@callback
def async_register_command(
hass: HomeAssistant,
command_or_handler: Union[str, const.WebSocketCommandHandler],
handler: Optional[const.WebSocketCommandHandler] = None,
schema: Optional[vol.Schema] = None,
) -> None:
"""Register a websocket command."""
# pylint: disable=protected-access
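    # Two call styles are supported: (hass, handler), where the command name
    # and schema come from the @websocket_command decorator, or
    # (hass, command, handler, schema) with everything passed explicitly.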
if handler is None:
handler = cast(const.WebSocketCommandHandler, command_or_handler)
command = handler._ws_command # type: ignore
schema = handler._ws_schema # type: ignore
else:
command = command_or_handler
handlers = hass.data.get(DOMAIN)
if handlers is None:
handlers = hass.data[DOMAIN] = {}
handlers[command] = (handler, schema)
async def async_setup(hass, config):
"""Initialize the websocket API."""
hass.http.register_view(http.WebsocketAPIView)
commands.async_register_commands(hass, async_register_command)
return True
|
from homeassistant import config_entries, setup
from homeassistant.components.progettihwsw.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
mock_value_step_user = {
"title": "1R & 1IN Board",
"relay_count": 1,
"input_count": 1,
"is_old": False,
}
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_value_step_rm = {
"relay_1": "bistable", # Mocking a single relay board instance.
}
with patch(
"homeassistant.components.progettihwsw.config_flow.ProgettiHWSWAPI.check_board",
return_value=mock_value_step_user,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "", CONF_PORT: 80},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["step_id"] == "relay_modes"
assert result2["errors"] == {}
with patch(
"homeassistant.components.progettihwsw.async_setup",
return_value=True,
), patch(
"homeassistant.components.progettihwsw.async_setup_entry",
return_value=True,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
mock_value_step_rm,
)
assert result3["type"] == RESULT_TYPE_CREATE_ENTRY
assert result3["data"]
assert result3["data"]["title"] == "1R & 1IN Board"
assert result3["data"]["is_old"] is False
assert result3["data"]["relay_count"] == result3["data"]["input_count"] == 1
async def test_form_cannot_connect(hass):
"""Test we handle unexisting board."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["step_id"] == "user"
with patch(
"homeassistant.components.progettihwsw.config_flow.ProgettiHWSWAPI.check_board",
return_value=False,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "", CONF_PORT: 80},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_existing_entry_exception(hass):
"""Test we handle existing board."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["step_id"] == "user"
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "",
CONF_PORT: 80,
},
)
entry.add_to_hass(hass)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "", CONF_PORT: 80},
)
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
async def test_form_user_exception(hass):
"""Test we handle unknown exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["step_id"] == "user"
with patch(
"homeassistant.components.progettihwsw.config_flow.validate_input",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "", CONF_PORT: 80},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
|
import os
import sys
from mock import patch, mock_open
import pytest
from decouple import Config, RepositoryIni, UndefinedValueError
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
from io import StringIO
else:
from io import BytesIO as StringIO
INIFILE = '''
[settings]
KeyTrue=True
KeyOne=1
KeyYes=yes
KeyY=y
KeyOn=on
KeyFalse=False
KeyZero=0
KeyNo=no
KeyN=n
KeyOff=off
KeyEmpty=
#CommentedKey=None
PercentIsEscaped=%%
Interpolation=%(KeyOff)s
IgnoreSpace = text
KeyOverrideByEnv=NotThis
'''
@pytest.fixture(scope='module')
def config():
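    # Patch the module-level open() used by decouple so RepositoryIni parses
    # the in-memory INIFILE instead of a settings.ini file on disk.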
with patch('decouple.open', return_value=StringIO(INIFILE), create=True):
return Config(RepositoryIni('settings.ini'))
def test_ini_comment(config):
with pytest.raises(UndefinedValueError):
config('CommentedKey')
def test_ini_percent_escape(config):
assert '%' == config('PercentIsEscaped')
def test_ini_interpolation(config):
assert 'off' == config('Interpolation')
def test_ini_bool_true(config):
assert True is config('KeyTrue', cast=bool)
assert True is config('KeyOne', cast=bool)
assert True is config('KeyYes', cast=bool)
assert True is config('KeyY', cast=bool)
assert True is config('KeyOn', cast=bool)
assert True is config('Key1int', default=1, cast=bool)
def test_ini_bool_false(config):
assert False is config('KeyFalse', cast=bool)
assert False is config('KeyZero', cast=bool)
assert False is config('KeyNo', cast=bool)
assert False is config('KeyOff', cast=bool)
assert False is config('KeyN', cast=bool)
assert False is config('KeyEmpty', cast=bool)
assert False is config('Key0int', default=0, cast=bool)
def test_init_undefined(config):
with pytest.raises(UndefinedValueError):
config('UndefinedKey')
def test_ini_default_none(config):
assert None is config('UndefinedKey', default=None)
def test_ini_default_bool(config):
assert False is config('UndefinedKey', default=False, cast=bool)
assert True is config('UndefinedKey', default=True, cast=bool)
def test_ini_default(config):
assert False is config('UndefinedKey', default=False)
assert True is config('UndefinedKey', default=True)
def test_ini_default_invalid_bool(config):
with pytest.raises(ValueError):
config('UndefinedKey', default='NotBool', cast=bool)
def test_ini_empty(config):
    assert '' == config('KeyEmpty', default=None)
def test_ini_support_space(config):
assert 'text' == config('IgnoreSpace')
def test_ini_os_environ(config):
os.environ['KeyOverrideByEnv'] = 'This'
assert 'This' == config('KeyOverrideByEnv')
del os.environ['KeyOverrideByEnv']
def test_ini_undefined_but_present_in_os_environ(config):
os.environ['KeyOnlyEnviron'] = ''
assert '' == config('KeyOnlyEnviron')
del os.environ['KeyOnlyEnviron']
def test_ini_empty_string_means_false(config):
assert False is config('KeyEmpty', cast=bool)
|
import logging
import aiohttp
from pynws import SimpleNWS
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import base_unique_id
from .const import CONF_STATION, DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
latitude = data[CONF_LATITUDE]
longitude = data[CONF_LONGITUDE]
api_key = data[CONF_API_KEY]
station = data.get(CONF_STATION)
client_session = async_get_clientsession(hass)
ha_api_key = f"{api_key} homeassistant"
nws = SimpleNWS(latitude, longitude, ha_api_key, client_session)
try:
await nws.set_station(station)
except aiohttp.ClientError as err:
_LOGGER.error("Could not connect: %s", err)
raise CannotConnect from err
return {"title": nws.station}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for National Weather Service (NWS)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(
base_unique_id(user_input[CONF_LATITUDE], user_input[CONF_LONGITUDE])
)
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
user_input[CONF_STATION] = info["title"]
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
data_schema = vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Required(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Required(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Optional(CONF_STATION): str,
}
)
return self.async_show_form(
step_id="user", data_schema=data_schema, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, STATE_UNKNOWN, TEMP_CELSIUS
from homeassistant.setup import async_setup_component
async def test_sensor_upper(hass):
"""Test if source is above threshold."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "15",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "sensor.test_monitored" == state.attributes.get("entity_id")
assert 16 == state.attributes.get("sensor_value")
assert "above" == state.attributes.get("position")
assert float(config["binary_sensor"]["upper"]) == state.attributes.get("upper")
assert 0.0 == state.attributes.get("hysteresis")
assert "upper" == state.attributes.get("type")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 14)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 15)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
async def test_sensor_lower(hass):
"""Test if source is below threshold."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "15",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 16)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "above" == state.attributes.get("position")
assert float(config["binary_sensor"]["lower"]) == state.attributes.get("lower")
assert 0.0 == state.attributes.get("hysteresis")
assert "lower" == state.attributes.get("type")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 14)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_hysteresis(hass):
"""Test if source is above threshold using hysteresis."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "15",
"hysteresis": "2.5",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 20)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "above" == state.attributes.get("position")
assert float(config["binary_sensor"]["upper"]) == state.attributes.get("upper")
assert 2.5 == state.attributes.get("hysteresis")
assert "upper" == state.attributes.get("type")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 13)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 12)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 17)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 18)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_in_range_no_hysteresis(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert 16 == state.attributes.get("sensor_value")
assert "in_range" == state.attributes.get("position")
assert float(config["binary_sensor"]["lower"]) == state.attributes.get("lower")
assert float(config["binary_sensor"]["upper"]) == state.attributes.get("upper")
assert 0.0 == state.attributes.get("hysteresis")
assert "range" == state.attributes.get("type")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 9)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "below" == state.attributes.get("position")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 21)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "above" == state.attributes.get("position")
assert state.state == "off"
async def test_sensor_in_range_with_hysteresis(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"hysteresis": "2",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "sensor.test_monitored" == state.attributes.get("entity_id")
assert 16 == state.attributes.get("sensor_value")
assert "in_range" == state.attributes.get("position")
assert float(config["binary_sensor"]["lower"]) == state.attributes.get("lower")
assert float(config["binary_sensor"]["upper"]) == state.attributes.get("upper")
assert float(config["binary_sensor"]["hysteresis"]) == state.attributes.get(
"hysteresis"
)
assert "range" == state.attributes.get("type")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 8)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "in_range" == state.attributes.get("position")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 7)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "below" == state.attributes.get("position")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 12)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "below" == state.attributes.get("position")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 13)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "in_range" == state.attributes.get("position")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 22)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "in_range" == state.attributes.get("position")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 23)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "above" == state.attributes.get("position")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 18)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "above" == state.attributes.get("position")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 17)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "in_range" == state.attributes.get("position")
assert state.state == "on"
async def test_sensor_in_range_unknown_state(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "sensor.test_monitored" == state.attributes.get("entity_id")
assert 16 == state.attributes.get("sensor_value")
assert "in_range" == state.attributes.get("position")
assert float(config["binary_sensor"]["lower"]) == state.attributes.get("lower")
assert float(config["binary_sensor"]["upper"]) == state.attributes.get("upper")
assert 0.0 == state.attributes.get("hysteresis")
assert "range" == state.attributes.get("type")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", STATE_UNKNOWN)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "unknown" == state.attributes.get("position")
assert state.state == "off"
async def test_sensor_lower_zero_threshold(hass):
"""Test if a lower threshold of zero is set."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "0",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 16)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "lower" == state.attributes.get("type")
assert float(config["binary_sensor"]["lower"]) == state.attributes.get("lower")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", -3)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_upper_zero_threshold(hass):
"""Test if an upper threshold of zero is set."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "0",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", -10)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert "upper" == state.attributes.get("type")
assert float(config["binary_sensor"]["upper"]) == state.attributes.get("upper")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 2)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
|
import pytest
from homeassistant.components import hue
import homeassistant.components.automation as automation
from homeassistant.components.hue import device_trigger
from homeassistant.setup import async_setup_component
from .conftest import setup_bridge_for_sensors as setup_bridge
from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
)
REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1}
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, mock_bridge, device_reg):
"""Test we get the expected triggers from a hue remote."""
mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 remotes, just 1 battery sensor
assert len(hass.states.async_all()) == 1
# Get triggers for specific tap switch
hue_tap_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={}
)
triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id)
expected_triggers = [
{
"platform": "device",
"domain": hue.DOMAIN,
"device_id": hue_tap_device.id,
"type": t_type,
"subtype": t_subtype,
}
for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys()
]
assert_lists_same(triggers, expected_triggers)
# Get triggers for specific dimmer switch
hue_dimmer_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={}
)
triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id)
trigger_batt = {
"platform": "device",
"domain": "sensor",
"device_id": hue_dimmer_device.id,
"type": "battery_level",
"entity_id": "sensor.hue_dimmer_switch_1_battery_level",
}
expected_triggers = [
trigger_batt,
*[
{
"platform": "device",
"domain": hue.DOMAIN,
"device_id": hue_dimmer_device.id,
"type": t_type,
"subtype": t_subtype,
}
for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys()
],
]
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls):
"""Test for button press trigger firing."""
mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 1
# Set an automation with a specific tap switch trigger
hue_tap_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": hue.DOMAIN,
"device_id": hue_tap_device.id,
"type": "remote_button_short_press",
"subtype": "button_4",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "B4 - {{ trigger.event.data.event }}"
},
},
},
{
"trigger": {
"platform": "device",
"domain": hue.DOMAIN,
"device_id": "mock-device-id",
"type": "remote_button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "B1 - {{ trigger.event.data.event }}"
},
},
},
]
},
)
# Fake that the remote is being pressed.
new_sensor_response = dict(REMOTES_RESPONSE)
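    # dict() only makes a shallow copy: new_sensor_response["7"] is still the
    # shared HUE_TAP_REMOTE_1 dict, so the assignment below mutates it in place.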
new_sensor_response["7"]["state"] = {
"buttonevent": 18,
"lastupdated": "2019-12-28T22:58:02",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
await mock_bridge.sensor_manager.coordinator.async_refresh()
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 2
assert len(calls) == 1
assert calls[0].data["some"] == "B4 - 18"
# Fake another button press.
new_sensor_response = dict(REMOTES_RESPONSE)
new_sensor_response["7"]["state"] = {
"buttonevent": 34,
"lastupdated": "2019-12-28T22:58:05",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
await mock_bridge.sensor_manager.coordinator.async_refresh()
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 3
assert len(calls) == 1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
from compare_gan.architectures import arch_ops as ops
from compare_gan.gans import loss_lib
from compare_gan.gans import modular_gan
from compare_gan.gans import utils
import gin
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
NUM_ROTATIONS = 4
# pylint: disable=not-callable
@gin.configurable(blacklist=["kwargs"])
class S3GAN(modular_gan.ModularGAN):
"""S3GAN which enables auxiliary heads for the modular GAN."""
def __init__(self, self_supervision="rotation",
rotated_batch_fraction=gin.REQUIRED,
weight_rotation_loss_d=1.0,
weight_rotation_loss_g=0.2,
project_y=False,
use_predictor=False,
use_soft_pred=False,
weight_class_loss=1.0,
use_soft_labels=False,
**kwargs):
"""Instantiates the S3GAN.
Args:
self_supervision: One of [rotation_gan, None].
      rotated_batch_fraction: This must be a divisor of the total batch size.
        1/rotated_batch_fraction of each batch (per TPU core) is used for the
        rotation task; for GPU training #CORES is 1.
weight_rotation_loss_d: Weight for the rotation loss for the discriminator
on real images.
weight_rotation_loss_g: Weight for the rotation loss for the generator
on fake images.
project_y: Boolean, whether an embedding layer as in variant 1) should be
used.
use_predictor: Boolean, whether a predictor (classifier) should be used.
use_soft_pred: Boolean, whether soft labels should be used for the
predicted label vectors in 1).
weight_class_loss: weight of the (predictor) classification loss added to
the discriminator loss.
      use_soft_labels: Boolean, if true assumes the labels passed for real
        examples are soft labels and accordingly does not transform them into
        one-hot vectors.
**kwargs: Additional arguments passed to `ModularGAN` constructor.
"""
super(S3GAN, self).__init__(**kwargs)
if use_predictor and not project_y:
raise ValueError("Using predictor requires projection.")
assert self_supervision in {"none", "rotation"}
self._self_supervision = self_supervision
self._rotated_batch_fraction = rotated_batch_fraction
self._weight_rotation_loss_d = weight_rotation_loss_d
self._weight_rotation_loss_g = weight_rotation_loss_g
self._project_y = project_y
self._use_predictor = use_predictor
self._use_soft_pred = use_soft_pred
self._weight_class_loss = weight_class_loss
self._use_soft_labels = use_soft_labels
    # To save memory ModularGAN supports feeding real and fake samples
    # separately through the discriminator. S3GAN does not support this to
    # avoid additional complexity in create_loss().
assert not self._deprecated_split_disc_calls, \
"Splitting discriminator calls is not supported in S3GAN."
def discriminator_with_additonal_heads(self, x, y, is_training):
"""Discriminator architecture with additional heads.
Possible heads built on top of feature representation of the discriminator:
(1) Classify the image to the correct class.
(2) Classify the rotation of the image.
Args:
x: An input image tensor.
y: One-hot encoded label. Passing all zeros implies no label was passed.
is_training: boolean, whether or not it is a training call.
Returns:
Tuple of 5 Tensors: (1) discriminator predictions (in [0, 1]), (2) the
corresponding logits, (3) predictions (logits) of the rotation of x from
the auxiliary head, (4) logits of the class prediction from the auxiliary
head, (5) Indicator vector identifying whether y contained a label or -1.
"""
d_probs, d_logits, x_rep = self.discriminator(
x, y=y, is_training=is_training)
use_sn = self.discriminator._spectral_norm # pylint: disable=protected-access
is_label_available = tf.cast(tf.cast(
tf.reduce_sum(y, axis=1, keepdims=True), tf.float32) > 0.5, tf.float32)
assert x_rep.shape.ndims == 2, x_rep.shape
# Predict the rotation of the image.
rotation_logits = None
if "rotation" in self._self_supervision:
with tf.variable_scope("discriminator_rotation", reuse=tf.AUTO_REUSE):
rotation_logits = ops.linear(
x_rep,
NUM_ROTATIONS,
scope="score_classify",
use_sn=use_sn)
logging.info("[Discriminator] rotation head %s -> %s",
x_rep.shape, rotation_logits)
if not self._project_y:
return d_probs, d_logits, rotation_logits, None, is_label_available
# Predict the class of the image.
aux_logits = None
if self._use_predictor:
with tf.variable_scope("discriminator_predictor", reuse=tf.AUTO_REUSE):
aux_logits = ops.linear(x_rep, y.shape[1], use_bias=True,
scope="predictor_linear", use_sn=use_sn)
# Apply the projection discriminator if needed.
if self._use_soft_pred:
y_predicted = tf.nn.softmax(aux_logits)
else:
y_predicted = tf.one_hot(
tf.arg_max(aux_logits, 1), aux_logits.shape[1])
y = (1.0 - is_label_available) * y_predicted + is_label_available * y
y = tf.stop_gradient(y)
logging.info("[Discriminator] %s -> aux_logits=%s, y_predicted=%s",
aux_logits.shape, aux_logits.shape, y_predicted.shape)
class_embedding = self.get_class_embedding(
y=y, embedding_dim=x_rep.shape[-1].value, use_sn=use_sn)
d_logits += tf.reduce_sum(class_embedding * x_rep, axis=1, keepdims=True)
d_probs = tf.nn.sigmoid(d_logits)
return d_probs, d_logits, rotation_logits, aux_logits, is_label_available
def get_class_embedding(self, y, embedding_dim, use_sn):
with tf.variable_scope("discriminator_projection", reuse=tf.AUTO_REUSE):
# We do not use ops.linear() below since it does not have an option to
# override the initializer.
kernel = tf.get_variable(
"kernel", [y.shape[1], embedding_dim], tf.float32,
initializer=tf.initializers.glorot_normal())
if use_sn:
kernel = ops.spectral_norm(kernel)
embedded_y = tf.matmul(y, kernel)
logging.info("[Discriminator] embedded_y for projection: %s",
embedded_y.shape)
return embedded_y
def merge_with_rotation_data(self, real, fake, real_labels, fake_labels,
num_rot_examples):
"""Returns the original data concatenated with the rotated version."""
# Put all rotation angles in a single batch, the first batch_size are
# the original up-right images, followed by rotated_batch_size * 3
# rotated images with 3 different angles. For NUM_ROTATIONS=4 and
# num_rot_examples=2 we have labels_rotated [0, 0, 1, 1, 2, 2, 3, 3].
real_to_rot, fake_to_rot = (
real[-num_rot_examples:], fake[-num_rot_examples:])
real_rotated = utils.rotate_images(real_to_rot, rot90_scalars=(1, 2, 3))
fake_rotated = utils.rotate_images(fake_to_rot, rot90_scalars=(1, 2, 3))
all_features = tf.concat([real, real_rotated, fake, fake_rotated], 0)
all_labels = None
if self.conditional:
real_rotated_labels = tf.tile(real_labels[-num_rot_examples:], [3, 1])
fake_rotated_labels = tf.tile(fake_labels[-num_rot_examples:], [3, 1])
all_labels = tf.concat([real_labels, real_rotated_labels,
fake_labels, fake_rotated_labels], 0)
return all_features, all_labels
def create_loss(self, features, labels, params, is_training=True):
"""Build the loss tensors for discriminator and generator.
This method will set self.d_loss and self.g_loss.
Args:
features: Optional dictionary with inputs to the model ("images" should
contain the real images and "z" the noise for the generator).
      labels: Tensor with labels. These are class indices. Use
        self._get_one_hot_labels(labels) to get a one hot encoded tensor.
      params: Dictionary with hyperparameters passed to TPUEstimator.
        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
        `tpu_context`. `batch_size` is the batch size for this core.
is_training: If True build the model in training mode. If False build the
model for inference mode (e.g. use trained averages for batch norm).
Raises:
ValueError: If set of meta/hyper parameters is not supported.
"""
real_images = features["images"]
if self.conditional:
if self._use_soft_labels:
assert labels.shape[1] == self._dataset.num_classes, \
("Need soft labels of dimension {} but got dimension {}".format(
self._dataset.num_classes, labels.shape[1]))
real_labels = labels
else:
real_labels = self._get_one_hot_labels(labels)
fake_labels = self._get_one_hot_labels(features["sampled_labels"])
if self._experimental_joint_gen_for_disc:
assert "generated" in features
fake_images = features["generated"]
else:
logging.warning("Computing fake images for every sub step separately.")
fake_images = self.generator(
features["z"], y=fake_labels, is_training=is_training)
bs = real_images.shape[0].value
if self._self_supervision:
      assert bs % self._rotated_batch_fraction == 0, (
          "Rotated batch fraction is invalid: %d doesn't divide %d" %
          (self._rotated_batch_fraction, bs))
rotated_bs = bs // self._rotated_batch_fraction
num_rot_examples = rotated_bs // NUM_ROTATIONS
logging.info("bs=%s, rotated_bs=%s, num_rot_examples=%s", bs, rotated_bs,
num_rot_examples)
assert num_rot_examples > 0
      # Append the data obtained by rotating the last `num_rot_examples`
      # images from the true and the fake data.
if self._self_supervision == "rotation":
assert num_rot_examples <= bs, (num_rot_examples, bs)
all_features, all_labels = self.merge_with_rotation_data(
real_images, fake_images, real_labels, fake_labels, num_rot_examples)
else:
all_features = tf.concat([real_images, fake_images], 0)
all_labels = None
if self.conditional:
all_labels = tf.concat([real_labels, fake_labels], axis=0)
d_predictions, d_logits, rot_logits, aux_logits, is_label_available = (
self.discriminator_with_additonal_heads(
x=all_features, y=all_labels, is_training=is_training))
expected_batch_size = 2 * bs
if self._self_supervision == "rotation":
expected_batch_size += 2 * (NUM_ROTATIONS - 1) * num_rot_examples
if d_logits.shape[0].value != expected_batch_size:
raise ValueError("Batch size unexpected: got %r expected %r" % (
d_logits.shape[0].value, expected_batch_size))
prob_real, prob_fake = tf.split(d_predictions, 2)
prob_real, prob_fake = prob_real[:bs], prob_fake[:bs]
logits_real, logits_fake = tf.split(d_logits, 2)
logits_real, logits_fake = logits_real[:bs], logits_fake[:bs]
# Get the true/fake GAN loss.
self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
d_real=prob_real, d_fake=prob_fake,
d_real_logits=logits_real, d_fake_logits=logits_fake)
# At this point we have the classic GAN loss with possible regularization.
# We now add the rotation loss and summaries if required.
if self._self_supervision == "rotation":
# Extract logits for the rotation task.
rot_real_logits, rot_fake_logits = tf.split(rot_logits, 2)
rot_real_logits = rot_real_logits[-rotated_bs:]
rot_fake_logits = rot_fake_logits[-rotated_bs:]
labels_rotated = tf.constant(np.repeat(
np.arange(NUM_ROTATIONS, dtype=np.int32), num_rot_examples))
rot_onehot = tf.one_hot(labels_rotated, NUM_ROTATIONS)
rot_real_logp = tf.log(tf.nn.softmax(rot_real_logits) + 1e-10)
rot_fake_logp = tf.log(tf.nn.softmax(rot_fake_logits) + 1e-10)
real_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_real_logp, 1))
fake_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_fake_logp, 1))
self.d_loss += real_loss * self._weight_rotation_loss_d
self.g_loss += fake_loss * self._weight_rotation_loss_g
rot_real_labels = tf.one_hot(
tf.arg_max(rot_real_logits, 1), NUM_ROTATIONS)
rot_fake_labels = tf.one_hot(
tf.arg_max(rot_fake_logits, 1), NUM_ROTATIONS)
accuracy_real = tf.metrics.accuracy(rot_onehot, rot_real_labels)
accuracy_fake = tf.metrics.accuracy(rot_onehot, rot_fake_labels)
self._tpu_summary.scalar("loss/real_loss", real_loss)
self._tpu_summary.scalar("loss/fake_loss", fake_loss)
self._tpu_summary.scalar("accuracy/real", accuracy_real)
self._tpu_summary.scalar("accuracy/fake", accuracy_fake)
# Training the predictor on the features of real data and real labels.
if self._use_predictor:
real_aux_logits, _ = tf.split(aux_logits, 2)
real_aux_logits = real_aux_logits[:bs]
is_label_available, _ = tf.split(is_label_available, 2)
is_label_available = tf.squeeze(is_label_available[:bs])
class_loss_real = tf.losses.softmax_cross_entropy(
real_labels, real_aux_logits, weights=is_label_available)
# Add the loss to the discriminator
self.d_loss += self._weight_class_loss * class_loss_real
self._tpu_summary.scalar("loss/class_loss_real", class_loss_real)
self._tpu_summary.scalar("label_frac", tf.reduce_mean(is_label_available))
|
from datetime import datetime, timedelta
import logging
from typing import Dict, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
CONF_ICON,
CONF_ID,
CONF_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "timer"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
DEFAULT_DURATION = 0
ATTR_DURATION = "duration"
ATTR_REMAINING = "remaining"
ATTR_FINISHES_AT = "finishes_at"
CONF_DURATION = "duration"
STATUS_IDLE = "idle"
STATUS_ACTIVE = "active"
STATUS_PAUSED = "paused"
EVENT_TIMER_FINISHED = "timer.finished"
EVENT_TIMER_CANCELLED = "timer.cancelled"
EVENT_TIMER_STARTED = "timer.started"
EVENT_TIMER_RESTARTED = "timer.restarted"
EVENT_TIMER_PAUSED = "timer.paused"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_CANCEL = "cancel"
SERVICE_FINISH = "finish"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CREATE_FIELDS = {
    vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): cv.time_period,
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_DURATION): cv.time_period,
}
def _format_timedelta(delta: timedelta):
total_seconds = delta.total_seconds()
hours, remainder = divmod(total_seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return f"{int(hours)}:{int(minutes):02}:{int(seconds):02}"
def _none_to_empty_dict(value):
if value is None:
return {}
return value
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.All(
_none_to_empty_dict,
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.All(
cv.time_period, _format_timedelta
),
},
)
)
},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up an input select."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.attach_entity_component_collection(
component, yaml_collection, Timer.from_yaml
)
storage_collection = TimerStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.attach_entity_component_collection(component, storage_collection, Timer)
await yaml_collection.async_load(
[{CONF_ID: id_, **cfg} for id_, cfg in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection)
async def reload_service_handler(service_call: ServiceCallType) -> None:
"""Reload yaml entities."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
conf = {DOMAIN: {}}
await yaml_collection.async_load(
[{CONF_ID: id_, **cfg} for id_, cfg in conf.get(DOMAIN, {}).items()]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(
SERVICE_START,
{vol.Optional(ATTR_DURATION, default=DEFAULT_DURATION): cv.time_period},
"async_start",
)
component.async_register_entity_service(SERVICE_PAUSE, {}, "async_pause")
component.async_register_entity_service(SERVICE_CANCEL, {}, "async_cancel")
component.async_register_entity_service(SERVICE_FINISH, {}, "async_finish")
return True
class TimerStorageCollection(collection.StorageCollection):
"""Timer storage based collection."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: Dict) -> Dict:
"""Validate the config is valid."""
data = self.CREATE_SCHEMA(data)
        # make duration JSON serializable
data[CONF_DURATION] = _format_timedelta(data[CONF_DURATION])
return data
@callback
def _get_suggested_id(self, info: Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: Dict) -> Dict:
"""Return a new updated data object."""
data = {**data, **self.UPDATE_SCHEMA(update_data)}
        # make duration JSON serializable
if CONF_DURATION in update_data:
data[CONF_DURATION] = _format_timedelta(data[CONF_DURATION])
return data
class Timer(RestoreEntity):
"""Representation of a timer."""
def __init__(self, config: Dict):
"""Initialize a timer."""
self._config: dict = config
self.editable: bool = True
self._state: str = STATUS_IDLE
self._duration = cv.time_period_str(config[CONF_DURATION])
self._remaining: Optional[timedelta] = None
self._end: Optional[datetime] = None
self._listener = None
@classmethod
def from_yaml(cls, config: Dict) -> "Timer":
"""Return entity instance initialized from yaml storage."""
timer = cls(config)
timer.entity_id = ENTITY_ID_FORMAT.format(config[CONF_ID])
timer.editable = False
return timer
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def force_update(self) -> bool:
"""Return True to fix restart issues."""
return True
@property
def name(self):
"""Return name of the timer."""
return self._config.get(CONF_NAME)
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def state(self):
"""Return the current value of the timer."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = {
ATTR_DURATION: _format_timedelta(self._duration),
ATTR_EDITABLE: self.editable,
}
if self._end is not None:
attrs[ATTR_FINISHES_AT] = self._end.isoformat()
if self._remaining is not None:
attrs[ATTR_REMAINING] = _format_timedelta(self._remaining)
return attrs
@property
def unique_id(self) -> Optional[str]:
"""Return unique id for the entity."""
return self._config[CONF_ID]
async def async_added_to_hass(self):
"""Call when entity is about to be added to Home Assistant."""
# If not None, we got an initial value.
if self._state is not None:
return
state = await self.async_get_last_state()
self._state = state and state.state == state
@callback
def async_start(self, duration: timedelta):
"""Start a timer."""
if self._listener:
self._listener()
self._listener = None
newduration = None
if duration:
newduration = duration
event = EVENT_TIMER_STARTED
if self._state == STATUS_ACTIVE or self._state == STATUS_PAUSED:
event = EVENT_TIMER_RESTARTED
self._state = STATUS_ACTIVE
start = dt_util.utcnow().replace(microsecond=0)
if self._remaining and newduration is None:
self._end = start + self._remaining
elif newduration:
self._duration = newduration
self._remaining = newduration
self._end = start + self._duration
else:
self._remaining = self._duration
self._end = start + self._duration
self.hass.bus.async_fire(event, {"entity_id": self.entity_id})
self._listener = async_track_point_in_utc_time(
self.hass, self._async_finished, self._end
)
self.async_write_ha_state()
@callback
def async_pause(self):
"""Pause a timer."""
if self._listener is None:
return
self._listener()
self._listener = None
self._remaining = self._end - dt_util.utcnow().replace(microsecond=0)
self._state = STATUS_PAUSED
self._end = None
self.hass.bus.async_fire(EVENT_TIMER_PAUSED, {"entity_id": self.entity_id})
self.async_write_ha_state()
@callback
def async_cancel(self):
"""Cancel a timer."""
if self._listener:
self._listener()
self._listener = None
self._state = STATUS_IDLE
self._end = None
self._remaining = None
self.hass.bus.async_fire(EVENT_TIMER_CANCELLED, {"entity_id": self.entity_id})
self.async_write_ha_state()
@callback
def async_finish(self):
"""Reset and updates the states, fire finished event."""
if self._state != STATUS_ACTIVE:
return
self._listener = None
self._state = STATUS_IDLE
self._end = None
self._remaining = None
self.hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
self.async_write_ha_state()
@callback
def _async_finished(self, time):
"""Reset and updates the states, fire finished event."""
if self._state != STATUS_ACTIVE:
return
self._listener = None
self._state = STATUS_IDLE
self._end = None
self._remaining = None
self.hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
self.async_write_ha_state()
async def async_update_config(self, config: Dict) -> None:
"""Handle when the config is updated."""
self._config = config
self._duration = cv.time_period_str(config[CONF_DURATION])
self.async_write_ha_state()
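# Hedged usage sketch (assumes a running Home Assistant instance; the entity id
# "timer.my_timer" is illustrative):
#
#     await hass.services.async_call(
#         DOMAIN, SERVICE_START,
#         {"entity_id": "timer.my_timer", ATTR_DURATION: "00:05:00"},
#         blocking=True,
#     )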
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl import flags
from absl.testing import flagsaver
import mock
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import openfoam_benchmark
from perfkitbenchmarker.linux_packages import openmpi
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class OpenfoamBenchmarkTest(pkb_common_test_case.PkbCommonTestCase,
test_util.SamplesTestMixin):
def setUp(self):
super(OpenfoamBenchmarkTest, self).setUp()
self.mock_vm = mock.Mock()
self.mock_benchmark_spec = mock.Mock(vms=[self.mock_vm])
@mock.patch.object(openmpi, 'GetMpiVersion', return_value='1.10.2')
@mock.patch.object(openfoam_benchmark, '_GetOpenfoamVersion',
return_value='7')
@mock.patch.object(openfoam_benchmark, '_ParseRunCommands',
return_value=['mpirun $(getApplication)'])
@flagsaver.flagsaver(openfoam_dimensions=['80_32_32'])
  def testRunCaseReturnsCorrectlyParsedSamples(self,
                                               mock_parseruncommands,
                                               mock_getopenfoamversion,
                                               mock_getmpiversion):
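    # mock.patch decorators are applied bottom-up, so the injected mocks arrive
    # in reverse decorator order: _ParseRunCommands first, then
    # _GetOpenfoamVersion, then GetMpiVersion.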
# Run with mocked output data
self.mock_vm.RemoteCommand.return_value = None, '\n'.join(
['real 131.64', 'user 327.05', 'sys 137.04'])
self.mock_vm.NumCpusForBenchmark.return_value = 8
samples = openfoam_benchmark.Run(self.mock_benchmark_spec)
# Verify command is what we expected to run
run_cmd = [
'cd $HOME/OpenFOAM/run/motorBike',
'time -p mpirun $(getApplication)'
]
self.mock_vm.RemoteCommand.assert_called_with(' && '.join(run_cmd))
# Verify sample equality
expected_metadata = {
'case_name': 'motorbike',
'command': '$(getApplication)',
'decomp_method': 'scotch',
'dimensions': '80_32_32',
'full_command': 'mpirun $(getApplication)',
'max_global_cells': 200000000,
'mpi_mapping': 'core:SPAN',
'openfoam_version': '7',
'openmpi_version': '1.10.2',
'total_cpus_available': 8,
'total_cpus_used': 4,
}
unit = 'seconds'
self.assertSamplesEqualUpToTimestamp(
sample.Sample('time_real', 131, unit, expected_metadata), samples[0])
self.assertSamplesEqualUpToTimestamp(
sample.Sample('time_user', 327, unit, expected_metadata), samples[1])
self.assertSamplesEqualUpToTimestamp(
sample.Sample('time_sys', 137, unit, expected_metadata), samples[2])
def testYumInstallRaisesNotImplementedError(self):
self.mock_vm = linux_virtual_machine.Rhel7Mixin()
self.mock_vm.install_packages = True
with self.assertRaises(NotImplementedError):
self.mock_vm.Install('openfoam')
if __name__ == '__main__':
unittest.main()
|
from pylatex import Document, Section, Subsection, Command
from pylatex.utils import italic, NoEscape
def fill_document(doc):
"""Add a section, a subsection and some text to the document.
:param doc: the document
:type doc: :class:`pylatex.document.Document` instance
"""
with doc.create(Section('A section')):
doc.append('Some regular text and some ')
doc.append(italic('italic text. '))
with doc.create(Subsection('A subsection')):
doc.append('Also some crazy characters: $&#{}')
if __name__ == '__main__':
# Basic document
doc = Document('basic')
fill_document(doc)
doc.generate_pdf(clean_tex=False)
doc.generate_tex()
# Document with `\maketitle` command activated
doc = Document()
doc.preamble.append(Command('title', 'Awesome Title'))
doc.preamble.append(Command('author', 'Anonymous author'))
doc.preamble.append(Command('date', NoEscape(r'\today')))
doc.append(NoEscape(r'\maketitle'))
fill_document(doc)
doc.generate_pdf('basic_maketitle', clean_tex=False)
# Add stuff to the document
with doc.create(Section('A second section')):
doc.append('Some text.')
doc.generate_pdf('basic_maketitle2', clean_tex=False)
tex = doc.dumps() # The document as string in LaTeX syntax
|
import tensornetwork as tn
from tensornetwork.backend_contextmanager import _default_backend_stack
import pytest
import numpy as np
import tensorflow as tf
import torch
import jax
from tensornetwork.block_sparse import (U1Charge, BlockSparseTensor, Index,
BaseCharge)
from tensornetwork.block_sparse.blocksparse_utils import _find_diagonal_sparse_blocks#pylint: disable=line-too-long
from tensornetwork.backends.abstract_backend import AbstractBackend
np_dtypes = [np.float32, np.float64, np.complex64, np.complex128, np.int32]
tf_dtypes = [tf.float32, tf.float64, tf.complex64, tf.complex128, tf.int32]
torch_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64]
jax_dtypes = [
jax.numpy.float32, jax.numpy.float64, jax.numpy.complex64,
jax.numpy.complex128, jax.numpy.int32
]
def get_random_symmetric(shape, flows, num_charges, seed=10, dtype=np.float64):
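  # Builds a rank-len(shape) BlockSparseTensor whose legs all share one random
  # U(1)^num_charges charge object and use the given arrow directions (flows).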
assert np.all(np.asarray(shape) == shape[0])
np.random.seed(seed)
R = len(shape)
charge = BaseCharge(
np.random.randint(-5, 5, (shape[0], num_charges)),
charge_types=[U1Charge] * num_charges)
indices = [Index(charge, flows[n]) for n in range(R)]
return BlockSparseTensor.random(indices=indices, dtype=dtype)
def get_square_matrix(shape, dtype=np.float64):
charge = U1Charge(np.random.randint(-5, 5, shape))
flows = [True, False]
indices = [Index(charge, flows[n]) for n in range(2)]
return BlockSparseTensor.random(indices=indices, dtype=dtype)
def get_zeros(shape, dtype=np.float64):
R = len(shape)
charges = [U1Charge(np.random.randint(-5, 5, shape[n])) for n in range(R)]
  flows = list(np.full(R, fill_value=False, dtype=bool))
indices = [Index(charges[n], flows[n]) for n in range(R)]
return BlockSparseTensor.zeros(indices=indices, dtype=dtype)
def get_ones(shape, dtype=np.float64):
R = len(shape)
charges = [U1Charge(np.random.randint(-5, 5, shape[n])) for n in range(R)]
  flows = list(np.full(R, fill_value=False, dtype=bool))
indices = [Index(charges[n], flows[n]) for n in range(R)]
return BlockSparseTensor.ones(indices=indices, dtype=dtype)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_network_copy_reordered(dtype, num_charges):
a = tn.Node(
get_random_symmetric((30, 30, 30), [False, False, False],
num_charges,
dtype=dtype),
backend='symmetric')
b = tn.Node(
get_random_symmetric((30, 30, 30), [False, True, False],
num_charges,
dtype=dtype),
backend='symmetric')
c = tn.Node(
get_random_symmetric((30, 30, 30), [True, False, True],
num_charges,
dtype=dtype),
backend='symmetric')
a[0] ^ b[1]
a[1] ^ c[2]
b[2] ^ c[0]
edge_order = [a[2], c[1], b[0]]
node_dict, edge_dict = tn.copy({a, b, c})
tn.check_correct({a, b, c})
res = a @ b @ c
res.reorder_edges(edge_order)
res_copy = node_dict[a] @ node_dict[b] @ node_dict[c]
res_copy.reorder_edges([edge_dict[e] for e in edge_order])
np.testing.assert_allclose(res.tensor.data, res_copy.tensor.data)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_small_matmul(dtype, num_charges):
a = tn.Node(
get_random_symmetric((100, 100), [True, True], num_charges, dtype=dtype),
backend='symmetric')
b = tn.Node(
get_random_symmetric((100, 100), [False, True], num_charges, dtype=dtype),
backend='symmetric')
edge = tn.connect(a[0], b[0], "edge")
tn.check_correct({a, b})
c = tn.contract(edge, name="a * b")
assert list(c.shape) == [100, 100]
tn.check_correct({c})
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_double_trace(dtype, num_charges):
a = tn.Node(
get_random_symmetric((20, 20, 20, 20), [True, False, True, False],
num_charges,
dtype=dtype),
backend='symmetric')
edge1 = tn.connect(a[0], a[1], "edge1")
edge2 = tn.connect(a[2], a[3], "edge2")
tn.check_correct({a})
val = tn.contract(edge1)
tn.check_correct({val})
val = tn.contract(edge2)
tn.check_correct({val})
adense = tn.Node(a.tensor.todense(), backend='numpy')
e1 = adense[0] ^ adense[1]
e2 = adense[2] ^ adense[3]
tn.contract(e1)
expected = tn.contract(e2)
np.testing.assert_almost_equal(val.tensor.data, expected.tensor)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_real_physics(dtype, num_charges):
  # Calculate the expected value in numpy
t1 = get_random_symmetric((20, 20, 20, 20), [False, False, False, False],
num_charges,
dtype=dtype)
t2 = get_random_symmetric((20, 20, 20), [True, False, True],
num_charges,
dtype=dtype)
t3 = get_random_symmetric((20, 20, 20), [True, True, False],
num_charges,
dtype=dtype)
t1_dense = t1.todense()
t2_dense = t2.todense()
t3_dense = t3.todense()
adense = tn.Node(t1_dense, name="T", backend='numpy')
bdense = tn.Node(t2_dense, name="A", backend='numpy')
cdense = tn.Node(t3_dense, name="B", backend='numpy')
e1 = tn.connect(adense[2], bdense[0], "edge")
e2 = tn.connect(cdense[0], adense[3], "edge2")
e3 = tn.connect(bdense[1], cdense[1], "edge3")
node_result = tn.contract(e1)
node_result = tn.contract(e2)
final_result = tn.contract(e3)
# Build the network
a = tn.Node(t1, name="T", backend='symmetric')
b = tn.Node(t2, name="A", backend='symmetric')
c = tn.Node(t3, name="B", backend='symmetric')
e1 = tn.connect(a[2], b[0], "edge")
e2 = tn.connect(c[0], a[3], "edge2")
e3 = tn.connect(b[1], c[1], "edge3")
tn.check_correct(tn.reachable(a))
node_result = tn.contract(e1)
tn.check_correct(tn.reachable(node_result))
node_result = tn.contract(e2)
tn.check_correct(tn.reachable(node_result))
val = tn.contract(e3)
tn.check_correct(tn.reachable(val))
np.testing.assert_allclose(val.tensor.todense(), final_result.tensor)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_node2_contract_trace(dtype, num_charges):
a = tn.Node(
get_random_symmetric((20, 20, 20), [False, True, False],
num_charges,
dtype=dtype),
backend='symmetric')
b = tn.Node(
get_random_symmetric((20, 20), [True, False], num_charges, dtype=dtype),
backend='symmetric')
tn.connect(b[0], a[2])
trace_edge = tn.connect(a[0], a[1])
c = tn.contract(trace_edge)
tn.check_correct({c})
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_flatten_consistent_result(dtype, num_charges):
a_val = get_random_symmetric((10, 10, 10, 10), [False] * 4,
num_charges,
dtype=dtype)
b_val = get_random_symmetric((10, 10, 10, 10), [True] * 4,
num_charges,
dtype=dtype)
# Create non flattened example to compare against.
a_noflat = tn.Node(a_val, backend='symmetric')
b_noflat = tn.Node(b_val, backend='symmetric')
e1 = tn.connect(a_noflat[1], b_noflat[3])
e2 = tn.connect(a_noflat[3], b_noflat[1])
e3 = tn.connect(a_noflat[2], b_noflat[0])
a_dangling_noflat = a_noflat[0]
b_dangling_noflat = b_noflat[2]
for edge in [e1, e2, e3]:
noflat_result_node = tn.contract(edge)
noflat_result_node.reorder_edges([a_dangling_noflat, b_dangling_noflat])
noflat_result = noflat_result_node.tensor
# Create network with flattening
a_flat = tn.Node(a_val, backend='symmetric')
b_flat = tn.Node(b_val, backend='symmetric')
e1 = tn.connect(a_flat[1], b_flat[3])
e2 = tn.connect(a_flat[3], b_flat[1])
e3 = tn.connect(a_flat[2], b_flat[0])
a_dangling_flat = a_flat[0]
b_dangling_flat = b_flat[2]
final_edge = tn.flatten_edges([e1, e2, e3])
flat_result_node = tn.contract(final_edge)
flat_result_node.reorder_edges([a_dangling_flat, b_dangling_flat])
flat_result = flat_result_node.tensor
flat_result = flat_result.contiguous()
noflat_result = noflat_result.contiguous()
np.testing.assert_allclose(flat_result.data, noflat_result.data)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_flatten_trace_consistent_result(dtype, num_charges):
a_val = get_random_symmetric((5, 5, 5, 5, 5, 5),
[False, False, True, True, True, False],
num_charges,
dtype=dtype)
a_noflat = tn.Node(a_val, backend='symmetric')
e1 = tn.connect(a_noflat[0], a_noflat[4])
e2 = tn.connect(a_noflat[1], a_noflat[2])
e3 = tn.connect(a_noflat[3], a_noflat[5])
for edge in [e1, e2, e3]:
noflat_result = tn.contract(edge).tensor
# Create network with flattening
a_flat = tn.Node(a_val, backend='symmetric')
e1 = tn.connect(a_flat[0], a_flat[4])
e2 = tn.connect(a_flat[1], a_flat[2])
e3 = tn.connect(a_flat[3], a_flat[5])
final_edge = tn.flatten_edges([e1, e2, e3])
flat_result = tn.contract(final_edge).tensor
flat_result = flat_result.contiguous()
noflat_result = noflat_result.contiguous()
np.testing.assert_allclose(flat_result.data, noflat_result.data)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_flatten_trace_consistent_tensor(dtype, num_charges):
a_val = get_random_symmetric((5, 5, 5, 5, 5),
[False, False, True, True, True],
num_charges,
dtype=dtype)
a = tn.Node(a_val, backend='symmetric')
e1 = tn.connect(a[0], a[4])
e2 = tn.connect(a[3], a[2])
tn.flatten_edges([e2, e1])
tn.check_correct({a})
# Check expected values.
a_final = np.reshape(
np.transpose(a_val.todense(), (1, 2, 0, 3, 4)), (5, 25, 25))
np.testing.assert_allclose(a.tensor.todense(), a_final)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_contract_between_trace_edges(dtype, num_charges):
a_val = get_random_symmetric((50, 50), [False, True],
num_charges,
dtype=dtype)
final_val = np.trace(a_val.todense())
a = tn.Node(a_val, backend='symmetric')
tn.connect(a[0], a[1])
b = tn.contract_between(a, a)
tn.check_correct({b})
np.testing.assert_allclose(b.tensor.todense(), final_val)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_at_operator(dtype, num_charges):
a = tn.Node(
get_random_symmetric((50, 50), [False, True], num_charges, dtype=dtype),
backend='symmetric')
b = tn.Node(
get_random_symmetric((50, 50), [False, True], num_charges, dtype=dtype),
backend='symmetric')
tn.connect(a[1], b[0])
c = a @ b
assert isinstance(c, tn.Node)
np.testing.assert_allclose(c.tensor.todense(),
a.tensor.todense() @ b.tensor.todense())
|
import datetime
from caldav.objects import Event
import pytest
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from tests.async_mock import MagicMock, Mock, patch
# pylint: disable=redefined-outer-name
DEVICE_DATA = {"name": "Private Calendar", "device_id": "Private Calendar"}
EVENTS = [
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:1
DTSTAMP:20171125T000000Z
DTSTART:20171127T170000Z
DTEND:20171127T180000Z
SUMMARY:This is a normal event
LOCATION:Hamburg
DESCRIPTION:Surprisingly rainy
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Dynamics.//CalDAV Client//EN
BEGIN:VEVENT
UID:2
DTSTAMP:20171125T000000Z
DTSTART:20171127T100000Z
DTEND:20171127T110000Z
SUMMARY:This is an offset event !!-02:00
LOCATION:Hamburg
DESCRIPTION:Surprisingly shiny
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:3
DTSTAMP:20171125T000000Z
DTSTART:20171127
DTEND:20171128
SUMMARY:This is an all day event
LOCATION:Hamburg
DESCRIPTION:What a beautiful day
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:4
DTSTAMP:20171125T000000Z
DTSTART:20171127
SUMMARY:This is an event without dtend or duration
LOCATION:Hamburg
DESCRIPTION:What an endless day
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:5
DTSTAMP:20171125T000000Z
DTSTART:20171127
DURATION:PT1H
SUMMARY:This is an event with duration
LOCATION:Hamburg
DESCRIPTION:What a day
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:6
DTSTAMP:20171125T000000Z
DTSTART:20171127T100000Z
DURATION:PT1H
SUMMARY:This is an event with duration
LOCATION:Hamburg
DESCRIPTION:What a day
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:7
DTSTART;TZID=America/Los_Angeles:20171127T083000
DTSTAMP:20180301T020053Z
DTEND;TZID=America/Los_Angeles:20171127T093000
SUMMARY:Enjoy the sun
LOCATION:San Francisco
DESCRIPTION:Sunny day
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:8
DTSTART:20171127T190000
DTEND:20171127T200000
SUMMARY:This is a floating Event
LOCATION:Hamburg
DESCRIPTION:What a day
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:9
DTSTAMP:20171125T000000Z
DTSTART:20171027T220000Z
DTEND:20171027T223000Z
SUMMARY:This is a recurring event
LOCATION:Hamburg
DESCRIPTION:Every day for a while
RRULE:FREQ=DAILY;UNTIL=20171227T215959
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:10
DTSTAMP:20171125T000000Z
DTSTART:20171027T230000Z
DURATION:PT30M
SUMMARY:This is a recurring event with a duration
LOCATION:Hamburg
DESCRIPTION:Every day for a while as well
RRULE:FREQ=DAILY;UNTIL=20171227T215959
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:11
DTSTAMP:20171125T000000Z
DTSTART:20171027T233000Z
DTEND:20171027T235959Z
SUMMARY:This is a recurring event that has ended
LOCATION:Hamburg
DESCRIPTION:Every day for a while
RRULE:FREQ=DAILY;UNTIL=20171127T225959
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:12
DTSTAMP:20171125T000000Z
DTSTART:20171027T234500Z
DTEND:20171027T235959Z
SUMMARY:This is a recurring event that never ends
LOCATION:Hamburg
DESCRIPTION:Every day forever
RRULE:FREQ=DAILY
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:13
DTSTAMP:20161125T000000Z
DTSTART:20161127
DTEND:20161128
SUMMARY:This is a recurring all day event
LOCATION:Hamburg
DESCRIPTION:Groundhog Day
RRULE:FREQ=DAILY;COUNT=100
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:14
DTSTAMP:20151125T000000Z
DTSTART:20151127T000000Z
DTEND:20151127T003000Z
SUMMARY:This is an hourly recurring event
LOCATION:Hamburg
DESCRIPTION:The bell tolls for thee
RRULE:FREQ=HOURLY;INTERVAL=1;COUNT=12
END:VEVENT
END:VCALENDAR
""",
]
CALDAV_CONFIG = {
"platform": "caldav",
"url": "http://test.local",
"custom_calendars": [],
}
@pytest.fixture(autouse=True)
def mock_http(hass):
"""Mock the http component."""
hass.http = Mock()
@pytest.fixture
def mock_dav_client():
"""Mock the dav client."""
patch_dav_client = patch(
"caldav.DAVClient", return_value=_mocked_dav_client("First", "Second")
)
with patch_dav_client as dav_client:
yield dav_client
@pytest.fixture(name="calendar")
def mock_private_cal():
"""Mock a private calendar."""
_calendar = _mock_calendar("Private")
calendars = [_calendar]
client = _mocked_dav_client(calendars=calendars)
patch_dav_client = patch("caldav.DAVClient", return_value=client)
with patch_dav_client:
yield _calendar
def _local_datetime(hours, minutes):
"""Build a datetime object for testing in the correct timezone."""
return dt.as_local(datetime.datetime(2017, 11, 27, hours, minutes, 0))
def _mocked_dav_client(*names, calendars=None):
"""Mock requests.get invocations."""
if calendars is None:
calendars = [_mock_calendar(name) for name in names]
principal = Mock()
principal.calendars = MagicMock(return_value=calendars)
client = Mock()
client.principal = MagicMock(return_value=principal)
return client
def _mock_calendar(name):
events = []
for idx, event in enumerate(EVENTS):
events.append(Event(None, "%d.ics" % idx, event, None, str(idx)))
calendar = Mock()
calendar.date_search = MagicMock(return_value=events)
calendar.name = name
return calendar
async def test_setup_component(hass, mock_dav_client):
"""Test setup component with calendars."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.first")
assert state.name == "First"
state = hass.states.get("calendar.second")
assert state.name == "Second"
async def test_setup_component_with_no_calendar_matching(hass, mock_dav_client):
"""Test setup component with wrong calendar."""
config = dict(CALDAV_CONFIG)
config["calendars"] = ["none"]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
all_calendar_states = hass.states.async_entity_ids("calendar")
assert not all_calendar_states
async def test_setup_component_with_a_calendar_match(hass, mock_dav_client):
"""Test setup component with right calendar."""
config = dict(CALDAV_CONFIG)
config["calendars"] = ["Second"]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
all_calendar_states = hass.states.async_entity_ids("calendar")
assert len(all_calendar_states) == 1
state = hass.states.get("calendar.second")
assert state.name == "Second"
async def test_setup_component_with_one_custom_calendar(hass, mock_dav_client):
"""Test setup component with custom calendars."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "HomeOffice", "calendar": "Second", "search": "HomeOffice"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
all_calendar_states = hass.states.async_entity_ids("calendar")
assert len(all_calendar_states) == 1
state = hass.states.get("calendar.second_homeoffice")
assert state.name == "HomeOffice"
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 45))
async def test_ongoing_event(mock_now, hass, calendar):
"""Test that the ongoing event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a normal event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 17:00:00",
"end_time": "2017-11-27 18:00:00",
"location": "Hamburg",
"description": "Surprisingly rainy",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 30))
async def test_just_ended_event(mock_now, hass, calendar):
"""Test that the next ongoing event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a normal event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 17:00:00",
"end_time": "2017-11-27 18:00:00",
"location": "Hamburg",
"description": "Surprisingly rainy",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 00))
async def test_ongoing_event_different_tz(mock_now, hass, calendar):
"""Test that the ongoing event with another timezone is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "Enjoy the sun",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 16:30:00",
"description": "Sunny day",
"end_time": "2017-11-27 17:30:00",
"location": "San Francisco",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(19, 10))
async def test_ongoing_floating_event_returned(mock_now, hass, calendar):
"""Test that floating events without timezones work."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
print(dt.DEFAULT_TIME_ZONE)
print(state)
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a floating Event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 19:00:00",
"end_time": "2017-11-27 20:00:00",
"location": "Hamburg",
"description": "What a day",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(8, 30))
async def test_ongoing_event_with_offset(mock_now, hass, calendar):
"""Test that the offset is taken into account."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is an offset event",
"all_day": False,
"offset_reached": True,
"start_time": "2017-11-27 10:00:00",
"end_time": "2017-11-27 11:00:00",
"location": "Hamburg",
"description": "Surprisingly shiny",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
async def test_matching_filter(mock_now, hass, calendar):
"""Test that the matching event is returned."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": "This is a normal event"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
state = hass.states.get("calendar.private_private")
assert state.name == calendar.name
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a normal event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 17:00:00",
"end_time": "2017-11-27 18:00:00",
"location": "Hamburg",
"description": "Surprisingly rainy",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
async def test_matching_filter_real_regexp(mock_now, hass, calendar):
"""Test that the event matching the regexp is returned."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": r".*rainy"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
state = hass.states.get("calendar.private_private")
assert state.name == calendar.name
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a normal event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 17:00:00",
"end_time": "2017-11-27 18:00:00",
"location": "Hamburg",
"description": "Surprisingly rainy",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(20, 00))
async def test_filter_matching_past_event(mock_now, hass, calendar):
"""Test that the matching past event is not returned."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": "This is a normal event"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
state = hass.states.get("calendar.private_private")
assert state.name == calendar.name
assert state.state == "off"
@patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
async def test_no_result_with_filtering(mock_now, hass, calendar):
"""Test that nothing is returned since nothing matches."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{
"name": "Private",
"calendar": "Private",
"search": "This is a non-existing event",
}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
state = hass.states.get("calendar.private_private")
assert state.name == calendar.name
assert state.state == "off"
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 30))
async def test_all_day_event_returned(mock_now, hass, calendar):
"""Test that the event lasting the whole day is returned."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": ".*"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
state = hass.states.get("calendar.private_private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is an all day event",
"all_day": True,
"offset_reached": False,
"start_time": "2017-11-27 00:00:00",
"end_time": "2017-11-28 00:00:00",
"location": "Hamburg",
"description": "What a beautiful day",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(21, 45))
async def test_event_rrule(mock_now, hass, calendar):
"""Test that the future recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a recurring event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 22:00:00",
"end_time": "2017-11-27 22:30:00",
"location": "Hamburg",
"description": "Every day for a while",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(22, 15))
async def test_event_rrule_ongoing(mock_now, hass, calendar):
"""Test that the current recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a recurring event",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 22:00:00",
"end_time": "2017-11-27 22:30:00",
"location": "Hamburg",
"description": "Every day for a while",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(22, 45))
async def test_event_rrule_duration(mock_now, hass, calendar):
"""Test that the future recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a recurring event with a duration",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 23:00:00",
"end_time": "2017-11-27 23:30:00",
"location": "Hamburg",
"description": "Every day for a while as well",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(23, 15))
async def test_event_rrule_duration_ongoing(mock_now, hass, calendar):
"""Test that the ongoing recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a recurring event with a duration",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 23:00:00",
"end_time": "2017-11-27 23:30:00",
"location": "Hamburg",
"description": "Every day for a while as well",
}
@patch("homeassistant.util.dt.now", return_value=_local_datetime(23, 37))
async def test_event_rrule_endless(mock_now, hass, calendar):
"""Test that the endless recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a recurring event that never ends",
"all_day": False,
"offset_reached": False,
"start_time": "2017-11-27 23:45:00",
"end_time": "2017-11-27 23:59:59",
"location": "Hamburg",
"description": "Every day forever",
}
@patch(
"homeassistant.util.dt.now",
return_value=dt.as_local(datetime.datetime(2016, 12, 1, 17, 30)),
)
async def test_event_rrule_all_day(mock_now, hass, calendar):
"""Test that the recurring all day event is returned."""
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": ".*"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config})
await hass.async_block_till_done()
state = hass.states.get("calendar.private_private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is a recurring all day event",
"all_day": True,
"offset_reached": False,
"start_time": "2016-12-01 00:00:00",
"end_time": "2016-12-02 00:00:00",
"location": "Hamburg",
"description": "Groundhog Day",
}
@patch(
"homeassistant.util.dt.now",
return_value=dt.as_local(datetime.datetime(2015, 11, 27, 0, 15)),
)
async def test_event_rrule_hourly_on_first(mock_now, hass, calendar):
"""Test that the endless recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is an hourly recurring event",
"all_day": False,
"offset_reached": False,
"start_time": "2015-11-27 00:00:00",
"end_time": "2015-11-27 00:30:00",
"location": "Hamburg",
"description": "The bell tolls for thee",
}
@patch(
"homeassistant.util.dt.now",
return_value=dt.as_local(datetime.datetime(2015, 11, 27, 11, 15)),
)
async def test_event_rrule_hourly_on_last(mock_now, hass, calendar):
"""Test that the endless recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": "Private",
"message": "This is an hourly recurring event",
"all_day": False,
"offset_reached": False,
"start_time": "2015-11-27 11:00:00",
"end_time": "2015-11-27 11:30:00",
"location": "Hamburg",
"description": "The bell tolls for thee",
}
@patch(
"homeassistant.util.dt.now",
return_value=dt.as_local(datetime.datetime(2015, 11, 27, 0, 45)),
)
async def test_event_rrule_hourly_off_first(mock_now, hass, calendar):
"""Test that the endless recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
@patch(
"homeassistant.util.dt.now",
return_value=dt.as_local(datetime.datetime(2015, 11, 27, 11, 45)),
)
async def test_event_rrule_hourly_off_last(mock_now, hass, calendar):
"""Test that the endless recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
@patch(
"homeassistant.util.dt.now",
return_value=dt.as_local(datetime.datetime(2015, 11, 27, 12, 15)),
)
async def test_event_rrule_hourly_ended(mock_now, hass, calendar):
"""Test that the endless recurring event is returned."""
assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("calendar.private")
assert state.name == calendar.name
assert state.state == STATE_OFF
|
from typing import Any, Dict, Optional
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorEntity,
)
from . import DOMAIN, IncomfortChild
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up an InComfort/InTouch binary_sensor device."""
if discovery_info is None:
return
client = hass.data[DOMAIN]["client"]
heaters = hass.data[DOMAIN]["heaters"]
async_add_entities([IncomfortFailed(client, h) for h in heaters])
class IncomfortFailed(IncomfortChild, BinarySensorEntity):
"""Representation of an InComfort Failed sensor."""
def __init__(self, client, heater) -> None:
"""Initialize the binary sensor."""
super().__init__()
self._unique_id = f"{heater.serial_no}_failed"
self.entity_id = f"{BINARY_SENSOR_DOMAIN}.{DOMAIN}_failed"
self._name = "Boiler Fault"
self._client = client
self._heater = heater
@property
def is_on(self) -> bool:
"""Return the status of the sensor."""
return self._heater.status["is_failed"]
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the device state attributes."""
return {"fault_code": self._heater.status["fault_code"]}
|
import logging
from plexapi.exceptions import BadRequest, NotFound
from .errors import MediaNotFound
_LOGGER = logging.getLogger(__name__)
def lookup_movie(library_section, **kwargs):
"""Find a specific movie and return a Plex media object."""
try:
title = kwargs["title"]
except KeyError:
_LOGGER.error("Must specify 'title' for this search")
return None
try:
movies = library_section.search(**kwargs, libtype="movie", maxresults=3)
except BadRequest as err:
_LOGGER.error("Invalid search payload provided: %s", err)
return None
if not movies:
raise MediaNotFound(f"Movie {title}") from None
if len(movies) > 1:
exact_matches = [x for x in movies if x.title.lower() == title.lower()]
if len(exact_matches) == 1:
return exact_matches[0]
match_list = [f"{x.title} ({x.year})" for x in movies]
_LOGGER.warning("Multiple matches found during search: %s", match_list)
return None
return movies[0]
def lookup_tv(library_section, **kwargs):
"""Find TV media and return a Plex media object."""
season_number = kwargs.get("season_number")
episode_number = kwargs.get("episode_number")
try:
show_name = kwargs["show_name"]
show = library_section.get(show_name)
except KeyError:
_LOGGER.error("Must specify 'show_name' for this search")
return None
except NotFound as err:
raise MediaNotFound(f"Show {show_name}") from err
if not season_number:
return show
try:
season = show.season(int(season_number))
except NotFound as err:
raise MediaNotFound(f"Season {season_number} of {show_name}") from err
if not episode_number:
return season
try:
return season.episode(episode=int(episode_number))
except NotFound as err:
episode = f"S{str(season_number).zfill(2)}E{str(episode_number).zfill(2)}"
raise MediaNotFound(f"Episode {episode} of {show_name}") from err
def lookup_music(library_section, **kwargs):
"""Search for music and return a Plex media object."""
album_name = kwargs.get("album_name")
track_name = kwargs.get("track_name")
track_number = kwargs.get("track_number")
try:
artist_name = kwargs["artist_name"]
artist = library_section.get(artist_name)
except KeyError:
_LOGGER.error("Must specify 'artist_name' for this search")
return None
except NotFound as err:
raise MediaNotFound(f"Artist {artist_name}") from err
if album_name:
try:
album = artist.album(album_name)
except NotFound as err:
raise MediaNotFound(f"Album {album_name} by {artist_name}") from err
if track_name:
try:
return album.track(track_name)
except NotFound as err:
raise MediaNotFound(
f"Track {track_name} on {album_name} by {artist_name}"
) from err
if track_number:
for track in album.tracks():
if int(track.index) == int(track_number):
return track
raise MediaNotFound(
f"Track {track_number} on {album_name} by {artist_name}"
) from None
return album
if track_name:
try:
return artist.get(track_name)
except NotFound as err:
raise MediaNotFound(f"Track {track_name} by {artist_name}") from err
return artist
|
from iaqualink import AqualinkLightEffect
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import AqualinkEntity, refresh_system
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered lights."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkLight(dev))
async_add_entities(devs, True)
class HassAqualinkLight(AqualinkEntity, LightEntity):
"""Representation of a light."""
@property
def name(self) -> str:
"""Return the name of the light."""
return self.dev.label
@property
def is_on(self) -> bool:
"""Return whether the light is on or off."""
return self.dev.is_on
@refresh_system
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the light.
This handles brightness and light effects for lights that do support
them.
"""
brightness = kwargs.get(ATTR_BRIGHTNESS)
effect = kwargs.get(ATTR_EFFECT)
# For now I'm assuming lights support either effects or brightness.
if effect:
effect = AqualinkLightEffect[effect].value
await self.dev.set_effect(effect)
elif brightness:
# Aqualink supports percentages in 25% increments.
pct = int(round(brightness * 4.0 / 255)) * 25
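                # e.g. brightness 255 -> 100, 192 -> 75, 128 -> 50, 64 -> 25; values are
                # rounded to the nearest 25% step before being sent to the device.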
await self.dev.set_brightness(pct)
else:
await self.dev.turn_on()
@refresh_system
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the light."""
await self.dev.turn_off()
@property
def brightness(self) -> int:
"""Return current brightness of the light.
The scale needs converting between 0-100 and 0-255.
"""
        return round(self.dev.brightness * 255 / 100)
@property
def effect(self) -> str:
"""Return the current light effect if supported."""
return AqualinkLightEffect(self.dev.effect).name
@property
def effect_list(self) -> list:
"""Return supported light effects."""
return list(AqualinkLightEffect.__members__)
@property
def supported_features(self) -> int:
"""Return the list of features supported by the light."""
if self.dev.is_dimmer:
return SUPPORT_BRIGHTNESS
if self.dev.is_color:
return SUPPORT_EFFECT
return 0
|
import numpy as np
from scipy import linalg
from ..forward import is_fixed_orient, convert_forward_solution
from ..io.pick import pick_channels_evoked, pick_info, pick_channels_forward
from ..inverse_sparse.mxne_inverse import _make_dipoles_sparse
from ..minimum_norm.inverse import _log_exp_var
from ..utils import logger, verbose, _check_info_inv
from ..dipole import Dipole
from ._compute_beamformer import _prepare_beamformer_input
def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2,
picks=None):
"""RAP-MUSIC for evoked data.
Parameters
----------
data : array, shape (n_channels, n_times)
Evoked data.
info : dict
Measurement info.
times : array
Times.
forward : instance of Forward
Forward operator.
noise_cov : instance of Covariance
The noise covariance.
n_dipoles : int
The number of dipoles to estimate. The default value is 2.
picks : list of int
Caller ensures this is a list of int.
Returns
-------
dipoles : list of instances of Dipole
The dipole fits.
    explained_data : array
        Data explained by the dipoles using a least-squares fit with the
        selected active dipoles and their estimated orientations.
"""
info = pick_info(info, picks)
del picks
# things are much simpler if we avoid surface orientation
align = forward['source_nn'].copy()
if forward['surf_ori'] and not is_fixed_orient(forward):
forward = convert_forward_solution(forward, surf_ori=False)
is_free_ori, info, _, _, G, whitener, _, _ = _prepare_beamformer_input(
info, forward, noise_cov=noise_cov, rank=None)
forward = pick_channels_forward(forward, info['ch_names'], ordered=True)
del info
# whiten the data (leadfield already whitened)
M = np.dot(whitener, data)
del data
_, eig_vectors = linalg.eigh(np.dot(M, M.T))
phi_sig = eig_vectors[:, -n_dipoles:]
n_orient = 3 if is_free_ori else 1
G.shape = (G.shape[0], -1, n_orient)
gain = forward['sol']['data'].copy()
gain.shape = G.shape
n_channels = G.shape[0]
A = np.empty((n_channels, n_dipoles))
gain_dip = np.empty((n_channels, n_dipoles))
oris = np.empty((n_dipoles, 3))
poss = np.empty((n_dipoles, 3))
G_proj = G.copy()
phi_sig_proj = phi_sig.copy()
idxs = list()
for k in range(n_dipoles):
subcorr_max = -1.
source_idx, source_ori, source_pos = 0, [0, 0, 0], [0, 0, 0]
for i_source in range(G.shape[1]):
Gk = G_proj[:, i_source]
subcorr, ori = _compute_subcorr(Gk, phi_sig_proj)
if subcorr > subcorr_max:
subcorr_max = subcorr
source_idx = i_source
source_ori = ori
source_pos = forward['source_rr'][i_source]
if n_orient == 3 and align is not None:
surf_normal = forward['source_nn'][3 * i_source + 2]
# make sure ori is aligned to the surface orientation
source_ori *= np.sign(source_ori @ surf_normal) or 1.
if n_orient == 1:
source_ori = forward['source_nn'][i_source]
idxs.append(source_idx)
if n_orient == 3:
Ak = np.dot(G[:, source_idx], source_ori)
else:
Ak = G[:, source_idx, 0]
A[:, k] = Ak
oris[k] = source_ori
poss[k] = source_pos
logger.info("source %s found: p = %s" % (k + 1, source_idx))
if n_orient == 3:
logger.info("ori = %s %s %s" % tuple(oris[k]))
projection = _compute_proj(A[:, :k + 1])
G_proj = np.einsum('ab,bso->aso', projection, G)
phi_sig_proj = np.dot(projection, phi_sig)
del G, G_proj
sol = linalg.lstsq(A, M)[0]
if n_orient == 3:
X = sol[:, np.newaxis] * oris[:, :, np.newaxis]
X.shape = (-1, len(times))
else:
X = sol
gain_active = gain[:, idxs]
if n_orient == 3:
gain_dip = (oris * gain_active).sum(-1)
idxs = np.array(idxs)
active_set = np.array(
[[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel()
else:
gain_dip = gain_active[:, :, 0]
active_set = idxs
gain_active = whitener @ gain_active.reshape(gain.shape[0], -1)
assert gain_active.shape == (n_channels, X.shape[0])
explained_data = gain_dip @ sol
M_estimate = whitener @ explained_data
_log_exp_var(M, M_estimate)
tstep = np.median(np.diff(times)) if len(times) > 1 else 1.
dipoles = _make_dipoles_sparse(
X, active_set, forward, times[0], tstep, M,
gain_active, active_is_idx=True)
for dipole, ori in zip(dipoles, oris):
signs = np.sign((dipole.ori * ori).sum(-1, keepdims=True))
dipole.ori *= signs
dipole.amplitude *= signs[:, 0]
logger.info('[done]')
return dipoles, explained_data
def _make_dipoles(times, poss, oris, sol, gof):
"""Instantiate a list of Dipoles.
Parameters
----------
times : array, shape (n_times,)
The time instants.
poss : array, shape (n_dipoles, 3)
The dipoles' positions.
oris : array, shape (n_dipoles, 3)
The dipoles' orientations.
sol : array, shape (n_times,)
The dipoles' amplitudes over time.
gof : array, shape (n_times,)
The goodness of fit of the dipoles.
Shared between all dipoles.
Returns
-------
dipoles : list
The list of Dipole instances.
"""
oris = np.array(oris)
dipoles = []
for i_dip in range(poss.shape[0]):
i_pos = poss[i_dip][np.newaxis, :].repeat(len(times), axis=0)
i_ori = oris[i_dip][np.newaxis, :].repeat(len(times), axis=0)
dipoles.append(Dipole(times, i_pos, sol[i_dip], i_ori, gof))
return dipoles
def _compute_subcorr(G, phi_sig):
"""Compute the subspace correlation."""
Ug, Sg, Vg = linalg.svd(G, full_matrices=False)
    # Now we look at the actual rank of the forward fields
    # in G and handle the fact that it might be rank deficient,
    # e.g. when using MEG and a sphere model for which the
    # radial component will be truly 0.
rank = np.sum(Sg > (Sg[0] * 1e-6))
if rank == 0:
return 0, np.zeros(len(G))
rank = max(rank, 2) # rank cannot be 1
Ug, Sg, Vg = Ug[:, :rank], Sg[:rank], Vg[:rank]
tmp = np.dot(Ug.T.conjugate(), phi_sig)
Uc, Sc, _ = linalg.svd(tmp, full_matrices=False)
X = np.dot(Vg.T / Sg[None, :], Uc[:, 0]) # subcorr
return Sc[0], X / linalg.norm(X)
def _compute_proj(A):
"""Compute the orthogonal projection operation for a manifold vector A."""
U, _, _ = linalg.svd(A, full_matrices=False)
return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate())
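# Illustrative property of the projector above (a sanity check, not part of the
# original module): for any full-column-rank A, the returned matrix annihilates
# the span of A, i.e. np.allclose(_compute_proj(A) @ A, 0.) holds.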
@verbose
def rap_music(evoked, forward, noise_cov, n_dipoles=5, return_residual=False,
verbose=None):
"""RAP-MUSIC source localization method.
Compute Recursively Applied and Projected MUltiple SIgnal Classification
(RAP-MUSIC) on evoked data.
.. note:: The goodness of fit (GOF) of all the returned dipoles is the
same and corresponds to the GOF of the full set of dipoles.
Parameters
----------
evoked : instance of Evoked
Evoked data to localize.
forward : instance of Forward
Forward operator.
noise_cov : instance of Covariance
The noise covariance.
n_dipoles : int
The number of dipoles to look for. The default value is 5.
return_residual : bool
If True, the residual is returned as an Evoked instance.
%(verbose)s
Returns
-------
dipoles : list of instance of Dipole
The dipole fits.
residual : instance of Evoked
The residual a.k.a. data not explained by the dipoles.
Only returned if return_residual is True.
See Also
--------
mne.fit_dipole
Notes
-----
The references are:
J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively
applied and projected (RAP) MUSIC. Signal Processing, IEEE Trans. 47, 2
(February 1999), 332-340.
DOI=10.1109/78.740118 https://doi.org/10.1109/78.740118
Mosher, J.C.; Leahy, R.M., EEG and MEG source localization using
recursively applied (RAP) MUSIC, Signals, Systems and Computers, 1996.
pp.1201,1207 vol.2, 3-6 Nov. 1996
doi: 10.1109/ACSSC.1996.599135
.. versionadded:: 0.9.0
"""
info = evoked.info
data = evoked.data
times = evoked.times
picks = _check_info_inv(info, forward, data_cov=None, noise_cov=noise_cov)
data = data[picks]
dipoles, explained_data = _apply_rap_music(data, info, times, forward,
noise_cov, n_dipoles,
picks)
if return_residual:
residual = evoked.copy()
selection = [info['ch_names'][p] for p in picks]
residual = pick_channels_evoked(residual,
include=selection)
residual.data -= explained_data
active_projs = [p for p in residual.info['projs'] if p['active']]
for p in active_projs:
p['active'] = False
residual.add_proj(active_projs, remove_existing=True)
residual.apply_proj()
return dipoles, residual
else:
return dipoles
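# Hedged usage sketch (the evoked/forward/noise_cov objects are placeholders the
# caller must provide, e.g. loaded with mne.read_evokeds and related readers):
#     dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2,
#                                   return_residual=True)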
|
import numpy as np
from scattertext.termscoring.ScaledFScore import ScaledFZScore
from scattertext.Common import DEFAULT_SCALER_ALGO, DEFAULT_BETA
from scattertext.termscoring import ScaledFScore
from scipy.stats import norm
from scattertext.termsignificance.TermSignificance import TermSignificance
class ScaledFScoreSignificance(TermSignificance):
def __init__(self, scaler_algo = DEFAULT_SCALER_ALGO, beta=DEFAULT_BETA):
'''
Parameters
----------
scaler_algo : str
            Function that scales an array to the range [0, 1]. Use 'percentile' or 'normcdf'. Defaults to 'normcdf'.
beta : float
Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Defaults to 1.
'''
self.scaler_algo = scaler_algo
self.beta = beta
def get_name(self):
return "Scaled F-Score"
def get_p_vals(self, X):
'''
        Imputes p-values from the Z-scores of `ScaledFScore` scores, assuming
        (incorrectly) that the scaled F-scores are normally distributed.
Parameters
----------
X : np.array
Array of word counts, shape (N, 2) where N is the vocab size. X[:,0] is the
positive class, while X[:,1] is the negative class.
Returns
-------
np.array of p-values
'''
z_scores = ScaledFZScore(self.scaler_algo, self.beta).get_scores(X[:,0], X[:,1])
return norm.cdf(z_scores)
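# Minimal usage sketch (an assumption about intended use, not part of the library):
# column 0 of X holds positive-class word counts and column 1 the negative-class
# counts, one row per vocabulary term.
if __name__ == '__main__':
    _toy_counts = np.array([[10, 2],
                            [3, 9],
                            [5, 5]])
    print(ScaledFScoreSignificance().get_p_vals(_toy_counts))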
|
from __future__ import unicode_literals
import traceback
from lib.data.data import pyoptions
from lib.fun.filter import filterforfun
from lib.fun.fun import cool, unique, finalsavepath, finishprinter
def magic(func):
storepath = finalsavepath(func.__name__)
try:
with open(storepath, "a") as f:
for item in unique(func()):
item = filterforfun(item)
if item:
f.write(item + pyoptions.CRLF)
finishprinter(storepath)
    except Exception:
        print(cool.red('[-] Exception as follows:') + pyoptions.CRLF)
        traceback.print_exc()
|
from mock import Mock
from paasta_tools.frameworks import constraints
def test_nested_inc_increments_by_step():
op = "MAX_PER"
av = "default"
an = "pool"
st: constraints.ConstraintState = {}
constraints.nested_inc(op, None, av, an, st, 3)
assert st["MAX_PER"]["pool"]["default"] == 3
constraints.nested_inc(op, None, av, an, st, -1)
assert st["MAX_PER"]["pool"]["default"] == 2
def test_check_offer_constraints_returns_true_when_satisfied():
attr = Mock(text=Mock(value="test"))
attr.configure_mock(name="pool")
offer = Mock(attributes=[attr])
cons = [
["pool", "MAX_PER", "5"],
["pool", "EQUALS", "test"],
["pool", "LIKE", "te.*$"],
["pool", "UNLIKE", "ta.*"],
]
state = {"MAX_PER": {"pool": {"test": 0}}}
assert constraints.check_offer_constraints(offer, cons, state) is True
state = {"MAX_PER": {"pool": {"test": 6}}}
assert constraints.check_offer_constraints(offer, cons, state) is False
def test_update_constraint_state_increments_counters():
attr = Mock(text=Mock(value="test"))
attr.configure_mock(name="pool")
offer = Mock(attributes=[attr])
cons = [["pool", "MAX_PER", "5"]]
state: constraints.ConstraintState = {}
constraints.update_constraint_state(offer, cons, state)
assert state["MAX_PER"]["pool"]["test"] == 1
|
import logging
from random import randint
from typing import Optional
from aiopvpc import PVPCData
from homeassistant import config_entries
from homeassistant.const import CONF_NAME, CURRENCY_EURO, ENERGY_KILO_WATT_HOUR
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_call_later, async_track_time_change
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.dt as dt_util
from .const import ATTR_TARIFF
_LOGGER = logging.getLogger(__name__)
ATTR_PRICE = "price"
ICON = "mdi:currency-eur"
UNIT = f"{CURRENCY_EURO}/{ENERGY_KILO_WATT_HOUR}"
_DEFAULT_TIMEOUT = 10
async def async_setup_entry(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry, async_add_entities
):
"""Set up the electricity price sensor from config_entry."""
name = config_entry.data[CONF_NAME]
pvpc_data_handler = PVPCData(
tariff=config_entry.data[ATTR_TARIFF],
local_timezone=hass.config.time_zone,
websession=async_get_clientsession(hass),
logger=_LOGGER,
timeout=_DEFAULT_TIMEOUT,
)
async_add_entities(
[ElecPriceSensor(name, config_entry.unique_id, pvpc_data_handler)], False
)
class ElecPriceSensor(RestoreEntity):
"""Class to hold the prices of electricity as a sensor."""
unit_of_measurement = UNIT
icon = ICON
should_poll = False
def __init__(self, name, unique_id, pvpc_data_handler):
"""Initialize the sensor object."""
self._name = name
self._unique_id = unique_id
self._pvpc_data = pvpc_data_handler
self._num_retries = 0
self._hourly_tracker = None
self._price_tracker = None
async def async_will_remove_from_hass(self) -> None:
"""Cancel listeners for sensor updates."""
self._hourly_tracker()
self._price_tracker()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._pvpc_data.state = state.state
# Update 'state' value in hour changes
self._hourly_tracker = async_track_time_change(
self.hass, self.update_current_price, second=[0], minute=[0]
)
# Update prices at random time, 2 times/hour (don't want to upset API)
random_minute = randint(1, 29)
mins_update = [random_minute, random_minute + 30]
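        # e.g. random_minute == 17 schedules the price refresh at HH:17 and HH:47.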
self._price_tracker = async_track_time_change(
self.hass, self.async_update_prices, second=[0], minute=mins_update
)
_LOGGER.debug(
"Setup of price sensor %s (%s) with tariff '%s', "
"updating prices each hour at %s min",
self.name,
self.entity_id,
self._pvpc_data.tariff,
mins_update,
)
await self.async_update_prices(dt_util.utcnow())
self.update_current_price(dt_util.utcnow())
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._pvpc_data.state
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._pvpc_data.state_available
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._pvpc_data.attributes
@callback
def update_current_price(self, now):
"""Update the sensor state, by selecting the current price for this hour."""
self._pvpc_data.process_state_and_attributes(now)
self.async_write_ha_state()
async def async_update_prices(self, now):
"""Update electricity prices from the ESIOS API."""
prices = await self._pvpc_data.async_update_prices(now)
if not prices and self._pvpc_data.source_available:
self._num_retries += 1
if self._num_retries > 2:
_LOGGER.warning(
"%s: repeated bad data update, mark component as unavailable source",
self.entity_id,
)
self._pvpc_data.source_available = False
return
retry_delay = 2 * self._num_retries * self._pvpc_data.timeout
_LOGGER.debug(
"%s: Bad update[retry:%d], will try again in %d s",
self.entity_id,
self._num_retries,
retry_delay,
)
async_call_later(self.hass, retry_delay, self.async_update_prices)
return
if not prices:
_LOGGER.debug("%s: data source is not yet available", self.entity_id)
return
self._num_retries = 0
if not self._pvpc_data.source_available:
self._pvpc_data.source_available = True
_LOGGER.warning("%s: component has recovered data access", self.entity_id)
self.update_current_price(now)
|
from nikola.plugin_categories import Taxonomy
from nikola import utils
class ClassifyAuthors(Taxonomy):
"""Classify the posts by authors."""
name = "classify_authors"
classification_name = "author"
overview_page_variable_name = "authors"
more_than_one_classifications_per_post = False
has_hierarchy = False
template_for_classification_overview = "authors.tmpl"
apply_to_posts = True
apply_to_pages = False
minimum_post_count_per_classification_in_overview = 1
omit_empty_classifications = False
add_other_languages_variable = True
path_handler_docstrings = {
'author_index': """ Link to the authors index.
Example:
link://authors/ => /authors/index.html""",
'author': """Link to an author's page.
Example:
link://author/joe => /authors/joe.html""",
'author_atom': """Link to an author's Atom feed.
Example:
link://author_atom/joe => /authors/joe.atom""",
'author_rss': """Link to an author's RSS feed.
Example:
link://author_rss/joe => /authors/joe.xml""",
}
def set_site(self, site):
"""Set Nikola site."""
super().set_site(site)
self.show_list_as_index = site.config['AUTHOR_PAGES_ARE_INDEXES']
self.more_than_one_classifications_per_post = site.config.get('MULTIPLE_AUTHORS_PER_POST', False)
self.template_for_single_list = "authorindex.tmpl" if self.show_list_as_index else "author.tmpl"
self.translation_manager = utils.ClassificationTranslationManager()
def is_enabled(self, lang=None):
"""Return True if this taxonomy is enabled, or False otherwise."""
if not self.site.config["ENABLE_AUTHOR_PAGES"]:
return False
if lang is not None:
return self.generate_author_pages
return True
def classify(self, post, lang):
"""Classify the given post for the given language."""
if self.more_than_one_classifications_per_post:
return post.authors(lang=lang)
else:
return [post.author(lang=lang)]
def get_classification_friendly_name(self, classification, lang, only_last_component=False):
"""Extract a friendly name from the classification."""
return classification
def get_overview_path(self, lang, dest_type='page'):
"""Return a path for the list of all classifications."""
path = self.site.config['AUTHOR_PATH'](lang)
return [component for component in path.split('/') if component], 'always'
def get_path(self, classification, lang, dest_type='page'):
"""Return a path for the given classification."""
if self.site.config['SLUG_AUTHOR_PATH']:
slug = utils.slugify(classification, lang)
else:
slug = classification
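        # e.g. with AUTHOR_PATH == "authors" and slugification enabled, "Joe Doe"
        # yields the path components ["authors", "joe-doe"] from the return below.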
return [self.site.config['AUTHOR_PATH'](lang), slug], 'auto'
def provide_overview_context_and_uptodate(self, lang):
"""Provide data for the context and the uptodate list for the list of all classifiations."""
kw = {
"messages": self.site.MESSAGES,
}
context = {
"title": kw["messages"][lang]["Authors"],
"description": kw["messages"][lang]["Authors"],
"permalink": self.site.link("author_index", None, lang),
"pagekind": ["list", "authors_page"],
}
kw.update(context)
return context, kw
def provide_context_and_uptodate(self, classification, lang, node=None):
"""Provide data for the context and the uptodate list for the list of the given classifiation."""
descriptions = self.site.config['AUTHOR_PAGES_DESCRIPTIONS']
kw = {
"messages": self.site.MESSAGES,
}
context = {
"author": classification,
"title": kw["messages"][lang]["Posts by %s"] % classification,
"description": descriptions[lang][classification] if lang in descriptions and classification in descriptions[lang] else None,
"pagekind": ["index" if self.show_list_as_index else "list", "author_page"],
}
kw.update(context)
return context, kw
def get_other_language_variants(self, classification, lang, classifications_per_language):
"""Return a list of variants of the same author in other languages."""
return self.translation_manager.get_translations_as_list(classification, lang, classifications_per_language)
def postprocess_posts_per_classification(self, posts_per_classification_per_language, flat_hierarchy_per_lang=None, hierarchy_lookup_per_lang=None):
"""Rearrange, modify or otherwise use the list of posts per classification and per language."""
more_than_one = False
for lang, posts_per_author in posts_per_classification_per_language.items():
authors = set()
for author, posts in posts_per_author.items():
for post in posts:
if not self.site.config["SHOW_UNTRANSLATED_POSTS"] and not post.is_translation_available(lang):
continue
authors.add(author)
if len(authors) > 1:
more_than_one = True
self.generate_author_pages = self.site.config["ENABLE_AUTHOR_PAGES"] and more_than_one
self.site.GLOBAL_CONTEXT["author_pages_generated"] = self.generate_author_pages
self.translation_manager.add_defaults(posts_per_classification_per_language)
|
import pytest
import matchzoo as mz
@pytest.fixture
def term_index():
return {'G': 1, 'C': 2, 'D': 3, 'A': 4, '_PAD': 0}
def test_embedding(term_index):
embed = mz.embedding.load_from_file(mz.datasets.embeddings.EMBED_RANK)
matrix = embed.build_matrix(term_index)
assert matrix.shape == (len(term_index), 50)
embed = mz.embedding.load_from_file(mz.datasets.embeddings.EMBED_10_GLOVE,
mode='glove')
matrix = embed.build_matrix(term_index)
assert matrix.shape == (len(term_index), 10)
assert embed.input_dim == 5
|
import numpy as np
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.datasets.voc import voc_utils
from chainercv.utils import read_image
from chainercv.utils import read_label
class VOCInstanceSegmentationDataset(GetterDataset):
"""Instance segmentation dataset for PASCAL `VOC2012`_.
.. _`VOC2012`: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/
Args:
data_dir (string): Path to the root of the training data. If this is
:obj:`auto`, this class will automatically download data for you
under :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/voc`.
split ({'train', 'val', 'trainval'}): Select a split of the dataset.
This dataset returns the following data.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
:obj:`mask`, ":math:`(R, H, W)`", :obj:`bool`, --
:obj:`label`, ":math:`(R,)`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
"""
def __init__(self, data_dir='auto', split='train'):
super(VOCInstanceSegmentationDataset, self).__init__()
if split not in ['train', 'trainval', 'val']:
raise ValueError(
'please pick split from \'train\', \'trainval\', \'val\'')
if data_dir == 'auto':
data_dir = voc_utils.get_voc('2012', split)
id_list_file = os.path.join(
data_dir, 'ImageSets/Segmentation/{0}.txt'.format(split))
self.ids = [id_.strip() for id_ in open(id_list_file)]
self.data_dir = data_dir
self.add_getter('img', self._get_image)
self.add_getter(('mask', 'label'), self._get_annotations)
def __len__(self):
return len(self.ids)
def _get_image(self, i):
data_id = self.ids[i]
img_file = os.path.join(
self.data_dir, 'JPEGImages', data_id + '.jpg')
return read_image(img_file, color=True)
def _get_annotations(self, i):
data_id = self.ids[i]
label_img, inst_img = self._load_label_inst(data_id)
mask, label = voc_utils.image_wise_to_instance_wise(
label_img, inst_img)
return mask, label
def _load_label_inst(self, data_id):
label_file = os.path.join(
self.data_dir, 'SegmentationClass', data_id + '.png')
inst_file = os.path.join(
self.data_dir, 'SegmentationObject', data_id + '.png')
label_img = read_label(label_file, dtype=np.int32)
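        # In the VOC annotations 255 marks ambiguous/border pixels and, in the
        # object map, 0 marks background; both are remapped to -1 below.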
label_img[label_img == 255] = -1
inst_img = read_label(inst_file, dtype=np.int32)
inst_img[inst_img == 0] = -1
inst_img[inst_img == 255] = -1
return label_img, inst_img
|
import argparse
import glob
import os
import time
import random
import sys
def clamp_to_min_max(value, min, max):
if value > max:
value = max
elif value < min:
value = min
return value
def clamp_to_u8(value):
return clamp_to_min_max(value, 0, 255)
def parse_args():
parser = argparse.ArgumentParser(description="Set a custom colour effect")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
args = parser.parse_args()
return args
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
#choices = [b'\xff\x00\x00', b'\xff\xff\x00', b'\x00\xff\x00', b'\x00\xff\xff', b'\x00\x00\xff', b'\xff\x00\xff']
choices = [b'\xff\x00\x00']
for repeat in range(0, 10):
payload = b''
for i in range(0, 15):
payload += random.choice(choices)
set_colour_filename = os.path.join(mouse_dir, "set_key_row")
set_custom_mode_filename = os.path.join(mouse_dir, "mode_custom")
with open(set_colour_filename, 'wb') as set_colour_file:
set_colour_file.write(payload)
with open(set_custom_mode_filename, 'w') as set_custom_mode_file:
set_custom_mode_file.write("1")
time.sleep(0.2)
print("Done")
if __name__ == '__main__':
run()
|
from aiohttp import ClientConnectionError
from homeassistant.components.bond.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from homeassistant.setup import async_setup_component
from .common import patch_bond_version, patch_setup_entry, setup_bond_entity
from tests.common import MockConfigEntry
async def test_async_setup_no_domain_config(hass: HomeAssistant):
"""Test setup without configuration is noop."""
result = await async_setup_component(hass, DOMAIN, {})
assert result is True
async def test_async_setup_raises_entry_not_ready(hass: HomeAssistant):
"""Test that it throws ConfigEntryNotReady when exception occurs during setup."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
with patch_bond_version(side_effect=ClientConnectionError()):
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.state == ENTRY_STATE_SETUP_RETRY
async def test_async_setup_entry_sets_up_hub_and_supported_domains(hass: HomeAssistant):
"""Test that configuring entry sets up cover domain."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
with patch_bond_version(
return_value={
"bondid": "test-bond-id",
"target": "test-model",
"fw_ver": "test-version",
}
):
with patch_setup_entry(
"cover"
) as mock_cover_async_setup_entry, patch_setup_entry(
"fan"
) as mock_fan_async_setup_entry, patch_setup_entry(
"light"
) as mock_light_async_setup_entry, patch_setup_entry(
"switch"
) as mock_switch_async_setup_entry:
result = await setup_bond_entity(hass, config_entry, patch_device_ids=True)
assert result is True
await hass.async_block_till_done()
assert config_entry.entry_id in hass.data[DOMAIN]
assert config_entry.state == ENTRY_STATE_LOADED
assert config_entry.unique_id == "test-bond-id"
# verify hub device is registered correctly
device_registry = await dr.async_get_registry(hass)
hub = device_registry.async_get_device(
identifiers={(DOMAIN, "test-bond-id")}, connections=set()
)
assert hub.name == "test-bond-id"
assert hub.manufacturer == "Olibra"
assert hub.model == "test-model"
assert hub.sw_version == "test-version"
# verify supported domains are setup
assert len(mock_cover_async_setup_entry.mock_calls) == 1
assert len(mock_fan_async_setup_entry.mock_calls) == 1
assert len(mock_light_async_setup_entry.mock_calls) == 1
assert len(mock_switch_async_setup_entry.mock_calls) == 1
async def test_unload_config_entry(hass: HomeAssistant):
"""Test that configuration entry supports unloading."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
result = await setup_bond_entity(
hass,
config_entry,
patch_version=True,
patch_device_ids=True,
patch_platforms=True,
)
assert result is True
await hass.async_block_till_done()
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.entry_id not in hass.data[DOMAIN]
assert config_entry.state == ENTRY_STATE_NOT_LOADED
|
import unittest
import httpretty
from mock import patch, mock_open, MagicMock
import logging
import os
import json
import time
import acdcli.api.oauth as oauth
from acdcli.api.account import _Usage
from acdcli.api.common import *
from acdcli.api.client import ACDClient
from .test_helper import gen_rand_id
logging.basicConfig(level=logging.INFO)
path = os.path.join(os.path.dirname(__file__), 'dummy_files')
class APITestCase(unittest.TestCase):
def setUp(self):
self.acd = ACDClient(path)
self.acd.BOReq._wait = lambda: None
def testMetadataUrl(self):
self.assertEqual(self.acd.metadata_url, 'https://cdws.us-east-1.amazonaws.com/drive/v1/')
def testContentUrl(self):
self.assertEqual(self.acd.content_url, 'https://content-na.drive.amazonaws.com/cdproxy/')
def testValidID0(self):
self.assertTrue(is_valid_id('abcdefghijklmnopqrstuv'))
def testValidID1(self):
self.assertTrue(is_valid_id('0123456789012345678901'))
def testValidID2(self):
self.assertTrue(is_valid_id('a0b1c2d3e4f5g6h7i8j9k0'))
def testValidID3(self):
self.assertTrue(is_valid_id('a0b1c2d3e4f--6h7i8j9k0'))
def testValidIDs(self):
for _ in range(1000):
self.assertTrue(is_valid_id(gen_rand_id()))
def testInvalidID0(self):
self.assertFalse(is_valid_id(''))
def testInvalidID1(self):
self.assertFalse(is_valid_id('äbcdéfghíjklmnöpqrstüv'))
def testInvalidID2(self):
self.assertFalse(is_valid_id('abcdefghijklmnopqrstu'))
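    # The ID format exercised above appears to be exactly 22 characters drawn from
    # letters, digits, '-' and '_'; shorter, longer or non-ASCII strings are rejected.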
#
# account
#
@httpretty.activate
def testUsage(self):
httpretty. \
register_uri(httpretty.GET, self.acd.metadata_url + 'account/usage',
body=json.dumps({"lastCalculated": "2014-08-13T23:17:41.365Z",
"video": {"billable": {"bytes": 23524252, "count": 22},
"total": {"bytes": 23524252, "count": 22}},
"other": {"billable": {"bytes": 29999771, "count": 871},
"total": {"bytes": 29999771, "count": 871}},
"doc": {"billable": {"bytes": 807170, "count": 10},
"total": {"bytes": 807170, "count": 10}},
"photo": {"billable": {"bytes": 9477988, "count": 25},
"total": {"bytes": 9477988, "count": 25}}})
)
self.assertIsInstance(self.acd.get_account_usage(), _Usage)
@httpretty.activate
def testUsageEmpty(self):
httpretty.register_uri(httpretty.GET, self.acd.metadata_url + 'account/usage', body='{}')
self.assertEqual(str(self.acd.get_account_usage()), '')
#
# metadata
#
@httpretty.activate
def testChanges(self):
httpretty.register_uri(httpretty.POST, self.acd.metadata_url + 'changes',
body='{"checkpoint": "foo", "reset": true, '
'"nodes": [ {"kind": "FILE", "status": "TRASH"} ], '
'"statusCode": 200}\n'
'{"end": true}')
tmp = self.acd.get_changes()
changesets = [c for c in self.acd._iter_changes_lines(tmp)]
self.assertEqual(len(changesets), 1)
changeset = changesets[0]
self.assertEqual(len(changeset.nodes), 1)
self.assertEqual(len(changeset.purged_nodes), 0)
self.assertEqual(changeset.checkpoint, 'foo')
self.assertTrue(changeset.reset)
@httpretty.activate
def testChangesMissingEnd(self):
httpretty.register_uri(httpretty.POST, self.acd.metadata_url + 'changes',
body='{"checkpoint": "foo", "reset": true, "nodes": [], '
'"statusCode": 200}\n')
tmp = self.acd.get_changes()
changesets = [c for c in self.acd._iter_changes_lines(tmp)]
self.assertEqual(len(changesets), 1)
changeset = changesets[0]
self.assertEqual(len(changeset.nodes), 0)
self.assertEqual(len(changeset.purged_nodes), 0)
self.assertEqual(changeset.checkpoint, 'foo')
self.assertTrue(changeset.reset)
@httpretty.activate
def testChangesCorruptJSON(self):
httpretty.register_uri(httpretty.POST, self.acd.metadata_url + 'changes',
body='{"checkpoint": }')
with self.assertRaises(RequestError):
tmp = self.acd.get_changes()
[cs for cs in self.acd._iter_changes_lines(tmp)]
#
# oauth
#
dummy_token = {'access_token': 'foo', 'expires_in': 3600, 'refresh_token': 'bar'}
def testOAuthActualHandler(self):
self.assertIsInstance(self.acd.handler, oauth.AppspotOAuthHandler)
@httpretty.activate
def testOAuthAppSpotRefresh(self):
httpretty.register_uri(httpretty.POST, oauth.AppspotOAuthHandler.APPSPOT_URL,
body=json.dumps(self.dummy_token))
exp_token = {'access_token': '', 'expires_in': 3600, 'exp_time': 0.0, 'refresh_token': ''}
mock_file = mock_open(read_data=json.dumps(exp_token))
os.path.isfile = MagicMock()
with patch('builtins.open', mock_file, create=True):
with patch('os.fsync', MagicMock):
with patch('os.rename', MagicMock):
h = oauth.AppspotOAuthHandler('')
mock_file.assert_any_call(oauth.OAuthHandler.OAUTH_DATA_FILE)
self.assertIn(oauth.OAuthHandler.KEYS.EXP_TIME, h.oauth_data)
self.assertGreater(h.oauth_data[oauth.OAuthHandler.KEYS.EXP_TIME], time.time())
mock_file().write.assert_any_call(str(h.oauth_data[oauth.AppspotOAuthHandler.KEYS.EXP_TIME]))
def testOAuthLocalRefresh(self):
# TODO: find out how to mock multiple files
pass
def testOAuthValidation(self):
s = json.dumps(self.dummy_token)
o = oauth.OAuthHandler.validate(s)
self.assertIsInstance(o, dict)
def testOAuthValidationMissingRefresh(self):
inv = json.dumps({'access_token': 'foo', 'expires_in': 3600})
with self.assertRaises(RequestError):
oauth.OAuthHandler.validate(inv)
def testOAuthValidationMissingAccess(self):
inv = json.dumps({'expires_in': 3600, 'refresh_token': 'bar'})
with self.assertRaises(RequestError):
oauth.OAuthHandler.validate(inv)
def testOAuthValidationMissingExpiration(self):
inv = json.dumps({'access_token': 'foo', 'refresh_token': 'bar'})
with self.assertRaises(RequestError):
oauth.OAuthHandler.validate(inv)
|
import urllib2
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
class DropwizardCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(DropwizardCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DropwizardCollector, self).get_default_config()
config.update({
'host': '127.0.0.1',
'port': 8081,
'path': 'dropwizard',
})
return config
def collect(self):
if json is None:
self.log.error('Unable to import json')
return {}
url = 'http://%s:%i/metrics' % (
self.config['host'], int(self.config['port']))
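        # With the default config this resolves to http://127.0.0.1:8081/metrics,
        # the admin endpoint where Dropwizard serves its metrics JSON.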
try:
response = urllib2.urlopen(url)
except urllib2.HTTPError as err:
self.log.error("%s: %s", url, err)
return
try:
result = json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as" +
" a json object")
return
metrics = {}
memory = result['jvm']['memory']
mempool = memory['memory_pool_usages']
jvm = result['jvm']
thread_st = jvm['thread-states']
metrics['jvm.memory.totalInit'] = memory['totalInit']
metrics['jvm.memory.totalUsed'] = memory['totalUsed']
metrics['jvm.memory.totalMax'] = memory['totalMax']
metrics['jvm.memory.totalCommitted'] = memory['totalCommitted']
metrics['jvm.memory.heapInit'] = memory['heapInit']
metrics['jvm.memory.heapUsed'] = memory['heapUsed']
metrics['jvm.memory.heapMax'] = memory['heapMax']
metrics['jvm.memory.heapCommitted'] = memory['heapCommitted']
metrics['jvm.memory.heap_usage'] = memory['heap_usage']
metrics['jvm.memory.non_heap_usage'] = memory['non_heap_usage']
metrics['jvm.memory.code_cache'] = mempool['Code Cache']
metrics['jvm.memory.eden_space'] = mempool['PS Eden Space']
metrics['jvm.memory.old_gen'] = mempool['PS Old Gen']
metrics['jvm.memory.perm_gen'] = mempool['PS Perm Gen']
metrics['jvm.memory.survivor_space'] = mempool['PS Survivor Space']
metrics['jvm.daemon_thread_count'] = jvm['daemon_thread_count']
metrics['jvm.thread_count'] = jvm['thread_count']
metrics['jvm.fd_usage'] = jvm['fd_usage']
metrics['jvm.thread_states.timed_waiting'] = thread_st['timed_waiting']
metrics['jvm.thread_states.runnable'] = thread_st['runnable']
metrics['jvm.thread_states.blocked'] = thread_st['blocked']
metrics['jvm.thread_states.waiting'] = thread_st['waiting']
metrics['jvm.thread_states.new'] = thread_st['new']
metrics['jvm.thread_states.terminated'] = thread_st['terminated']
for key in metrics:
self.publish(key, metrics[key])
|
import asyncio
from datetime import timedelta
import logging
from vilfo import Client as VilfoClient
from vilfo.exceptions import VilfoException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import Throttle
from .const import ATTR_BOOT_TIME, ATTR_LOAD, DOMAIN, ROUTER_DEFAULT_HOST
PLATFORMS = ["sensor"]
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the Vilfo Router component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Vilfo Router from a config entry."""
host = entry.data[CONF_HOST]
access_token = entry.data[CONF_ACCESS_TOKEN]
vilfo_router = VilfoRouterData(hass, host, access_token)
await vilfo_router.async_update()
if not vilfo_router.available:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = vilfo_router
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class VilfoRouterData:
"""Define an object to hold sensor data."""
def __init__(self, hass, host, access_token):
"""Initialize."""
self._vilfo = VilfoClient(host, access_token)
self.hass = hass
self.host = host
self.available = False
self.firmware_version = None
self.mac_address = self._vilfo.mac
self.data = {}
self._unavailable_logged = False
@property
def unique_id(self):
"""Get the unique_id for the Vilfo Router."""
if self.mac_address:
return self.mac_address
if self.host == ROUTER_DEFAULT_HOST:
return self.host
return self.host
def _fetch_data(self):
board_information = self._vilfo.get_board_information()
load = self._vilfo.get_load()
return {
"board_information": board_information,
"load": load,
}
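    # Note (illustrative, inferred from async_update below): the dict returned
    # by _fetch_data is expected to provide board_information["version"],
    # board_information["bootTime"] and the raw load value.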
@Throttle(DEFAULT_SCAN_INTERVAL)
async def async_update(self):
"""Update data using calls to VilfoClient library."""
try:
data = await self.hass.async_add_executor_job(self._fetch_data)
self.firmware_version = data["board_information"]["version"]
self.data[ATTR_BOOT_TIME] = data["board_information"]["bootTime"]
self.data[ATTR_LOAD] = data["load"]
self.available = True
except VilfoException as error:
if not self._unavailable_logged:
_LOGGER.error(
"Could not fetch data from %s, error: %s", self.host, error
)
self._unavailable_logged = True
self.available = False
return
if self.available and self._unavailable_logged:
_LOGGER.info("Vilfo Router %s is available again", self.host)
self._unavailable_logged = False
|
from unittest import TestCase
import numpy as np
from scattertext import stretch_0_to_1
class TestScalers(TestCase):
def test_stretch_0_to_1(self):
a = np.array([0.8, 0.5, 0., -0.2, -0.3, 0.4])
out = stretch_0_to_1(a)
np.testing.assert_almost_equal(out, np.array([1., 0.8125, 0.5, 0.16666667, 0., 0.75, ]))
np.testing.assert_almost_equal(a, np.array([0.8, 0.5, 0., -0.2, -0.3, 0.4]))
out = stretch_0_to_1(np.array([]))
np.testing.assert_almost_equal(out, np.array([]))
out = stretch_0_to_1(np.array([1, .5]))
np.testing.assert_almost_equal(out, np.array([1., 0.75]))
out = stretch_0_to_1(np.array([-1, -.5]))
np.testing.assert_almost_equal(out, np.array([0, 0.25]))
|
from rest_framework import renderers
from rest_framework import serializers
from rest_framework.utils import encoders
from shop.money import AbstractMoney
class JSONEncoder(encoders.JSONEncoder):
"""JSONEncoder subclass that knows how to encode Money."""
def default(self, obj):
if isinstance(obj, AbstractMoney):
return '{:f}'.format(obj)
return super().default(obj)
class JSONRenderer(renderers.JSONRenderer):
encoder_class = JSONEncoder
class MoneyField(serializers.Field):
"""Money objects are serialized into their readable notation."""
def __init__(self, *args, **kwargs):
kwargs.update(read_only=True)
super().__init__(*args, **kwargs)
def to_representation(self, obj):
return '{:f}'.format(obj)
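# Example (illustrative sketch): exposing a money amount on a DRF serializer;
# the serializer name and the field it maps to are assumptions.
#   class CartSummarySerializer(serializers.Serializer):
#       total = MoneyField()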
|
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from .common import setup_platform
from tests.common import load_fixture
async def test_entity_registry(hass, requests_mock):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, LIGHT_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("light.front_light")
assert entry.unique_id == 765432
entry = entity_registry.async_get("light.internal_light")
assert entry.unique_id == 345678
async def test_light_off_reports_correctly(hass, requests_mock):
"""Tests that the initial state of a device that should be off is correct."""
await setup_platform(hass, LIGHT_DOMAIN)
state = hass.states.get("light.front_light")
assert state.state == "off"
assert state.attributes.get("friendly_name") == "Front light"
async def test_light_on_reports_correctly(hass, requests_mock):
"""Tests that the initial state of a device that should be on is correct."""
await setup_platform(hass, LIGHT_DOMAIN)
state = hass.states.get("light.internal_light")
assert state.state == "on"
assert state.attributes.get("friendly_name") == "Internal light"
async def test_light_can_be_turned_on(hass, requests_mock):
"""Tests the light turns on correctly."""
await setup_platform(hass, LIGHT_DOMAIN)
# Mocks the response for turning a light on
requests_mock.put(
"https://api.ring.com/clients_api/doorbots/765432/floodlight_light_on",
text=load_fixture("ring_doorbot_siren_on_response.json"),
)
state = hass.states.get("light.front_light")
assert state.state == "off"
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.front_light"}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("light.front_light")
assert state.state == "on"
async def test_updates_work(hass, requests_mock):
"""Tests the update service works correctly."""
await setup_platform(hass, LIGHT_DOMAIN)
state = hass.states.get("light.front_light")
assert state.state == "off"
# Changes the return to indicate that the light is now on.
requests_mock.get(
"https://api.ring.com/clients_api/ring_devices",
text=load_fixture("ring_devices_updated.json"),
)
await hass.services.async_call("ring", "update", {}, blocking=True)
await hass.async_block_till_done()
state = hass.states.get("light.front_light")
assert state.state == "on"
|
import time
from absl import flags
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
flags.DEFINE_integer('k8s_get_retry_count', 18,
'Maximum number of waits for getting LoadBalancer external IP')
flags.DEFINE_integer('k8s_get_wait_interval', 10,
'Wait interval for getting LoadBalancer external IP')
def checkKubernetesFlags():
if not FLAGS.kubectl:
raise Exception('Please provide path to kubectl tool using --kubectl '
'flag. Exiting.')
if not FLAGS.kubeconfig:
raise Exception('Please provide path to kubeconfig using --kubeconfig '
'flag. Exiting.')
def CreateFromFile(file_name):
checkKubernetesFlags()
create_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'create',
'-f', file_name]
vm_util.IssueRetryableCommand(create_cmd)
def DeleteFromFile(file_name):
checkKubernetesFlags()
delete_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'delete',
'-f', file_name, '--ignore-not-found']
vm_util.IssueRetryableCommand(delete_cmd)
def DeleteAllFiles(file_list):
for file in file_list:
DeleteFromFile(file)
def CreateAllFiles(file_list):
for file in file_list:
CreateFromFile(file)
def Get(resource, resourceInstanceName, labelFilter, jsonSelector):
checkKubernetesFlags()
get_pod_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig,
'get', resource]
if len(resourceInstanceName) > 0:
get_pod_cmd.append(resourceInstanceName)
if len(labelFilter) > 0:
get_pod_cmd.append('-l ' + labelFilter)
get_pod_cmd.append('-ojsonpath={{{}}}'.format(jsonSelector))
stdout, stderr, _ = vm_util.IssueCommand(get_pod_cmd, suppress_warning=True,
raise_on_failure=False)
if len(stderr) > 0:
raise Exception("Error received from kubectl get: " + stderr)
return stdout
def GetWithWaitForContents(resource, resourceInstanceName, filter, jsonFilter):
ret = Get(resource, resourceInstanceName, filter, jsonFilter)
numWaitsLeft = FLAGS.k8s_get_retry_count
while len(ret) == 0 and numWaitsLeft > 0:
time.sleep(FLAGS.k8s_get_wait_interval)
ret = Get(resource, resourceInstanceName, filter, jsonFilter)
numWaitsLeft -= 1
return ret
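# Example (illustrative sketch): waiting for a Service's LoadBalancer external
# IP; the resource name and jsonpath selector below are assumptions, not part
# of this module.
#   external_ip = GetWithWaitForContents(
#       'svc', 'my-service', '', '.status.loadBalancer.ingress[0].ip')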
def CreateResource(resource_body):
with vm_util.NamedTemporaryFile(mode='w') as tf:
tf.write(resource_body)
tf.close()
CreateFromFile(tf.name)
def DeleteResource(resource_body):
  with vm_util.NamedTemporaryFile(mode='w') as tf:
tf.write(resource_body)
tf.close()
DeleteFromFile(tf.name)
|
from uuid import uuid4
from yandextank.plugins.OpenTSDBUploader.decoder import Decoder
class TestDecoder(object):
def test_metrics_cast(self):
test_uuid = str(uuid4())
tank_tag = 'test_tank_tag'
comment = 'test comment'
raw_metrics = {
'metric1': -123,
'metric2': -123.456,
'metric3': 123,
'metric4': 123.456,
'metric5': 0,
'metric6': -0.1,
'metric7': 0.1,
'metric8': 'down',
}
timestamp = 123456789
host = '127.0.0.1'
data = [{
'data': {
host: {
'comment': comment,
'metrics': raw_metrics
}
},
'timestamp': timestamp
}]
expected_metrics = {
'metric1': -123.0,
'metric2': -123.456,
'metric3': 123.0,
'metric4': 123.456,
'metric5': 0.0,
'metric6': -0.1,
'metric7': 0.1,
'metric8': 'down'
}
decoder = Decoder(tank_tag, test_uuid, {}, True, True)
result_points = decoder.decode_monitoring(data)
assert (len(result_points) == len(expected_metrics))
# check other props
for r_point in result_points:
assert (r_point['timestamp'] == timestamp)
assert (r_point['metric'] == 'monitoring')
assert (r_point['tags']['comment'] == comment)
assert (r_point['tags']['host'] == host)
assert (r_point['tags']['tank'] == tank_tag)
assert (r_point['tags']['uuid'] == test_uuid)
            field = r_point['tags']['field']
            assert field in expected_metrics
            assert isinstance(r_point['value'], type(expected_metrics[field]))
            assert r_point['value'] == expected_metrics[field]
|
from alarmdecoder.util import NoDeviceError
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.alarmdecoder import config_flow
from homeassistant.components.alarmdecoder.const import (
CONF_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED,
CONF_DEVICE_BAUD,
CONF_DEVICE_PATH,
CONF_RELAY_ADDR,
CONF_RELAY_CHAN,
CONF_ZONE_LOOP,
CONF_ZONE_NAME,
CONF_ZONE_NUMBER,
CONF_ZONE_RFID,
CONF_ZONE_TYPE,
DEFAULT_ARM_OPTIONS,
DEFAULT_ZONE_OPTIONS,
DOMAIN,
OPTIONS_ARM,
OPTIONS_ZONES,
PROTOCOL_SERIAL,
PROTOCOL_SOCKET,
)
from homeassistant.components.binary_sensor import DEVICE_CLASS_WINDOW
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PROTOCOL
from homeassistant.core import HomeAssistant
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.mark.parametrize(
"protocol,connection,title",
[
(
PROTOCOL_SOCKET,
{
CONF_HOST: "alarmdecoder123",
CONF_PORT: 10001,
},
"alarmdecoder123:10001",
),
(
PROTOCOL_SERIAL,
{
CONF_DEVICE_PATH: "/dev/ttyUSB123",
CONF_DEVICE_BAUD: 115000,
},
"/dev/ttyUSB123",
),
],
)
async def test_setups(hass: HomeAssistant, protocol, connection, title):
"""Test flow for setting up the available AlarmDecoder protocols."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PROTOCOL: protocol},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "protocol"
with patch("homeassistant.components.alarmdecoder.config_flow.AdExt.open"), patch(
"homeassistant.components.alarmdecoder.config_flow.AdExt.close"
), patch(
"homeassistant.components.alarmdecoder.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.alarmdecoder.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], connection
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == title
assert result["data"] == {
**connection,
CONF_PROTOCOL: protocol,
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_setup_connection_error(hass: HomeAssistant):
"""Test flow for setup with a connection error."""
port = 1001
host = "alarmdecoder"
protocol = PROTOCOL_SOCKET
connection_settings = {CONF_HOST: host, CONF_PORT: port}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PROTOCOL: protocol},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "protocol"
with patch(
"homeassistant.components.alarmdecoder.config_flow.AdExt.open",
side_effect=NoDeviceError,
), patch("homeassistant.components.alarmdecoder.config_flow.AdExt.close"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], connection_settings
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_options_arm_flow(hass: HomeAssistant):
"""Test arm options flow."""
user_input = {
CONF_ALT_NIGHT_MODE: True,
CONF_AUTO_BYPASS: True,
CONF_CODE_ARM_REQUIRED: True,
}
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"edit_selection": "Arming Settings"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "arm_settings"
with patch(
"homeassistant.components.alarmdecoder.async_setup_entry", return_value=True
):
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=user_input,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert entry.options == {
OPTIONS_ARM: user_input,
OPTIONS_ZONES: DEFAULT_ZONE_OPTIONS,
}
async def test_options_zone_flow(hass: HomeAssistant):
"""Test options flow for adding/deleting zones."""
zone_number = "2"
zone_settings = {CONF_ZONE_NAME: "Front Entry", CONF_ZONE_TYPE: DEVICE_CLASS_WINDOW}
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"edit_selection": "Zones"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_select"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_ZONE_NUMBER: zone_number},
)
with patch(
"homeassistant.components.alarmdecoder.async_setup_entry", return_value=True
):
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=zone_settings,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert entry.options == {
OPTIONS_ARM: DEFAULT_ARM_OPTIONS,
OPTIONS_ZONES: {zone_number: zone_settings},
}
# Make sure zone can be removed...
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"edit_selection": "Zones"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_select"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_ZONE_NUMBER: zone_number},
)
with patch(
"homeassistant.components.alarmdecoder.async_setup_entry", return_value=True
):
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert entry.options == {
OPTIONS_ARM: DEFAULT_ARM_OPTIONS,
OPTIONS_ZONES: {},
}
async def test_options_zone_flow_validation(hass: HomeAssistant):
"""Test input validation for zone options flow."""
zone_number = "2"
zone_settings = {CONF_ZONE_NAME: "Front Entry", CONF_ZONE_TYPE: DEVICE_CLASS_WINDOW}
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"edit_selection": "Zones"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_select"
# Zone Number must be int
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_ZONE_NUMBER: "asd"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_select"
assert result["errors"] == {CONF_ZONE_NUMBER: "int"}
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_ZONE_NUMBER: zone_number},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
# CONF_RELAY_ADDR & CONF_RELAY_CHAN are inclusive
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**zone_settings, CONF_RELAY_ADDR: "1"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
assert result["errors"] == {"base": "relay_inclusive"}
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**zone_settings, CONF_RELAY_CHAN: "1"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
assert result["errors"] == {"base": "relay_inclusive"}
# CONF_RELAY_ADDR, CONF_RELAY_CHAN must be int
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**zone_settings, CONF_RELAY_ADDR: "abc", CONF_RELAY_CHAN: "abc"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
assert result["errors"] == {
CONF_RELAY_ADDR: "int",
CONF_RELAY_CHAN: "int",
}
# CONF_ZONE_LOOP depends on CONF_ZONE_RFID
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**zone_settings, CONF_ZONE_LOOP: "1"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
assert result["errors"] == {CONF_ZONE_LOOP: "loop_rfid"}
# CONF_ZONE_LOOP must be int
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**zone_settings, CONF_ZONE_RFID: "rfid123", CONF_ZONE_LOOP: "ab"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
assert result["errors"] == {CONF_ZONE_LOOP: "int"}
# CONF_ZONE_LOOP must be between [1,4]
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={**zone_settings, CONF_ZONE_RFID: "rfid123", CONF_ZONE_LOOP: "5"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "zone_details"
assert result["errors"] == {CONF_ZONE_LOOP: "loop_range"}
# All valid settings
with patch(
"homeassistant.components.alarmdecoder.async_setup_entry", return_value=True
):
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
**zone_settings,
CONF_ZONE_RFID: "rfid123",
CONF_ZONE_LOOP: "2",
CONF_RELAY_ADDR: "12",
CONF_RELAY_CHAN: "1",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert entry.options == {
OPTIONS_ARM: DEFAULT_ARM_OPTIONS,
OPTIONS_ZONES: {
zone_number: {
**zone_settings,
CONF_ZONE_RFID: "rfid123",
CONF_ZONE_LOOP: 2,
CONF_RELAY_ADDR: 12,
CONF_RELAY_CHAN: 1,
}
},
}
@pytest.mark.parametrize(
"protocol,connection",
[
(
PROTOCOL_SOCKET,
{
CONF_HOST: "alarmdecoder123",
CONF_PORT: 10001,
},
),
(
PROTOCOL_SERIAL,
{
CONF_DEVICE_PATH: "/dev/ttyUSB123",
CONF_DEVICE_BAUD: 115000,
},
),
],
)
async def test_one_device_allowed(hass, protocol, connection):
"""Test that only one AlarmDecoder device is allowed."""
flow = config_flow.AlarmDecoderFlowHandler()
flow.hass = hass
MockConfigEntry(
domain=DOMAIN,
data=connection,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PROTOCOL: protocol},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "protocol"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], connection
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
|
from django.views.generic.list import ListView
from zinnia.models.entry import Entry
from zinnia.settings import PAGINATION
from zinnia.views.mixins.prefetch_related import PrefetchCategoriesAuthorsMixin
class BaseEntryChannel(object):
"""
Mixin for displaying a custom selection of entries
    based on a search query, useful for building SEO/SMO pages
    aggregating entries around a theme or for building a
    custom homepage.
"""
query = ''
def get_queryset(self):
"""
Override the get_queryset method to build
        the queryset with entries matching the query.
"""
return Entry.published.search(self.query)
def get_context_data(self, **kwargs):
"""
Add query in context.
"""
context = super(BaseEntryChannel, self).get_context_data(**kwargs)
context.update({'query': self.query})
return context
class EntryChannel(PrefetchCategoriesAuthorsMixin,
BaseEntryChannel,
ListView):
"""
    Channel view for entries combining these mixins:
    - PrefetchCategoriesAuthorsMixin to prefetch related Categories
      and Authors belonging to the entry list.
- BaseEntryChannel to provide the behavior of the view.
- ListView to implement the ListView and template name resolution.
"""
paginate_by = PAGINATION
|
import binascii
import unicodedata
import base64
import cherrypy
from cherrypy._cpcompat import ntou, tonative
__author__ = 'visteya'
__date__ = 'April 2009'
def checkpassword_dict(user_password_dict):
"""Returns a checkpassword function which checks credentials
against a dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, use
checkpassword_dict(my_credentials_dict) as the value for the
checkpassword argument to basic_auth().
"""
def checkpassword(realm, user, password):
p = user_password_dict.get(user)
return p and p == password or False
return checkpassword
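# Example (illustrative sketch): wiring checkpassword_dict into the auth_basic
# tool via application config; the realm, credentials and Root application
# class are assumptions, not part of this module.
#   users = {'jon': 'secret'}
#   conf = {'/': {
#       'tools.auth_basic.on': True,
#       'tools.auth_basic.realm': 'localhost',
#       'tools.auth_basic.checkpassword': checkpassword_dict(users),
#   }}
#   cherrypy.quickstart(Root(), '/', conf)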
def basic_auth(realm, checkpassword, debug=False, accept_charset='utf-8'):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in :rfc:`2617`
and :rfc:`7617`.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
realm
A string containing the authentication realm.
checkpassword
A callable which checks the authentication credentials.
        Its signature is checkpassword(realm, username, password), where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
fallback_charset = 'ISO-8859-1'
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
# split() error, base64.decodestring() error
msg = 'Bad Request'
with cherrypy.HTTPError.handle((ValueError, binascii.Error), 400, msg):
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
charsets = accept_charset, fallback_charset
decoded_params = base64.b64decode(params.encode('ascii'))
decoded_params = _try_decode(decoded_params, charsets)
decoded_params = ntou(decoded_params)
decoded_params = unicodedata.normalize('NFC', decoded_params)
decoded_params = tonative(decoded_params)
username, password = decoded_params.split(':', 1)
if checkpassword(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
charset = accept_charset.upper()
charset_declaration = (
(', charset="%s"' % charset)
if charset != fallback_charset
else ''
)
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = (
'Basic realm="%s"%s' % (realm, charset_declaration)
)
raise cherrypy.HTTPError(
401, 'You are not authorized to access that resource')
def _try_decode(subject, charsets):
for charset in charsets[:-1]:
try:
return tonative(subject, charset)
except ValueError:
pass
return tonative(subject, charsets[-1])
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('instance-2')
def test_distro(host):
f = host.file('/etc/redhat-release')
assert f.is_file
def test_cpus(host):
cpus = host.ansible("setup")['ansible_facts']['ansible_processor_vcpus']
assert 2 == int(cpus)
def test_memory(host):
total_memory = host.ansible(
"setup")['ansible_facts']['ansible_memtotal_mb']
assert (1024 + 1024 / 2) <= int(total_memory) <= 2048
def test_does_not_have_shared_directory(host):
f = host.file('/vagrant')
assert not f.is_directory
def test_internal_interface(host):
assert '192.168.0.2' in host.interface('eth2').addresses
|
import unittest
import mock
from perfkitbenchmarker.linux_packages import azure_cli
class AzureCliTest(unittest.TestCase):
def setUp(self):
self.vm = mock.Mock()
def assertCallArgsEqual(self, call_args_singles, mock_method):
"""Compare the list of single arguments to all mocked calls in mock_method.
Mock calls can be tested like this:
(('x',),) == call('x')
As all the mocked method calls have one single argument (ie 'x') they need
to be converted into the tuple of positional arguments tuple that mock
expects.
Args:
call_args_singles: List of single arguments sent to the mock_method,
ie ['x', 'y'] is for when mock_method was called twice: once with
x and then with y.
mock_method: Method that was mocked and called with call_args_singles.
"""
# convert from ['a', 'b'] into [(('a',),), (('b',),)]
expected = [((arg,),) for arg in call_args_singles]
self.assertEqual(expected, mock_method.call_args_list)
def assertInstallPackageCommandsEqual(self, expected_packages):
# tests the calls to vm.InstallPackages(str)
self.assertCallArgsEqual(expected_packages, self.vm.InstallPackages)
def assertRemoteCommandsEqual(self, expected_cmds):
# tests the calls to vm.RemoteCommand(str)
self.assertCallArgsEqual(expected_cmds, self.vm.RemoteCommand)
def assertVmInstallCommandsEqual(self, expected_cmds):
# tests the calls to vm.Install(str)
self.assertCallArgsEqual(expected_cmds, self.vm.Install)
def assertOnlyKnownMethodsCalled(self, *known_methods):
# this test will fail if vm.foo() was called and "foo" was not in the
# known methods
found_methods = set()
for mock_call in self.vm.mock_calls:
found_methods.add(mock_call[0])
self.assertEqual(set(known_methods), found_methods)
def testShowHowMockingWorks(self):
# show how assertCallArgsEqual works by calling a method twice with two
# different strings
cmds = ['echo', 'bash']
for cmd in cmds:
self.vm.foo(cmd)
self.assertCallArgsEqual(cmds, self.vm.foo)
def testYumInstall(self):
azure_cli.YumInstall(self.vm)
self.assertRemoteCommandsEqual([
'echo "[azure-cli]\n'
'name=Azure CLI\n'
'baseurl=https://packages.microsoft.com/yumrepos/azure-cli\n'
'enabled=1\n'
'gpgcheck=1\n'
'gpgkey=https://packages.microsoft.com/keys/microsoft.asc\n"'
' | sudo tee /etc/yum.repos.d/azure-cli.repo',
'sudo rpm --import https://packages.microsoft.com/keys/microsoft.asc'
])
self.assertInstallPackageCommandsEqual(['azure-cli'])
self.assertOnlyKnownMethodsCalled('RemoteCommand', 'InstallPackages')
def testAptInstall(self):
self.vm.RemoteCommand.return_value = ('wheezy', '')
azure_cli.AptInstall(self.vm)
self.assertRemoteCommandsEqual([
'lsb_release -cs', 'echo "deb [arch=amd64] '
'https://packages.microsoft.com/repos/azure-cli/ wheezy main" | sudo '
'tee /etc/apt/sources.list.d/azure-cli.list',
'curl -L https://packages.microsoft.com/keys/microsoft.asc | sudo '
'apt-key add -',
'sudo apt-get update'
])
self.assertInstallPackageCommandsEqual(['apt-transport-https', 'azure-cli'])
self.assertVmInstallCommandsEqual(['python', 'lsb_release', 'curl'])
self.assertOnlyKnownMethodsCalled('RemoteCommand', 'Install',
'InstallPackages')
if __name__ == '__main__':
unittest.main()
|
_ENABLED = True
try:
from Foundation import NSUserNotification, NSUserNotificationCenter
except ImportError:
_ENABLED = False
import datetime
import os
import sys
import traceback
import Foundation
from . import _internal
from . import compat
def on_notification(f):
"""Decorator for registering a function to serve as a "notification center"
for the application. This function will receive the data associated with an
incoming macOS notification sent using :func:`rumps.notification`. This
occurs whenever the user clicks on a notification for this application in
the macOS Notification Center.
.. code-block:: python
@rumps.notifications
def notification_center(info):
if 'unix' in info:
                print('i know this')
"""
on_notification.__dict__['*'] = f
return f
def _gather_info_issue_9(): # pragma: no cover
missing_plist = False
missing_bundle_ident = False
info_plist_path = os.path.join(os.path.dirname(sys.executable), 'Info.plist')
try:
with open(info_plist_path) as f:
import plistlib
try:
load_plist = plistlib.load
except AttributeError:
load_plist = plistlib.readPlist
try:
load_plist(f)['CFBundleIdentifier']
except Exception:
missing_bundle_ident = True
except IOError as e:
import errno
if e.errno == errno.ENOENT: # No such file or directory
missing_plist = True
info = '\n\n'
if missing_plist:
info += 'In this case there is no file at "%(info_plist_path)s"'
info += '\n\n'
confidence = 'should'
elif missing_bundle_ident:
info += 'In this case the file at "%(info_plist_path)s" does not contain a value for "CFBundleIdentifier"'
info += '\n\n'
confidence = 'should'
else:
confidence = 'may'
info += 'Running the following command %(confidence)s fix the issue:\n'
info += '/usr/libexec/PlistBuddy -c \'Add :CFBundleIdentifier string "rumps"\' %(info_plist_path)s\n'
return info % {'info_plist_path': info_plist_path, 'confidence': confidence}
def _default_user_notification_center():
notification_center = NSUserNotificationCenter.defaultUserNotificationCenter()
if notification_center is None: # pragma: no cover
info = (
'Failed to setup the notification center. This issue occurs when the "Info.plist" file '
'cannot be found or is missing "CFBundleIdentifier".'
)
try:
info += _gather_info_issue_9()
except Exception:
pass
raise RuntimeError(info)
else:
return notification_center
def _init_nsapp(nsapp):
if _ENABLED:
try:
notification_center = _default_user_notification_center()
except RuntimeError:
pass
else:
notification_center.setDelegate_(nsapp)
@_internal.guard_unexpected_errors
def _clicked(ns_user_notification_center, ns_user_notification):
from . import rumps
ns_user_notification_center.removeDeliveredNotification_(ns_user_notification)
ns_dict = ns_user_notification.userInfo()
if ns_dict is None:
data = None
else:
dumped = ns_dict['value']
app = getattr(rumps.App, '*app_instance', rumps.App)
try:
data = app.serializer.loads(dumped)
except Exception:
traceback.print_exc()
return
try:
notification_handler = getattr(on_notification, '*')
except AttributeError:
# notification center function not specified, no error but log warning
rumps._log(
'WARNING: notification received but no function specified for '
'answering it; use @notifications decorator to register a function.'
)
else:
notification = Notification(ns_user_notification, data)
try:
_internal.call_as_function_or_method(notification_handler, notification)
except Exception:
traceback.print_exc()
def notify(title, subtitle, message, data=None, sound=True,
action_button=None, other_button=None, has_reply_button=False,
icon=None):
"""Send a notification to Notification Center (OS X 10.8+). If running on a
version of macOS that does not support notifications, a ``RuntimeError``
will be raised. Apple says,
"The userInfo content must be of reasonable serialized size (less than
1k) or an exception will be thrown."
So don't do that!
:param title: text in a larger font.
:param subtitle: text in a smaller font below the `title`.
:param message: text representing the body of the notification below the
`subtitle`.
:param data: will be passed to the application's "notification center" (see
:func:`rumps.notifications`) when this notification is clicked.
:param sound: whether the notification should make a noise when it arrives.
:param action_button: title for the action button.
:param other_button: title for the other button.
:param has_reply_button: whether or not the notification has a reply button.
:param icon: the filename of an image for the notification's icon, will
replace the default.
"""
from . import rumps
if not _ENABLED:
raise RuntimeError('OS X 10.8+ is required to send notifications')
_internal.require_string_or_none(title, subtitle, message)
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(message)
if data is not None:
app = getattr(rumps.App, '*app_instance', rumps.App)
dumped = app.serializer.dumps(data)
objc_string = _internal.string_to_objc(dumped)
ns_dict = Foundation.NSMutableDictionary.alloc().init()
ns_dict.setDictionary_({'value': objc_string})
notification.setUserInfo_(ns_dict)
if icon is not None:
notification.set_identityImage_(rumps._nsimage_from_file(icon))
if sound:
notification.setSoundName_("NSUserNotificationDefaultSoundName")
if action_button:
notification.setActionButtonTitle_(action_button)
notification.set_showsButtons_(True)
if other_button:
notification.setOtherButtonTitle_(other_button)
notification.set_showsButtons_(True)
if has_reply_button:
notification.setHasReplyButton_(True)
notification.setDeliveryDate_(Foundation.NSDate.dateWithTimeInterval_sinceDate_(0, Foundation.NSDate.date()))
notification_center = _default_user_notification_center()
notification_center.scheduleNotification_(notification)
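# Example usage (illustrative; the titles, message and data are arbitrary):
#   notify('Backup finished', 'nightly job', 'All files copied',
#          data={'unix': 1}, sound=False)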
class Notification(compat.collections_abc.Mapping):
def __init__(self, ns_user_notification, data):
self._ns = ns_user_notification
self._data = data
def __repr__(self):
return '<{0}: [data: {1}]>'.format(type(self).__name__, repr(self._data))
@property
def title(self):
return compat.text_type(self._ns.title())
@property
def subtitle(self):
return compat.text_type(self._ns.subtitle())
@property
def message(self):
return compat.text_type(self._ns.informativeText())
@property
def activation_type(self):
activation_type = self._ns.activationType()
if activation_type == 1:
return 'contents_clicked'
elif activation_type == 2:
return 'action_button_clicked'
elif activation_type == 3:
return 'replied'
elif activation_type == 4:
return 'additional_action_clicked'
@property
def delivered_at(self):
ns_date = self._ns.actualDeliveryDate()
seconds = ns_date.timeIntervalSince1970()
dt = datetime.datetime.fromtimestamp(seconds)
return dt
@property
def response(self):
ns_attributed_string = self._ns.response()
if ns_attributed_string is None:
return None
ns_string = ns_attributed_string.string()
return compat.text_type(ns_string)
@property
def data(self):
return self._data
def _check_if_mapping(self):
if not isinstance(self._data, compat.collections_abc.Mapping):
raise TypeError(
'notification cannot be used as a mapping when data is not a '
'mapping'
)
def __getitem__(self, key):
self._check_if_mapping()
return self._data[key]
def __iter__(self):
self._check_if_mapping()
return iter(self._data)
def __len__(self):
self._check_if_mapping()
return len(self._data)
|
from homeassistant.components import cover, tellduslive
from homeassistant.components.cover import CoverEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .entry import TelldusLiveEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tellduslive sensors dynamically."""
async def async_discover_cover(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveCover(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(cover.DOMAIN, tellduslive.DOMAIN),
async_discover_cover,
)
class TelldusLiveCover(TelldusLiveEntity, CoverEntity):
"""Representation of a cover."""
@property
def is_closed(self):
"""Return the current position of the cover."""
return self.device.is_down
def close_cover(self, **kwargs):
"""Close the cover."""
self.device.down()
self._update_callback()
def open_cover(self, **kwargs):
"""Open the cover."""
self.device.up()
self._update_callback()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.device.stop()
self._update_callback()
|
import json
import sys
import numpy as np
from scattertext import AsianNLP
from scattertext import WhitespaceNLP
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, WhitespaceNLP.Doc):
return repr(obj)
elif isinstance(obj, AsianNLP.Doc):
return repr(obj)
        elif 'spacy' in sys.modules:
            import spacy
            if isinstance(obj, spacy.tokens.doc.Doc):
                return repr(obj)
            # fall back to the default encoder for non-spacy objects
            return super(MyEncoder, self).default(obj)
        else:
            return super(MyEncoder, self).default(obj)
class VizDataAdapter:
def __init__(self, words_dict):
self._word_dict = words_dict
@property
def word_dict(self):
return self._word_dict
@word_dict.setter
def word_dict(self, val):
self._word_dict = val
@word_dict.deleter
def word_dict(self):
del self._word_dict
def to_javascript(self, function_name = 'getDataAndInfo'):
return 'function '+function_name+'() { return' + self.to_json() + '; }'
def to_json(self):
word_dict_json = json.dumps(self.word_dict, cls=MyEncoder)
return word_dict_json
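# Example usage (illustrative sketch; the dictionary contents are arbitrary):
#   adapter = VizDataAdapter({'data': [], 'info': {'category_name': 'demo'}})
#   js = adapter.to_javascript()  # "function getDataAndInfo() { return{...}; }"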
|
import re
from functools import reduce
from itertools import islice
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.db.models.functions import Lower
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils.translation import gettext_lazy
from whoosh.analysis import LanguageAnalyzer, NgramAnalyzer, SimpleAnalyzer
from whoosh.analysis.filters import StopFilter
from whoosh.lang import NoStopWords
from weblate.checks.same import strip_string
from weblate.formats.auto import AutodetectFormat
from weblate.lang.models import Language, get_default_lang
from weblate.trans.defines import GLOSSARY_LENGTH, PROJECT_NAME_LENGTH
from weblate.trans.models.component import Component
from weblate.trans.models.project import Project
from weblate.utils.colors import COLOR_CHOICES
from weblate.utils.db import re_escape
from weblate.utils.decorators import disable_for_loaddata
from weblate.utils.errors import report_error
SPLIT_RE = re.compile(r"[\s,.:!?]+", re.UNICODE)
class GlossaryQuerySet(models.QuerySet):
def for_project(self, project):
return self.filter(Q(project=project) | Q(links=project))
def filter_access(self, user):
if user.is_superuser:
return self
return self.filter(project_id__in=user.allowed_project_ids)
class Glossary(models.Model):
project = models.ForeignKey(Project, on_delete=models.deletion.CASCADE)
links = models.ManyToManyField(
Project,
verbose_name=gettext_lazy("Additional projects"),
blank=True,
related_name="linked_glossaries",
help_text=gettext_lazy(
"Choose additional projects where this glossary can be used."
),
)
name = models.CharField(
verbose_name=gettext_lazy("Glossary name"),
max_length=PROJECT_NAME_LENGTH,
unique=True,
)
color = models.CharField(
verbose_name=gettext_lazy("Color"),
max_length=30,
choices=COLOR_CHOICES,
blank=False,
default=None,
)
source_language = models.ForeignKey(
Language,
verbose_name=gettext_lazy("Source language"),
default=get_default_lang,
on_delete=models.deletion.CASCADE,
)
objects = GlossaryQuerySet.as_manager()
class Meta:
verbose_name = "glossary"
verbose_name_plural = "glossaries"
def __str__(self):
return self.name
class TermManager(models.Manager):
# pylint: disable=no-init
def upload(self, request, glossary, language, fileobj, method):
"""Handle glossary upload."""
from weblate.trans.models.change import Change
store = AutodetectFormat.parse(fileobj)
ret = 0
# process all units
for _unused, unit in store.iterate_merge(False):
source = unit.source
target = unit.target
# Ignore too long terms
if len(source) > 190 or len(target) > 190:
continue
# Get object
try:
term, created = self.get_or_create(
glossary=glossary,
language=language,
source=source,
defaults={"target": target},
)
except Term.MultipleObjectsReturned:
term = self.filter(glossary=glossary, language=language, source=source)[
0
]
created = False
# Already existing entry found
if not created:
# Same as current -> ignore
if target == term.target:
continue
if method == "add":
# Add term
self.create(
user=request.user,
action=Change.ACTION_DICTIONARY_UPLOAD,
glossary=glossary,
language=language,
source=source,
target=target,
)
elif method == "overwrite":
# Update term
term.target = target
term.save()
ret += 1
return ret
def create(self, user, **kwargs):
"""Create new glossary object."""
from weblate.trans.models.change import Change
action = kwargs.pop("action", Change.ACTION_DICTIONARY_NEW)
created = super().create(**kwargs)
Change.objects.create(
action=action, glossary_term=created, user=user, target=created.target
)
return created
class TermQuerySet(models.QuerySet):
# pylint: disable=no-init
def for_project(self, project, source_language=None):
glossaries = Glossary.objects.for_project(project)
if source_language is not None:
glossaries = glossaries.filter(source_language=source_language)
return self.filter(glossary__in=glossaries)
def get_terms(self, unit):
"""Return list of term pairs for an unit."""
words = set()
source_language = unit.translation.component.source_language
# Filters stop words for a language
try:
stopfilter = StopFilter(lang=source_language.base_code)
except NoStopWords:
stopfilter = StopFilter()
# Prepare analyzers
# - basic simple analyzer to split on non-word chars
# - simple analyzer just splits words based on regexp to catch in word dashes
# - language analyzer if available (it is for English)
analyzers = [
SimpleAnalyzer() | stopfilter,
SimpleAnalyzer(expression=SPLIT_RE, gaps=True) | stopfilter,
LanguageAnalyzer(source_language.base_code),
]
# Add ngram analyzer for languages like Chinese or Japanese
if source_language.uses_ngram():
analyzers.append(NgramAnalyzer(4))
# Extract words from all plurals and from context
flags = unit.all_flags
for text in unit.get_source_plurals() + [unit.context]:
text = strip_string(text, flags).lower()
for analyzer in analyzers:
# Some Whoosh analyzers break on unicode
try:
words.update(token.text for token in analyzer(text))
except (UnicodeDecodeError, IndexError):
report_error(cause="Term words parsing")
if len(words) > 1000:
break
if len(words) > 1000:
break
if "" in words:
words.remove("")
if not words:
# No extracted words, no glossary
return self.none()
# Build the query for fetching the words
# We want case insensitive lookup
words = islice(words, 1000)
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql":
# Use regex as that is utilizing pg_trgm index
results = self.filter(
source__iregex=r"(^|[ \t\n\r\f\v])({})($|[ \t\n\r\f\v])".format(
"|".join(re_escape(word) for word in words)
),
)
else:
# MySQL
results = self.filter(
reduce(
lambda x, y: x | y,
(models.Q(source__search=word) for word in words),
),
)
return (
results.for_project(unit.translation.component.project, source_language)
.filter(language=unit.translation.language)
.order()
)
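    # Note (illustrative): for the words {"foo", "bar"} the PostgreSQL branch
    # above filters with the regex
    #   (^|[ \t\n\r\f\v])(foo|bar)($|[ \t\n\r\f\v])
    # so terms are matched case-insensitively on whitespace-delimited word
    # boundaries, while the fallback branch ORs per-word source__search lookups.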
def order(self):
return self.order_by(Lower("source"))
class Term(models.Model):
glossary = models.ForeignKey(
Glossary,
on_delete=models.deletion.CASCADE,
verbose_name=gettext_lazy("Glossary"),
)
language = models.ForeignKey(Language, on_delete=models.deletion.CASCADE)
source = models.CharField(
max_length=GLOSSARY_LENGTH,
db_index=True,
verbose_name=gettext_lazy("Source"),
)
target = models.CharField(
max_length=GLOSSARY_LENGTH,
verbose_name=gettext_lazy("Translation"),
)
objects = TermManager.from_queryset(TermQuerySet)()
class Meta:
verbose_name = "glossary term"
verbose_name_plural = "glossary terms"
def __str__(self):
return f"{self.glossary}/{self.language}: {self.source} -> {self.target}"
def get_absolute_url(self):
return reverse("edit_glossary", kwargs={"pk": self.id})
def get_parent_url(self):
return reverse(
"show_glossary",
kwargs={"project": self.glossary.project.slug, "lang": self.language.code},
)
def edit(self, request, source, target, glossary):
"""Edit term in a glossary."""
from weblate.trans.models.change import Change
self.source = source
self.target = target
self.glossary = glossary
self.save()
Change.objects.create(
action=Change.ACTION_DICTIONARY_EDIT,
glossary_term=self,
user=request.user,
target=self.target,
)
def check_perm(self, user, perm):
return user.has_perm(perm, self.glossary.project) or any(
user.has_perm(perm, prj) for prj in self.glossary.links.iterator()
)
@receiver(post_save, sender=Component)
@disable_for_loaddata
def create_glossary(sender, instance, created, **kwargs):
"""Creates glossary on project creation."""
project = instance.project
glossaries = {
glossary.source_language_id: glossary
for glossary in project.glossary_set.iterator()
}
# Does the glossary for source language exist?
if instance.source_language_id in glossaries:
return
if glossaries:
base_name = f"{project}: {instance.source_language.name}"
else:
base_name = project.name
# Find unused name
name = base_name
suffix = 0
while Glossary.objects.filter(name=name).exists():
suffix += 1
name = f"{name} ({suffix})"
project.glossary_set.create(
name=name,
color="silver",
source_language=instance.source_language,
)
|
import typing
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from keras.layers import Permute
from keras.layers import Reshape
from keras import activations
from keras import initializers
class SpatialGRU(Layer):
"""
Spatial GRU layer.
:param units: Number of SpatialGRU units.
:param activation: Activation function to use. Default:
hyperbolic tangent (`tanh`). If you pass `None`, no
activation is applied (ie. "linear" activation: `a(x) = x`).
:param recurrent_activation: Activation function to use for
the recurrent step. Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied (ie. "linear"
activation: `a(x) = x`).
:param kernel_initializer: Initializer for the `kernel` weights
matrix, used for the linear transformation of the inputs.
:param recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the
recurrent state.
:param direction: Scanning direction. `lt` (i.e., left top)
indicates the scanning from left top to right bottom, and
`rb` (i.e., right bottom) indicates the scanning from
right bottom to left top.
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.SpatialGRU(units=10,
... direction='lt')
>>> num_batch, channel, left_len, right_len = 5, 5, 3, 2
>>> layer.build([num_batch, channel, left_len, right_len])
"""
def __init__(
self,
units: int = 10,
activation: str = 'tanh',
recurrent_activation: str = 'sigmoid',
kernel_initializer: str = 'glorot_uniform',
recurrent_initializer: str = 'orthogonal',
direction: str = 'lt',
**kwargs
):
""":class:`SpatialGRU` constructor."""
super().__init__(**kwargs)
self._units = units
self._activation = activations.get(activation)
self._recurrent_activation = activations.get(recurrent_activation)
self._kernel_initializer = initializers.get(kernel_initializer)
self._recurrent_initializer = initializers.get(recurrent_initializer)
self._direction = direction
def build(self, input_shape: typing.Any):
"""
Build the layer.
:param input_shape: the shapes of the input tensors.
"""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# L = `input_left` sequence length
# R = `input_right` sequence length
# C = number of channels
# U = number of units
# input_shape = [B, C, L, R]
self._batch_size = input_shape[0]
self._channel = input_shape[1]
self._input_dim = self._channel + 3 * self._units
self._text1_maxlen = input_shape[2]
self._text2_maxlen = input_shape[3]
self._recurrent_step = self._text1_maxlen * self._text2_maxlen
# W = [3*U+C, 7*U]
self._W = self.add_weight(
name='W',
shape=(self._input_dim, self._units * 7),
initializer=self._kernel_initializer,
trainable=True
)
# U = [3*U, U]
self._U = self.add_weight(
name='U',
shape=(self._units * 3, self._units),
initializer=self._recurrent_initializer,
trainable=True
)
# bias = [8*U,]
self._bias = self.add_weight(
name='bias',
shape=(self._units * 8,),
initializer='zeros',
trainable=True
)
        # w_r (reset gate weights for the concatenated input): [3*U+C, 3*U]
        self._wr = self._W[:, :self._units * 3]
        # b_r (reset gate bias): [3*U,]
        self._br = self._bias[:self._units * 3]
        # w_z (update gate weights for zi, zl, zt, zd): [3*U+C, 4*U]
        self._wz = self._W[:, self._units * 3: self._units * 7]
        # b_z (update gate bias): [4*U,]
        self._bz = self._bias[self._units * 3: self._units * 7]
# w_ij = [C, U]
self._w_ij = self.add_weight(
name='W_ij',
shape=(self._channel, self._units),
initializer=self._recurrent_initializer,
trainable=True
)
        # b_ij = [U,]
self._b_ij = self._bias[self._units * 7:]
super(SpatialGRU, self).build(input_shape)
def softmax_by_row(self, z: typing.Any) -> tuple:
"""Conduct softmax on each dimension across the four gates."""
# z_transform: [B, U, 4]
z_transform = Permute((2, 1))(Reshape((4, self._units))(z))
size = [-1, 1, -1]
# Perform softmax on each slice
for i in range(0, self._units):
begin = [0, i, 0]
# z_slice: [B, 1, 4]
z_slice = tf.slice(z_transform, begin, size)
if i == 0:
z_s = tf.nn.softmax(z_slice)
else:
z_s = tf.concat([z_s, tf.nn.softmax(z_slice)], 1)
# zi, zl, zt, zd: [B, U]
zi, zl, zt, zd = tf.unstack(z_s, axis=2)
return zi, zl, zt, zd
def calculate_recurrent_unit(
self,
inputs: typing.Any,
states: typing.Any,
step: int,
h: typing.Any,
) -> tuple:
"""
Calculate recurrent unit.
:param inputs: A TensorArray which contains interaction
between left text and right text.
:param states: A TensorArray which stores the hidden state
of every step.
:param step: Recurrent step.
:param h: Hidden state from last operation.
"""
# Get index i, j
i = tf.math.floordiv(step, tf.constant(self._text2_maxlen))
j = tf.math.mod(step, tf.constant(self._text2_maxlen))
# Get hidden state h_diag, h_top, h_left
# h_diag, h_top, h_left = [B, U]
h_diag = states.read(i * (self._text2_maxlen + 1) + j)
h_top = states.read(i * (self._text2_maxlen + 1) + j + 1)
h_left = states.read((i + 1) * (self._text2_maxlen + 1) + j)
# Get interaction between word i, j: s_ij
# s_ij = [B, C]
s_ij = inputs.read(step)
# Concatenate h_top, h_left, h_diag, s_ij
# q = [B, 3*U+C]
q = tf.concat([tf.concat([h_top, h_left], 1),
tf.concat([h_diag, s_ij], 1)], 1)
# Calculate reset gate
# r = [B, 3*U]
r = self._recurrent_activation(
self._time_distributed_dense(self._wr, q, self._br))
# Calculate updating gate
# z: [B, 4*U]
z = self._time_distributed_dense(self._wz, q, self._bz)
# Perform softmax
# zi, zl, zt, zd: [B, U]
zi, zl, zt, zd = self.softmax_by_row(z)
# Get h_ij_
# h_ij_ = [B, U]
h_ij_l = self._time_distributed_dense(self._w_ij, s_ij, self._b_ij)
h_ij_r = K.dot(r * (tf.concat([h_left, h_top, h_diag], 1)), self._U)
h_ij_ = self._activation(h_ij_l + h_ij_r)
# Calculate h_ij
# h_ij = [B, U]
h_ij = zl * h_left + zt * h_top + zd * h_diag + zi * h_ij_
# Write h_ij to states
states = states.write(((i + 1) * (self._text2_maxlen + 1) + j + 1),
h_ij)
h_ij.set_shape(h_top.get_shape())
return inputs, states, step + 1, h_ij
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of SpatialGRU.
:param inputs: input tensors.
"""
batch_size = tf.shape(inputs)[0]
# h0 = [B, U]
self._bounder_state_h0 = tf.zeros([batch_size, self._units])
# input_x = [L, R, B, C]
input_x = tf.transpose(inputs, [2, 3, 0, 1])
if self._direction == 'rb':
# input_x: [R, L, B, C]
input_x = tf.reverse(input_x, [0, 1])
elif self._direction != 'lt':
raise ValueError(f"Invalid direction. "
f"`{self._direction}` received. "
f"Must be in `lt`, `rb`.")
# input_x = [L*R*B, C]
input_x = tf.reshape(input_x, [-1, self._channel])
# input_x = L*R * [B, C]
input_x = tf.split(
axis=0,
num_or_size_splits=self._text1_maxlen * self._text2_maxlen,
value=input_x
)
# inputs = L*R * [B, C]
inputs = tf.TensorArray(
dtype=tf.float32,
size=self._text1_maxlen * self._text2_maxlen,
name='inputs'
)
inputs = inputs.unstack(input_x)
# states = (L+1)*(R+1) * [B, U]
states = tf.TensorArray(
dtype=tf.float32,
size=(self._text1_maxlen + 1) * (self._text2_maxlen + 1),
name='states',
clear_after_read=False
)
# Initialize states
for i in range(self._text2_maxlen + 1):
states = states.write(i, self._bounder_state_h0)
for i in range(1, self._text1_maxlen + 1):
states = states.write(i * (self._text2_maxlen + 1),
self._bounder_state_h0)
# Calculate h_ij
# h_ij = [B, U]
_, _, _, h_ij = tf.while_loop(
cond=lambda _0, _1, i, _3: tf.less(i, self._recurrent_step),
body=self.calculate_recurrent_unit,
loop_vars=(
inputs,
states,
tf.constant(0, dtype=tf.int32),
self._bounder_state_h0
),
parallel_iterations=1,
swap_memory=True
)
return h_ij
def compute_output_shape(self, input_shape: typing.Any) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors.
"""
output_shape = [input_shape[0], self._units]
return tuple(output_shape)
@classmethod
def _time_distributed_dense(cls, w, x, b):
x = K.dot(x, w)
x = K.bias_add(x, b)
return x
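# Example usage (illustrative sketch; shapes follow the [B, C, L, R] layout
# assumed by build()/call(), and the tensor values are arbitrary):
#   layer = SpatialGRU(units=10, direction='lt')
#   match = tf.random.uniform([5, 5, 3, 2])  # [batch, channel, left, right]
#   output = layer(match)                    # expected shape: [5, 10]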
|
import diamond.collector
import socket
import re
class MemcachedCollector(diamond.collector.Collector):
GAUGES = [
'bytes',
'connection_structures',
'curr_connections',
'curr_items',
'threads',
'reserved_fds',
'limit_maxbytes',
'hash_power_level',
'hash_bytes',
'hash_is_expanding',
'uptime'
]
def get_default_config_help(self):
config_help = super(MemcachedCollector, self).get_default_config_help()
config_help.update({
'publish':
"Which rows of 'status' you would like to publish." +
" Telnet host port' and type stats and hit enter to see the" +
" list of possibilities. Leave unset to publish all.",
'hosts':
"List of hosts, and ports to collect. Set an alias by " +
" prefixing the host:port with alias@",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemcachedCollector, self).get_default_config()
config.update({
'path': 'memcached',
# Which rows of 'status' you would like to publish.
# 'telnet host port' and type stats and hit enter to see the list of
# possibilities.
# Leave unset to publish all
# 'publish': ''
# Connection settings
'hosts': ['localhost:11211']
})
return config
def get_raw_stats(self, host, port):
data = ''
# connect
try:
if port is None:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(host)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, int(port)))
# give up after a reasonable amount of time
sock.settimeout(3)
# request stats
sock.send('stats\n')
# stats can be sent across multiple packets, so make sure we've
# read up until the END marker
while True:
received = sock.recv(4096)
if not received:
break
data += received
if data.endswith('END\r\n'):
break
except socket.error:
self.log.exception('Failed to get stats from %s:%s',
host, port)
sock.close()
return data
def get_stats(self, host, port):
# stuff that's always ignored, aren't 'stats'
ignored = ('libevent', 'pointer_size', 'time', 'version',
'repcached_version', 'replication', 'accepting_conns',
'pid')
pid = None
stats = {}
data = self.get_raw_stats(host, port)
# parse stats
for line in data.splitlines():
pieces = line.split(' ')
            if pieces[0] != 'STAT':
                continue
            # capture the pid before filtering out the ignored keys
            if pieces[1] == 'pid':
                pid = pieces[2]
                continue
            if pieces[1] in ignored:
                continue
if '.' in pieces[2]:
stats[pieces[1]] = float(pieces[2])
else:
stats[pieces[1]] = int(pieces[2])
# get max connection limit
self.log.debug('pid %s', pid)
try:
cmdline = "/proc/%s/cmdline" % pid
f = open(cmdline, 'r')
m = re.search("-c\x00(\d+)", f.readline())
if m is not None:
self.log.debug('limit connections %s', m.group(1))
stats['limit_maxconn'] = m.group(1)
f.close()
except:
self.log.debug("Cannot parse command line options for memcached")
return stats
def collect(self):
hosts = self.config.get('hosts')
# Convert a string config value to be an array
if isinstance(hosts, basestring):
hosts = [hosts]
for host in hosts:
            matches = re.search(r'((.+)@)?([^:]+)(:(\d+))?', host)
alias = matches.group(2)
hostname = matches.group(3)
port = matches.group(5)
if alias is None:
alias = hostname
stats = self.get_stats(hostname, port)
# figure out what we're configured to get, defaulting to everything
desired = self.config.get('publish', stats.keys())
# for everything we want
for stat in desired:
if stat in stats:
# we have it
if stat in self.GAUGES:
self.publish_gauge(alias + "." + stat, stats[stat])
else:
self.publish_counter(alias + "." + stat, stats[stat])
else:
                    # we don't, must be something configured in publish so we
# should log an error about it
self.log.error("No such key '%s' available, issue 'stats' "
"for a full list", stat)
|
from os import environ
from os.path import join, split
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from twisted.internet.defer import inlineCallbacks
from twisted.internet._sslverify import OpenSSLCertificateAuthorities
from twisted.internet.ssl import CertificateOptions
from OpenSSL import crypto
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@inlineCallbacks
def onJoin(self, details):
counter = 0
while True:
print("publish: com.myapp.topic1", counter)
yield self.publish('com.myapp.topic1', counter)
counter += 1
yield sleep(1)
if __name__ == '__main__':
# load the self-signed cert the server is using
examples_dir = join(split(__file__)[0], '..', '..', '..', '..')
cert_fname = join(examples_dir, 'router', '.crossbar', 'server.crt')
cert = crypto.load_certificate(
crypto.FILETYPE_PEM,
open(cert_fname, 'r').read()
)
# tell Twisted to use just the one certificate we loaded to verify connections
options = CertificateOptions(
trustRoot=OpenSSLCertificateAuthorities([cert]),
)
# ...which we pass as "ssl=" to ApplicationRunner (passed to SSL4ClientEndpoint)
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", "wss://127.0.0.1:8083/ws"),
"crossbardemo",
ssl=options, # try removing this, but still use self-signed cert
)
runner.run(Component)
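# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original example): a matching
# subscriber session for the topic published above could look roughly like
# this; the topic name mirrors this file, the class body is an assumption.
#
#     class Listener(ApplicationSession):
#         @inlineCallbacks
#         def onJoin(self, details):
#             def on_event(counter):
#                 print("received: com.myapp.topic1", counter)
#             yield self.subscribe(on_event, 'com.myapp.topic1')
# ---------------------------------------------------------------------------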
|
import os
SECRET_KEY = '_'
STATIC_URL = '/static/'
INSTALLED_APPS = (
'django.contrib.staticfiles',
'react',
'tests.django_test_app',
)
TEST_ROOT = os.path.dirname(__file__)
COMPONENT_ROOT = os.path.join(TEST_ROOT, 'components')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
class Components(object):
HELLO_WORLD_JS = os.path.join(COMPONENT_ROOT, 'HelloWorld.js')
HELLO_WORLD_JSX = os.path.join(COMPONENT_ROOT, 'HelloWorld.jsx')
REACT_ADDONS = os.path.join(COMPONENT_ROOT, 'ReactAddonsComponent.jsx')
DJANGO_REL_PATH = 'django_test_app/StaticFileFinderComponent.jsx'
PERF_TEST = os.path.join(COMPONENT_ROOT, 'PerfTestComponent.jsx')
HELLO_WORLD_JSX_WRAPPER = os.path.join(COMPONENT_ROOT, 'HelloWorldWrapper.jsx')
ERROR_THROWING = os.path.join(COMPONENT_ROOT, 'ErrorThrowingComponent.jsx')
SYNTAX_ERROR = os.path.join(COMPONENT_ROOT, 'SyntaxErrorComponent.jsx')
|
import datetime
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.humidifier import DOMAIN, const, device_trigger
from homeassistant.const import ATTR_SUPPORTED_FEATURES, STATE_OFF, STATE_ON
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_fire_time_changed,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a humidifier device."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
entity_id = f"{DOMAIN}.test_5678"
hass.states.async_set(
entity_id,
STATE_ON,
{
const.ATTR_HUMIDITY: 23,
const.ATTR_MODE: "home",
const.ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_SUPPORTED_FEATURES: 1,
},
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "target_humidity_changed",
"device_id": device_entry.id,
"entity_id": entity_id,
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set(
"humidifier.entity",
STATE_ON,
{
const.ATTR_HUMIDITY: 23,
const.ATTR_MODE: "home",
const.ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_SUPPORTED_FEATURES: 1,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"below": 20,
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed_below"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"above": 30,
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed_above"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"above": 30,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed_above_for"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
# Fake that the humidity is changing
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 7})
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "target_humidity_changed_below"
# Fake that the humidity is changing
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 37})
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "target_humidity_changed_above"
# Wait 6 minutes
async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(minutes=6))
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].data["some"] == "target_humidity_changed_above_for"
# Fake turn off
hass.states.async_set("humidifier.entity", STATE_OFF, {const.ATTR_HUMIDITY: 37})
await hass.async_block_till_done()
assert len(calls) == 4
assert (
calls[3].data["some"] == "turn_off device - humidifier.entity - on - off - None"
)
# Fake turn on
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 37})
await hass.async_block_till_done()
assert len(calls) == 5
assert (
calls[4].data["some"] == "turn_on device - humidifier.entity - off - on - None"
)
async def test_invalid_config(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set(
"humidifier.entity",
STATE_ON,
{
const.ATTR_HUMIDITY: 23,
const.ATTR_MODE: "home",
const.ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_SUPPORTED_FEATURES: 1,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "humidifier.entity",
"type": "target_humidity_changed",
"below": 20,
"invalid": "invalid",
},
"action": {
"service": "test.automation",
"data_template": {"some": "target_humidity_changed"},
},
},
]
},
)
# Fake that the humidity is changing
hass.states.async_set("humidifier.entity", STATE_ON, {const.ATTR_HUMIDITY: 7})
await hass.async_block_till_done()
# Should not trigger for invalid config
assert len(calls) == 0
async def test_get_trigger_capabilities_on(hass):
"""Test we get the expected capabilities from a humidifier trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "humidifier",
"type": "turned_on",
"entity_id": "humidifier.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "for", "optional": True, "type": "positive_time_period_dict"}]
async def test_get_trigger_capabilities_off(hass):
"""Test we get the expected capabilities from a humidifier trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "humidifier",
"type": "turned_off",
"entity_id": "humidifier.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "for", "optional": True, "type": "positive_time_period_dict"}]
async def test_get_trigger_capabilities_humidity(hass):
"""Test we get the expected capabilities from a humidifier trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "humidifier",
"type": "target_humidity_changed",
"entity_id": "humidifier.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"description": {"suffix": "%"},
"name": "above",
"optional": True,
"type": "integer",
},
{
"description": {"suffix": "%"},
"name": "below",
"optional": True,
"type": "integer",
},
{"name": "for", "optional": True, "type": "positive_time_period_dict"},
]
|
import asyncio
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
METOFFICE_COORDINATOR,
METOFFICE_DATA,
METOFFICE_NAME,
)
from .data import MetOfficeData
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Met Office weather component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a Met Office entry."""
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
api_key = entry.data[CONF_API_KEY]
site_name = entry.data[CONF_NAME]
metoffice_data = MetOfficeData(hass, api_key, latitude, longitude)
await metoffice_data.async_update_site()
if metoffice_data.site_name is None:
raise ConfigEntryNotReady()
metoffice_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Coordinator for {site_name}",
update_method=metoffice_data.async_update,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_hass_data = hass.data.setdefault(DOMAIN, {})
metoffice_hass_data[entry.entry_id] = {
METOFFICE_DATA: metoffice_data,
METOFFICE_COORDINATOR: metoffice_coordinator,
METOFFICE_NAME: site_name,
}
# Fetch initial data so we have data when entities subscribe
await metoffice_coordinator.async_refresh()
if metoffice_data.now is None:
raise ConfigEntryNotReady()
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
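# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the sensor and weather
# platforms forwarded above are expected to read the shared objects stored in
# hass.data. Key names mirror this module; the platform body is an
# assumption, shown only to clarify the data flow.
#
#     async def async_setup_entry(hass, entry, async_add_entities):
#         hass_data = hass.data[DOMAIN][entry.entry_id]
#         coordinator = hass_data[METOFFICE_COORDINATOR]
#         data = hass_data[METOFFICE_DATA]
#         ...  # create entities bound to `coordinator` and `data`
# ---------------------------------------------------------------------------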
|
import functools
from typing import Hashable, Set, Union
import numpy as np
import pandas as pd
from xarray.core import duck_array_ops, formatting, utils
from xarray.core.dataarray import DataArray
from xarray.core.dataset import Dataset
from xarray.core.indexes import default_indexes
from xarray.core.variable import IndexVariable, Variable
__all__ = (
"assert_allclose",
"assert_chunks_equal",
"assert_duckarray_equal",
"assert_duckarray_allclose",
"assert_equal",
"assert_identical",
)
def _decode_string_data(data):
if data.dtype.kind == "S":
return np.core.defchararray.decode(data, "utf-8", "replace")
return data
def _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08, decode_bytes=True):
if any(arr.dtype.kind == "S" for arr in [arr1, arr2]) and decode_bytes:
arr1 = _decode_string_data(arr1)
arr2 = _decode_string_data(arr2)
exact_dtypes = ["M", "m", "O", "S", "U"]
if any(arr.dtype.kind in exact_dtypes for arr in [arr1, arr2]):
return duck_array_ops.array_equiv(arr1, arr2)
else:
return duck_array_ops.allclose_or_equiv(arr1, arr2, rtol=rtol, atol=atol)
def assert_equal(a, b):
"""Like :py:func:`numpy.testing.assert_array_equal`, but for xarray
objects.
Raises an AssertionError if two objects are not equal. This will match
data values, dimensions and coordinates, but not names or attributes
(except for Dataset objects for which the variable names must match).
Arrays with NaN in the same location are considered equal.
Parameters
----------
a : xarray.Dataset, xarray.DataArray or xarray.Variable
The first object to compare.
b : xarray.Dataset, xarray.DataArray or xarray.Variable
The second object to compare.
See also
--------
assert_identical, assert_allclose, Dataset.equals, DataArray.equals,
numpy.testing.assert_array_equal
"""
__tracebackhide__ = True
assert type(a) == type(b)
if isinstance(a, (Variable, DataArray)):
assert a.equals(b), formatting.diff_array_repr(a, b, "equals")
elif isinstance(a, Dataset):
assert a.equals(b), formatting.diff_dataset_repr(a, b, "equals")
else:
raise TypeError("{} not supported by assertion comparison".format(type(a)))
def assert_identical(a, b):
"""Like :py:func:`xarray.testing.assert_equal`, but also matches the
objects' names and attributes.
Raises an AssertionError if two objects are not identical.
Parameters
----------
a : xarray.Dataset, xarray.DataArray or xarray.Variable
The first object to compare.
b : xarray.Dataset, xarray.DataArray or xarray.Variable
The second object to compare.
See also
--------
assert_equal, assert_allclose, Dataset.equals, DataArray.equals
"""
__tracebackhide__ = True
assert type(a) == type(b)
if isinstance(a, Variable):
assert a.identical(b), formatting.diff_array_repr(a, b, "identical")
elif isinstance(a, DataArray):
assert a.name == b.name
assert a.identical(b), formatting.diff_array_repr(a, b, "identical")
elif isinstance(a, (Dataset, Variable)):
assert a.identical(b), formatting.diff_dataset_repr(a, b, "identical")
else:
raise TypeError("{} not supported by assertion comparison".format(type(a)))
def assert_allclose(a, b, rtol=1e-05, atol=1e-08, decode_bytes=True):
"""Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects.
Raises an AssertionError if two objects are not equal up to desired
tolerance.
Parameters
----------
a : xarray.Dataset, xarray.DataArray or xarray.Variable
The first object to compare.
b : xarray.Dataset, xarray.DataArray or xarray.Variable
The second object to compare.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
decode_bytes : bool, optional
Whether byte dtypes should be decoded to strings as UTF-8 or not.
This is useful for testing serialization methods on Python 3 that
return saved strings as bytes.
See also
--------
assert_identical, assert_equal, numpy.testing.assert_allclose
"""
__tracebackhide__ = True
assert type(a) == type(b)
equiv = functools.partial(
_data_allclose_or_equiv, rtol=rtol, atol=atol, decode_bytes=decode_bytes
)
equiv.__name__ = "allclose"
def compat_variable(a, b):
a = getattr(a, "variable", a)
b = getattr(b, "variable", b)
return a.dims == b.dims and (a._data is b._data or equiv(a.data, b.data))
if isinstance(a, Variable):
allclose = compat_variable(a, b)
assert allclose, formatting.diff_array_repr(a, b, compat=equiv)
elif isinstance(a, DataArray):
allclose = utils.dict_equiv(
a.coords, b.coords, compat=compat_variable
) and compat_variable(a.variable, b.variable)
assert allclose, formatting.diff_array_repr(a, b, compat=equiv)
elif isinstance(a, Dataset):
allclose = a._coord_names == b._coord_names and utils.dict_equiv(
a.variables, b.variables, compat=compat_variable
)
assert allclose, formatting.diff_dataset_repr(a, b, compat=equiv)
else:
raise TypeError("{} not supported by assertion comparison".format(type(a)))
def _format_message(x, y, err_msg, verbose):
diff = x - y
abs_diff = max(abs(diff))
rel_diff = "not implemented"
n_diff = int(np.count_nonzero(diff))
n_total = diff.size
fraction = f"{n_diff} / {n_total}"
percentage = float(n_diff / n_total * 100)
parts = [
"Arrays are not equal",
err_msg,
f"Mismatched elements: {fraction} ({percentage:.0f}%)",
f"Max absolute difference: {abs_diff}",
f"Max relative difference: {rel_diff}",
]
if verbose:
parts += [
f" x: {x!r}",
f" y: {y!r}",
]
return "\n".join(parts)
def assert_duckarray_allclose(
actual, desired, rtol=1e-07, atol=0, err_msg="", verbose=True
):
""" Like `np.testing.assert_allclose`, but for duckarrays. """
__tracebackhide__ = True
allclose = duck_array_ops.allclose_or_equiv(actual, desired, rtol=rtol, atol=atol)
assert allclose, _format_message(actual, desired, err_msg=err_msg, verbose=verbose)
def assert_duckarray_equal(x, y, err_msg="", verbose=True):
""" Like `np.testing.assert_array_equal`, but for duckarrays """
__tracebackhide__ = True
if not utils.is_duck_array(x) and not utils.is_scalar(x):
x = np.asarray(x)
if not utils.is_duck_array(y) and not utils.is_scalar(y):
y = np.asarray(y)
if (utils.is_duck_array(x) and utils.is_scalar(y)) or (
utils.is_scalar(x) and utils.is_duck_array(y)
):
equiv = (x == y).all()
else:
equiv = duck_array_ops.array_equiv(x, y)
assert equiv, _format_message(x, y, err_msg=err_msg, verbose=verbose)
def assert_chunks_equal(a, b):
"""
Assert that chunksizes along chunked dimensions are equal.
Parameters
----------
a : xarray.Dataset or xarray.DataArray
The first object to compare.
b : xarray.Dataset or xarray.DataArray
The second object to compare.
"""
if isinstance(a, DataArray) != isinstance(b, DataArray):
raise TypeError("a and b have mismatched types")
left = a.unify_chunks()
right = b.unify_chunks()
assert left.chunks == right.chunks
def _assert_indexes_invariants_checks(indexes, possible_coord_variables, dims):
assert isinstance(indexes, dict), indexes
assert all(isinstance(v, pd.Index) for v in indexes.values()), {
k: type(v) for k, v in indexes.items()
}
index_vars = {
k for k, v in possible_coord_variables.items() if isinstance(v, IndexVariable)
}
assert indexes.keys() <= index_vars, (set(indexes), index_vars)
# Note: when we support non-default indexes, these checks should be opt-in
# only!
defaults = default_indexes(possible_coord_variables, dims)
assert indexes.keys() == defaults.keys(), (set(indexes), set(defaults))
assert all(v.equals(defaults[k]) for k, v in indexes.items()), (indexes, defaults)
def _assert_variable_invariants(var: Variable, name: Hashable = None):
if name is None:
name_or_empty: tuple = ()
else:
name_or_empty = (name,)
assert isinstance(var._dims, tuple), name_or_empty + (var._dims,)
assert len(var._dims) == len(var._data.shape), name_or_empty + (
var._dims,
var._data.shape,
)
assert isinstance(var._encoding, (type(None), dict)), name_or_empty + (
var._encoding,
)
assert isinstance(var._attrs, (type(None), dict)), name_or_empty + (var._attrs,)
def _assert_dataarray_invariants(da: DataArray):
assert isinstance(da._variable, Variable), da._variable
_assert_variable_invariants(da._variable)
assert isinstance(da._coords, dict), da._coords
assert all(isinstance(v, Variable) for v in da._coords.values()), da._coords
assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), (
da.dims,
{k: v.dims for k, v in da._coords.items()},
)
assert all(
isinstance(v, IndexVariable) for (k, v) in da._coords.items() if v.dims == (k,)
), {k: type(v) for k, v in da._coords.items()}
for k, v in da._coords.items():
_assert_variable_invariants(v, k)
if da._indexes is not None:
_assert_indexes_invariants_checks(da._indexes, da._coords, da.dims)
def _assert_dataset_invariants(ds: Dataset):
assert isinstance(ds._variables, dict), type(ds._variables)
assert all(isinstance(v, Variable) for v in ds._variables.values()), ds._variables
for k, v in ds._variables.items():
_assert_variable_invariants(v, k)
assert isinstance(ds._coord_names, set), ds._coord_names
assert ds._coord_names <= ds._variables.keys(), (
ds._coord_names,
set(ds._variables),
)
assert type(ds._dims) is dict, ds._dims
assert all(isinstance(v, int) for v in ds._dims.values()), ds._dims
var_dims: Set[Hashable] = set()
for v in ds._variables.values():
var_dims.update(v.dims)
assert ds._dims.keys() == var_dims, (set(ds._dims), var_dims)
assert all(
ds._dims[k] == v.sizes[k] for v in ds._variables.values() for k in v.sizes
), (ds._dims, {k: v.sizes for k, v in ds._variables.items()})
assert all(
isinstance(v, IndexVariable)
for (k, v) in ds._variables.items()
if v.dims == (k,)
), {k: type(v) for k, v in ds._variables.items() if v.dims == (k,)}
assert all(v.dims == (k,) for (k, v) in ds._variables.items() if k in ds._dims), {
k: v.dims for k, v in ds._variables.items() if k in ds._dims
}
if ds._indexes is not None:
_assert_indexes_invariants_checks(ds._indexes, ds._variables, ds._dims)
assert isinstance(ds._encoding, (type(None), dict))
assert isinstance(ds._attrs, (type(None), dict))
def _assert_internal_invariants(xarray_obj: Union[DataArray, Dataset, Variable]):
"""Validate that an xarray object satisfies its own internal invariants.
This exists for the benefit of xarray's own test suite, but may be useful
in external projects if they (ill-advisedly) create objects using xarray's
private APIs.
"""
if isinstance(xarray_obj, Variable):
_assert_variable_invariants(xarray_obj)
elif isinstance(xarray_obj, DataArray):
_assert_dataarray_invariants(xarray_obj)
elif isinstance(xarray_obj, Dataset):
_assert_dataset_invariants(xarray_obj)
else:
raise TypeError(
"{} is not a supported type for xarray invariant checks".format(
type(xarray_obj)
)
)
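# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the public
# helpers above are typically called from a test suite like this.
#
#     import numpy as np
#     import xarray as xr
#     from xarray.testing import assert_allclose, assert_equal, assert_identical
#
#     a = xr.DataArray(np.array([1.0, 2.0, 3.0]), dims="x", name="a")
#     assert_equal(a, a.rename("b"))           # names/attrs are ignored
#     assert_identical(a, a)                   # names/attrs must also match
#     assert_allclose(a, a + 1e-9, atol=1e-6)  # equal within tolerance
# ---------------------------------------------------------------------------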
|